Lines Matching +full:wakeup-rtt-timer

1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
162 * - Matt Mathis's Rate Halving which slowly drops
165 * - Yuchung Cheng's RACK TCP (for which it is named) that
168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
186 * TCP output is also overridden with a new version since it
191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs without new data */
194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
195 * - 60 seconds */
199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto ->…
217 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
253 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/c…
258 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
265 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
295 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
296 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
311 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
313 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top …
314 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bott…
333 * the way fill-cw interacts with timely and caps how much
334 * timely can boost the fill-cw b/w.
340 * probeRTT as well as fixed-rate-pacing.
346 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
347 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
434 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
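The macro above is the stack's RTO recipe: srtt plus four times rttvar, floored at rack_rto_min (this stack keeps both in microseconds). A minimal userspace sketch with hypothetical inputs; the 30 ms floor is illustrative, not a quoted default:

#include <stdint.h>
#include <stdio.h>

/* Illustrative floor only; the real value comes from the sysctl. */
static uint32_t rack_rto_min = 30000;	/* usec */

static uint32_t
rexmtval(uint32_t srtt, uint32_t rttvar)
{
	uint32_t val = srtt + (rttvar << 2);	/* srtt + 4 * rttvar */

	return (val > rack_rto_min ? val : rack_rto_min);
}

int
main(void)
{
	/* srtt = 20 ms, rttvar = 5 ms -> 20000 + 20000 = 40000 usec */
	printf("%u\n", rexmtval(20000, 5000));
	return (0);
}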
588 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
605 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
606 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
607 if (rack->lt_bw_up) { in rack_get_lt_bw()
610 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
611 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
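The fragment above accumulates a long-term bandwidth sample: lt_bw_bytes over lt_bw_time, folding in the still-open interval when lt_bw_up is set. A hedged sketch of the resulting bytes-per-second math (the 1e6 scale factor is an assumption; the kernel uses its own usec conversion helpers):

#include <stdint.h>

/*
 * Long-term b/w estimate: accumulated bytes over accumulated
 * microseconds, converted to bytes/second.  Callers fold in the
 * open interval (snd_una - lt_seq bytes, now - lt_timemark usecs)
 * first, as the fragment above does.
 */
static uint64_t
lt_bw_estimate(uint64_t bytes, uint64_t tim_usec)
{
	if (tim_usec == 0)
		return (0);	/* no elapsed time, no estimate */
	return ((bytes * 1000000ULL) / tim_usec);
}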
628 tp = rack->rc_tp; in rack_swap_beta_values()
629 if (tp->t_cc == NULL) { in rack_swap_beta_values()
633 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
634 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_swap_beta_values()
635 /* Not new-reno; we can't play games with beta! */ in rack_swap_beta_values()
640 if (CC_ALGO(tp)->ctl_output == NULL) { in rack_swap_beta_values()
641 /* Huh, not using new-reno so no swaps? */ in rack_swap_beta_values()
649 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
656 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
666 opt.val = rack->r_ctl.rc_saved_beta.beta; in rack_swap_beta_values()
667 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
673 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_swap_beta_values()
674 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
680 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); in rack_swap_beta_values()
682 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
687 ptr = ((struct newreno *)tp->t_ccv.cc_data); in rack_swap_beta_values()
690 log.u_bbr.flex1 = ptr->beta; in rack_swap_beta_values()
691 log.u_bbr.flex2 = ptr->beta_ecn; in rack_swap_beta_values()
692 log.u_bbr.flex3 = ptr->newreno_flags; in rack_swap_beta_values()
693 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; in rack_swap_beta_values()
694 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_swap_beta_values()
696 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
698 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
700 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
701 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
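rack_swap_beta_values() pushes an alternate beta/beta_ecn pair into newreno through the CC module's ctl_output hook and keeps the displaced pair in rc_saved_beta, so the same routine toggles between the two sets. A standalone sketch of just the save/swap/restore bookkeeping (the struct here is a two-field stand-in, not the kernel's struct newreno, which is set via sockopt-style calls rather than direct assignment):

#include <stdint.h>

struct newreno { uint32_t beta; uint32_t beta_ecn; };

static void
swap_beta(struct newreno *active, struct newreno *saved)
{
	struct newreno old = *active;	/* remember what was in place */

	active->beta = saved->beta;	/* install the stashed values */
	active->beta_ecn = saved->beta_ecn;
	*saved = old;			/* so a second swap restores */
}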
711 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
717 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
724 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
730 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
737 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
739 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
741 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
743 rack->rc_always_pace = 0; in rack_remove_pacing()
744 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
745 rack->dgp_on = 0; in rack_remove_pacing()
746 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
747 rack->use_fixed_rate = 0; in rack_remove_pacing()
754 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
760 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
762 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
764 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
767 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
768 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
770 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
771 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
772 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
773 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
775 log.u_bbr.applimited = rsm->r_start; in rack_log_gpset()
776 log.u_bbr.delivered = rsm->r_end; in rack_log_gpset()
777 log.u_bbr.epoch = rsm->r_flags; in rack_log_gpset()
780 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
781 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
782 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
795 if (error || req->newptr == NULL) in sysctl_rack_clear()
907 /* Probe rtt related controls */ in rack_init_sysctls()
918 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); in rack_init_sysctls()
923 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); in rack_init_sysctls()
943 "How many useconds between the lowest rtt falling must past before we enter probertt"); in rack_init_sysctls()
968 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); in rack_init_sysctls()
973 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); in rack_init_sysctls()
998 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); in rack_init_sysctls()
1003 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); in rack_init_sysctls()
1008 "Do we clear I/S counts on exiting probe-rtt"); in rack_init_sysctls()
1018 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); in rack_init_sysctls()
1197 "If we fall below this rate, dis-engage hw pacing?"); in rack_init_sysctls()
1213 "Rack Timely RTT Controls"); in rack_init_sysctls()
1229 "Rack timely multiplier of lowest rtt for rtt_max"); in rack_init_sysctls()
1234 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); in rack_init_sysctls()
1239 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); in rack_init_sysctls()
1305 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); in rack_init_sysctls()
1353 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); in rack_init_sysctls()
1373 "Should we always send the oldest TLP and RACK-TLP"); in rack_init_sysctls()
1388 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); in rack_init_sysctls()
1400 /* Timer related controls */ in rack_init_sysctls()
1406 "Timer related controls"); in rack_init_sysctls()
1411 "When doing recovery -> rto -> recovery do we reset SSthresh?"); in rack_init_sysctls()
1446 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); in rack_init_sysctls()
1451 "Maximum RTO in microseconds -- should be at least as large as min_rto"); in rack_init_sysctls()
1473 "Does a cwnd just-return end the measurement window (app limited)"); in rack_init_sysctls()
1478 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); in rack_init_sysctls()
1535 "Should RACK use mbuf queuing for non-paced connections"); in rack_init_sysctls()
1579 … "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); in rack_init_sysctls()
1594 …feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); in rack_init_sysctls()
1663 "Highest move to non-move ratio seen"); in rack_init_sysctls()
1804 "Total number of times a sends returned enobuf for non-hdwr paced connections"); in rack_init_sysctls()
2001 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
2008 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
2009 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
2010 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
2011 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
2013 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
2035 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2043 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2045 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
2067 /* Record the last obtained us rtt in inflight */ in rack_log_hybrid_bw()
2070 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
2072 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
2073 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
2075 /* Use the last known rtt i.e. the rack-rtt */ in rack_log_hybrid_bw()
2076 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
2081 log.u_bbr.cur_del_rate = cur->deadline; in rack_log_hybrid_bw()
2084 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2085 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2086 log.u_bbr.flex6 = cur->start_seq; in rack_log_hybrid_bw()
2087 log.u_bbr.pkts_out = cur->end_seq; in rack_log_hybrid_bw()
2090 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2091 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2093 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_bw()
2094 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2097 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); in rack_log_hybrid_bw()
2098 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2100 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_bw()
2101 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2103 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
2107 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
2108 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
2109 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; in rack_log_hybrid_bw()
2121 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
2123 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
2125 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
2127 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
2129 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
2130 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
2131 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
2143 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
2152 log.u_bbr.delRate = cur->sent_at_fs; in rack_log_hybrid_sends()
2154 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { in rack_log_hybrid_sends()
2160 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2161 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2167 log.u_bbr.cur_del_rate = cur->sent_at_ls; in rack_log_hybrid_sends()
2168 log.u_bbr.rttProp = cur->rxt_at_ls; in rack_log_hybrid_sends()
2170 log.u_bbr.bw_inuse = cur->rxt_at_fs; in rack_log_hybrid_sends()
2172 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2175 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_sends()
2176 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2178 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_sends()
2179 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2182 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_sends()
2183 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2185 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); in rack_log_hybrid_sends()
2186 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2188 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid_sends()
2189 log.u_bbr.lost = cur->playout_ms; in rack_log_hybrid_sends()
2190 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid_sends()
2193 * where a false retransmit occurred so first_send <-> lastsend may in rack_log_hybrid_sends()
2196 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2197 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2205 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2207 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2209 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2211 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2214 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2215 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2216 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2229 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2230 if (rack->r_is_v6) { in rack_compensate_for_linerate()
2241 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
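rack_compensate_for_linerate() inflates a payload-based bandwidth by the per-segment wire overhead (TCP header plus IP or IPv6 header plus Ethernet framing) so the pacer tracks what actually leaves the NIC. A hedged sketch with textbook header sizes; the kernel derives them from the real structs:

#include <stdint.h>

/*
 * Scale a payload b/w by the ratio of on-the-wire bytes to payload
 * bytes per segment.  20 (IPv4) / 40 (IPv6) and 14 (Ethernet) are
 * the usual header sizes; treat them as illustrative constants.
 */
static uint64_t
compensate_linerate(uint64_t bw, uint64_t segsiz, int is_v6)
{
	uint64_t wire = segsiz + 20 /* tcphdr */ +
	    (is_v6 ? 40 : 20) /* ip(6) hdr */ + 14 /* ether hdr */;

	return ((bw * wire) / segsiz);
}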
2256 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2259 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2260 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2268 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2271 if (timenow >= ent->deadline) { in rack_rate_cap_bw()
2273 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2275 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2279 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2282 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2284 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2293 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_rate_cap_bw()
2294 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2295 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2298 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2300 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2309 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2310 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2313 if (lengone < (ent->end - ent->start)) in rack_rate_cap_bw()
2314 lenleft = (ent->end - ent->start) - lengone; in rack_rate_cap_bw()
2317 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2319 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2325 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2327 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2337 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2338 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2340 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2341 /* Lets set in a smaller mss possibly here to match our rate-cap */ in rack_rate_cap_bw()
2344 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2345 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2346 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2347 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2349 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2352 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2353 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); in rack_rate_cap_bw()
2361 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2363 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2364 rack->rc_catch_up && in rack_rate_cap_bw()
2365 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2366 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2368 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2369 /* Lets set in a smaller mss possibly here to match our rate-cap */ in rack_rate_cap_bw()
2372 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2373 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2374 …rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2375 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2379 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2380 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
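In catch-up mode the cap is simply the bytes still owed on the request divided by the time left before its deadline. A hedged sketch of that division; the kernel's guards for an expired deadline or a finished request are the branches shown above:

#include <stdint.h>

/*
 * Catch-up rate: bytes remaining on the request over microseconds
 * until its deadline, scaled to bytes/second.  A zero return models
 * the kernel clearing bw_rate_cap when the deadline has passed or
 * nothing is left to send.
 */
static uint64_t
catchup_bw(uint64_t lenleft, uint64_t timeleft_usec)
{
	if (lenleft == 0 || timeleft_usec == 0)
		return (0);
	return ((lenleft * 1000000ULL) / timeleft_usec);
}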
2391 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2405 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2411 * No goodput bw but a long-term b/w does exist in rack_get_gp_est()
2417 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2418 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2421 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2429 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2430 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2437 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2439 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2442 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
2444 if (rack->dis_lt_bw) { in rack_get_gp_est()
2445 /* We are not using lt-bw */ in rack_get_gp_est()
2452 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2454 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
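Until RACK_REQ_AVG samples exist, gp_bw is a running sum, so the estimate divides by the measurement count; after that gp_bw is used as-is. A sketch (RACK_REQ_AVG's value is not visible in the fragment; 4 is a stand-in):

#include <stdint.h>

#define REQ_AVG 4	/* stand-in for RACK_REQ_AVG */

static uint64_t
gp_estimate(uint64_t gp_bw, uint32_t num_measurements)
{
	if (num_measurements >= REQ_AVG)
		return (gp_bw);	/* already smoothed */
	/* still averaging: gp_bw holds a sum of samples */
	return (gp_bw / (num_measurements ? num_measurements : 1));
}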
2486 if (rack->use_fixed_rate) { in rack_get_bw()
2497 if (rack->use_fixed_rate) { in rack_get_output_gain()
2499 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2500 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2501 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2502 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2505 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2506 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2509 } else if (rack->rack_no_prr && in rack_get_output_gain()
2510 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2515 * Here we may have a non-retransmit but we in rack_get_output_gain()
2519 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2524 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2525 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2527 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
2535 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. in rack_log_dsack_event()
2538 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh in rack_log_dsack_event()
2540 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. in rack_log_dsack_event()
2542 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2547 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2549 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2551 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2552 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2553 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2557 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2560 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2561 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2562 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2563 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2564 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2575 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2584 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2585 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2586 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2587 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2588 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2601 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2602 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2604 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2605 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2606 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2607 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2608 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2611 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2612 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2613 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2614 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2615 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2616 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2637 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2639 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2641 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2646 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2656 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2657 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2658 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2659 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2660 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2668 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
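When hardware rate caps are enabled, the flow above queries the interface's rate table and refuses to request more than it can honor, dropping r_rack_hw_rate_caps when no table exists. A sketch of the clamp itself; the query helpers (tcp_hw_highest_rate() and friends) are the kernel's, this only models the comparison:

#include <stdint.h>

static uint64_t
cap_to_hw(uint64_t bw, uint64_t hw_highest_rate)
{
	if (hw_highest_rate != 0 && bw > hw_highest_rate)
		return (hw_highest_rate);	/* never exceed the NIC */
	return (bw);
}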
2686 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2693 * 1 - We are retransmitting and this tells the reason. in rack_log_retran_reason()
2694 * 2 - We are clearing a dup-ack count. in rack_log_retran_reason()
2695 * 3 - We are incrementing a dup-ack count. in rack_log_retran_reason()
2705 log.u_bbr.flex3 = rsm->r_flags; in rack_log_retran_reason()
2706 log.u_bbr.flex4 = rsm->r_dupack; in rack_log_retran_reason()
2707 log.u_bbr.flex5 = rsm->r_start; in rack_log_retran_reason()
2708 log.u_bbr.flex6 = rsm->r_end; in rack_log_retran_reason()
2710 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2712 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2713 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2714 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2715 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2716 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2717 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2718 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2719 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2720 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2729 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2734 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2736 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2738 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2739 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2740 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2742 if (rack->rack_no_prr) in rack_log_to_start()
2745 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2746 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2748 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2749 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2750 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2751 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2752 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2753 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2754 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2756 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2757 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2759 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2760 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2761 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2762 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2763 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2772 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2777 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2779 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2780 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2784 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; in rack_log_to_event()
2785 if (rack->rack_no_prr) in rack_log_to_event()
2788 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2790 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2791 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2792 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2793 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2794 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2796 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2797 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2798 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2799 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2812 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2818 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2824 log.u_bbr.flex1 = prev->r_start; in rack_log_map_chg()
2825 log.u_bbr.flex2 = prev->r_end; in rack_log_map_chg()
2829 log.u_bbr.flex3 = rsm->r_start; in rack_log_map_chg()
2830 log.u_bbr.flex4 = rsm->r_end; in rack_log_map_chg()
2834 log.u_bbr.flex5 = next->r_start; in rack_log_map_chg()
2835 log.u_bbr.flex6 = next->r_end; in rack_log_map_chg()
2841 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2842 if (rack->rack_no_prr) in rack_log_map_chg()
2845 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2846 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2848 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2849 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2850 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2851 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2865 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2868 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2869 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2870 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2871 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2873 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2874 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2876 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2877 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2878 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2880 log.u_bbr.pkt_epoch = rsm->r_start; in rack_log_rtt_upd()
2881 log.u_bbr.lost = rsm->r_end; in rack_log_rtt_upd()
2882 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; in rack_log_rtt_upd()
2884 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; in rack_log_rtt_upd()
2887 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2893 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2895 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2897 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2899 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2901 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2903 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2905 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2907 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2908 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2909 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2910 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2911 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2912 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2913 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2916 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); in rack_log_rtt_upd()
2918 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2919 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2928 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) in rack_log_rtt_sample() argument
2931 * Log the rtt sample we are in rack_log_rtt_sample()
2935 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2941 log.u_bbr.flex1 = rtt; in rack_log_rtt_sample()
2942 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2945 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2946 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2947 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2948 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2952 * lower 32 bits as the actual RTT using the arrival in rack_log_rtt_sample()
2955 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2957 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2961 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2964 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2965 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2967 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2968 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2969 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2970 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2977 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time… in rack_log_rtt_sample_calc() argument
2979 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2985 log.u_bbr.flex1 = rtt; in rack_log_rtt_sample_calc()
2991 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2993 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2994 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2995 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2996 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
3006 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
3018 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
3020 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
3021 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
3022 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
3023 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
3033 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
3038 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
3041 log.u_bbr.flex3 = tp->t_maxunacktime; in rack_log_progress_event()
3042 log.u_bbr.flex4 = tp->t_acktime; in rack_log_progress_event()
3045 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
3046 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
3047 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
3048 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
3049 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
3051 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
3053 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
3054 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
3063 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
3067 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
3069 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
3072 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
3073 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
3075 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
3076 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
3078 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
3079 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
3080 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
3081 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
3082 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
3083 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
3084 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
3093 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
3101 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
3102 if (rack->rack_no_prr) in rack_log_doseg_done()
3105 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
3107 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
3108 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
3110 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
3112 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
3113 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
3114 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
3116 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
3117 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
3119 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
3120 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
3121 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
3122 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
3123 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
3125 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
3126 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
3127 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
3128 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
3129 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
3130 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
3131 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
3132 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
3141 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
3146 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
3147 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
3150 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
3154 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
3155 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3156 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3157 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3158 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3159 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, in rack_log_type_pacing_sizes()
3160 &tptosocket(tp)->so_snd, in rack_log_type_pacing_sizes()
3169 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3174 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3176 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3178 if (rack->rack_no_prr) in rack_log_type_just_return()
3181 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3183 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3186 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3187 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3188 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3189 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3190 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3191 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3193 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3194 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3195 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3196 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3206 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3210 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3212 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3215 if (rack->rack_no_prr) in rack_log_to_cancel()
3218 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3219 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3222 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3224 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3225 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3226 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3227 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3228 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3230 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3231 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3232 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3233 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3246 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3264 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3265 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3266 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3275 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3282 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3283 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3285 if (rack->rack_no_prr) in rack_log_to_processing()
3288 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3289 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3290 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3291 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3293 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3294 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3295 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3296 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3305 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3310 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3311 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3312 if (rack->rack_no_prr) in rack_log_to_prr()
3315 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3316 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3317 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3318 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3323 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3324 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3326 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3327 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3328 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3329 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3407 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3408 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3409 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3411 rack->rc_free_cnt--; in rack_alloc()
3421 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3429 if (rack->rc_free_cnt) { in rack_alloc()
3431 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3432 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3433 rack->rc_free_cnt--; in rack_alloc()
3443 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3445 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3446 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3462 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3463 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3465 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3466 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3476 rsm->r_limit_type = limit_type; in rack_alloc_limit()
3477 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3491 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3492 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3493 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3494 rack->rc_free_cnt--; in rack_free_trim()
3495 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
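rack_alloc() and rack_free_trim() above implement a small per-connection recycling cache for sendmap entries: allocation pops the free list first, and trimming releases anything beyond the rack_free_cache watermark. A userspace sketch of the pattern (struct rsm is a stand-in for rack_sendmap; the kernel uses uma zones, not malloc):

#include <stdlib.h>
#include <sys/queue.h>

struct rsm { TAILQ_ENTRY(rsm) r_tnext; };
TAILQ_HEAD(rsm_head, rsm);

struct rsm *
cache_alloc(struct rsm_head *freeq, int *free_cnt)
{
	struct rsm *rsm = TAILQ_FIRST(freeq);

	if (rsm != NULL) {		/* reuse a cached entry */
		TAILQ_REMOVE(freeq, rsm, r_tnext);
		(*free_cnt)--;
		return (rsm);
	}
	return (malloc(sizeof(*rsm)));	/* kernel: uma_zalloc() */
}

void
cache_trim(struct rsm_head *freeq, int *free_cnt, int watermark)
{
	struct rsm *rsm;

	while (*free_cnt > watermark) {	/* release the overflow */
		rsm = TAILQ_LAST(freeq, rsm_head);
		TAILQ_REMOVE(freeq, rsm, r_tnext);
		(*free_cnt)--;
		free(rsm);		/* kernel: uma_zfree() */
	}
}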
3503 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_free()
3504 if (rack->r_ctl.rc_app_limited_cnt > 0) { in rack_free()
3505 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3508 if (rsm->r_limit_type) { in rack_free()
3510 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3512 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3513 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); in rack_free()
3514 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3515 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3516 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3518 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3520 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3521 rack->r_ctl.rc_resend = NULL; in rack_free()
3522 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3523 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3524 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3525 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3526 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3527 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3530 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3533 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3534 rack->rc_free_cnt++; in rack_free()
3543 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3545 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3558 * 2) We have a minimum number of rtt's usually 1 SRTT in rack_get_measure_window()
3578 * the SRTT has our rtt and then multiply it by the in rack_get_measure_window()
3582 srtt = (uint64_t)tp->t_srtt; in rack_get_measure_window()
3603 * span huge numbers of rtt's between measurements. in rack_get_measure_window()
3624 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_enough_for_measurement()
3628 if ((tp->snd_max == tp->snd_una) || in rack_enough_for_measurement()
3629 (th_ack == tp->snd_max)){ in rack_enough_for_measurement()
3643 if (SEQ_GEQ(th_ack, tp->gput_ack)) { in rack_enough_for_measurement()
3653 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3654 if (SEQ_LT(th_ack, tp->gput_ack) && in rack_enough_for_measurement()
3655 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3659 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3660 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3669 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3670 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3671 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
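The final gate in rack_enough_for_measurement() is time-based: the sample must span rack_min_srtts goodput-SRTT periods and must not overlap recovery. A sketch of just that test (argument names are mine):

#include <stdint.h>

static int
enough_time(uint32_t now_us, uint32_t gput_ts, uint32_t gp_srtt,
    uint32_t min_srtts, int in_recovery)
{
	uint32_t tim = now_us - gput_ts;	/* elapsed in window */

	return (tim >= (gp_srtt * min_srtts) && !in_recovery);
}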
3690 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3696 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3698 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3700 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3702 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3703 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3704 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3705 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3706 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3707 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3714 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3716 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3717 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3718 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3719 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3721 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3723 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3725 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3726 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3727 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3728 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3729 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3816 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3818 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3820 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3821 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3823 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3824 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3831 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3832 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3834 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3835 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3846 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3853 * to a new-reno flow. in rack_increase_bw_mul()
3858 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3859 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3866 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3871 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3873 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3874 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3876 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3878 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3882 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3883 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3884 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3885 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3886 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3888 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3889 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3891 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3893 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3897 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3898 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3899 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3900 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3901 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3903 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3905 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3907 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3910 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3911 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3912 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3913 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3914 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3918 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3920 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3921 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3923 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3925 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3926 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
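Each branch above follows the same shape: add the step to the multiplier, saturate, and respect the per-mode upper bound unless the algorithm has dragged the bottom. A sketch of one such bump; the 16-bit saturation is an assumption inferred from the uint16_t casts:

#include <stdint.h>

static uint16_t
bump_gp_per(uint16_t per, uint16_t plus, uint16_t upper_bound,
    int dragged_bottom)
{
	uint32_t calc = (uint32_t)per + plus;

	if (calc > 0xffff)
		calc = 0xffff;	/* saturate into 16 bits */
	if (upper_bound && !dragged_bottom && calc > upper_bound)
		calc = upper_bound;	/* ceiling unless bottom dragged */
	return ((uint16_t)calc);
}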
3935 /*- in rack_get_decrease()
3937 * new_per = curper * (1 - B * norm_grad) in rack_get_decrease()
3940 * rtt_diff = input var current rtt-diff in rack_get_decrease()
3953 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_get_decrease()
3956 * reduce_by = (1000000 - inverse); in rack_get_decrease()
3962 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_get_decrease()
3965 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3970 perf = curper - 1; in rack_get_decrease()
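Stitching the comment fragments together, the gradient decrease is new_per = curper * (1 - B * norm_grad) with norm_grad = rtt_diff / minrtt, computed in fixed point at a 1e6 scale. A hedged sketch of that arithmetic, including the curper - 1 floor the code above falls back to:

#include <stdint.h>

static uint32_t
get_decrease(uint32_t curper, uint64_t decrease_per /* B, percent */,
    uint64_t rtt_diff /* magnitude */, uint64_t minrtt)
{
	uint64_t norm_grad, reduce_by, perf;

	if (minrtt == 0)
		return (curper);
	norm_grad = (rtt_diff * 1000000ULL) / minrtt;	/* 1e6 scale */
	/* B% * norm_grad, still at 1e6 scale */
	reduce_by = (decrease_per * 10000ULL * norm_grad) / 1000000ULL;
	if (reduce_by >= 1000000ULL)	/* would wipe out the rate */
		return (curper > 1 ? curper - 1 : 1);
	perf = ((uint64_t)curper * (1000000ULL - reduce_by)) / 1000000ULL;
	return ((uint32_t)perf);
}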
3976 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) in rack_decrease_highrtt() argument
3980 * result = curper * (1 - B * (1 - highrttthresh / rtt)) in rack_decrease_highrtt()
3989 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3991 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_decrease_highrtt()
3992 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - in rack_decrease_highrtt()
3994 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); in rack_decrease_highrtt()
3995 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
3998 log1 = rtt; in rack_decrease_highrtt()
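The high-RTT variant uses the same fixed-point trick on result = curper * (1 - B * (1 - highrttthresh / rtt)), with highrttthresh = filtered min RTT * rack_gp_rtt_maxmul. A sketch; the early return for rtt at or below the threshold is my guard, since the formula only reduces when rtt exceeds it:

#include <stdint.h>

static uint32_t
decrease_highrtt(uint32_t curper, uint64_t decrease_per /* B, percent */,
    uint64_t rtt, uint64_t highrttthresh)
{
	uint64_t perf;

	if (rtt <= highrttthresh)
		return (curper);	/* not above threshold, no cut */
	perf = ((uint64_t)curper * (1000000ULL -
	    (decrease_per * (1000000ULL -
	    (highrttthresh * 1000000ULL) / rtt)) / 100ULL)) / 1000000ULL;
	return ((uint32_t)perf);
}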
4013 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) in rack_decrease_bw_mul() argument
4018 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
4020 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
4022 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
4023 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
4029 rtt_diff *= -1; in rack_decrease_bw_mul()
4032 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
4035 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
4036 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4042 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4043 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
4044 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
4045 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
4047 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4050 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
4051 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4054 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
4057 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
4058 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4064 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4065 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
4066 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
4067 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
4070 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4074 logvar2 = (uint32_t)rtt; in rack_decrease_bw_mul()
4079 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4084 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
4085 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4087 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
4090 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
4091 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4097 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4098 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
4099 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
4100 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
4102 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4107 logvar2 = (uint32_t)rtt; in rack_decrease_bw_mul()
4112 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4117 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
4118 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4121 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
4122 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
4124 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
4125 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
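The function above repeats one pattern for each of the three multipliers (recovery, slow-start, congestion avoidance): when the timely verdict is 2 it evaluates both decrease curves and keeps the smaller, i.e. more aggressive, result; otherwise only the gradient decrease applies, and the outcome is floored at a configured lower bound. A condensed sketch of that pattern, reusing the two helpers above (apply_decrease is a hypothetical name):

static uint16_t
apply_decrease(uint16_t cur, int timely_says, uint32_t rtt,
    int32_t rtt_diff, uint32_t min_rtt, uint16_t lower_bound)
{
	uint32_t val, new_per, alt;

	if (timely_says == 2) {
		/* RTT above the high threshold: take the deeper cut. */
		new_per = gp_highrtt_decrease(cur, rtt, min_rtt);
		alt = gp_gradient_decrease(cur, rtt_diff, min_rtt);
		val = (alt < new_per) ? alt : new_per;
	} else {
		val = gp_gradient_decrease(cur, rtt_diff, min_rtt);
	}
	if (val < lower_bound)
		val = lower_bound;	/* e.g. rack_per_lower_bound */
	return ((uint16_t)val);
}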
4136 uint32_t rtt, uint32_t line, uint8_t reas) in rack_log_rtt_shrinks() argument
4138 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_shrinks()
4144 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_shrinks()
4145 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4146 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_rtt_shrinks()
4147 log.u_bbr.flex5 = rtt; in rack_log_rtt_shrinks()
4148 log.u_bbr.flex6 = rack->rc_highly_buffered; in rack_log_rtt_shrinks()
4150 log.u_bbr.flex6 |= rack->forced_ack; in rack_log_rtt_shrinks()
4152 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; in rack_log_rtt_shrinks()
4154 log.u_bbr.flex6 |= rack->in_probe_rtt; in rack_log_rtt_shrinks()
4156 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; in rack_log_rtt_shrinks()
4157 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; in rack_log_rtt_shrinks()
4158 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; in rack_log_rtt_shrinks()
4159 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; in rack_log_rtt_shrinks()
4163 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; in rack_log_rtt_shrinks()
4165 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; in rack_log_rtt_shrinks()
4166 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_shrinks()
4167 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_rtt_shrinks()
4168 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_shrinks()
4169 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_shrinks()
4170 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_rtt_shrinks()
4171 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4172 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_shrinks()
4173 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_log_rtt_shrinks()
4176 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; in rack_log_rtt_shrinks()
4177 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_shrinks()
4178 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_shrinks()
4179 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_shrinks()
4181 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_rtt_shrinks()
4186 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) in rack_set_prtt_target() argument
4191 bwdp *= (uint64_t)rtt; in rack_set_prtt_target()
4193 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); in rack_set_prtt_target()
4194 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { in rack_set_prtt_target()
4200 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); in rack_set_prtt_target()
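rack_set_prtt_target is where the probe-RTT drain target comes from: the BDP implied by the current bandwidth estimate, rounded up to whole segments and floored at a handful of segments. A self-contained sketch, assuming bandwidth in bytes/sec and rtt in microseconds (ROUNDUP and the minimum segment count are local stand-ins):

#include <stdint.h>

#define USECS_PER_SEC	1000000ULL
#define TIMELY_MIN_SEGS	4	/* stand-in for rack_timely_min_segs */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

static uint32_t
prtt_target_flight(uint64_t bw_bps, uint32_t rtt_us, uint32_t segsiz)
{
	uint64_t bwdp;
	uint32_t target;

	/* BDP in bytes: (bytes/sec) * (usec) / (usec/sec) */
	bwdp = (bw_bps * (uint64_t)rtt_us) / USECS_PER_SEC;
	target = ROUNDUP((uint32_t)bwdp, segsiz);
	if (target < segsiz * TIMELY_MIN_SEGS)
		target = segsiz * TIMELY_MIN_SEGS;
	return (target);
}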
4210 * the RTT as a signal that we saw something new and in rack_enter_probertt()
4221 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_enter_probertt()
4222 if (rack->rc_gp_dyn_mul == 0) in rack_enter_probertt()
4225 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { in rack_enter_probertt()
4229 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_enter_probertt()
4230 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_enter_probertt()
4238 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_enter_probertt()
4239 rack->rc_tp->snd_una, __LINE__, in rack_enter_probertt()
4242 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_enter_probertt()
4243 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_enter_probertt()
4244 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_enter_probertt()
4245 rack->r_ctl.rc_pace_min_segs); in rack_enter_probertt()
4246 rack->in_probe_rtt = 1; in rack_enter_probertt()
4247 rack->measure_saw_probe_rtt = 1; in rack_enter_probertt()
4248 rack->r_ctl.rc_time_probertt_starts = 0; in rack_enter_probertt()
4249 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; in rack_enter_probertt()
4251 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_enter_probertt()
4253 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); in rack_enter_probertt()
4254 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_enter_probertt()
4264 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_exit_probertt()
4265 rack->r_ctl.rc_pace_min_segs); in rack_exit_probertt()
4266 rack->in_probe_rtt = 0; in rack_exit_probertt()
4267 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_exit_probertt()
4268 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_exit_probertt()
4276 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_exit_probertt()
4277 rack->rc_tp->snd_una, __LINE__, in rack_exit_probertt()
4279 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in rack_exit_probertt()
4283 * probe-rtt. We probably are not interested in in rack_exit_probertt()
4286 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; in rack_exit_probertt()
4292 * We need to mark these as app-limited so we in rack_exit_probertt()
4295 rsm = tqhash_max(rack->r_ctl.tqh); in rack_exit_probertt()
4296 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_exit_probertt()
4297 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_exit_probertt()
4298 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_exit_probertt()
4305 if (rack->r_ctl.rc_end_appl) in rack_exit_probertt()
4306 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_exit_probertt()
4307 rack->r_ctl.rc_end_appl = rsm; in rack_exit_probertt()
4309 rsm->r_flags |= RACK_APP_LIMITED; in rack_exit_probertt()
4310 rack->r_ctl.rc_app_limited_cnt++; in rack_exit_probertt()
4322 rack->rc_gp_incr = 0; in rack_exit_probertt()
4323 rack->rc_gp_bwred = 0; in rack_exit_probertt()
4324 rack->rc_gp_timely_inc_cnt = 0; in rack_exit_probertt()
4325 rack->rc_gp_timely_dec_cnt = 0; in rack_exit_probertt()
4328 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { in rack_exit_probertt()
4329 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; in rack_exit_probertt()
4330 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; in rack_exit_probertt()
4332 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { in rack_exit_probertt()
4333 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; in rack_exit_probertt()
4334 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; in rack_exit_probertt()
4340 rack->r_ctl.rc_rtt_diff = 0; in rack_exit_probertt()
4343 rack->rc_tp->t_bytes_acked = 0; in rack_exit_probertt()
4344 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_exit_probertt()
4355 /* Set to min rtt */ in rack_exit_probertt()
4357 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_exit_probertt()
4359 /* Set to current gp rtt */ in rack_exit_probertt()
4361 rack->r_ctl.rc_gp_srtt); in rack_exit_probertt()
4363 /* Set to entry gp rtt */ in rack_exit_probertt()
4365 rack->r_ctl.rc_entry_gp_rtt); in rack_exit_probertt()
4370 sum = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4372 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); in rack_exit_probertt()
4380 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4387 setval = rack->r_ctl.rc_gp_srtt; in rack_exit_probertt()
4388 if (setval > rack->r_ctl.rc_entry_gp_rtt) in rack_exit_probertt()
4389 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4396 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_exit_probertt()
4403 ebdp = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4406 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; in rack_exit_probertt()
4408 setto = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4409 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); in rack_exit_probertt()
4410 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { in rack_exit_probertt()
4412 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; in rack_exit_probertt()
4415 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); in rack_exit_probertt()
4418 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_exit_probertt()
4421 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; in rack_exit_probertt()
4422 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_exit_probertt()
4423 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_probertt()
4424 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_exit_probertt()
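At exit the cwnd is rebuilt from that same target. The elided sysctl decides whether an extra BDP of headroom is added, and ssthresh is parked one byte under cwnd so the flow resumes in congestion avoidance. A sketch under those assumptions, reusing ROUNDUP and TIMELY_MIN_SEGS from the sketch above:

static void
prtt_exit_cwnd(uint32_t target_flight, uint32_t segsiz, int add_extra_bdp,
    uint32_t *snd_cwnd, uint32_t *snd_ssthresh)
{
	uint32_t setto;

	setto = target_flight + (add_extra_bdp ? target_flight : 0);
	*snd_cwnd = ROUNDUP(setto, segsiz);
	if (*snd_cwnd < segsiz * TIMELY_MIN_SEGS)
		*snd_cwnd = segsiz * TIMELY_MIN_SEGS;
	*snd_ssthresh = *snd_cwnd - 1;	/* resume in congestion avoidance */
}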
4430 /* Check in on probe-rtt */ in rack_check_probe_rtt()
4432 if (rack->rc_gp_filled == 0) { in rack_check_probe_rtt()
4433 /* We do not do p-rtt unless we have gp measurements */ in rack_check_probe_rtt()
4436 if (rack->in_probe_rtt) { in rack_check_probe_rtt()
4440 if (rack->r_ctl.rc_went_idle_time && in rack_check_probe_rtt()
4441 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { in rack_check_probe_rtt()
4447 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && in rack_check_probe_rtt()
4448 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { in rack_check_probe_rtt()
4450 * Probe RTT safety value triggered! in rack_check_probe_rtt()
4453 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4458 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); in rack_check_probe_rtt()
4459 if (rack->rc_highly_buffered) in rack_check_probe_rtt()
4460 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); in rack_check_probe_rtt()
4462 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); in rack_check_probe_rtt()
4463 …if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) … in rack_check_probe_rtt()
4468 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) in rack_check_probe_rtt()
4469 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; in rack_check_probe_rtt()
4472 calc /= max(rack->r_ctl.rc_gp_srtt, 1); in rack_check_probe_rtt()
4477 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4479 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; in rack_check_probe_rtt()
4481 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) in rack_check_probe_rtt()
4482 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4487 if (rack->r_ctl.rc_time_probertt_starts == 0) { in rack_check_probe_rtt()
4489 rack->rc_highly_buffered) || in rack_check_probe_rtt()
4490 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > in rack_check_probe_rtt()
4491 rack->r_ctl.rc_target_probertt_flight)) { in rack_check_probe_rtt()
4496 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4498 rack->r_ctl.rc_time_probertt_starts = us_cts; in rack_check_probe_rtt()
4499 if (rack->r_ctl.rc_time_probertt_starts == 0) in rack_check_probe_rtt()
4500 rack->r_ctl.rc_time_probertt_starts = 1; in rack_check_probe_rtt()
4502 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_check_probe_rtt()
4507 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * in rack_check_probe_rtt()
4514 endtime += rack->r_ctl.rc_time_probertt_starts; in rack_check_probe_rtt()
4520 } else if ((rack->rc_skip_timely == 0) && in rack_check_probe_rtt()
4521 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && in rack_check_probe_rtt()
4522 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { in rack_check_probe_rtt()
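While a connection sits in probe-RTT, the visible lines ramp the pacing percentage down by one point per gp_srtt elapsed, clamped at a floor. Isolated, with stand-in percentages in place of the elided sysctls:

#define PRTT_START_PER	75	/* stand-in for rack_per_of_gp_probertt */
#define PRTT_FLOOR_PER	40	/* stand-in for rack_per_of_gp_lowthresh */

static uint16_t
prtt_pace_per(uint32_t now_us, uint32_t entered_us, uint32_t gp_srtt_us)
{
	uint32_t srtts_elapsed;

	if (gp_srtt_us == 0)
		gp_srtt_us = 1;		/* max(srtt, 1), as above */
	srtts_elapsed = (now_us - entered_us) / gp_srtt_us;
	if (srtts_elapsed >= (PRTT_START_PER - PRTT_FLOOR_PER))
		return (PRTT_FLOOR_PER);
	return (PRTT_START_PER - srtts_elapsed);
}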
4530 uint32_t rtt, int32_t rtt_diff) in rack_update_multiplier() argument
4535 if ((rack->rc_gp_dyn_mul == 0) || in rack_update_multiplier()
4536 (rack->use_fixed_rate) || in rack_update_multiplier()
4537 (rack->in_probe_rtt) || in rack_update_multiplier()
4538 (rack->rc_always_pace == 0)) { in rack_update_multiplier()
4542 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; in rack_update_multiplier()
4545 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; in rack_update_multiplier()
4547 up_bnd += rack->r_ctl.last_gp_comp_bw; in rack_update_multiplier()
4549 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; in rack_update_multiplier()
4551 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; in rack_update_multiplier()
4552 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { in rack_update_multiplier()
4554 * This is the case where our RTT is above in rack_update_multiplier()
4565 if (rack->r_ctl.rc_no_push_at_mrtt > 1) in rack_update_multiplier()
4567 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); in rack_update_multiplier()
4573 * for b/w with it. This will push the RTT up which in rack_update_multiplier()
4584 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4585 if (rack->rc_gp_bwred == 0) { in rack_update_multiplier()
4587 rack->rc_gp_bwred = 1; in rack_update_multiplier()
4588 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4590 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { in rack_update_multiplier()
4596 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || in rack_update_multiplier()
4597 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || in rack_update_multiplier()
4611 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4612 /* We are not really incrementing, so clear the increase count */ in rack_update_multiplier()
4613 rack->rc_gp_incr = 0; in rack_update_multiplier()
4614 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4617 * Let's just use the RTT in rack_update_multiplier()
4634 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4635 if (rack->rc_gp_saw_ss && in rack_update_multiplier()
4636 rack->r_ctl.rack_per_upper_bound_ss && in rack_update_multiplier()
4637 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { in rack_update_multiplier()
4644 if (rack->rc_gp_saw_ca && in rack_update_multiplier()
4645 rack->r_ctl.rack_per_upper_bound_ca && in rack_update_multiplier()
4646 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { in rack_update_multiplier()
4653 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4654 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4656 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { in rack_update_multiplier()
4673 rack->rc_gp_incr = 0; in rack_update_multiplier()
4674 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4675 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && in rack_update_multiplier()
4680 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4681 /* We are not really incrementing, so clear the increase count */ in rack_update_multiplier()
4682 rack->rc_gp_incr = 0; in rack_update_multiplier()
4683 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4685 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); in rack_update_multiplier()
4687 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4688 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
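The driver above first classifies the new goodput sample against a band built around the previous one: a configured percentage up and a (typically wider) percentage down, with everything inside the band treated as steady state. A sketch with stand-in percentages (the /100 scaling sits on elided lines):

#include <stdint.h>

#define BW_MUL_UP	2	/* stand-in: +2% counts as a rise */
#define BW_MUL_DOWN	4	/* stand-in: -4% counts as a drop */

enum bw_verdict { BW_DROPPED, BW_STEADY, BW_ROSE };

static enum bw_verdict
classify_bw(uint64_t cur_bw, uint64_t last_bw)
{
	uint64_t up_bnd, low_bnd;

	up_bnd = last_bw + (last_bw * BW_MUL_UP) / 100;
	low_bnd = last_bw - (last_bw * BW_MUL_DOWN) / 100;
	if (cur_bw < low_bnd)
		return (BW_DROPPED);	/* decrease path */
	if (cur_bw > up_bnd)
		return (BW_ROSE);	/* room to push the multipliers up */
	return (BW_STEADY);
}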
4695 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) in rack_make_timely_judgement() argument
4700 log_rtt_a_diff = rtt; in rack_make_timely_judgement()
4703 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * in rack_make_timely_judgement()
4707 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_make_timely_judgement()
4711 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4713 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4714 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4717 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4718 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4724 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4747 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); in rack_make_timely_judgement()
4752 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); in rack_make_timely_judgement()
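rack_make_timely_judgement splits the RTT space into three zones around the filtered minimum. A sketch of just the zoning, using the multiplier/divisor shapes visible above; zone names are illustrative, not the kernel's return codes, and mindiv is assumed non-zero:

enum timely_zone { ZONE_LOW, ZONE_GRADIENT, ZONE_HIGH };

static enum timely_zone
timely_zone(uint32_t rtt, uint32_t min_rtt,
    uint32_t maxmul, uint32_t minmul, uint32_t mindiv)
{
	uint32_t hi, lo;

	hi = min_rtt * maxmul;			    /* decrease above this */
	lo = min_rtt + (min_rtt * minmul) / mindiv; /* increase below this */
	if (rtt >= hi)
		return (ZONE_HIGH);
	if (rtt <= lo)
		return (ZONE_LOW);
	return (ZONE_GRADIENT);	/* fall through to the rtt_diff gradient */
}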
4761 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4762 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4767 * |----------------| in rack_in_gp_window()
4768 * |-----| <or> in rack_in_gp_window()
4769 * |----| in rack_in_gp_window()
4770 * <or> |---| in rack_in_gp_window()
4773 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4774 SEQ_GT(rsm->r_end, tp->gput_seq)){ in rack_in_gp_window()
4777 * |--------------| in rack_in_gp_window()
4778 * |-------->| in rack_in_gp_window()
4781 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4782 SEQ_LT(rsm->r_start, tp->gput_ack) && in rack_in_gp_window()
4783 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4787 * |--------------| in rack_in_gp_window()
4788 * |-------->| in rack_in_gp_window()
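The three overlap cases drawn above reduce to a single predicate over wrap-safe sequence compares. A standalone version, with the compares written in the style of tcp_seq.h:

#include <stdint.h>

#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

/* Does [r_start, r_end) overlap the window [gput_seq, gput_ack)? */
static int
in_gp_window(uint32_t r_start, uint32_t r_end,
    uint32_t gput_seq, uint32_t gput_ack)
{
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LEQ(r_end, gput_ack))
		return (1);	/* wholly contained */
	if (SEQ_LT(r_start, gput_seq) && SEQ_GT(r_end, gput_seq))
		return (1);	/* straddles the front edge */
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LT(r_start, gput_ack) &&
	    SEQ_GEQ(r_end, gput_ack))
		return (1);	/* straddles the back edge */
	return (0);
}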
4799 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_mark_in_gp_win()
4807 rsm->r_flags |= RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4809 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4818 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_clear_gp_marks()
4820 rsm = tqhash_min(rack->r_ctl.tqh); in rack_clear_gp_marks()
4823 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ in rack_clear_gp_marks()
4824 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_clear_gp_marks()
4825 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_clear_gp_marks()
4835 if (tp->snd_una == tp->snd_max) { in rack_tend_gp_marks()
4839 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { in rack_tend_gp_marks()
4846 rsm = tqhash_min(rack->r_ctl.tqh); in rack_tend_gp_marks()
4849 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) in rack_tend_gp_marks()
4851 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4859 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_tend_gp_marks()
4867 * *before* we started our measurement. The rsm, if non-null in rack_tend_gp_marks()
4872 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4875 if (SEQ_GT(rsm->r_end, tp->gput_ack)) in rack_tend_gp_marks()
4877 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4884 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_gp_calc()
4896 log.u_bbr.delRate = rack->r_ctl.gp_bw; in rack_log_gp_calc()
4899 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gp_calc()
4900 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gp_calc()
4901 &rack->rc_inp->inp_socket->so_snd, in rack_log_gp_calc()
4903 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_gp_calc()
4917 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
4918 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_goodput_measurement()
4919 if (TSTMP_GEQ(us_cts, tp->gput_ts)) in rack_do_goodput_measurement()
4920 tim = us_cts - tp->gput_ts; in rack_do_goodput_measurement()
4923 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) in rack_do_goodput_measurement()
4924 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; in rack_do_goodput_measurement()
4939 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); in rack_do_goodput_measurement()
4951 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { in rack_do_goodput_measurement()
4962 * rtt we have seen during the measurement and the in rack_do_goodput_measurement()
4971 * were a long way away.. example I am in Europe (100ms rtt) in rack_do_goodput_measurement()
4973 * bytes my time would be 1.2ms, and yet my rtt would say in rack_do_goodput_measurement()
4979 * the lowest RTT we have seen and the highest rwnd. in rack_do_goodput_measurement()
4985 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; in rack_do_goodput_measurement()
4986 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; in rack_do_goodput_measurement()
4987 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; in rack_do_goodput_measurement()
4988 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_do_goodput_measurement()
4996 bytes = (th_ack - tp->gput_seq); in rack_do_goodput_measurement()
5007 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5016 * IW - 2MSS. in rack_do_goodput_measurement()
5018 reqbytes -= (2 * segsiz); in rack_do_goodput_measurement()
5020 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5022 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { in rack_do_goodput_measurement()
5024 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5032 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; in rack_do_goodput_measurement()
5033 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5035 rack->r_ctl.rc_rtt_diff = new_rtt_diff; in rack_do_goodput_measurement()
5037 if (rack->measure_saw_probe_rtt == 0) { in rack_do_goodput_measurement()
5041 * expect to be reducing the RTT when we in rack_do_goodput_measurement()
5044 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); in rack_do_goodput_measurement()
5045 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); in rack_do_goodput_measurement()
5049 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5050 rack->r_ctl.rc_rtt_diff, in rack_do_goodput_measurement()
5051 rack->r_ctl.rc_prev_gp_srtt in rack_do_goodput_measurement()
5055 if (bytes_ps > rack->r_ctl.last_max_bw) { in rack_do_goodput_measurement()
5059 * on our BDP (highest rwnd and lowest rtt in rack_do_goodput_measurement()
5066 bytes_ps, rack->r_ctl.last_max_bw, 0, in rack_do_goodput_measurement()
5068 bytes_ps = rack->r_ctl.last_max_bw; in rack_do_goodput_measurement()
5071 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5074 rack->r_ctl.gp_bw = bytes_ps; in rack_do_goodput_measurement()
5075 rack->rc_gp_filled = 1; in rack_do_goodput_measurement()
5076 rack->r_ctl.num_measurements = 1; in rack_do_goodput_measurement()
5077 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5080 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5083 if (tcp_in_hpts(rack->rc_tp) && in rack_do_goodput_measurement()
5084 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_goodput_measurement()
5087 * where we transition from un-paced to paced. in rack_do_goodput_measurement()
5093 tcp_hpts_remove(rack->rc_tp); in rack_do_goodput_measurement()
5094 rack->r_ctl.rc_hpts_flags = 0; in rack_do_goodput_measurement()
5095 rack->r_ctl.rc_last_output_to = 0; in rack_do_goodput_measurement()
5098 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { in rack_do_goodput_measurement()
5100 rack->r_ctl.gp_bw += bytes_ps; in rack_do_goodput_measurement()
5101 addpart = rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5102 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5103 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_do_goodput_measurement()
5105 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5120 * 10ms rtt we only want to take a much smaller portion. in rack_do_goodput_measurement()
5124 if (rack->r_ctl.num_measurements < 0xff) { in rack_do_goodput_measurement()
5125 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5127 srtt = (uint64_t)tp->t_srtt; in rack_do_goodput_measurement()
5132 if (rack->r_ctl.rc_rack_min_rtt) in rack_do_goodput_measurement()
5133 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_do_goodput_measurement()
5148 * and non-dynamic... but considering lots of folks in rack_do_goodput_measurement()
5153 if (rack->rc_gp_dyn_mul == 0) { in rack_do_goodput_measurement()
5154 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5156 if (subpart < (rack->r_ctl.gp_bw / 2)) { in rack_do_goodput_measurement()
5175 subpart = rack->r_ctl.gp_bw / 2; in rack_do_goodput_measurement()
5180 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5181 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5193 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5204 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; in rack_do_goodput_measurement()
5208 if ((rack->measure_saw_probe_rtt == 0) || in rack_do_goodput_measurement()
5209 (bytes_ps > rack->r_ctl.gp_bw)) { in rack_do_goodput_measurement()
5211 * For probe-rtt we only add it in in rack_do_goodput_measurement()
5217 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5218 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5225 * or first slow-start that ensues. If we ever needed to watch in rack_do_goodput_measurement()
5229 if ((rack->rc_initial_ss_comp == 0) && in rack_do_goodput_measurement()
5230 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { in rack_do_goodput_measurement()
5234 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5240 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5241 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_do_goodput_measurement()
5243 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5248 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || in rack_do_goodput_measurement()
5249 (rack->r_ctl.last_gpest == 0)) { in rack_do_goodput_measurement()
5256 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5257 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5258 } else if (gp_est >= rack->r_ctl.last_gpest) { in rack_do_goodput_measurement()
5265 gp_est /= rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5266 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { in rack_do_goodput_measurement()
5270 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5276 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5278 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; in rack_do_goodput_measurement()
5280 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5285 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5286 if (rack->r_ctl.use_gp_not_last == 1) in rack_do_goodput_measurement()
5287 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5289 rack->r_ctl.last_gpest = bytes_ps; in rack_do_goodput_measurement()
5293 if ((rack->gp_ready == 0) && in rack_do_goodput_measurement()
5294 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_do_goodput_measurement()
5296 rack->gp_ready = 1; in rack_do_goodput_measurement()
5297 if (rack->dgp_on || in rack_do_goodput_measurement()
5298 rack->rack_hibeta) in rack_do_goodput_measurement()
5300 if (rack->defer_options) in rack_do_goodput_measurement()
5305 /* We do not update any multipliers if we are in or have seen a probe-rtt */ in rack_do_goodput_measurement()
5307 if ((rack->measure_saw_probe_rtt == 0) && in rack_do_goodput_measurement()
5308 rack->rc_gp_rtt_set) { in rack_do_goodput_measurement()
5309 if (rack->rc_skip_timely == 0) { in rack_do_goodput_measurement()
5311 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5312 rack->r_ctl.rc_rtt_diff); in rack_do_goodput_measurement()
5321 rack->r_ctl.gp_bw, /* delRate */ in rack_do_goodput_measurement()
5325 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5327 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; in rack_do_goodput_measurement()
5333 rack->rc_gp_rtt_set = 0; in rack_do_goodput_measurement()
5334 rack->rc_gp_saw_rec = 0; in rack_do_goodput_measurement()
5335 rack->rc_gp_saw_ca = 0; in rack_do_goodput_measurement()
5336 rack->rc_gp_saw_ss = 0; in rack_do_goodput_measurement()
5337 rack->rc_dragged_bottom = 0; in rack_do_goodput_measurement()
5345 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, in rack_do_goodput_measurement()
5352 if (tp->t_stats_gput_prev > 0) in rack_do_goodput_measurement()
5353 stats_voi_update_abs_s32(tp->t_stats, in rack_do_goodput_measurement()
5355 ((gput - tp->t_stats_gput_prev) * 100) / in rack_do_goodput_measurement()
5356 tp->t_stats_gput_prev); in rack_do_goodput_measurement()
5358 tp->t_stats_gput_prev = gput; in rack_do_goodput_measurement()
5360 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5365 * We don't do the other case i.e. non-applimited here since in rack_do_goodput_measurement()
5368 if (rack->r_ctl.rc_first_appl && in rack_do_goodput_measurement()
5369 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_goodput_measurement()
5370 rack->r_ctl.rc_app_limited_cnt && in rack_do_goodput_measurement()
5371 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && in rack_do_goodput_measurement()
5372 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > in rack_do_goodput_measurement()
5379 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_do_goodput_measurement()
5380 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_do_goodput_measurement()
5381 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
5382 rack->app_limited_needs_set = 0; in rack_do_goodput_measurement()
5383 tp->gput_seq = th_ack; in rack_do_goodput_measurement()
5384 if (rack->in_probe_rtt) in rack_do_goodput_measurement()
5385 rack->measure_saw_probe_rtt = 1; in rack_do_goodput_measurement()
5386 else if ((rack->measure_saw_probe_rtt) && in rack_do_goodput_measurement()
5387 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_do_goodput_measurement()
5388 rack->measure_saw_probe_rtt = 0; in rack_do_goodput_measurement()
5389 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { in rack_do_goodput_measurement()
5391 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_do_goodput_measurement()
5394 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); in rack_do_goodput_measurement()
5395 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_do_goodput_measurement()
5399 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5400 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_do_goodput_measurement()
5405 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_do_goodput_measurement()
5411 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { in rack_do_goodput_measurement()
5416 tp->t_flags |= TF_GPUTINPROG; in rack_do_goodput_measurement()
5418 * Now we need to find the timestamp of the send at tp->gput_seq in rack_do_goodput_measurement()
5421 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_do_goodput_measurement()
5422 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_do_goodput_measurement()
5424 /* Ok send-based limit is set */ in rack_do_goodput_measurement()
5425 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { in rack_do_goodput_measurement()
5432 tp->gput_seq = rsm->r_start; in rack_do_goodput_measurement()
5434 if (rsm->r_flags & RACK_ACKED) { in rack_do_goodput_measurement()
5437 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; in rack_do_goodput_measurement()
5438 tp->gput_seq = rsm->r_end; in rack_do_goodput_measurement()
5439 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_do_goodput_measurement()
5443 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5446 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5448 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_do_goodput_measurement()
5452 * send-limit set the current time, which in rack_do_goodput_measurement()
5453 * basically disables the send-limit. in rack_do_goodput_measurement()
5458 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_do_goodput_measurement()
5462 tp->gput_seq, in rack_do_goodput_measurement()
5463 tp->gput_ack, in rack_do_goodput_measurement()
5465 tp->gput_ts, in rack_do_goodput_measurement()
5466 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_do_goodput_measurement()
5469 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_do_goodput_measurement()
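Once past the startup averaging, the function folds each sample into gp_bw with a time-weighted moving average: the sample displaces a slice of the estimate proportional to how long the measurement ran, and the slice is capped at one half so a single sample can never dominate. The exact horizon (a small multiple of srtt in the elided divides) is left as a parameter in this sketch:

#include <stdint.h>

static uint64_t
gp_wma(uint64_t gp_bw, uint64_t sample_bw, uint64_t meas_time_us,
    uint64_t horizon_us)	/* horizon: a small multiple of srtt */
{
	uint64_t subpart, addpart;

	subpart = (gp_bw * meas_time_us) / horizon_us;
	if (subpart < gp_bw / 2) {
		addpart = (sample_bw * meas_time_us) / horizon_us;
	} else {
		/* Long measurement: cap the sample's weight at one half. */
		subpart = gp_bw / 2;
		addpart = sample_bw / 2;
	}
	return ((gp_bw - subpart) + addpart);
}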
5491 tp->t_ccv.nsegs = nsegs; in rack_ack_received()
5492 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); in rack_ack_received()
5493 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { in rack_ack_received()
5496 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); in rack_ack_received()
5497 if (tp->t_ccv.bytes_this_ack > max) { in rack_ack_received()
5498 tp->t_ccv.bytes_this_ack = max; in rack_ack_received()
5502 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, in rack_ack_received()
5503 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); in rack_ack_received()
5505 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { in rack_ack_received()
5514 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); in rack_ack_received()
5515 rack->r_ctl.lt_seq = tp->snd_max; in rack_ack_received()
5516 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_ack_received()
5517 if (tmark >= rack->r_ctl.lt_timemark) { in rack_ack_received()
5518 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_ack_received()
5520 rack->r_ctl.lt_timemark = tmark; in rack_ack_received()
5521 rack->lt_bw_up = 0; in rack_ack_received()
5524 if ((tp->t_flags & TF_GPUTINPROG) && in rack_ack_received()
5530 if (tp->snd_cwnd <= tp->snd_wnd) in rack_ack_received()
5531 tp->t_ccv.flags |= CCF_CWND_LIMITED; in rack_ack_received()
5533 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; in rack_ack_received()
5534 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_ack_received()
5535 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, in rack_ack_received()
5538 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5539 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; in rack_ack_received()
5540 tp->t_ccv.flags |= CCF_ABC_SENTAWND; in rack_ack_received()
5543 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_ack_received()
5544 tp->t_bytes_acked = 0; in rack_ack_received()
5546 prior_cwnd = tp->snd_cwnd; in rack_ack_received()
5547 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || in rack_ack_received()
5548 (rack_client_low_buf && rack->client_bufferlvl && in rack_ack_received()
5549 (rack->client_bufferlvl < rack_client_low_buf))) in rack_ack_received()
5550 labc_to_use = rack->rc_labc; in rack_ack_received()
5553 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_ack_received()
5560 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_ack_received()
5561 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_ack_received()
5562 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_ack_received()
5570 if (CC_ALGO(tp)->ack_received != NULL) { in rack_ack_received()
5572 tp->t_ccv.curack = th_ack; in rack_ack_received()
5573 tp->t_ccv.labc = labc_to_use; in rack_ack_received()
5574 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; in rack_ack_received()
5575 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); in rack_ack_received()
5578 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; in rack_ack_received()
5580 if (rack->r_must_retran) { in rack_ack_received()
5581 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { in rack_ack_received()
5586 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5587 rack->r_must_retran = 0; in rack_ack_received()
5588 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { in rack_ack_received()
5595 if (acked <= rack->r_ctl.rc_out_at_rto){ in rack_ack_received()
5596 rack->r_ctl.rc_out_at_rto -= acked; in rack_ack_received()
5598 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5603 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); in rack_ack_received()
5605 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5606 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; in rack_ack_received()
5608 if ((rack->rc_initial_ss_comp == 0) && in rack_ack_received()
5609 (tp->snd_cwnd >= tp->snd_ssthresh)) { in rack_ack_received()
5614 rack->rc_initial_ss_comp = 1; in rack_ack_received()
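The byte-counting in the middle of the function is appropriate-byte-counting with a local labc cap: in congestion avoidance, acked bytes accumulate (the elided min() bound limits how much one ack may contribute) until a full cwnd has been acked, and only then is the CC module told it may grow the window. A stripped-down model of that state machine:

#include <stdint.h>

struct abc_state {
	uint64_t bytes_acked;	/* models tp->t_bytes_acked */
	int sent_a_wnd;		/* models CCF_ABC_SENTAWND */
};

static void
abc_on_ack(struct abc_state *s, uint32_t bytes_this_ack,
    uint32_t per_ack_cap, uint32_t cwnd, uint32_t ssthresh)
{
	if (cwnd <= ssthresh) {
		/* Slow start: no byte counting, reset the accumulator. */
		s->sent_a_wnd = 0;
		s->bytes_acked = 0;
		return;
	}
	s->bytes_acked += (bytes_this_ack < per_ack_cap) ?
	    bytes_this_ack : per_ack_cap;
	if (s->bytes_acked >= cwnd) {
		s->bytes_acked -= cwnd;
		s->sent_a_wnd = 1;	/* a full window has been acked */
	}
}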
5623 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_partialack()
5632 if ((rack->r_ctl.rc_prr_sndcnt > 0) || in tcp_rack_partialack()
5633 rack->rack_no_prr) in tcp_rack_partialack()
5634 rack->r_wanted_output = 1; in tcp_rack_partialack()
5643 EXIT_RECOVERY(tp->t_flags); in rack_exit_recovery()
5652 orig_cwnd = tp->snd_cwnd; in rack_post_recovery()
5654 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_post_recovery()
5656 if (CC_ALGO(tp)->post_recovery != NULL) { in rack_post_recovery()
5657 tp->t_ccv.curack = th_ack; in rack_post_recovery()
5658 CC_ALGO(tp)->post_recovery(&tp->t_ccv); in rack_post_recovery()
5659 if (tp->snd_cwnd < tp->snd_ssthresh) { in rack_post_recovery()
5663 * snd_ssthresh per RFC-6582 (option 2). in rack_post_recovery()
5665 tp->snd_cwnd = tp->snd_ssthresh; in rack_post_recovery()
5668 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_post_recovery()
5675 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_post_recovery()
5676 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_post_recovery()
5677 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_post_recovery()
5681 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_post_recovery()
5686 if ((rack->rack_no_prr == 0) && in rack_post_recovery()
5687 (rack->no_prr_addback == 0) && in rack_post_recovery()
5688 (rack->r_ctl.rc_prr_sndcnt > 0)) { in rack_post_recovery()
5693 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { in rack_post_recovery()
5703 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), in rack_post_recovery()
5704 rack->r_ctl.rc_prr_sndcnt); in rack_post_recovery()
5706 rack->r_ctl.rc_prr_sndcnt = 0; in rack_post_recovery()
5710 tp->snd_recover = tp->snd_una; in rack_post_recovery()
5711 if (rack->r_ctl.dsack_persist) { in rack_post_recovery()
5712 rack->r_ctl.dsack_persist--; in rack_post_recovery()
5713 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_post_recovery()
5714 rack->r_ctl.num_dsack = 0; in rack_post_recovery()
5718 if (rack->rto_from_rec == 1) { in rack_post_recovery()
5719 rack->rto_from_rec = 0; in rack_post_recovery()
5720 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_post_recovery()
5721 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_post_recovery()
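The PRR addback near the top of rack_post_recovery keeps data from being stranded at recovery exit: whatever send credit PRR still holds is returned to cwnd, capped at a few segments. In isolation (the cap is a stand-in value):

#define PRR_ADDBACK_MAX	2	/* stand-in for rack_prr_addbackmax */

static uint32_t
prr_addback(uint32_t snd_cwnd, uint32_t prr_sndcnt, uint32_t maxseg)
{
	uint32_t add = maxseg * PRR_ADDBACK_MAX;

	if (prr_sndcnt < add)
		add = prr_sndcnt;	/* never more credit than PRR held */
	return (snd_cwnd + add);
}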
5734 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); in rack_cong_signal()
5736 if (IN_RECOVERY(tp->t_flags) == 0) { in rack_cong_signal()
5738 ssthresh_enter = tp->snd_ssthresh; in rack_cong_signal()
5739 cwnd_enter = tp->snd_cwnd; in rack_cong_signal()
5742 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cong_signal()
5745 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5746 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5747 if (!IN_FASTRECOVERY(tp->t_flags)) { in rack_cong_signal()
5748 /* Check if this is the end of the initial start-up, i.e. the initial slow-start */ in rack_cong_signal()
5749 if (rack->rc_initial_ss_comp == 0) { in rack_cong_signal()
5751 rack->rc_initial_ss_comp = 1; in rack_cong_signal()
5753 rack->r_ctl.rc_prr_delivered = 0; in rack_cong_signal()
5754 rack->r_ctl.rc_prr_out = 0; in rack_cong_signal()
5755 rack->r_fast_output = 0; in rack_cong_signal()
5756 if (rack->rack_no_prr == 0) { in rack_cong_signal()
5757 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_cong_signal()
5760 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; in rack_cong_signal()
5761 tp->snd_recover = tp->snd_max; in rack_cong_signal()
5762 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5763 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5767 if (!IN_CONGRECOVERY(tp->t_flags) || in rack_cong_signal()
5772 SEQ_GEQ(ack, tp->snd_recover)) { in rack_cong_signal()
5773 EXIT_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5775 rack->r_fast_output = 0; in rack_cong_signal()
5776 tp->snd_recover = tp->snd_max + 1; in rack_cong_signal()
5777 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5778 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5782 tp->t_dupacks = 0; in rack_cong_signal()
5783 tp->t_bytes_acked = 0; in rack_cong_signal()
5784 rack->r_fast_output = 0; in rack_cong_signal()
5785 if (IN_RECOVERY(tp->t_flags)) in rack_cong_signal()
5787 orig_cwnd = tp->snd_cwnd; in rack_cong_signal()
5789 if (CC_ALGO(tp)->cong_signal == NULL) { in rack_cong_signal()
5791 tp->snd_ssthresh = max(2, in rack_cong_signal()
5792 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / in rack_cong_signal()
5794 tp->snd_cwnd = ctf_fixed_maxseg(tp); in rack_cong_signal()
5796 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5797 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5802 tp->snd_cwnd = tp->snd_cwnd_prev; in rack_cong_signal()
5803 tp->snd_ssthresh = tp->snd_ssthresh_prev; in rack_cong_signal()
5804 tp->snd_recover = tp->snd_recover_prev; in rack_cong_signal()
5805 if (tp->t_flags & TF_WASFRECOVERY) { in rack_cong_signal()
5806 ENTER_FASTRECOVERY(tp->t_flags); in rack_cong_signal()
5807 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5809 if (tp->t_flags & TF_WASCRECOVERY) { in rack_cong_signal()
5810 ENTER_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5811 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5813 tp->snd_nxt = tp->snd_max; in rack_cong_signal()
5814 tp->t_badrxtwin = 0; in rack_cong_signal()
5817 if ((CC_ALGO(tp)->cong_signal != NULL) && in rack_cong_signal()
5819 tp->t_ccv.curack = ack; in rack_cong_signal()
5820 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); in rack_cong_signal()
5822 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { in rack_cong_signal()
5824 rack->r_ctl.dsack_byte_cnt = 0; in rack_cong_signal()
5825 rack->r_ctl.retran_during_recovery = 0; in rack_cong_signal()
5826 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; in rack_cong_signal()
5827 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; in rack_cong_signal()
5828 rack->r_ent_rec_ns = 1; in rack_cong_signal()
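When the CC module provides no cong_signal hook, the RTO branch above falls back to the classic response: halve the effective window in whole segments (never below two) and collapse cwnd to one segment. As a standalone helper:

#include <stdint.h>

static void
rto_fallback(uint32_t snd_wnd, uint32_t cwnd_in_use, uint32_t maxseg,
    uint32_t *ssthresh, uint32_t *cwnd)
{
	uint32_t win, segs;

	win = (snd_wnd < cwnd_in_use) ? snd_wnd : cwnd_in_use;
	segs = win / 2 / maxseg;	/* half the window, in segments */
	if (segs < 2)
		segs = 2;
	*ssthresh = segs * maxseg;
	*cwnd = maxseg;			/* restart from one segment */
}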
5839 if (CC_ALGO(tp)->after_idle != NULL) in rack_cc_after_idle()
5840 CC_ALGO(tp)->after_idle(&tp->t_ccv); in rack_cc_after_idle()
5842 if (tp->snd_cwnd == 1) in rack_cc_after_idle()
5843 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ in rack_cc_after_idle()
5852 if (tp->snd_cwnd < i_cwnd) { in rack_cc_after_idle()
5853 tp->snd_cwnd = i_cwnd; in rack_cc_after_idle()
5860 * - There is no delayed ack timer in progress.
5861 * - Our last ack wasn't a 0-sized window. We never want to delay
5862 * the ack that opens up a 0-sized window.
5863 * - LRO wasn't used for this segment. We make sure by checking that the
5865 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5869 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5870 ((tp->t_flags & TF_DELACK) == 0) && \
5871 (tlen <= tp->t_maxseg) && \
5872 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
5880 * Walk the time-order transmitted list looking for an rsm that is in rack_find_lowest_rsm()
5884 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_find_lowest_rsm()
5885 if (rsm->r_flags & RACK_ACKED) { in rack_find_lowest_rsm()
5906 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { in rack_find_high_nonack()
5907 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { in rack_find_high_nonack()
5926 * If reorder-fade is configured, then we track the last time we saw in rack_calc_thresh_rack()
5927 * re-ordering occur. If we reach the point where enough time has in rack_calc_thresh_rack()
5930 * Or if reorder-fade is 0, then once we see reordering we consider in rack_calc_thresh_rack()
5934 * In the end if lro is non-zero we add the extra time for in rack_calc_thresh_rack()
5939 if (rack->r_ctl.rc_reorder_ts) { in rack_calc_thresh_rack()
5940 if (rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5941 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { in rack_calc_thresh_rack()
5942 lro = cts - rack->r_ctl.rc_reorder_ts; in rack_calc_thresh_rack()
5954 if (lro > rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5956 rack->r_ctl.rc_reorder_ts = 0; in rack_calc_thresh_rack()
5966 if (rack->rc_rack_tmr_std_based == 0) { in rack_calc_thresh_rack()
5967 thresh = srtt + rack->r_ctl.rc_pkt_delay; in rack_calc_thresh_rack()
5969 /* Standards based pkt-delay is 1/4 srtt */ in rack_calc_thresh_rack()
5972 if (lro && (rack->rc_rack_tmr_std_based == 0)) { in rack_calc_thresh_rack()
5973 /* It must be set; if not you get 1/4 rtt */ in rack_calc_thresh_rack()
5974 if (rack->r_ctl.rc_reorder_shift) in rack_calc_thresh_rack()
5975 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); in rack_calc_thresh_rack()
5979 if (rack->rc_rack_use_dsack && in rack_calc_thresh_rack()
5981 (rack->r_ctl.num_dsack > 0)) { in rack_calc_thresh_rack()
5986 thresh += rack->r_ctl.num_dsack * (srtt >> 2); in rack_calc_thresh_rack()
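Pulling the non standards-based arithmetic above together: the threshold starts at srtt plus the configured packet delay, widens by srtt >> reorder_shift while reordering is considered live (the shift is assumed set here; the code substitutes 1/4 srtt when it is zero), and each recent DSACK round adds another srtt/4. Sketch:

#include <stdint.h>

static uint32_t
rack_reorder_thresh(uint32_t srtt, uint32_t pkt_delay, int reordering_live,
    uint32_t reorder_shift, uint32_t num_dsack)
{
	uint32_t thresh = srtt + pkt_delay;

	if (reordering_live)
		thresh += (srtt >> reorder_shift);
	thresh += num_dsack * (srtt >> 2);	/* DSACK-driven widening */
	return (thresh);
}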
6013 if (rack->r_ctl.rc_tlp_threshold) in rack_calc_thresh_tlp()
6014 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); in rack_calc_thresh_tlp()
6019 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_calc_thresh_tlp()
6020 len = rsm->r_end - rsm->r_start; in rack_calc_thresh_tlp()
6021 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { in rack_calc_thresh_tlp()
6023 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { in rack_calc_thresh_tlp()
6026 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6032 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { in rack_calc_thresh_tlp()
6038 * possible inter-packet delay (if any). in rack_calc_thresh_tlp()
6043 idx = rsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6044 nidx = prsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6045 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { in rack_calc_thresh_tlp()
6047 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; in rack_calc_thresh_tlp()
6052 * Possibly compensate for delayed-ack. in rack_calc_thresh_tlp()
6060 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { in rack_calc_thresh_tlp()
6065 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6073 if (thresh > tp->t_rxtcur) { in rack_calc_thresh_tlp()
6074 thresh = tp->t_rxtcur; in rack_calc_thresh_tlp()
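The TLP wait has the same skeleton regardless of which of the three threshold modes is in use: a base of srtt plus a configured fraction of srtt (doubling srtt when the divisor is unset), padding for delayed acks or inter-packet gaps depending on the mode, and a hard cap at the RTO since the RXT timer would fire anyway. A reduced sketch that folds the mode-specific padding into one parameter:

#include <stdint.h>

static uint32_t
tlp_wait(uint32_t srtt, uint32_t thresh_div, uint32_t pad_us,
    uint32_t t_rxtcur)
{
	uint32_t thresh;

	thresh = srtt + (thresh_div ? srtt / thresh_div : srtt);
	thresh += pad_us;		/* d-ack or inter-packet compensation */
	if (thresh > t_rxtcur)
		thresh = t_rxtcur;	/* never wait past the RTO */
	return (thresh);
}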
6092 * last rtt we measured. However if that in rack_grab_rtt()
6098 if (rack->rc_rack_rtt) in rack_grab_rtt()
6099 return (rack->rc_rack_rtt); in rack_grab_rtt()
6100 else if (tp->t_srtt == 0) in rack_grab_rtt()
6102 return (tp->t_srtt); in rack_grab_rtt()
6118 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_recovery_mode()
6119 if (tqhash_empty(rack->r_ctl.tqh)) { in rack_check_recovery_mode()
6122 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_check_recovery_mode()
6127 if (rsm->r_flags & RACK_ACKED) { in rack_check_recovery_mode()
6132 idx = rsm->r_rtr_cnt - 1; in rack_check_recovery_mode()
6135 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { in rack_check_recovery_mode()
6138 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { in rack_check_recovery_mode()
6141 /* Ok, if we reach here we are overdue and this guy can be sent */ in rack_check_recovery_mode()
6142 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_check_recovery_mode()
6153 t = (tp->t_srtt + (tp->t_rttvar << 2)); in rack_get_persists_timer_val()
6154 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], in rack_get_persists_timer_val()
6155 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); in rack_get_persists_timer_val()
6156 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; in rack_get_persists_timer_val()
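The persist timeout is the RTO formula run through the standard exponential backoff table and clamped to the persist range. The code also folds in a timer slop, omitted here, and the table values are illustrative:

#include <stdint.h>

static const int persist_backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64 };

static uint32_t
persist_timeout(uint32_t srtt, uint32_t rttvar, int rxtshift,
    uint32_t tmin, uint32_t tmax)
{
	uint64_t t;

	/* RTO-style base: srtt + 4 * rttvar, then exponential backoff. */
	t = (uint64_t)(srtt + (rttvar << 2)) *
	    persist_backoff[rxtshift < 7 ? rxtshift : 7];
	if (t < tmin)
		t = tmin;
	if (t > tmax)
		t = tmax;
	return ((uint32_t)t);
}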
6165 * Start the FR timer; we do this based on getting the first one in in rack_timer_start()
6166 * the rc_tmap. Note that if it's NULL we must stop the timer. In all in rack_timer_start()
6167 * events we need to stop the running timer (if it's running) before in rack_timer_start()
6176 if (rack->t_timers_stopped) { in rack_timer_start()
6180 if (rack->rc_in_persist) { in rack_timer_start()
6181 /* We can't start any timer in persists */ in rack_timer_start()
6184 rack->rc_on_min_to = 0; in rack_timer_start()
6185 if ((tp->t_state < TCPS_ESTABLISHED) || in rack_timer_start()
6186 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_timer_start()
6189 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6194 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6197 * Should we discount the RTX timer any? in rack_timer_start()
6200 * If a timer (Rack/TLP or RXT) has gone off more in rack_timer_start()
6201 * recently that's the discount we want to use (now - timer time). in rack_timer_start()
6203 * we want to use that (now - oldest-packet-last_transmit_time). in rack_timer_start()
6206 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6207 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) in rack_timer_start()
6208 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6210 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6212 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6214 if (SEQ_LT(tp->snd_una, tp->snd_max) || in rack_timer_start()
6215 sbavail(&tptosocket(tp)->so_snd)) { in rack_timer_start()
6216 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; in rack_timer_start()
6217 to = tp->t_rxtcur; in rack_timer_start()
6219 to -= time_since_sent; in rack_timer_start()
6221 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6225 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timer_start()
6229 * We have to put a ceiling on the rxt timer in rack_timer_start()
6230 * of the keep-init timeout. in rack_timer_start()
6235 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { in rack_timer_start()
6236 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); in rack_timer_start()
6238 max_time -= red; in rack_timer_start()
6250 if (rsm->r_flags & RACK_ACKED) { in rack_timer_start()
6258 if ((rsm->r_flags & RACK_SACK_PASSED) || in rack_timer_start()
6259 (rsm->r_flags & RACK_RWND_COLLAPSED) || in rack_timer_start()
6260 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in rack_timer_start()
6261 if ((tp->t_flags & TF_SENTFIN) && in rack_timer_start()
6262 ((tp->snd_max - tp->snd_una) == 1) && in rack_timer_start()
6263 (rsm->r_flags & RACK_HAS_FIN)) { in rack_timer_start()
6265 * We don't start a rack timer if all we have is a in rack_timer_start()
6270 if ((rack->use_rack_rr == 0) && in rack_timer_start()
6271 (IN_FASTRECOVERY(tp->t_flags)) && in rack_timer_start()
6272 (rack->rack_no_prr == 0) && in rack_timer_start()
6273 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { in rack_timer_start()
6280 * get to use the rack-cheat. in rack_timer_start()
6286 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6287 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; in rack_timer_start()
6289 to = exp - cts; in rack_timer_start()
6290 if (to < rack->r_ctl.rc_min_to) { in rack_timer_start()
6291 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6292 if (rack->r_rr_config == 3) in rack_timer_start()
6293 rack->rc_on_min_to = 1; in rack_timer_start()
6296 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6297 if (rack->r_rr_config == 3) in rack_timer_start()
6298 rack->rc_on_min_to = 1; in rack_timer_start()
6303 if ((rack->rc_tlp_in_progress != 0) && in rack_timer_start()
6304 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { in rack_timer_start()
6311 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_timer_start()
6316 if (rsm->r_flags & RACK_HAS_FIN) { in rack_timer_start()
6321 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6323 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) in rack_timer_start()
6324 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6326 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6328 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6330 if (tp->t_srtt) { in rack_timer_start()
6331 if ((rack->rc_srtt_measure_made == 0) && in rack_timer_start()
6332 (tp->t_srtt == 1)) { in rack_timer_start()
6339 srtt_cur = tp->t_srtt; in rack_timer_start()
6346 * rack RTT has spiked we want to use in rack_timer_start()
6347 * the last RTT, not the smoothed one. in rack_timer_start()
6350 tp->t_srtt && in rack_timer_start()
6356 to = thresh - time_since_sent; in rack_timer_start()
6358 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6363 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ in rack_timer_start()
6364 (uint32_t)rsm->r_tim_lastsent[idx], in rack_timer_start()
6380 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; in rack_timer_start()
6382 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; in rack_timer_start()
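The RXT arming path above discounts the timer by credit already earned: whichever is more recent of the last timer event and the oldest outstanding send marks when the clock effectively started, and the timer is set for the RTO minus that elapsed time, floored at the minimum timeout. In isolation (timestamp wrap handling simplified):

#include <stdint.h>

static uint32_t
rxt_time_left(uint32_t now, uint32_t last_timer_time,
    uint32_t oldest_send_time, uint32_t rxtcur, uint32_t min_to)
{
	uint32_t base, since;

	base = (last_timer_time > oldest_send_time) ?
	    last_timer_time : oldest_send_time;
	since = now - base;		/* time already waited */
	if (rxtcur > since)
		return (rxtcur - since);
	return (min_to);		/* already overdue: fire soon */
}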
6392 if (rack->rc_in_persist == 0) { in rack_enter_persist()
6393 if (tp->t_flags & TF_GPUTINPROG) { in rack_enter_persist()
6398 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, in rack_enter_persist()
6402 if (rack->r_ctl.rc_scw) { in rack_enter_persist()
6403 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_enter_persist()
6404 rack->rack_scwnd_is_idle = 1; in rack_enter_persist()
6407 rack->r_ctl.rc_went_idle_time = cts; in rack_enter_persist()
6408 if (rack->r_ctl.rc_went_idle_time == 0) in rack_enter_persist()
6409 rack->r_ctl.rc_went_idle_time = 1; in rack_enter_persist()
6410 if (rack->lt_bw_up) { in rack_enter_persist()
6414 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); in rack_enter_persist()
6415 rack->r_ctl.lt_seq = snd_una; in rack_enter_persist()
6416 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_enter_persist()
6417 if (tmark >= rack->r_ctl.lt_timemark) { in rack_enter_persist()
6418 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_enter_persist()
6420 rack->r_ctl.lt_timemark = tmark; in rack_enter_persist()
6421 rack->lt_bw_up = 0; in rack_enter_persist()
6422 rack->r_persist_lt_bw_off = 1; in rack_enter_persist()
6425 rack->r_ctl.persist_lost_ends = 0; in rack_enter_persist()
6426 rack->probe_not_answered = 0; in rack_enter_persist()
6427 rack->forced_ack = 0; in rack_enter_persist()
6428 tp->t_rxtshift = 0; in rack_enter_persist()
6429 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_enter_persist()
6430 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_enter_persist()
6431 rack->rc_in_persist = 1; in rack_enter_persist()
6438 if (tcp_in_hpts(rack->rc_tp)) { in rack_exit_persist()
6439 tcp_hpts_remove(rack->rc_tp); in rack_exit_persist()
6440 rack->r_ctl.rc_hpts_flags = 0; in rack_exit_persist()
6443 if (rack->r_ctl.rc_scw) { in rack_exit_persist()
6444 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_exit_persist()
6445 rack->rack_scwnd_is_idle = 0; in rack_exit_persist()
6448 if (rack->rc_gp_dyn_mul && in rack_exit_persist()
6449 (rack->use_fixed_rate == 0) && in rack_exit_persist()
6450 (rack->rc_always_pace)) { in rack_exit_persist()
6452 * Do we count this as if a probe-rtt just in rack_exit_persist()
6457 time_idle = cts - rack->r_ctl.rc_went_idle_time; in rack_exit_persist()
6461 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * in rack_exit_persist()
6467 /* Yes, we count it as a probe-rtt. */ in rack_exit_persist()
6471 if (rack->in_probe_rtt == 0) { in rack_exit_persist()
6472 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_persist()
6473 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6474 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6475 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
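/*
 * Schematic form of the idle-counts-as-probe-rtt test above (the exact
 * scaling lines are elided in this listing, so mul/div here are
 * hypothetical placeholders): an idle gap of roughly a goodput-srtt plus a
 * configured fraction is treated as if a probe-rtt had just completed.
 */
#include <stdint.h>

static int
idle_counts_as_probertt(uint64_t time_idle_us, uint64_t gp_srtt_us,
    uint64_t mul, uint64_t div)
{
	uint64_t extra = (div != 0) ? (gp_srtt_us * mul) / div : 0;

	return (time_idle_us >= (gp_srtt_us + extra));
}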
6481 if (rack->r_persist_lt_bw_off) { in rack_exit_persist()
6483 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); in rack_exit_persist()
6484 rack->lt_bw_up = 1; in rack_exit_persist()
6485 rack->r_persist_lt_bw_off = 0; in rack_exit_persist()
6487 rack->rc_in_persist = 0; in rack_exit_persist()
6488 rack->r_ctl.rc_went_idle_time = 0; in rack_exit_persist()
6489 tp->t_rxtshift = 0; in rack_exit_persist()
6490 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_exit_persist()
6491 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_exit_persist()
6492 rack->r_ctl.rc_agg_delayed = 0; in rack_exit_persist()
6493 rack->r_early = 0; in rack_exit_persist()
6494 rack->r_late = 0; in rack_exit_persist()
6495 rack->r_ctl.rc_agg_early = 0; in rack_exit_persist()
6502 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_hpts_diag()
6506 log.u_bbr.flex1 = diag->p_nxt_slot; in rack_log_hpts_diag()
6507 log.u_bbr.flex2 = diag->p_cur_slot; in rack_log_hpts_diag()
6508 log.u_bbr.flex3 = diag->slot_req; in rack_log_hpts_diag()
6509 log.u_bbr.flex4 = diag->inp_hptsslot; in rack_log_hpts_diag()
6510 log.u_bbr.flex5 = diag->slot_remaining; in rack_log_hpts_diag()
6511 log.u_bbr.flex6 = diag->need_new_to; in rack_log_hpts_diag()
6512 log.u_bbr.flex7 = diag->p_hpts_active; in rack_log_hpts_diag()
6513 log.u_bbr.flex8 = diag->p_on_min_sleep; in rack_log_hpts_diag()
6515 log.u_bbr.epoch = diag->have_slept; in rack_log_hpts_diag()
6516 log.u_bbr.lt_epoch = diag->yet_to_sleep; in rack_log_hpts_diag()
6517 log.u_bbr.pkts_out = diag->co_ret; in rack_log_hpts_diag()
6518 log.u_bbr.applimited = diag->hpts_sleep_time; in rack_log_hpts_diag()
6519 log.u_bbr.delivered = diag->p_prev_slot; in rack_log_hpts_diag()
6520 log.u_bbr.inflight = diag->p_runningslot; in rack_log_hpts_diag()
6521 log.u_bbr.bw_inuse = diag->wheel_slot; in rack_log_hpts_diag()
6522 log.u_bbr.rttProp = diag->wheel_cts; in rack_log_hpts_diag()
6524 log.u_bbr.delRate = diag->maxslots; in rack_log_hpts_diag()
6525 log.u_bbr.cur_del_rate = diag->p_curtick; in rack_log_hpts_diag()
6527 log.u_bbr.cur_del_rate |= diag->p_lasttick; in rack_log_hpts_diag()
6528 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hpts_diag()
6529 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hpts_diag()
6530 &rack->rc_inp->inp_socket->so_snd, in rack_log_hpts_diag()
6540 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_wakeup()
6545 log.u_bbr.flex1 = sb->sb_flags; in rack_log_wakeup()
6547 log.u_bbr.flex3 = sb->sb_state; in rack_log_wakeup()
6550 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_wakeup()
6551 &rack->rc_inp->inp_socket->so_rcv, in rack_log_wakeup()
6552 &rack->rc_inp->inp_socket->so_snd, in rack_log_wakeup()
6572 if ((tp->t_state == TCPS_CLOSED) || in rack_start_hpts_timer()
6573 (tp->t_state == TCPS_LISTEN)) { in rack_start_hpts_timer()
6580 stopped = rack->rc_tmr_stopped; in rack_start_hpts_timer()
6581 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_start_hpts_timer()
6582 left = rack->r_ctl.rc_timer_exp - cts; in rack_start_hpts_timer()
6584 rack->r_ctl.rc_timer_exp = 0; in rack_start_hpts_timer()
6585 rack->r_ctl.rc_hpts_flags = 0; in rack_start_hpts_timer()
6589 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { in rack_start_hpts_timer()
6596 * penalize the next timer for being woken in rack_start_hpts_timer()
6597 * by an ack, aka the rc_agg_early (non-paced mode). in rack_start_hpts_timer()
6599 slot += rack->r_ctl.rc_agg_early; in rack_start_hpts_timer()
6600 rack->r_early = 0; in rack_start_hpts_timer()
6601 rack->r_ctl.rc_agg_early = 0; in rack_start_hpts_timer()
6603 if ((rack->r_late) && in rack_start_hpts_timer()
6604 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { in rack_start_hpts_timer()
6611 if (rack->r_ctl.rc_agg_delayed >= slot) { in rack_start_hpts_timer()
6620 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); in rack_start_hpts_timer()
6624 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); in rack_start_hpts_timer()
6628 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6629 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6632 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; in rack_start_hpts_timer()
6635 if (rack->r_ctl.rc_agg_delayed == 0) in rack_start_hpts_timer()
6636 rack->r_late = 0; in rack_start_hpts_timer()
6638 } else if (rack->r_late) { in rack_start_hpts_timer()
6642 max_red = (slot * rack->r_ctl.max_reduction) / 100; in rack_start_hpts_timer()
6643 if (max_red >= rack->r_ctl.rc_agg_delayed) { in rack_start_hpts_timer()
6644 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6645 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6647 slot -= max_red; in rack_start_hpts_timer()
6648 rack->r_ctl.rc_agg_delayed -= max_red; in rack_start_hpts_timer()
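/*
 * Standalone form of the capped catch-up above (assumed names): when we
 * are running late the pacing slot may only shrink by max_reduction
 * percent per round, carrying any remaining accumulated delay forward.
 */
#include <stdint.h>

static void
reduce_slot_capped(uint32_t *slot, uint32_t *agg_delayed, uint32_t max_reduction_pct)
{
	uint32_t max_red = (*slot * max_reduction_pct) / 100;

	if (max_red >= *agg_delayed) {
		*slot -= *agg_delayed;	/* absorb all accumulated lateness */
		*agg_delayed = 0;
	} else {
		*slot -= max_red;	/* claw back only the capped amount */
		*agg_delayed -= max_red;
	}
}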
6651 if ((rack->r_use_hpts_min == 1) && in rack_start_hpts_timer()
6653 (rack->dgp_on == 1)) { in rack_start_hpts_timer()
6655 * We are enforcing a min pacing timer in rack_start_hpts_timer()
6666 if (tp->t_flags & TF_DELACK) { in rack_start_hpts_timer()
6668 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; in rack_start_hpts_timer()
6674 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_start_hpts_timer()
6677 * wheel, we resort to a keep-alive timer if it's configured. in rack_start_hpts_timer()
6681 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_start_hpts_timer()
6682 (tp->t_state <= TCPS_CLOSING)) { in rack_start_hpts_timer()
6684 * Ok we have no timer (persists, rack, tlp, rxt or in rack_start_hpts_timer()
6685 * del-ack), we don't have segments being paced. So in rack_start_hpts_timer()
6686 * all that is left is the keepalive timer. in rack_start_hpts_timer()
6688 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_start_hpts_timer()
6689 /* Get the established keep-alive time */ in rack_start_hpts_timer()
6693 * Get the initial setup keep-alive time, in rack_start_hpts_timer()
6695 * happen, since rack will be running a rxt timer in rack_start_hpts_timer()
6701 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; in rack_start_hpts_timer()
6702 if (rack->in_probe_rtt) { in rack_start_hpts_timer()
6706 * exit probe-rtt and initiate a keep-alive ack. in rack_start_hpts_timer()
6707 * This will get us out of probe-rtt and update in rack_start_hpts_timer()
6708 * our min-rtt. in rack_start_hpts_timer()
6715 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { in rack_start_hpts_timer()
6721 * keep-alive, delayed_ack we keep track of what was left in rack_start_hpts_timer()
6722 * and restart the timer with a smaller value. in rack_start_hpts_timer()
6729 * Hack alert: for now we can't time out over 2,147,483 in rack_start_hpts_timer()
6735 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; in rack_start_hpts_timer()
6738 if ((rack->gp_ready == 0) && in rack_start_hpts_timer()
6739 (rack->use_fixed_rate == 0) && in rack_start_hpts_timer()
6741 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { in rack_start_hpts_timer()
6761 * TF2_MBUF_QUEUE_READY - This flag says that I am busy in rack_start_hpts_timer()
6766 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction in rack_start_hpts_timer()
6774 * our pacing timer expires. If, however, we have a rack in rack_start_hpts_timer()
6775 * timer running, then we don't even want a sack to wake in rack_start_hpts_timer()
6776 * us since the rack timer has to expire before we can send. in rack_start_hpts_timer()
6781 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); in rack_start_hpts_timer()
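/*
 * Summary sketch of how the two flags are combined below (the X_* bits are
 * hypothetical stand-ins for TF2_MBUF_QUEUE_READY/TF2_DONT_SACK_QUEUE, and
 * this is a simplification of the config-dependent logic that follows):
 */
#include <stdint.h>

#define	X_MBUF_QUEUE_READY	0x1	/* queue inbound mbufs at LRO */
#define	X_DONT_SACK_QUEUE	0x2	/* do not wake on SACK arrival */

static uint32_t
wakeup_policy(int pacing_timer_armed, int rack_timer_or_recovery)
{
	uint32_t f = 0;

	if (pacing_timer_armed)
		f |= X_MBUF_QUEUE_READY;
	if (pacing_timer_armed && rack_timer_or_recovery)
		f |= X_DONT_SACK_QUEUE;
	return (f);
}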
6783 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6784 rack->r_ctl.rc_last_output_to = us_cts + slot; in rack_start_hpts_timer()
6786 * A pacing timer (slot) is being set, in in rack_start_hpts_timer()
6788 * the timer). So let's tell LRO that it should not in rack_start_hpts_timer()
6793 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; in rack_start_hpts_timer()
6795 * But wait, if we have a Rack timer running in rack_start_hpts_timer()
6799 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || in rack_start_hpts_timer()
6800 (IN_RECOVERY(tp->t_flags))) { in rack_start_hpts_timer()
6801 if (rack->r_rr_config != 3) in rack_start_hpts_timer()
6802 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6803 else if (rack->rc_pace_dnd) { in rack_start_hpts_timer()
6812 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6815 if (rack->rc_ack_can_sendout_data) { in rack_start_hpts_timer()
6818 * where the pacing timer can be disturbed in rack_start_hpts_timer()
6819 * back out the changes (used for non-paced in rack_start_hpts_timer()
6822 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | in rack_start_hpts_timer()
6825 if ((rack->use_rack_rr) && in rack_start_hpts_timer()
6826 (rack->r_rr_config < 2) && in rack_start_hpts_timer()
6830 * t-o if the t-o does not cause a send. in rack_start_hpts_timer()
6845 * us up here. Since we are not pacing (no pacing timer), output in rack_start_hpts_timer()
6846 * can happen so we should let it. If it's a Rack timer, then any inbound in rack_start_hpts_timer()
6851 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6857 /* No timer starting */ in rack_start_hpts_timer()
6859 if (SEQ_GT(tp->snd_max, tp->snd_una)) { in rack_start_hpts_timer()
6860 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", in rack_start_hpts_timer()
6865 rack->rc_tmr_stopped = 0; in rack_start_hpts_timer()
6879 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mark_lost()
6880 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { in rack_mark_lost()
6881 /* Got up to all that were marked sack-passed */ in rack_mark_lost()
6884 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_mark_lost()
6885 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_mark_lost()
6888 nrsm->r_flags |= RACK_WAS_LOST; in rack_mark_lost()
6889 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_mark_lost()
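/*
 * The marking loop above applies the RACK loss rule; a wrap-safe predicate
 * for it might look like this sketch (assumed names, usec timestamps):
 * a sack-passed segment is lost once its last-send time plus the reorder
 * threshold is in the past.
 */
#include <stdint.h>

static int
rack_seg_is_lost(uint32_t last_sent, uint32_t thresh, uint32_t now)
{
	uint32_t exp = last_sent + thresh;	/* when it becomes "lost" */

	return ((int32_t)(now - exp) >= 0);	/* wrap-safe now >= exp */
}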
6899 * RACK Timer, here we simply do logging and housekeeping.
6909 * This timer simply provides an internal trigger to send out data. in rack_timeout_rack()
6911 * retransmissions, if so we will enter fast-recovery. The output in rack_timeout_rack()
6918 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_rack()
6920 rack->rc_on_min_to = 0; in rack_timeout_rack()
6926 rack->r_ctl.rc_resend = rsm; in rack_timeout_rack()
6927 rack->r_timer_override = 1; in rack_timeout_rack()
6928 if (rack->use_rack_rr) { in rack_timeout_rack()
6931 * we are allowing the rack timer to in rack_timeout_rack()
6932 * over-ride pacing i.e. rrr takes precedence in rack_timeout_rack()
6937 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timeout_rack()
6940 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; in rack_timeout_rack()
6942 /* restart a timer and return 1 */ in rack_timeout_rack()
6956 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { in rack_adjust_orig_mlen()
6963 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), in rack_adjust_orig_mlen()
6965 rsm->m, in rack_adjust_orig_mlen()
6967 (intmax_t)M_TRAILINGROOM(rsm->m), in rack_adjust_orig_mlen()
6968 rsm->orig_t_space, in rack_adjust_orig_mlen()
6969 rsm->orig_m_len, in rack_adjust_orig_mlen()
6970 rsm->m->m_len)); in rack_adjust_orig_mlen()
6971 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); in rack_adjust_orig_mlen()
6972 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_orig_mlen()
6974 if (rsm->m->m_len < rsm->orig_m_len) { in rack_adjust_orig_mlen()
6979 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), in rack_adjust_orig_mlen()
6981 rsm->m, rsm->m->m_len, in rack_adjust_orig_mlen()
6982 rsm, rsm->orig_m_len, in rack_adjust_orig_mlen()
6983 rsm->soff)); in rack_adjust_orig_mlen()
6984 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) in rack_adjust_orig_mlen()
6985 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); in rack_adjust_orig_mlen()
6987 rsm->soff = 0; in rack_adjust_orig_mlen()
6988 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_orig_mlen()
6990 } else if (rsm->m->m_len > rsm->orig_m_len) { in rack_adjust_orig_mlen()
6992 rsm, rsm->m); in rack_adjust_orig_mlen()
7003 if (src_rsm->m && in rack_setup_offset_for_rsm()
7004 ((src_rsm->orig_m_len != src_rsm->m->m_len) || in rack_setup_offset_for_rsm()
7005 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { in rack_setup_offset_for_rsm()
7009 m = src_rsm->m; in rack_setup_offset_for_rsm()
7010 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); in rack_setup_offset_for_rsm()
7011 while (soff >= m->m_len) { in rack_setup_offset_for_rsm()
7013 soff -= m->m_len; in rack_setup_offset_for_rsm()
7014 m = m->m_next; in rack_setup_offset_for_rsm()
7020 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7021 (src_rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7022 &src_rsm->soff); in rack_setup_offset_for_rsm()
7023 src_rsm->orig_m_len = src_rsm->m->m_len; in rack_setup_offset_for_rsm()
7024 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); in rack_setup_offset_for_rsm()
7025 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7026 (rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7027 &rsm->soff); in rack_setup_offset_for_rsm()
7028 rsm->orig_m_len = rsm->m->m_len; in rack_setup_offset_for_rsm()
7029 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
7033 rsm->m = m; in rack_setup_offset_for_rsm()
7034 rsm->soff = soff; in rack_setup_offset_for_rsm()
7035 rsm->orig_m_len = m->m_len; in rack_setup_offset_for_rsm()
7036 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
7045 nrsm->r_start = start; in rack_clone_rsm()
7046 nrsm->r_end = rsm->r_end; in rack_clone_rsm()
7047 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; in rack_clone_rsm()
7048 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; in rack_clone_rsm()
7049 nrsm->r_flags = rsm->r_flags; in rack_clone_rsm()
7050 nrsm->r_dupack = rsm->r_dupack; in rack_clone_rsm()
7051 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; in rack_clone_rsm()
7052 nrsm->r_rtr_bytes = 0; in rack_clone_rsm()
7053 nrsm->r_fas = rsm->r_fas; in rack_clone_rsm()
7054 nrsm->r_bas = rsm->r_bas; in rack_clone_rsm()
7055 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); in rack_clone_rsm()
7056 nrsm->r_just_ret = rsm->r_just_ret; in rack_clone_rsm()
7057 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { in rack_clone_rsm()
7058 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; in rack_clone_rsm()
7061 if (nrsm->r_flags & RACK_HAS_SYN) in rack_clone_rsm()
7062 nrsm->r_flags &= ~RACK_HAS_SYN; in rack_clone_rsm()
7064 if (rsm->r_flags & RACK_HAS_FIN) in rack_clone_rsm()
7065 rsm->r_flags &= ~RACK_HAS_FIN; in rack_clone_rsm()
7067 if (rsm->r_flags & RACK_HAD_PUSH) in rack_clone_rsm()
7068 rsm->r_flags &= ~RACK_HAD_PUSH; in rack_clone_rsm()
7070 nrsm->r_hw_tls = rsm->r_hw_tls; in rack_clone_rsm()
7078 KASSERT(((rsm->m != NULL) || in rack_clone_rsm()
7079 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), in rack_clone_rsm()
7080 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); in rack_clone_rsm()
7081 if (rsm->m) in rack_clone_rsm()
7100 rack_log_map_chg(rack->rc_tp, rack, NULL, in rack_merge_rsm()
7101 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); in rack_merge_rsm()
7102 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); in rack_merge_rsm()
7103 if (l_rsm->r_dupack < r_rsm->r_dupack) in rack_merge_rsm()
7104 l_rsm->r_dupack = r_rsm->r_dupack; in rack_merge_rsm()
7105 if (r_rsm->r_rtr_bytes) in rack_merge_rsm()
7106 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; in rack_merge_rsm()
7107 if (r_rsm->r_in_tmap) { in rack_merge_rsm()
7109 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); in rack_merge_rsm()
7110 r_rsm->r_in_tmap = 0; in rack_merge_rsm()
7114 if (r_rsm->r_flags & RACK_HAS_FIN) in rack_merge_rsm()
7115 l_rsm->r_flags |= RACK_HAS_FIN; in rack_merge_rsm()
7116 if (r_rsm->r_flags & RACK_TLP) in rack_merge_rsm()
7117 l_rsm->r_flags |= RACK_TLP; in rack_merge_rsm()
7118 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) in rack_merge_rsm()
7119 l_rsm->r_flags |= RACK_RWND_COLLAPSED; in rack_merge_rsm()
7120 if ((r_rsm->r_flags & RACK_APP_LIMITED) && in rack_merge_rsm()
7121 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_merge_rsm()
7123 * If both are app-limited then let the in rack_merge_rsm()
7127 l_rsm->r_flags |= RACK_APP_LIMITED; in rack_merge_rsm()
7128 r_rsm->r_flags &= ~RACK_APP_LIMITED; in rack_merge_rsm()
7129 if (r_rsm == rack->r_ctl.rc_first_appl) in rack_merge_rsm()
7130 rack->r_ctl.rc_first_appl = l_rsm; in rack_merge_rsm()
7132 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); in rack_merge_rsm()
7147 if (l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < in rack_merge_rsm()
7148 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { in rack_merge_rsm()
7149 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; in rack_merge_rsm()
7156 if (l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) in rack_merge_rsm()
7157 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; in rack_merge_rsm()
7159 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { in rack_merge_rsm()
7161 r_rsm->r_limit_type = l_rsm->r_limit_type; in rack_merge_rsm()
7162 l_rsm->r_limit_type = 0; in rack_merge_rsm()
7165 l_rsm->r_flags |= RACK_MERGED; in rack_merge_rsm()
7170 * TLP Timer, here we simply setup what segment we want to
7190 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_timeout_tlp()
7196 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_tlp()
7199 * A TLP timer has expired. We have been idle for 2 rtts. So we now in rack_timeout_tlp()
7203 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_tlp()
7204 rack->r_might_revert = 0; in rack_timeout_tlp()
7205 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_tlp()
7207 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_tlp()
7209 avail = sbavail(&so->so_snd); in rack_timeout_tlp()
7210 out = tp->snd_max - tp->snd_una; in rack_timeout_tlp()
7211 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { in rack_timeout_tlp()
7216 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { in rack_timeout_tlp()
7217 rack->r_ctl.dsack_persist--; in rack_timeout_tlp()
7218 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_tlp()
7219 rack->r_ctl.num_dsack = 0; in rack_timeout_tlp()
7223 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_tlp()
7224 (rack->r_ctl.rc_tlp_cnt_out == 1)) { in rack_timeout_tlp()
7233 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_tlp()
7234 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_tlp()
7235 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_tlp()
7236 tp->gput_seq, in rack_timeout_tlp()
7243 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) in rack_timeout_tlp()
7248 amm = avail - out; in rack_timeout_tlp()
7251 if ((amm + out) > tp->snd_wnd) { in rack_timeout_tlp()
7259 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_timeout_tlp()
7261 if (rack->rack_no_prr == 0) { in rack_timeout_tlp()
7262 if (out + amm <= tp->snd_wnd) { in rack_timeout_tlp()
7263 rack->r_ctl.rc_prr_sndcnt = amm; in rack_timeout_tlp()
7264 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7270 /* Set the send-new override */ in rack_timeout_tlp()
7271 if (out + amm <= tp->snd_wnd) in rack_timeout_tlp()
7272 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
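/*
 * Condensed sketch of the new-data TLP sizing above (the clip to one
 * segment happens in lines elided from this listing; names assumed): probe
 * with at most one MSS of new data, and only if the receive window allows.
 */
#include <stdint.h>

static uint32_t
tlp_new_data_len(uint32_t avail, uint32_t out, uint32_t snd_wnd, uint32_t maxseg)
{
	uint32_t amm;

	if (avail <= out)
		return (0);		/* no new data queued */
	amm = avail - out;
	if (amm > maxseg)
		amm = maxseg;		/* at most one segment probes */
	if (out + amm > snd_wnd)
		return (0);		/* would overrun the peer's window */
	return (amm);
}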
7276 rack->r_ctl.rc_tlpsend = NULL; in rack_timeout_tlp()
7282 * Ok we need to arrange the last un-acked segment to be re-sent, or in rack_timeout_tlp()
7283 * optionally the first un-acked segment. in rack_timeout_tlp()
7287 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timeout_tlp()
7289 rsm = tqhash_max(rack->r_ctl.tqh); in rack_timeout_tlp()
7290 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { in rack_timeout_tlp()
7305 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) in rack_timeout_tlp()
7306 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); in rack_timeout_tlp()
7308 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_tlp()
7315 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { in rack_timeout_tlp()
7325 * off to the RXT timer. in rack_timeout_tlp()
7330 (rsm->r_end - ctf_fixed_maxseg(tp))); in rack_timeout_tlp()
7333 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_timeout_tlp()
7335 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_timeout_tlp()
7340 if (rsm->r_in_tmap) { in rack_timeout_tlp()
7341 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_timeout_tlp()
7342 nrsm->r_in_tmap = 1; in rack_timeout_tlp()
7346 rack->r_ctl.rc_tlpsend = rsm; in rack_timeout_tlp()
7350 rack->r_timer_override = 1; in rack_timeout_tlp()
7351 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7354 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7359 * Delayed ack Timer, here we simply need to set up the
7371 tp->t_flags &= ~TF_DELACK; in rack_timeout_delack()
7372 tp->t_flags |= TF_ACKNOW; in rack_timeout_delack()
7374 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_timeout_delack()
7383 t_template = tcpip_maketemplate(rack->rc_inp); in rack_send_ack_challange()
7385 if (rack->forced_ack == 0) { in rack_send_ack_challange()
7386 rack->forced_ack = 1; in rack_send_ack_challange()
7387 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); in rack_send_ack_challange()
7389 rack->probe_not_answered = 1; in rack_send_ack_challange()
7391 tcp_respond(rack->rc_tp, t_template->tt_ipgen, in rack_send_ack_challange()
7392 &t_template->tt_t, (struct mbuf *)NULL, in rack_send_ack_challange()
7393 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); in rack_send_ack_challange()
7395 /* This does send an ack so kill any D-ack timer */ in rack_send_ack_challange()
7396 if (rack->rc_tp->t_flags & TF_DELACK) in rack_send_ack_challange()
7397 rack->rc_tp->t_flags &= ~TF_DELACK; in rack_send_ack_challange()
7405 * Persists timer, here we simply send the
7417 if (rack->rc_in_persist == 0) in rack_timeout_persist()
7422 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7423 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_persist()
7426 * Persistence timer into zero window. Force a byte to be output, if in rack_timeout_persist()
7436 if (tp->t_rxtshift >= V_tcp_retries && in rack_timeout_persist()
7437 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || in rack_timeout_persist()
7438 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { in rack_timeout_persist()
7441 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7442 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7445 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && in rack_timeout_persist()
7446 tp->snd_una == tp->snd_max) in rack_timeout_persist()
7448 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; in rack_timeout_persist()
7453 if (tp->t_state > TCPS_CLOSE_WAIT && in rack_timeout_persist()
7454 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { in rack_timeout_persist()
7457 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7458 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7463 if (rack->probe_not_answered) { in rack_timeout_persist()
7465 rack->r_ctl.persist_lost_ends++; in rack_timeout_persist()
7470 if (tp->t_rxtshift < V_tcp_retries) in rack_timeout_persist()
7471 tp->t_rxtshift++; in rack_timeout_persist()
7490 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; in rack_timeout_keepalive()
7493 * Keep-alive timer went off; send something or drop connection if in rack_timeout_keepalive()
7497 if (tp->t_state < TCPS_ESTABLISHED) in rack_timeout_keepalive()
7499 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timeout_keepalive()
7500 tp->t_state <= TCPS_CLOSING) { in rack_timeout_keepalive()
7501 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) in rack_timeout_keepalive()
7508 * number tp->snd_una-1 causes the transmitted zero-length in rack_timeout_keepalive()
7521 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_keepalive()
7532 * The retransmit timer went off, all sack'd blocks must be in rack_remxt_tmr()
7533 * un-acked. in rack_remxt_tmr()
7538 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_remxt_tmr()
7541 rack->r_timer_override = 1; in rack_remxt_tmr()
7542 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_remxt_tmr()
7543 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; in rack_remxt_tmr()
7544 rack->r_late = 0; in rack_remxt_tmr()
7545 rack->r_early = 0; in rack_remxt_tmr()
7546 rack->r_ctl.rc_agg_delayed = 0; in rack_remxt_tmr()
7547 rack->r_ctl.rc_agg_early = 0; in rack_remxt_tmr()
7548 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_remxt_tmr()
7550 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { in rack_remxt_tmr()
7553 * more than rack_rxt_scoreboard_clear_thresh time-outs. in rack_remxt_tmr()
7555 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7556 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7557 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7563 * mark SACK-PASS on anything not acked here. in rack_remxt_tmr()
7567 * so for now we will just let the normal rxt timer in rack_remxt_tmr()
7568 * and tlp timer take care of it. in rack_remxt_tmr()
7572 * sacks that come floating in will "re-ack" the data. in rack_remxt_tmr()
7577 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7579 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_remxt_tmr()
7580 rsm->r_dupack = 0; in rack_remxt_tmr()
7583 /* We must re-add it to the tlist */ in rack_remxt_tmr()
7585 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_remxt_tmr()
7587 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); in rack_remxt_tmr()
7589 rsm->r_in_tmap = 1; in rack_remxt_tmr()
7591 if (rsm->r_flags & RACK_ACKED) in rack_remxt_tmr()
7592 rsm->r_flags |= RACK_WAS_ACKED; in rack_remxt_tmr()
7593 …rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_W… in rack_remxt_tmr()
7594 rsm->r_flags |= RACK_MUST_RXT; in rack_remxt_tmr()
7597 rack->r_ctl.rc_considered_lost = 0; in rack_remxt_tmr()
7598 /* Clear the count (we just un-acked them) */ in rack_remxt_tmr()
7599 rack->r_ctl.rc_sacked = 0; in rack_remxt_tmr()
7600 rack->r_ctl.rc_sacklast = NULL; in rack_remxt_tmr()
7602 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7603 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7604 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7605 rack->r_ctl.rc_prr_sndcnt = 0; in rack_remxt_tmr()
7607 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7608 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7609 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7610 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && in rack_remxt_tmr()
7611 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_remxt_tmr()
7613 * For non-sack customers new data in rack_remxt_tmr()
7617 rack->r_must_retran = 1; in rack_remxt_tmr()
7618 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, in rack_remxt_tmr()
7619 rack->r_ctl.rc_sacked); in rack_remxt_tmr()
7627 tp->t_rxtcur = RACK_REXMTVAL(tp); in rack_convert_rtts()
7628 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_convert_rtts()
7629 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); in rack_convert_rtts()
7631 if (tp->t_rxtcur > rack_rto_max) { in rack_convert_rtts()
7632 tp->t_rxtcur = rack_rto_max; in rack_convert_rtts()
7642 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cc_conn_init()
7643 srtt = tp->t_srtt; in rack_cc_conn_init()
7649 if ((srtt == 0) && (tp->t_srtt != 0)) in rack_cc_conn_init()
7657 if (tp->snd_ssthresh < tp->snd_wnd) { in rack_cc_conn_init()
7658 tp->snd_ssthresh = tp->snd_wnd; in rack_cc_conn_init()
7664 if (rc_init_window(rack) < tp->snd_cwnd) in rack_cc_conn_init()
7665 tp->snd_cwnd = rc_init_window(rack); in rack_cc_conn_init()
7669 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7680 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_rxt()
7681 (tp->t_rxtshift)) { in rack_timeout_rxt()
7688 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_rxt()
7689 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_rxt()
7690 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_rxt()
7691 tp->gput_seq, in rack_timeout_rxt()
7697 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_rxt()
7699 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; in rack_timeout_rxt()
7700 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_rxt()
7701 rack->rc_ack_required = 1; in rack_timeout_rxt()
7702 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_rxt()
7703 if (IN_RECOVERY(tp->t_flags) && in rack_timeout_rxt()
7704 (rack->rto_from_rec == 0)) { in rack_timeout_rxt()
7711 rack->rto_from_rec = 1; in rack_timeout_rxt()
7712 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; in rack_timeout_rxt()
7714 if (IN_FASTRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7715 tp->t_flags |= TF_WASFRECOVERY; in rack_timeout_rxt()
7717 tp->t_flags &= ~TF_WASFRECOVERY; in rack_timeout_rxt()
7718 if (IN_CONGRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7719 tp->t_flags |= TF_WASCRECOVERY; in rack_timeout_rxt()
7721 tp->t_flags &= ~TF_WASCRECOVERY; in rack_timeout_rxt()
7722 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_timeout_rxt()
7723 (tp->snd_una == tp->snd_max)) { in rack_timeout_rxt()
7727 if (rack->r_ctl.dsack_persist) { in rack_timeout_rxt()
7728 rack->r_ctl.dsack_persist--; in rack_timeout_rxt()
7729 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_rxt()
7730 rack->r_ctl.num_dsack = 0; in rack_timeout_rxt()
7735 * Rack can only run one timer at a time, so we cannot in rack_timeout_rxt()
7737 * timer for the SYN. So if we are in a front state and in rack_timeout_rxt()
7738 * have a KEEPINIT timer we need to check the first transmit in rack_timeout_rxt()
7742 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timeout_rxt()
7746 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_rxt()
7749 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && in rack_timeout_rxt()
7750 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { in rack_timeout_rxt()
7758 * Retransmission timer went off. Message has not been acked within in rack_timeout_rxt()
7762 if ((rack->r_ctl.rc_resend == NULL) || in rack_timeout_rxt()
7763 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_timeout_rxt()
7770 tp->t_rxtshift++; in rack_timeout_rxt()
7773 if (tp->t_rxtshift > V_tcp_retries) { in rack_timeout_rxt()
7776 tp->t_rxtshift = V_tcp_retries; in rack_timeout_rxt()
7779 MPASS(tp->t_softerror >= 0); in rack_timeout_rxt()
7780 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; in rack_timeout_rxt()
7783 if (tp->t_state == TCPS_SYN_SENT) { in rack_timeout_rxt()
7788 tp->snd_cwnd = 1; in rack_timeout_rxt()
7789 } else if (tp->t_rxtshift == 1) { in rack_timeout_rxt()
7794 * is received within RTT/2 interval; the assumption here is in rack_timeout_rxt()
7796 * End-to-End Network Path Properties" by Allman and Paxson in rack_timeout_rxt()
7799 tp->snd_cwnd_prev = tp->snd_cwnd; in rack_timeout_rxt()
7800 tp->snd_ssthresh_prev = tp->snd_ssthresh; in rack_timeout_rxt()
7801 tp->snd_recover_prev = tp->snd_recover; in rack_timeout_rxt()
7802 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); in rack_timeout_rxt()
7803 tp->t_flags |= TF_PREVVALID; in rack_timeout_rxt()
7804 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) in rack_timeout_rxt()
7805 tp->t_flags &= ~TF_PREVVALID; in rack_timeout_rxt()
7807 if ((tp->t_state == TCPS_SYN_SENT) || in rack_timeout_rxt()
7808 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_timeout_rxt()
7809 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7811 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7813 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, in rack_timeout_rxt()
7814 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); in rack_timeout_rxt()
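/*
 * Sketch of the backed-off RTO above for the established case (assumed
 * names, all microseconds; the kernel uses its tcp_backoff[] table and
 * RACK_TCPT_RANGESET with a timer slop, modeled here as 1 << shift plus a
 * simple clamp):
 */
#include <stdint.h>

static uint32_t
backed_off_rto(uint32_t srtt, uint32_t rttvar, unsigned shift,
    uint32_t rto_min, uint32_t rto_max)
{
	uint64_t rexmt = (uint64_t)(srtt + (rttvar << 2)) << shift;

	if (rexmt < rto_min)
		rexmt = rto_min;
	if (rexmt > rto_max)
		rexmt = rto_max;
	return ((uint32_t)rexmt);
}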
7823 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; in rack_timeout_rxt()
7830 ((tp->t_state == TCPS_ESTABLISHED) || in rack_timeout_rxt()
7831 (tp->t_state == TCPS_FIN_WAIT_1))) { in rack_timeout_rxt()
7834 * 1448 -> 1188 -> 524) should be given 2 chances to recover in rack_timeout_rxt()
7835 * before further clamping down. 'tp->t_rxtshift % 2 == 0' in rack_timeout_rxt()
7838 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == in rack_timeout_rxt()
7840 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && in rack_timeout_rxt()
7841 tp->t_rxtshift % 2 == 0)) { in rack_timeout_rxt()
7843 * Enter Path MTU Black-hole Detection mechanism: in rack_timeout_rxt()
7844 * - Disable Path MTU Discovery (IP "DF" bit). in rack_timeout_rxt()
7848 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { in rack_timeout_rxt()
7850 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7852 tp->t_pmtud_saved_maxseg = tp->t_maxseg; in rack_timeout_rxt()
7861 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { in rack_timeout_rxt()
7863 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; in rack_timeout_rxt()
7867 tp->t_maxseg = V_tcp_v6mssdflt; in rack_timeout_rxt()
7872 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7880 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { in rack_timeout_rxt()
7882 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; in rack_timeout_rxt()
7886 tp->t_maxseg = V_tcp_mssdflt; in rack_timeout_rxt()
7891 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7904 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && in rack_timeout_rxt()
7905 (tp->t_rxtshift >= 6)) { in rack_timeout_rxt()
7906 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7907 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7908 tp->t_maxseg = tp->t_pmtud_saved_maxseg; in rack_timeout_rxt()
7909 if (tp->t_maxseg < V_tcp_mssdflt) { in rack_timeout_rxt()
7915 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7917 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
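/*
 * The blackhole machinery above follows a simple schedule; a reduced model
 * of the "should we step the MSS down now?" test (a simplification of the
 * flag checks, not the kernel's exact condition):
 */
static int
blackhole_should_probe(int rxtshift, int pmtud_active)
{
	/* Even shifts in [2,6) each get a downward MSS probe. */
	return (pmtud_active && rxtshift >= 2 && rxtshift < 6 &&
	    (rxtshift % 2) == 0);
}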
7925 * our third SYN to work-around some broken terminal servers in rack_timeout_rxt()
7928 * unknown-to-them TCP options. in rack_timeout_rxt()
7930 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && in rack_timeout_rxt()
7931 (tp->t_rxtshift == 3)) in rack_timeout_rxt()
7932 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); in rack_timeout_rxt()
7935 * Clobber it so we'll take the next rtt measurement as our srtt; in rack_timeout_rxt()
7939 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { in rack_timeout_rxt()
7941 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_timeout_rxt()
7946 tp->t_rttvar += tp->t_srtt; in rack_timeout_rxt()
7947 tp->t_srtt = 0; in rack_timeout_rxt()
7949 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_timeout_rxt()
7950 tp->snd_recover = tp->snd_max; in rack_timeout_rxt()
7951 tp->t_flags |= TF_ACKNOW; in rack_timeout_rxt()
7952 tp->t_rtttime = 0; in rack_timeout_rxt()
7953 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); in rack_timeout_rxt()
7962 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); in rack_process_timers()
7964 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_timers()
7965 (tp->t_flags & TF_GPUTINPROG)) { in rack_process_timers()
7974 bytes = tp->gput_ack - tp->gput_seq; in rack_process_timers()
7975 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_process_timers()
7976 bytes += tp->gput_seq - tp->snd_una; in rack_process_timers()
7977 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_process_timers()
7983 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_process_timers()
7984 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_process_timers()
7985 tp->gput_seq, in rack_process_timers()
7987 tp->t_flags &= ~TF_GPUTINPROG; in rack_process_timers()
7993 if (tp->t_state == TCPS_LISTEN) { in rack_process_timers()
7995 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) in rack_process_timers()
8000 rack->rc_on_min_to) { in rack_process_timers()
8002 * For the rack timer when we in rack_process_timers()
8003 * are on a min-timeout (which means rrr_conf = 3) in rack_process_timers()
8004 * we don't want to check the timer. It may in rack_process_timers()
8008 * If it's on a normal rack timer (non-min) then in rack_process_timers()
8013 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_process_timers()
8016 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_process_timers()
8017 ret = -1; in rack_process_timers()
8028 ret = -2; in rack_process_timers()
8033 * Ok, our timer went off early and we are not paced: a false in rack_process_timers()
8035 * alarm. Make sure the no-sack wakeup is off, since we no longer have a PKT_OUTPUT in rack_process_timers()
8038 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; in rack_process_timers()
8039 ret = -3; in rack_process_timers()
8040 left = rack->r_ctl.rc_timer_exp - cts; in rack_process_timers()
8046 rack->rc_tmr_stopped = 0; in rack_process_timers()
8047 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; in rack_process_timers()
8051 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8052 rack->r_fast_output = 0; in rack_process_timers()
8055 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8058 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8059 rack->r_fast_output = 0; in rack_process_timers()
8077 flags_on_entry = rack->r_ctl.rc_hpts_flags; in rack_timer_cancel()
8079 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_timer_cancel()
8080 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || in rack_timer_cancel()
8081 ((tp->snd_max - tp->snd_una) == 0))) { in rack_timer_cancel()
8082 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8085 if ((tp->snd_max - tp->snd_una) == 0) in rack_timer_cancel()
8086 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_cancel()
8089 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_timer_cancel()
8090 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_cancel()
8091 if (tcp_in_hpts(rack->rc_tp) && in rack_timer_cancel()
8092 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { in rack_timer_cancel()
8094 * Canceling timers when we have no output being in rack_timer_cancel()
8098 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8101 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); in rack_timer_cancel()
8112 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_stopall()
8113 rack->t_timers_stopped = 1; in rack_stopall()
8128 rack->rc_in_persist = 1; in rack_stop_all_timers()
8130 if (tcp_in_hpts(rack->rc_tp)) { in rack_stop_all_timers()
8131 tcp_hpts_remove(rack->rc_tp); in rack_stop_all_timers()
8141 rsm->r_rtr_cnt++; in rack_update_rsm()
8142 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { in rack_update_rsm()
8143 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; in rack_update_rsm()
8144 rsm->r_flags |= RACK_OVERMAX; in rack_update_rsm()
8146 rsm->r_act_rxt_cnt++; in rack_update_rsm()
8149 rsm->r_dupack = 0; in rack_update_rsm()
8150 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { in rack_update_rsm()
8151 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8152 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8154 if (rsm->r_flags & RACK_WAS_LOST) { in rack_update_rsm()
8160 rsm->r_flags &= ~RACK_WAS_LOST; in rack_update_rsm()
8161 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_update_rsm()
8163 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_update_rsm()
8164 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_update_rsm()
8166 rack->r_ctl.rc_considered_lost = 0; in rack_update_rsm()
8168 idx = rsm->r_rtr_cnt - 1; in rack_update_rsm()
8169 rsm->r_tim_lastsent[idx] = ts; in rack_update_rsm()
8172 * in snduna <-> snd_max. in rack_update_rsm()
8174 rsm->r_fas = ctf_flight_size(rack->rc_tp, in rack_update_rsm()
8175 rack->r_ctl.rc_sacked); in rack_update_rsm()
8176 if (rsm->r_flags & RACK_ACKED) { in rack_update_rsm()
8178 rsm->r_flags &= ~RACK_ACKED; in rack_update_rsm()
8179 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8181 if (rsm->r_in_tmap) { in rack_update_rsm()
8182 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8183 rsm->r_in_tmap = 0; in rack_update_rsm()
8187 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8188 rsm->r_in_tmap = 1; in rack_update_rsm()
8189 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); in rack_update_rsm()
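/*
 * r_bas above records the send size in whole segments via a ceiling
 * division; e.g. with segsiz = 1448, a 1500-byte entry yields 2. Generic
 * form of the idiom:
 */
#include <stdint.h>

static uint8_t
bytes_to_segs(uint32_t len, uint32_t segsiz)
{
	return ((uint8_t)((len + segsiz - 1) / segsiz));
}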
8191 if (rsm->r_flags & RACK_MUST_RXT) { in rack_update_rsm()
8192 if (rack->r_must_retran) in rack_update_rsm()
8193 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8194 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_update_rsm()
8199 rack->r_must_retran = 0; in rack_update_rsm()
8200 rack->r_ctl.rc_out_at_rto = 0; in rack_update_rsm()
8202 rsm->r_flags &= ~RACK_MUST_RXT; in rack_update_rsm()
8205 rsm->r_flags &= ~RACK_RWND_COLLAPSED; in rack_update_rsm()
8206 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_update_rsm()
8208 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_update_rsm()
8209 rsm->r_flags |= RACK_WAS_SACKPASS; in rack_update_rsm()
8218 * We (re-)transmitted starting at rsm->r_start for some length in rack_update_entry()
8227 c_end = rsm->r_start + len; in rack_update_entry()
8228 if (SEQ_GEQ(c_end, rsm->r_end)) { in rack_update_entry()
8234 if (c_end == rsm->r_end) { in rack_update_entry()
8241 act_len = rsm->r_end - rsm->r_start; in rack_update_entry()
8242 *lenp = (len - act_len); in rack_update_entry()
8243 return (rsm->r_end); in rack_update_entry()
8267 nrsm->r_dupack = 0; in rack_update_entry()
8270 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_update_entry()
8272 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_update_entry()
8277 if (rsm->r_in_tmap) { in rack_update_entry()
8278 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_update_entry()
8279 nrsm->r_in_tmap = 1; in rack_update_entry()
8281 rsm->r_flags &= (~RACK_HAS_FIN); in rack_update_entry()
8308 * won't be able to effectively use the ACK for an RTT on a retran. in rack_log_output()
8319 * -- i.e. return if err != 0 or should we pretend we sent it? -- in rack_log_output()
8325 * We don't log errors -- we could but snd_max does not in rack_log_output()
8337 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_output()
8338 snd_una = tp->snd_una; in rack_log_output()
8339 snd_max = tp->snd_max; in rack_log_output()
8347 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) in rack_log_output()
8353 /* Are we sending an old segment to induce an ack (keep-alive)? */ in rack_log_output()
8363 len = end - seq_out; in rack_log_output()
8371 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_log_output()
8372 rack->r_ctl.rc_prr_out += len; in rack_log_output()
8388 rsm->r_flags = RACK_HAS_FIN|add_flag; in rack_log_output()
8390 rsm->r_flags = add_flag; in rack_log_output()
8393 rsm->r_hw_tls = 1; in rack_log_output()
8394 rsm->r_tim_lastsent[0] = cts; in rack_log_output()
8395 rsm->r_rtr_cnt = 1; in rack_log_output()
8396 rsm->r_act_rxt_cnt = 0; in rack_log_output()
8397 rsm->r_rtr_bytes = 0; in rack_log_output()
8400 rsm->r_flags |= RACK_HAS_SYN; in rack_log_output()
8402 rsm->r_start = seq_out; in rack_log_output()
8403 rsm->r_end = rsm->r_start + len; in rack_log_output()
8405 rsm->r_dupack = 0; in rack_log_output()
8411 rsm->m = s_mb; in rack_log_output()
8412 rsm->soff = s_moff; in rack_log_output()
8415 * reflected in snduna <-> snd_max in rack_log_output()
8417 rsm->r_fas = (ctf_flight_size(rack->rc_tp, in rack_log_output()
8418 rack->r_ctl.rc_sacked) + in rack_log_output()
8419 (rsm->r_end - rsm->r_start)); in rack_log_output()
8420 if ((rack->rc_initial_ss_comp == 0) && in rack_log_output()
8421 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { in rack_log_output()
8422 rack->r_ctl.ss_hi_fs = rsm->r_fas; in rack_log_output()
8424 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ in rack_log_output()
8425 if (rsm->m) { in rack_log_output()
8426 if (rsm->m->m_len <= rsm->soff) { in rack_log_output()
8432 * within rsm->m. But if the sbsndptr was in rack_log_output()
8438 lm = rsm->m; in rack_log_output()
8439 while (lm->m_len <= rsm->soff) { in rack_log_output()
8440 rsm->soff -= lm->m_len; in rack_log_output()
8441 lm = lm->m_next; in rack_log_output()
8442 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", in rack_log_output()
8443 __func__, rack, s_moff, s_mb, rsm->soff)); in rack_log_output()
8445 rsm->m = lm; in rack_log_output()
8447 rsm->orig_m_len = rsm->m->m_len; in rack_log_output()
8448 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_log_output()
8450 rsm->orig_m_len = 0; in rack_log_output()
8451 rsm->orig_t_space = 0; in rack_log_output()
8453 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); in rack_log_output()
8458 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_log_output()
8460 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_log_output()
8465 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_log_output()
8466 rsm->r_in_tmap = 1; in rack_log_output()
8467 if (rsm->r_flags & RACK_IS_PCM) { in rack_log_output()
8468 rack->r_ctl.pcm_i.send_time = cts; in rack_log_output()
8469 rack->r_ctl.pcm_i.eseq = rsm->r_end; in rack_log_output()
8471 if (rack->pcm_in_progress == 0) in rack_log_output()
8472 rack->r_ctl.pcm_i.sseq = rsm->r_start; in rack_log_output()
8480 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_log_output()
8481 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { in rack_log_output()
8484 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_log_output()
8486 prsm->r_one_out_nr = 1; in rack_log_output()
8494 if (hintrsm && (hintrsm->r_start == seq_out)) { in rack_log_output()
8501 if ((rsm) && (rsm->r_start == seq_out)) { in rack_log_output()
8511 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); in rack_log_output()
8513 if (rsm->r_start == seq_out) { in rack_log_output()
8521 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { in rack_log_output()
8539 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_log_output()
8541 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_log_output()
8546 if (rsm->r_in_tmap) { in rack_log_output()
8547 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_log_output()
8548 nrsm->r_in_tmap = 1; in rack_log_output()
8550 rsm->r_flags &= (~RACK_HAS_FIN); in rack_log_output()
8562 if (seq_out == tp->snd_max) { in rack_log_output()
8564 } else if (SEQ_LT(seq_out, tp->snd_max)) { in rack_log_output()
8566 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", in rack_log_output()
8567 seq_out, len, tp->snd_una, tp->snd_max); in rack_log_output()
8569 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_log_output()
8571 rsm, rsm->r_start, rsm->r_end); in rack_log_output()
8580 * Hmm beyond sndmax? (only if we are using the new rtt-pack in rack_log_output()
8584 seq_out, len, tp->snd_max, tp); in rack_log_output()
8590 * Record one of the RTT updates from an ack into
8595 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, in tcp_rack_xmit_timer() argument
8598 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8599 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { in tcp_rack_xmit_timer()
8600 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; in tcp_rack_xmit_timer()
8602 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8603 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { in tcp_rack_xmit_timer()
8604 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; in tcp_rack_xmit_timer()
8606 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in tcp_rack_xmit_timer()
8607 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) in tcp_rack_xmit_timer()
8608 rack->r_ctl.rc_gp_lowrtt = us_rtt; in tcp_rack_xmit_timer()
8609 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) in tcp_rack_xmit_timer()
8610 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in tcp_rack_xmit_timer()
8614 (rsm->r_just_ret) || in tcp_rack_xmit_timer()
8615 (rsm->r_one_out_nr && in tcp_rack_xmit_timer()
8616 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { in tcp_rack_xmit_timer()
8620 * rtt measurement for buffer determination in tcp_rack_xmit_timer()
8623 * the r_one_out_nr. If it was a CUM-ACK and in tcp_rack_xmit_timer()
8630 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8631 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { in tcp_rack_xmit_timer()
8632 if (rack->r_ctl.rack_rs.confidence == 0) { in tcp_rack_xmit_timer()
8637 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8638 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8639 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8648 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8649 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8650 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8653 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); in tcp_rack_xmit_timer()
8654 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; in tcp_rack_xmit_timer()
8655 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; in tcp_rack_xmit_timer()
8656 rack->r_ctl.rack_rs.rs_rtt_cnt++; in tcp_rack_xmit_timer()
8660 * Collect new round-trip time estimate
8667 int32_t rtt; in tcp_rack_xmit_timer_commit() local
8669 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) in tcp_rack_xmit_timer_commit()
8672 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { in tcp_rack_xmit_timer_commit()
8673 /* We are to use the lowest RTT seen in a single ack */ in tcp_rack_xmit_timer_commit()
8674 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; in tcp_rack_xmit_timer_commit()
8675 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { in tcp_rack_xmit_timer_commit()
8676 /* We are to use the highest RTT seen in a single ack */ in tcp_rack_xmit_timer_commit()
8677 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; in tcp_rack_xmit_timer_commit()
8678 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { in tcp_rack_xmit_timer_commit()
8679 /* We are to use the average RTT seen in a single ack */ in tcp_rack_xmit_timer_commit()
8680 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / in tcp_rack_xmit_timer_commit()
8681 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); in tcp_rack_xmit_timer_commit()
8684 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); in tcp_rack_xmit_timer_commit()
8688 if (rtt == 0) in tcp_rack_xmit_timer_commit()
8689 rtt = 1; in tcp_rack_xmit_timer_commit()
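/*
 * The three sampling policies above reduce to this selector (sketch with
 * assumed names; callers guarantee count > 0 for the average case):
 */
#include <stdint.h>

enum rtt_method { RTT_LOW, RTT_HIGH, RTT_AVG };

static int32_t
pick_ack_rtt(enum rtt_method m, int32_t lowest, int32_t highest,
    uint64_t total, uint64_t count)
{
	int32_t rtt;

	switch (m) {
	case RTT_LOW:	rtt = lowest;  break;
	case RTT_HIGH:	rtt = highest; break;
	default:	rtt = (int32_t)(total / count); break;	/* average */
	}
	return ((rtt > 0) ? rtt : 1);	/* never report a zero RTT */
}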
8690 if (rack->rc_gp_rtt_set == 0) { in tcp_rack_xmit_timer_commit()
8692 * With no RTT we have to accept in tcp_rack_xmit_timer_commit()
8695 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8696 rack->rc_gp_rtt_set = 1; in tcp_rack_xmit_timer_commit()
8697 } else if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8699 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); in tcp_rack_xmit_timer_commit()
8700 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; in tcp_rack_xmit_timer_commit()
8702 if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8707 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8708 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8710 if (rack->rc_highly_buffered == 0) { in tcp_rack_xmit_timer_commit()
8716 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { in tcp_rack_xmit_timer_commit()
8717 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, in tcp_rack_xmit_timer_commit()
8718 rack->r_ctl.rc_highest_us_rtt, in tcp_rack_xmit_timer_commit()
8719 rack->r_ctl.rc_lowest_us_rtt, in tcp_rack_xmit_timer_commit()
8721 rack->rc_highly_buffered = 1; in tcp_rack_xmit_timer_commit()
8725 if ((rack->r_ctl.rack_rs.confidence) || in tcp_rack_xmit_timer_commit()
8726 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { in tcp_rack_xmit_timer_commit()
8731 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8732 /* The lowest rtt can be set if it was not retransmitted */ in tcp_rack_xmit_timer_commit()
8733 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8734 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8735 if (rack->r_ctl.rc_lowest_us_rtt == 0) in tcp_rack_xmit_timer_commit()
8736 rack->r_ctl.rc_lowest_us_rtt = 1; in tcp_rack_xmit_timer_commit()
8739 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_xmit_timer_commit()
8740 if (tp->t_srtt != 0) { in tcp_rack_xmit_timer_commit()
8742 * We keep a simple srtt in microseconds, like our rtt in tcp_rack_xmit_timer_commit()
8749 delta = tp->t_srtt - rtt; in tcp_rack_xmit_timer_commit()
8751 tp->t_srtt -= (tp->t_srtt >> 3); in tcp_rack_xmit_timer_commit()
8752 /* Add in 1/8th of the new RTT just measured */ in tcp_rack_xmit_timer_commit()
8753 tp->t_srtt += (rtt >> 3); in tcp_rack_xmit_timer_commit()
8754 if (tp->t_srtt <= 0) in tcp_rack_xmit_timer_commit()
8755 tp->t_srtt = 1; in tcp_rack_xmit_timer_commit()
8758 delta = -delta; in tcp_rack_xmit_timer_commit()
8760 tp->t_rttvar -= (tp->t_rttvar >> 3); in tcp_rack_xmit_timer_commit()
8762 tp->t_rttvar += (delta >> 3); in tcp_rack_xmit_timer_commit()
8763 if (tp->t_rttvar <= 0) in tcp_rack_xmit_timer_commit()
8764 tp->t_rttvar = 1; in tcp_rack_xmit_timer_commit()
8767 * No rtt measurement yet - use the unsmoothed rtt. Set the in tcp_rack_xmit_timer_commit()
8768 * variance to half the rtt (so our first retransmit happens in tcp_rack_xmit_timer_commit()
8769 * at 3*rtt). in tcp_rack_xmit_timer_commit()
8771 tp->t_srtt = rtt; in tcp_rack_xmit_timer_commit()
8772 tp->t_rttvar = rtt >> 1; in tcp_rack_xmit_timer_commit()
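/*
 * The smoothing above is the classic Jacobson-style estimator kept in
 * microseconds with 1/8 gains on both terms (note the deviation term also
 * uses 1/8 here, where the textbook version uses 1/4). Standalone model
 * with assumed names:
 */
#include <stdint.h>

struct rtt_est {
	int32_t srtt;	/* smoothed rtt, usec */
	int32_t rttvar;	/* smoothed mean deviation, usec */
};

static void
rtt_update(struct rtt_est *e, int32_t rtt)
{
	if (e->srtt != 0) {
		int32_t delta = e->srtt - rtt;	/* sampled before updating */

		e->srtt -= e->srtt >> 3;	/* keep 7/8 of old estimate */
		e->srtt += rtt >> 3;		/* blend in 1/8 of sample */
		if (e->srtt <= 0)
			e->srtt = 1;
		if (delta < 0)
			delta = -delta;		/* |srtt - rtt| */
		e->rttvar -= e->rttvar >> 3;
		e->rttvar += delta >> 3;
		if (e->rttvar <= 0)
			e->rttvar = 1;
	} else {
		/* First sample: var = rtt/2 so the first RTO lands near 3*rtt. */
		e->srtt = rtt;
		e->rttvar = rtt >> 1;
	}
}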
8774 rack->rc_srtt_measure_made = 1; in tcp_rack_xmit_timer_commit()
8776 if (tp->t_rttupdated < UCHAR_MAX) in tcp_rack_xmit_timer_commit()
8777 tp->t_rttupdated++; in tcp_rack_xmit_timer_commit()
8780 /* Send in the microsecond rtt used for rxt timeout purposes */ in tcp_rack_xmit_timer_commit()
8781 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); in tcp_rack_xmit_timer_commit()
8783 /* Send in the millisecond rtt used for rxt timeout purposes */ in tcp_rack_xmit_timer_commit()
8787 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8788 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8790 /* Send in the millisecond rtt as close to the path RTT as we can get */ in tcp_rack_xmit_timer_commit()
8794 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8795 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8797 /* Send in the microsecond rtt as close to the path RTT as we can get */ in tcp_rack_xmit_timer_commit()
8798 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8800 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8802 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in tcp_rack_xmit_timer_commit()
8804 * the retransmit should happen at rtt + 4 * rttvar. Because of the in tcp_rack_xmit_timer_commit()
8806 * way we do the smoothing, srtt and rttvar will each average +1/2 tick of bias. When we compute the retransmit timer, we want 1/2 in tcp_rack_xmit_timer_commit()
8807 * tick of rounding and 1 extra tick because of the +-1/2 tick in tcp_rack_xmit_timer_commit()
8808 * uncertainty in the firing of the timer. The bias gives us exactly the 1.5 ticks we need, but because the bias is statistical we in tcp_rack_xmit_timer_commit()
8811 * must test that we do not drop below the minimum feasible timer (which is 2 ticks). in tcp_rack_xmit_timer_commit()
8813 tp->t_rxtshift = 0; in tcp_rack_xmit_timer_commit()
8814 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in tcp_rack_xmit_timer_commit()
8815 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); in tcp_rack_xmit_timer_commit()
8816 rack_log_rtt_sample(rack, rtt); in tcp_rack_xmit_timer_commit()
8817 tp->t_softerror = 0; in tcp_rack_xmit_timer_commit()
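/*
 * Editor's sketch (not part of rack.c): the smoothing above is the
 * classic Van Jacobson EWMA with a 1/8 gain on both srtt and the mean
 * deviation, kept in microseconds. A minimal standalone model, with
 * hypothetical names (struct rtt_est, ewma_rtt_update) used only for
 * illustration:
 */
#include <stdint.h>

struct rtt_est {
	int32_t srtt;		/* smoothed RTT, usec */
	int32_t rttvar;		/* smoothed mean deviation, usec */
};

static void
ewma_rtt_update(struct rtt_est *e, int32_t rtt)
{
	if (e->srtt != 0) {
		int32_t delta = e->srtt - rtt;	/* this sample's deviation */

		e->srtt -= (e->srtt >> 3);	/* srtt = 7/8 srtt + 1/8 rtt */
		e->srtt += (rtt >> 3);
		if (e->srtt <= 0)
			e->srtt = 1;
		if (delta < 0)
			delta = -delta;
		e->rttvar -= (e->rttvar >> 3);	/* rttvar = 7/8 rttvar + 1/8 |delta| */
		e->rttvar += (delta >> 3);
		if (e->rttvar <= 0)
			e->rttvar = 1;
	} else {
		/* First sample: rttvar = rtt/2, so srtt + 4*rttvar = 3*rtt. */
		e->srtt = rtt;
		e->rttvar = rtt >> 1;
	}
}
/*
 * The rexmt timeout is then srtt + 4 * rttvar, range-clamped between the
 * stack's rto_min and rto_max (plus the configured timer slop), which is
 * what the RACK_TCPT_RANGESET() call above performs.
 */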
8825 * Apply the inbound us-rtt to the filter at time us_cts. in rack_apply_updated_usrtt()
8829 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_apply_updated_usrtt()
8830 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, in rack_apply_updated_usrtt()
8833 /* We just hit a new lower rtt time */ in rack_apply_updated_usrtt()
8840 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { in rack_apply_updated_usrtt()
8842 rack->rc_gp_dyn_mul && in rack_apply_updated_usrtt()
8843 (rack->use_fixed_rate == 0) && in rack_apply_updated_usrtt()
8844 (rack->rc_always_pace)) { in rack_apply_updated_usrtt()
8846 * We are seeing a new lower rtt very close in rack_apply_updated_usrtt()
8847 * to the time that we would have entered probe-rtt. Most likely a peer flow in rack_apply_updated_usrtt()
8849 * has entered probe-rtt; let's go in now too. in rack_apply_updated_usrtt()
8855 if ((rack->in_probe_rtt == 0) && in rack_apply_updated_usrtt()
8856 (rack->rc_skip_timely == 0) && in rack_apply_updated_usrtt()
8857 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { in rack_apply_updated_usrtt()
8861 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_apply_updated_usrtt()
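/*
 * Editor's sketch (not part of rack.c): rc_gp_min_rtt above behaves as a
 * time-windowed running minimum, so the estimate can rise again when the
 * path RTT genuinely grows. A toy single-slot version with hypothetical
 * names (seed val with UINT32_MAX before the first sample):
 */
#include <stdint.h>

struct win_min {
	uint32_t val;		/* current minimum */
	uint32_t stamp_us;	/* when it was recorded */
	uint32_t win_us;	/* how long a minimum may live */
};

static void
win_min_apply(struct win_min *f, uint32_t sample, uint32_t now_us)
{
	if ((sample <= f->val) ||
	    ((now_us - f->stamp_us) >= f->win_us)) {
		/* New minimum, or the old one aged out of the window. */
		f->val = sample;
		f->stamp_us = now_us;
	}
}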
8874 if ((rsm->r_flags & RACK_ACKED) || in rack_update_rtt()
8875 (rsm->r_flags & RACK_WAS_ACKED)) in rack_update_rtt()
8878 if (rsm->r_no_rtt_allowed) { in rack_update_rtt()
8883 if (SEQ_GT(th_ack, rsm->r_end)) { in rack_update_rtt()
8884 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8887 len_acked = th_ack - rsm->r_start; in rack_update_rtt()
8891 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8894 if (rsm->r_rtr_cnt == 1) { in rack_update_rtt()
8896 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8899 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
8900 tp->t_rttlow = t; in rack_update_rtt()
8901 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8902 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8903 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8904 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8905 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8908 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) in rack_update_rtt()
8909 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; in rack_update_rtt()
8911 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; in rack_update_rtt()
8914 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8915 /* Kick the RTT to the CC */ in rack_update_rtt()
8916 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8918 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_update_rtt()
8920 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); in rack_update_rtt()
8921 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2, rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8934 * When we are not app-limited then we see if in rack_update_rtt()
8942 * in the RTT. We probably need to examine this algorithm in rack_update_rtt()
8951 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_update_rtt()
8956 } else if (rack->app_limited_needs_set == 0) { in rack_update_rtt()
8961 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); in rack_update_rtt()
8963 calc_conf, rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8965 if ((rsm->r_flags & RACK_TLP) && in rack_update_rtt()
8966 (!IN_FASTRECOVERY(tp->t_flags))) { in rack_update_rtt()
8968 if (rack->r_ctl.rc_tlp_cwnd_reduce) { in rack_update_rtt()
8972 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8973 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8974 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
8976 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8977 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8978 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8979 rack->rc_rack_rtt = t; in rack_update_rtt()
8988 tp->t_rxtshift = 0; in rack_update_rtt()
8989 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_update_rtt()
8990 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_update_rtt()
8991 tp->t_softerror = 0; in rack_update_rtt()
8992 if (to && (to->to_flags & TOF_TS) && in rack_update_rtt()
8994 (to->to_tsecr) && in rack_update_rtt()
8995 ((rsm->r_flags & RACK_OVERMAX) == 0)) { in rack_update_rtt()
9000 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
9001 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { in rack_update_rtt()
9002 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9005 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
9007 * Kick the RTT to the CC, here in rack_update_rtt()
9013 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) in rack_update_rtt()
9014 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9016 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9017 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
9019 if ((i + 1) < rsm->r_rtr_cnt) { in rack_update_rtt()
9031 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
9032 tp->t_rttlow = t; in rack_update_rtt()
9033 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9034 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9035 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9036 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9039 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9040 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9041 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
9043 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
9044 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9045 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9046 rack->rc_rack_rtt = t; in rack_update_rtt()
9048 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); in rack_update_rtt()
9050 rsm->r_rtr_cnt); in rack_update_rtt()
9055 if (tcp_bblogging_on(rack->rc_tp)) { in rack_update_rtt()
9056 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
9057 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); in rack_update_rtt()
9065 * time-stamp since it's not there, or the time the peer last in rack_update_rtt()
9066 * received a segment that moved forward its cum-ack point. in rack_update_rtt()
9069 i = rsm->r_rtr_cnt - 1; in rack_update_rtt()
9070 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9073 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9076 * than the smallest rtt we have observed. We most likely did an improper retransmit as outlined in in rack_update_rtt()
9078 * 6.2 Step 2 point 2 in the rack-draft, so we in rack_update_rtt()
9084 } else if (rack->r_ctl.rc_rack_min_rtt) { in rack_update_rtt()
9089 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
9090 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9091 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9092 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9093 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9096 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9097 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9098 (uint32_t)rsm->r_tim_lastsent[i]))) { in rack_update_rtt()
9100 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9101 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9102 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9103 rack->rc_rack_rtt = t; in rack_update_rtt()
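/*
 * Editor's sketch (not part of rack.c): once a segment has been
 * retransmitted the ACK alone is ambiguous (Karn's rule), but the echoed
 * timestamp can pin down which transmission it answers, which is what
 * the tsecr loop above exploits. Hedged model, with a hypothetical
 * sent_ts[] array holding each (re)transmission time in the same clock
 * units as the echoed timestamp:
 */
#include <stdint.h>

/*
 * Return the RTT for the transmission whose timestamp the peer echoed,
 * or 0 if none matches (the sample must then be discarded).
 */
static uint32_t
rtt_from_tsecr(const uint32_t *sent_ts, int cnt, uint32_t tsecr, uint32_t now)
{
	for (int i = 0; i < cnt; i++) {
		if (sent_ts[i] == tsecr)
			return (now - sent_ts[i]);
	}
	return (0);	/* ambiguous: apply Karn's rule, take no sample */
}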
9125 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, in rack_log_sack_passed()
9131 if (nrsm->r_flags & RACK_ACKED) { in rack_log_sack_passed()
9139 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { in rack_log_sack_passed()
9147 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_log_sack_passed()
9150 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_log_sack_passed()
9153 nrsm->r_flags |= RACK_WAS_LOST; in rack_log_sack_passed()
9154 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_log_sack_passed()
9157 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_log_sack_passed()
9165 nrsm->r_flags |= RACK_SACK_PASSED; in rack_log_sack_passed()
9166 nrsm->r_flags &= ~RACK_WAS_SACKPASS; in rack_log_sack_passed()
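/*
 * Editor's sketch (not part of rack.c): the RACK loss test applied above
 * is time based rather than dup-ack based. A segment that newer data has
 * been SACKed past is declared lost once its last transmit time plus the
 * reorder threshold has elapsed. Standalone model (all values in usec):
 */
#include <stdint.h>

static int
rack_style_is_lost(uint32_t last_tx_us, uint32_t thresh_us, uint32_t now_us)
{
	/* Lost if now >= last_tx + thresh, using a wrap-safe compare. */
	return ((int32_t)(now_us - (last_tx_us + thresh_us)) >= 0);
}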
9180 if ((tp->t_flags & TF_GPUTINPROG) && in rack_need_set_test()
9181 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9191 if (rsm->r_rtr_cnt > 1) { in rack_need_set_test()
9204 seq = tp->gput_seq; in rack_need_set_test()
9205 ts = tp->gput_ts; in rack_need_set_test()
9206 rack->app_limited_needs_set = 0; in rack_need_set_test()
9207 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_need_set_test()
9210 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { in rack_need_set_test()
9218 tp->gput_seq = rsm->r_start; in rack_need_set_test()
9221 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9233 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9239 * way up to where this ack cum-ack moves in rack_need_set_test()
9242 if (SEQ_GT(th_ack, rsm->r_end)) in rack_need_set_test()
9243 tp->gput_seq = th_ack; in rack_need_set_test()
9245 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9247 if (SEQ_LT(tp->gput_seq, tp->snd_max)) in rack_need_set_test()
9248 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_need_set_test()
9262 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; in rack_need_set_test()
9264 /* If we hit here we have to have *not* sent tp->gput_seq */ in rack_need_set_test()
9265 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_need_set_test()
9267 rack->app_limited_needs_set = 1; in rack_need_set_test()
9269 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { in rack_need_set_test()
9271 * We moved beyond this guy's range, re-calculate in rack_need_set_test()
9274 if (rack->rc_gp_filled == 0) { in rack_need_set_test()
9275 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); in rack_need_set_test()
9277 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_need_set_test()
9284 if ((rack->in_probe_rtt == 0) && in rack_need_set_test()
9285 (rack->measure_saw_probe_rtt) && in rack_need_set_test()
9286 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_need_set_test()
9287 rack->measure_saw_probe_rtt = 0; in rack_need_set_test()
9288 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, in rack_need_set_test()
9289 seq, tp->gput_seq, in rack_need_set_test()
9290 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9291 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9293 if (rack->rc_gp_filled && in rack_need_set_test()
9294 ((tp->gput_ack - tp->gput_seq) < in rack_need_set_test()
9300 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { in rack_need_set_test()
9307 tp->t_flags &= ~TF_GPUTINPROG; in rack_need_set_test()
9308 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_need_set_test()
9310 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9311 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9317 tp->gput_ack = tp->gput_seq + ideal_amount; in rack_need_set_test()
9321 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); in rack_need_set_test()
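/*
 * Editor's sketch (not part of rack.c): the goodput measurement window
 * [gput_seq, gput_ack) re-armed above must be wide enough to average out
 * send bursts. A hedged model; MIN_GP_SEGS and the helper names are
 * illustrative, not the stack's actual identifiers:
 */
#include <stdint.h>

#define MIN_GP_SEGS	6	/* illustrative floor, in segments */

static uint32_t
gput_window_end(uint32_t gput_seq, int have_bw_estimate,
    uint32_t init_window, uint32_t maxseg, uint32_t measured_window)
{
	if (!have_bw_estimate) {
		/* No b/w estimate yet: the larger of IW and a fixed floor. */
		uint32_t floor_bytes = MIN_GP_SEGS * maxseg;

		return (gput_seq + (init_window > floor_bytes ?
		    init_window : floor_bytes));
	}
	/* Otherwise size the window from the current estimate. */
	return (gput_seq + measured_window);
}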
9328 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { in is_rsm_inside_declared_tlp_block()
9332 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { in is_rsm_inside_declared_tlp_block()
9336 /* It has to be a sub-part of the original TLP recorded */ in is_rsm_inside_declared_tlp_block()
9352 start = sack->start; in rack_proc_sack_blk()
9353 end = sack->end; in rack_proc_sack_blk()
9358 (SEQ_LT(end, rsm->r_start)) || in rack_proc_sack_blk()
9359 (SEQ_GEQ(start, rsm->r_end)) || in rack_proc_sack_blk()
9360 (SEQ_LT(start, rsm->r_start))) { in rack_proc_sack_blk()
9366 rsm = tqhash_find(rack->r_ctl.tqh, start); in rack_proc_sack_blk()
9373 if (rsm->r_start != start) { in rack_proc_sack_blk()
9374 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9379 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9380 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9385 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9393 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9397 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9398 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9399 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9400 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9402 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9403 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9404 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9405 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9408 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9409 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9410 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9411 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9412 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9419 * rsm |--------------| in rack_proc_sack_blk()
9420 * sackblk |-------> in rack_proc_sack_blk()
9422 * rsm |---| in rack_proc_sack_blk()
9424 * nrsm |----------| in rack_proc_sack_blk()
9436 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9438 (rsm->bindex == next->bindex) && in rack_proc_sack_blk()
9439 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9440 ((next->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9441 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9442 ((next->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9443 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9444 (next->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9449 (next->r_flags & RACK_ACKED) && in rack_proc_sack_blk()
9450 SEQ_GEQ(end, next->r_start)) { in rack_proc_sack_blk()
9457 * rsm |------------| (not-acked) in rack_proc_sack_blk()
9458 * next |-----------| (acked) in rack_proc_sack_blk()
9459 * sackblk |--------> in rack_proc_sack_blk()
9461 * rsm |------| (not-acked) in rack_proc_sack_blk()
9462 * next |-----------------| (acked) in rack_proc_sack_blk()
9463 * nrsm |-----| in rack_proc_sack_blk()
9471 tqhash_update_end(rack->r_ctl.tqh, rsm, start); in rack_proc_sack_blk()
9472 next->r_start = start; in rack_proc_sack_blk()
9473 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9474 next->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9475 /* Now we must adjust back where next->m is */ in rack_proc_sack_blk()
9495 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9496 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) in rack_proc_sack_blk()
9497 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9501 if (next->r_ack_arrival < in rack_proc_sack_blk()
9502 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9503 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9508 rsm->r_dupack = 0; in rack_proc_sack_blk()
9509 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9512 nrsm->r_start = start; in rack_proc_sack_blk()
9515 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9516 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9517 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9518 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9519 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9522 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9523 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9525 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9526 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9528 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9530 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9531 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9532 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9533 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9537 * one left un-acked) to the next one in rack_proc_sack_blk()
9540 * sack-passed on rsm (The one passed in in rack_proc_sack_blk()
9545 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9551 if (nrsm && nrsm->r_in_tmap) in rack_proc_sack_blk()
9555 if (SEQ_LT(end, next->r_end) || in rack_proc_sack_blk()
9556 (end == next->r_end)) { in rack_proc_sack_blk()
9563 start = next->r_end; in rack_proc_sack_blk()
9564 rsm = tqhash_next(rack->r_ctl.tqh, next); in rack_proc_sack_blk()
9572 * rsm |--------| in rack_proc_sack_blk()
9573 * sackblk |-----> in rack_proc_sack_blk()
9578 * rsm |----| in rack_proc_sack_blk()
9579 * sackblk |-----> in rack_proc_sack_blk()
9580 * nrsm |---| in rack_proc_sack_blk()
9595 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9597 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9599 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9604 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9605 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9606 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9609 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9616 if (end == rsm->r_end) { in rack_proc_sack_blk()
9618 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9620 } else if (SEQ_LT(end, rsm->r_end)) { in rack_proc_sack_blk()
9622 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9630 start = rsm->r_end; in rack_proc_sack_blk()
9631 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9637 if (SEQ_GEQ(end, rsm->r_end)) { in rack_proc_sack_blk()
9641 * rsm --- |-----| in rack_proc_sack_blk()
9642 * end |-----| in rack_proc_sack_blk()
9644 * end |---------| in rack_proc_sack_blk()
9646 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9650 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9651 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9656 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9663 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9667 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9668 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9669 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9670 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9672 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9673 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9674 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9675 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9678 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9679 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9680 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9681 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9682 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9686 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9688 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9691 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9692 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9693 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9695 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9696 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9698 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9700 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9701 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9704 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9705 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9706 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9707 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9708 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9710 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9711 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9712 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9713 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9714 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9715 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9716 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9717 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
9723 if (end == rsm->r_end) { in rack_proc_sack_blk()
9724 /* This block only - done, setup for next */ in rack_proc_sack_blk()
9731 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9732 start = rsm->r_end; in rack_proc_sack_blk()
9741 * rsm --- |-----| in rack_proc_sack_blk()
9742 * end |--| in rack_proc_sack_blk()
9744 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9748 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9749 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9754 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9761 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9765 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9766 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9767 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9768 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9770 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9771 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9772 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9773 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9776 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9777 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9778 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9779 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9780 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9788 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9790 (rsm->bindex == prev->bindex) && in rack_proc_sack_blk()
9791 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9792 ((prev->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9793 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9794 ((prev->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9795 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9796 (prev->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9801 (prev->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
9804 * in place and span from (rsm->r_start = end) to rsm->r_end. in rack_proc_sack_blk()
9806 * to prev->r_end <- end. in rack_proc_sack_blk()
9808 * prev |--------| (acked) in rack_proc_sack_blk()
9809 * rsm |-------| (non-acked) in rack_proc_sack_blk()
9810 * sackblk |-| in rack_proc_sack_blk()
9812 * prev |----------| (acked) in rack_proc_sack_blk()
9813 * rsm |-----| (non-acked) in rack_proc_sack_blk()
9814 * nrsm |-| (temporary) in rack_proc_sack_blk()
9821 tqhash_update_end(rack->r_ctl.tqh, prev, end); in rack_proc_sack_blk()
9822 rsm->r_start = end; in rack_proc_sack_blk()
9823 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9824 prev->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9829 nrsm->r_end = end; in rack_proc_sack_blk()
9830 rsm->r_dupack = 0; in rack_proc_sack_blk()
9849 if (prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9850 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { in rack_proc_sack_blk()
9851 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9857 if (prev->r_ack_arrival < in rack_proc_sack_blk()
9858 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9859 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9870 * to prev). Update the rtt and changed in rack_proc_sack_blk()
9874 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9875 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9876 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9877 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9878 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9881 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9882 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9884 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9885 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9887 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9889 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9890 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9891 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9892 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9908 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9909 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9914 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9921 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9925 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9926 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9927 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9928 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9930 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9931 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9932 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9933 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9936 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9937 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9938 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9939 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9940 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9945 * nrsm->r_start = end; in rack_proc_sack_blk()
9946 * nrsm->r_end = rsm->r_end; in rack_proc_sack_blk()
9947 * which is un-acked. in rack_proc_sack_blk()
9949 * rsm->r_end = nrsm->r_start; in rack_proc_sack_blk()
9950 * i.e. the remaining un-acked in rack_proc_sack_blk()
9955 * rsm |----------| (not acked) in rack_proc_sack_blk()
9956 * sackblk |---| in rack_proc_sack_blk()
9958 * rsm |---| (acked) in rack_proc_sack_blk()
9959 * nrsm |------| (not acked) in rack_proc_sack_blk()
9963 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9964 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9966 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9968 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9973 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9974 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9975 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9977 nrsm->r_dupack = 0; in rack_proc_sack_blk()
9980 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9981 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9984 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9985 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9986 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9988 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9989 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9991 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9993 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9995 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9998 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9999 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
10000 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
10001 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
10002 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
10004 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
10005 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
10006 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
10007 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
10008 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
10010 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
10011 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
10012 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
10023 ((rsm->r_flags & RACK_TLP) == 0) && in rack_proc_sack_blk()
10024 (rsm->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
10030 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10032 if (next->r_flags & RACK_TLP) in rack_proc_sack_blk()
10035 if ((next->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10036 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10039 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10040 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10043 if (rsm->bindex != next->bindex) in rack_proc_sack_blk()
10045 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10047 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10049 if (next->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10051 if (next->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10053 if (next->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10056 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10061 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10063 if (prev->r_flags & RACK_TLP) in rack_proc_sack_blk()
10066 if ((prev->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10067 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10070 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10071 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10074 if (rsm->bindex != prev->bindex) in rack_proc_sack_blk()
10076 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10078 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10080 if (prev->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10082 if (prev->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10084 if (prev->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10087 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10098 nrsm = tqhash_find(rack->r_ctl.tqh, end); in rack_proc_sack_blk()
10099 *prsm = rack->r_ctl.rc_sacklast = nrsm; in rack_proc_sack_blk()
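/*
 * Editor's sketch (not part of rack.c): every split case above reduces to
 * one primitive: clone a sendmap entry and divide its sequence span at a
 * SACK edge, so one piece can be marked ACKED while the other remains
 * outstanding. Minimal model with hypothetical types:
 */
#include <stdint.h>

struct seg {
	uint32_t start, end;	/* covers [start, end) in sequence space */
	int acked;
};

/* Split s at seq (start < seq < end); the right half lands in n. */
static void
seg_split(struct seg *s, struct seg *n, uint32_t seq)
{
	*n = *s;	/* the real code also clones flags/timestamps */
	n->start = seq;	/* new entry covers [seq, old end) */
	s->end = seq;	/* original shrinks to [start, seq) */
}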
10109 while (rsm && (rsm->r_flags & RACK_ACKED)) { in rack_peer_reneges()
10111 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_peer_reneges()
10113 if (rsm->r_in_tmap) { in rack_peer_reneges()
10115 rack, rsm, rsm->r_flags); in rack_peer_reneges()
10118 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); in rack_peer_reneges()
10121 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_peer_reneges()
10124 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); in rack_peer_reneges()
10127 tmap->r_in_tmap = 1; in rack_peer_reneges()
10128 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_peer_reneges()
10134 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); in rack_peer_reneges()
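/*
 * Editor's sketch (not part of rack.c): on a renege the scoreboard credit
 * for every leading ACKED entry is rolled back and the entries go back on
 * the transmit map so they are again eligible to be sent. Toy model:
 */
#include <stdint.h>

struct sacked_seg {
	uint32_t start, end;
	int acked;
};

static uint32_t
renege_rollback(struct sacked_seg *segs, int cnt)
{
	uint32_t returned = 0;

	/* Walk the leading ACKED entries; the peer has forgotten them. */
	for (int i = 0; i < cnt && segs[i].acked; i++) {
		segs[i].acked = 0;	/* back on the must-(re)send path */
		returned += segs[i].end - segs[i].start;
	}
	return (returned);	/* bytes deducted from the sacked count */
}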
10179 * The cum-ack is being advanced upon the sendmap. in rack_rsm_sender_update()
10185 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_rsm_sender_update()
10192 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { in rack_rsm_sender_update()
10193 tp->gput_ack = rsm->r_end; in rack_rsm_sender_update()
10202 if (rack->app_limited_needs_set) in rack_rsm_sender_update()
10220 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= in rack_rsm_sender_update()
10221 rack->r_ctl.rc_gp_cumack_ts) in rack_rsm_sender_update()
10224 rack->r_ctl.rc_gp_cumack_ts = ts; in rack_rsm_sender_update()
10225 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, in rack_rsm_sender_update()
10237 * RTT's. in rack_process_to_cumack()
10240 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { in rack_process_to_cumack()
10245 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); in rack_process_to_cumack()
10247 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_process_to_cumack()
10249 rack->r_ctl.cleared_app_ack = 0; in rack_process_to_cumack()
10251 rack->r_wanted_output = 1; in rack_process_to_cumack()
10252 if (SEQ_GT(th_ack, tp->snd_una)) in rack_process_to_cumack()
10253 rack->r_ctl.last_cumack_advance = acktime; in rack_process_to_cumack()
10256 if ((rack->rc_last_tlp_acked_set == 1)&& in rack_process_to_cumack()
10257 (rack->rc_last_tlp_past_cumack == 1) && in rack_process_to_cumack()
10258 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { in rack_process_to_cumack()
10261 * tlp retransmit sequence is ahead of the cum-ack. in rack_process_to_cumack()
10262 * This can only happen when the cum-ack moves all in rack_process_to_cumack()
10269 * the cum-ack is by the TLP before checking which is in rack_process_to_cumack()
10273 rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10274 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10275 rack->rc_last_tlp_acked_set = 0; in rack_process_to_cumack()
10276 rack->rc_last_tlp_past_cumack = 0; in rack_process_to_cumack()
10277 } else if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10278 (rack->rc_last_tlp_past_cumack == 0) && in rack_process_to_cumack()
10279 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { in rack_process_to_cumack()
10283 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10286 if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10287 (rack->rc_last_sent_tlp_past_cumack == 1) && in rack_process_to_cumack()
10288 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { in rack_process_to_cumack()
10290 rack->r_ctl.last_sent_tlp_seq, in rack_process_to_cumack()
10291 (rack->r_ctl.last_sent_tlp_seq + in rack_process_to_cumack()
10292 rack->r_ctl.last_sent_tlp_len)); in rack_process_to_cumack()
10293 rack->rc_last_sent_tlp_seq_valid = 0; in rack_process_to_cumack()
10294 rack->rc_last_sent_tlp_past_cumack = 0; in rack_process_to_cumack()
10295 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10296 (rack->rc_last_sent_tlp_past_cumack == 0) && in rack_process_to_cumack()
10297 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { in rack_process_to_cumack()
10301 rack->rc_last_sent_tlp_past_cumack = 1; in rack_process_to_cumack()
10304 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10306 if ((th_ack - 1) == tp->iss) { in rack_process_to_cumack()
10315 if (tp->t_flags & TF_SENTFIN) { in rack_process_to_cumack()
10322 tp->t_state, th_ack, rack, in rack_process_to_cumack()
10323 tp->snd_una, tp->snd_max); in rack_process_to_cumack()
10327 if (SEQ_LT(th_ack, rsm->r_start)) { in rack_process_to_cumack()
10331 rsm->r_start, in rack_process_to_cumack()
10332 th_ack, tp->t_state, rack->r_state); in rack_process_to_cumack()
10339 if ((rsm->r_flags & RACK_TLP) && in rack_process_to_cumack()
10340 (rsm->r_rtr_cnt > 1)) { in rack_process_to_cumack()
10350 if (rack->rc_last_tlp_acked_set && in rack_process_to_cumack()
10357 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10361 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_process_to_cumack()
10362 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10363 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10364 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10366 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_process_to_cumack()
10367 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10368 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10369 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10372 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10373 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10374 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10375 rack->rc_last_tlp_acked_set = 1; in rack_process_to_cumack()
10376 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10380 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_process_to_cumack()
10381 if (SEQ_GEQ(th_ack, rsm->r_end)) { in rack_process_to_cumack()
10386 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10392 rsm->r_flags &= ~RACK_WAS_LOST; in rack_process_to_cumack()
10393 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_process_to_cumack()
10395 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_process_to_cumack()
10396 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_process_to_cumack()
10398 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10400 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); in rack_process_to_cumack()
10401 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; in rack_process_to_cumack()
10402 rsm->r_rtr_bytes = 0; in rack_process_to_cumack()
10408 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_process_to_cumack()
10409 if (rsm->r_in_tmap) { in rack_process_to_cumack()
10410 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_process_to_cumack()
10411 rsm->r_in_tmap = 0; in rack_process_to_cumack()
10414 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10416 * It was acked on the scoreboard -- remove in rack_process_to_cumack()
10419 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_process_to_cumack()
10421 } else if (rsm->r_flags & RACK_SACK_PASSED) { in rack_process_to_cumack()
10427 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_process_to_cumack()
10428 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_process_to_cumack()
10429 rsm->r_flags |= RACK_ACKED; in rack_process_to_cumack()
10430 rack->r_ctl.rc_reorder_ts = cts; in rack_process_to_cumack()
10431 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_to_cumack()
10432 rack->r_ctl.rc_reorder_ts = 1; in rack_process_to_cumack()
10433 if (rack->r_ent_rec_ns) { in rack_process_to_cumack()
10438 rack->r_might_revert = 1; in rack_process_to_cumack()
10440 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10442 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10444 if ((rsm->r_flags & RACK_TO_REXT) && in rack_process_to_cumack()
10445 (tp->t_flags & TF_RCVD_TSTMP) && in rack_process_to_cumack()
10446 (to->to_flags & TOF_TS) && in rack_process_to_cumack()
10447 (to->to_tsecr != 0) && in rack_process_to_cumack()
10448 (tp->t_flags & TF_PREVVALID)) { in rack_process_to_cumack()
10454 tp->t_flags &= ~TF_PREVVALID; in rack_process_to_cumack()
10455 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { in rack_process_to_cumack()
10460 left = th_ack - rsm->r_end; in rack_process_to_cumack()
10461 if (rack->app_limited_needs_set && newly_acked) in rack_process_to_cumack()
10469 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10470 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { in rack_process_to_cumack()
10478 * given us snd_una up to (rsm->r_end). in rack_process_to_cumack()
10482 * our rsm->r_start in case we get an old ack in rack_process_to_cumack()
10489 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10491 * It was acked on the scoreboard -- remove it from in rack_process_to_cumack()
10492 * total for the part being cum-acked. in rack_process_to_cumack()
10494 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); in rack_process_to_cumack()
10496 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); in rack_process_to_cumack()
10499 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10506 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), in rack_process_to_cumack()
10508 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) in rack_process_to_cumack()
10509 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; in rack_process_to_cumack()
10511 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10517 rsm->r_dupack = 0; in rack_process_to_cumack()
10519 if (rsm->r_rtr_bytes) { in rack_process_to_cumack()
10526 ack_am = (th_ack - rsm->r_start); in rack_process_to_cumack()
10527 if (ack_am >= rsm->r_rtr_bytes) { in rack_process_to_cumack()
10528 rack->r_ctl.rc_holes_rxt -= ack_am; in rack_process_to_cumack()
10529 rsm->r_rtr_bytes -= ack_am; in rack_process_to_cumack()
10539 if (rsm->m && in rack_process_to_cumack()
10540 ((rsm->orig_m_len != rsm->m->m_len) || in rack_process_to_cumack()
10541 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_process_to_cumack()
10545 rsm->soff += (th_ack - rsm->r_start); in rack_process_to_cumack()
10548 tqhash_trim(rack->r_ctl.tqh, th_ack); in rack_process_to_cumack()
10554 m = rsm->m; in rack_process_to_cumack()
10555 soff = rsm->soff; in rack_process_to_cumack()
10557 while (soff >= m->m_len) { in rack_process_to_cumack()
10558 soff -= m->m_len; in rack_process_to_cumack()
10559 KASSERT((m->m_next != NULL), in rack_process_to_cumack()
10561 rsm, rsm->soff, soff, m)); in rack_process_to_cumack()
10562 m = m->m_next; in rack_process_to_cumack()
10565 * This is a fall-back that prevents a panic. In reality in rack_process_to_cumack()
10568 * but tqhash_trim did update rsm->r_start so the offset calculation in rack_process_to_cumack()
10573 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_process_to_cumack()
10574 (rsm->r_start - tp->snd_una), in rack_process_to_cumack()
10582 rsm->m = m; in rack_process_to_cumack()
10583 rsm->soff = soff; in rack_process_to_cumack()
10584 rsm->orig_m_len = rsm->m->m_len; in rack_process_to_cumack()
10585 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_process_to_cumack()
10588 if (rack->app_limited_needs_set && in rack_process_to_cumack()
10589 SEQ_GEQ(th_ack, tp->gput_seq)) in rack_process_to_cumack()
10590 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); in rack_process_to_cumack()
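/*
 * Editor's sketch (not part of rack.c): the soff loop above re-anchors a
 * sendmap entry's (mbuf, offset) pair after a trim by walking forward
 * through the chain until the remaining offset lands inside one buffer.
 * Hedged model with a minimal chain type:
 */
#include <stddef.h>
#include <stdint.h>

struct chain {
	struct chain *next;
	uint32_t len;
};

static struct chain *
chain_seek(struct chain *m, uint32_t *soff)
{
	while (m != NULL && *soff >= m->len) {
		*soff -= m->len;	/* consume this buffer entirely */
		m = m->next;		/* the real code asserts next != NULL */
	}
	return (m);
}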
10599 if (rack->r_might_revert) { in rack_handle_might_revert()
10607 * timer clears this from happening. in rack_handle_might_revert()
10610 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_handle_might_revert()
10611 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_handle_might_revert()
10623 rack->r_ent_rec_ns = 0; in rack_handle_might_revert()
10624 orig_cwnd = tp->snd_cwnd; in rack_handle_might_revert()
10625 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; in rack_handle_might_revert()
10626 tp->snd_recover = tp->snd_una; in rack_handle_might_revert()
10628 if (IN_RECOVERY(tp->t_flags)) { in rack_handle_might_revert()
10630 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0)) { in rack_handle_might_revert()
10633 * and then re-entered recovery (more sack's arrived) in rack_handle_might_revert()
10635 * the first recovery. We want to be able to slow-start in rack_handle_might_revert()
10639 * so we get no slow-start after our RTO. in rack_handle_might_revert()
10641 rack->rto_from_rec = 0; in rack_handle_might_revert()
10642 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_handle_might_revert()
10643 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_handle_might_revert()
10647 rack->r_might_revert = 0; in rack_handle_might_revert()
10660 am = end - start; in rack_note_dsack()
10663 if ((rack->rc_last_tlp_acked_set) && in rack_note_dsack()
10664 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && in rack_note_dsack()
10665 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { in rack_note_dsack()
10676 if (rack->rc_last_sent_tlp_seq_valid) { in rack_note_dsack()
10677 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; in rack_note_dsack()
10678 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && in rack_note_dsack()
10689 if (rack->rc_dsack_round_seen == 0) { in rack_note_dsack()
10690 rack->rc_dsack_round_seen = 1; in rack_note_dsack()
10691 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; in rack_note_dsack()
10692 rack->r_ctl.num_dsack++; in rack_note_dsack()
10693 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ in rack_note_dsack()
10701 rack->r_ctl.dsack_byte_cnt += am; in rack_note_dsack()
10702 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_note_dsack()
10703 rack->r_ctl.retran_during_recovery && in rack_note_dsack()
10704 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { in rack_note_dsack()
10709 rack->r_might_revert = 1; in rack_note_dsack()
10710 rack_handle_might_revert(rack->rc_tp, rack); in rack_note_dsack()
10711 rack->r_might_revert = 0; in rack_note_dsack()
10712 rack->r_ctl.retran_during_recovery = 0; in rack_note_dsack()
10713 rack->r_ctl.dsack_byte_cnt = 0; in rack_note_dsack()
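/*
 * Editor's sketch (not part of rack.c): DSACK accounting above works in
 * rounds of roughly one RTT: the first D-SACK opens a round that closes
 * when the cum-ack passes the snd_max recorded at that instant, and a
 * 16-count persistence keeps the evidence alive across later acks. Toy
 * model with hypothetical names:
 */
#include <stdint.h>

struct dsack_round {
	int open;
	uint32_t round_end;	/* snd_max when the round opened */
	uint32_t num_dsack;
	uint32_t persist;	/* acks left before evidence decays */
};

static void
dsack_note(struct dsack_round *d, uint32_t snd_max)
{
	if (!d->open) {
		d->open = 1;
		d->round_end = snd_max;	/* round closes at this sequence */
		d->num_dsack++;
		d->persist = 16;	/* matches the constant above */
	}
}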
10721 return (((tp->snd_max - snd_una) - in do_rack_compute_pipe()
10722 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); in do_rack_compute_pipe()
10729 (struct tcp_rack *)tp->t_fb_ptr, in rack_compute_pipe()
10730 tp->snd_una)); in rack_compute_pipe()
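/*
 * Editor's note (not part of rack.c): pipe is the RFC 6675 style
 * in-flight estimate:
 *
 *	pipe = (snd_max - snd_una) - (sacked + considered_lost) + holes_rxt
 *
 * Worked example: 100 kB outstanding, 20 kB SACKed, 10 kB deemed lost,
 * and 5 kB of the holes already retransmitted gives
 * pipe = 100k - (20k + 10k) + 5k = 75 kB still in the network.
 */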
10739 rack->r_ctl.rc_prr_delivered += changed; in rack_update_prr()
10741 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { in rack_update_prr()
10745 * Note we use tp->snd_una here and not th_ack because in rack_update_prr()
10748 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10752 if (SEQ_GT(tp->snd_una, th_ack)) { in rack_update_prr()
10753 snd_una = tp->snd_una; in rack_update_prr()
10758 if (pipe > tp->snd_ssthresh) { in rack_update_prr()
10761 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; in rack_update_prr()
10762 if (rack->r_ctl.rc_prr_recovery_fs > 0) in rack_update_prr()
10763 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; in rack_update_prr()
10765 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10770 if (sndcnt > (long)rack->r_ctl.rc_prr_out) in rack_update_prr()
10771 sndcnt -= rack->r_ctl.rc_prr_out; in rack_update_prr()
10774 rack->r_ctl.rc_prr_sndcnt = sndcnt; in rack_update_prr()
10779 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) in rack_update_prr()
10780 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); in rack_update_prr()
10786 if (tp->snd_ssthresh > pipe) { in rack_update_prr()
10787 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); in rack_update_prr()
10790 rack->r_ctl.rc_prr_sndcnt = min(0, limit); in rack_update_prr()
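/*
 * Editor's sketch (not part of rack.c): the branches above implement
 * RFC 6937 Proportional Rate Reduction. While pipe > ssthresh, sending
 * is scaled down in proportion to delivered data; once pipe drops below
 * ssthresh, a bounded slow-start style catch-up refills it. Standalone
 * model with hypothetical names:
 */
#include <stdint.h>

static uint64_t
prr_sndcnt(uint64_t delivered, uint64_t out, uint64_t pipe,
    uint64_t ssthresh, uint64_t recover_fs, uint64_t maxseg)
{
	uint64_t sndcnt, limit;

	if (pipe > ssthresh) {
		/* Proportional: delivered * ssthresh / RecoverFS, minus
		 * what recovery has already put out. */
		sndcnt = (recover_fs > 0) ?
		    (delivered * ssthresh) / recover_fs : 0;
		sndcnt = (sndcnt > out) ? (sndcnt - out) : 0;
	} else {
		/* Catch-up: bounded by newly delivered data plus one MSS,
		 * and by what refills pipe back up to ssthresh. */
		limit = (delivered > out) ? (delivered - out) : 0;
		limit += maxseg;
		sndcnt = ssthresh - pipe;
		if (sndcnt > limit)
			sndcnt = limit;
	}
	return (sndcnt);
}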
10817 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_ack()
10819 rsm = tqhash_min(rack->r_ctl.tqh); in rack_log_ack()
10821 th_ack = th->th_ack; in rack_log_ack()
10822 segsiz = ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10827 * credit for larger cum-ack moves). in rack_log_ack()
10831 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10834 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_log_ack()
10836 tp->t_acktime = ticks; in rack_log_ack()
10838 if (rsm && SEQ_GT(th_ack, rsm->r_start)) in rack_log_ack()
10839 changed = th_ack - rsm->r_start; in rack_log_ack()
10842 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_log_ack()
10844 if ((to->to_flags & TOF_SACK) == 0) { in rack_log_ack()
10848 * For cases where we struck a dup-ack in rack_log_ack()
10853 changed += ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10858 if (SEQ_GT(th_ack, tp->snd_una)) in rack_log_ack()
10861 ack_point = tp->snd_una; in rack_log_ack()
10862 for (i = 0; i < to->to_nsacks; i++) { in rack_log_ack()
10863 bcopy((to->to_sacks + i * TCPOLEN_SACK), in rack_log_ack()
10869 SEQ_LT(sack.start, tp->snd_max) && in rack_log_ack()
10871 SEQ_LEQ(sack.end, tp->snd_max)) { in rack_log_ack()
10882 * It's a D-SACK block. in rack_log_ack()
10887 if (rack->rc_dsack_round_seen) { in rack_log_ack()
10889 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { in rack_log_ack()
10891 rack->rc_dsack_round_seen = 0; in rack_log_ack()
10899 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, in rack_log_ack()
10900 num_sack_blks, th->th_ack); in rack_log_ack()
10901 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); in rack_log_ack()
10948 * Now collapse out the dup-sack and in rack_log_ack()
10956 num_sack_blks--; in rack_log_ack()
10968 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_log_ack()
10970 SEQ_GT(sack_blocks[0].end, rsm->r_start) && in rack_log_ack()
10971 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { in rack_log_ack()
10978 rack->r_wanted_output = 1; in rack_log_ack()
10986 * i.e. the sack-filter pushes down in rack_log_ack()
10992 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); in rack_log_ack()
11004 rsm = rack->r_ctl.rc_sacklast; in rack_log_ack()
11008 rack->r_wanted_output = 1; in rack_log_ack()
11016 * you have more than one sack-blk, this in rack_log_ack()
11018 * and the sack-filter is still working, or in rack_log_ack()
11026 /* Something changed cancel the rack timer */ in rack_log_ack()
11027 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_log_ack()
11031 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_log_ack()
11033 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { in rack_log_ack()
11041 if (rack->rack_no_prr == 0) { in rack_log_ack()
11042 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_log_ack()
11045 rack->r_timer_override = 1; in rack_log_ack()
11046 rack->r_early = 0; in rack_log_ack()
11047 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11048 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11050 (rack->r_rr_config == 3)) { in rack_log_ack()
11055 rack->r_timer_override = 1; in rack_log_ack()
11056 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_log_ack()
11057 rack->r_ctl.rc_resend = rsm; in rack_log_ack()
11059 if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11060 (rack->rack_no_prr == 0) && in rack_log_ack()
11063 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && in rack_log_ack()
11064 ((tcp_in_hpts(rack->rc_tp) == 0) && in rack_log_ack()
11065 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { in rack_log_ack()
11070 rack->r_early = 0; in rack_log_ack()
11071 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11072 rack->r_timer_override = 1; in rack_log_ack()
11082 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_strike_dupack()
11088 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in rack_strike_dupack()
11089 (rsm->r_flags & RACK_MUST_RXT)) { in rack_strike_dupack()
11095 if (rsm && (rsm->r_dupack < 0xff)) { in rack_strike_dupack()
11096 rsm->r_dupack++; in rack_strike_dupack()
11097 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { in rack_strike_dupack()
11103 * we will get a return of the rsm. For a non-sack in rack_strike_dupack()
11108 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); in rack_strike_dupack()
11109 if (rack->r_ctl.rc_resend != NULL) { in rack_strike_dupack()
11110 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { in rack_strike_dupack()
11111 rack_cong_signal(rack->rc_tp, CC_NDUPACK, in rack_strike_dupack()
11114 rack->r_wanted_output = 1; in rack_strike_dupack()
11115 rack->r_timer_override = 1; in rack_strike_dupack()
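/*
 * Editor's sketch (not part of rack.c): dup-ack strikes are counted per
 * sendmap entry rather than per connection, so the entry at the head of
 * the transmit map is what trips the classic three-dup-ack entry into
 * recovery. Toy model:
 */
#include <stdint.h>

#define DUPACK_THRESH	3

static int
strike_dupack(uint8_t *r_dupack)
{
	if (*r_dupack < 0xff)		/* saturating counter */
		(*r_dupack)++;
	return (*r_dupack >= DUPACK_THRESH);	/* time to retransmit */
}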
11134 * timer to expire. While you were waiting all of the acknowledgments in rack_check_bottom_drag()
11139 * gauge the inter-ack times). If that occurs we have a real problem in rack_check_bottom_drag()
11152 if (tp->snd_max == tp->snd_una) { in rack_check_bottom_drag()
11164 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); in rack_check_bottom_drag()
11166 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11168 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && in rack_check_bottom_drag()
11169 (rack->dis_lt_bw == 0) && in rack_check_bottom_drag()
11170 (rack->use_lesser_lt_bw == 0) && in rack_check_bottom_drag()
11173 * Lets use the long-term b/w we have in rack_check_bottom_drag()
11176 if (rack->rc_gp_filled == 0) { in rack_check_bottom_drag()
11188 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11189 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11190 rack->rc_gp_filled = 1; in rack_check_bottom_drag()
11191 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11192 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11193 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11194 } else if (lt_bw > rack->r_ctl.gp_bw) { in rack_check_bottom_drag()
11195 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11196 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11197 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11198 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11199 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11201 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11202 if ((rack->gp_ready == 0) && in rack_check_bottom_drag()
11203 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_check_bottom_drag()
11205 rack->gp_ready = 1; in rack_check_bottom_drag()
11206 if (rack->dgp_on || in rack_check_bottom_drag()
11207 rack->rack_hibeta) in rack_check_bottom_drag()
11209 if (rack->defer_options) in rack_check_bottom_drag()
11214 * zero rtt possibly? Settle for just an old increase. in rack_check_bottom_drag()
11216 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11218 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_check_bottom_drag()
11219 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), in rack_check_bottom_drag()
11221 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11222 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11223 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= in rack_check_bottom_drag()
11234 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11235 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
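/*
 * A rough sketch (illustrative, not rack's code) of the long-term
 * bandwidth idea applied above: accumulate delivered bytes and elapsed
 * microseconds for the life of the flow, convert to bytes/second, and
 * adopt that as the goodput estimate when it beats the current one.
 */
#include <stdint.h>

struct lt_bw_state {
    uint64_t bytes;         /* bytes delivered while measuring */
    uint64_t time_usec;     /* microseconds spent measuring */
};

static uint64_t
lt_bw_bps(const struct lt_bw_state *lt)
{
    if (lt->time_usec == 0)
        return (0);
    /* bytes/second = bytes * microseconds-per-second / microseconds */
    return ((lt->bytes * 1000000ULL) / lt->time_usec);
}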
11246 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid()
11248 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0) in rack_log_hybrid()
11269 log.u_bbr.flex2 = cur->start_seq; in rack_log_hybrid()
11270 log.u_bbr.flex3 = cur->end_seq; in rack_log_hybrid()
11271 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11272 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid()
11273 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid()
11274 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid()
11275 log.u_bbr.rttProp = cur->timestamp; in rack_log_hybrid()
11276 log.u_bbr.cur_del_rate = cur->cspr; in rack_log_hybrid()
11277 log.u_bbr.bw_inuse = cur->start; in rack_log_hybrid()
11278 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid()
11279 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11280 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); in rack_log_hybrid()
11281 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11284 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid()
11293 log.u_bbr.flex7 = rack->rc_catch_up; in rack_log_hybrid()
11295 log.u_bbr.flex7 |= rack->rc_hybrid_mode; in rack_log_hybrid()
11297 log.u_bbr.flex7 |= rack->dgp_on; in rack_log_hybrid()
11305 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid()
11307 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid()
11309 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid()
11311 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid()
11313 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; in rack_log_hybrid()
11314 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; in rack_log_hybrid()
11315 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_hybrid()
11316 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; in rack_log_hybrid()
11317 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; in rack_log_hybrid()
11318 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; in rack_log_hybrid()
11319 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid()
11320 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid()
11321 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid()
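/*
 * Sketch of the 64-bit split used by the log above: the BBR log fields
 * are 32 bits wide, so 64-bit values (localtime, end, deadline) travel
 * as a high/low pair and are re-joined by the log reader.
 */
#include <stdint.h>

static inline void
split_u64(uint64_t v, uint32_t *hi, uint32_t *lo)
{
    *hi = (uint32_t)(v >> 32);
    *lo = (uint32_t)(v & 0x00000000ffffffffULL);
}

static inline uint64_t
join_u64(uint32_t hi, uint32_t lo)
{
    return (((uint64_t)hi << 32) | lo);
}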
11336 orig_ent = rack->r_ctl.rc_last_sft; in rack_set_dgp_hybrid_mode()
11337 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); in rack_set_dgp_hybrid_mode()
11340 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11342 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); in rack_set_dgp_hybrid_mode()
11349 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11350 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11351 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11352 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11353 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11355 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11357 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11358 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11360 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11361 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11365 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { in rack_set_dgp_hybrid_mode()
11367 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11368 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11369 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11370 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11372 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11373 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11375 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_set_dgp_hybrid_mode()
11376 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_set_dgp_hybrid_mode()
11377 rc_cur->first_send = cts; in rack_set_dgp_hybrid_mode()
11378 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11379 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11390 tp = rack->rc_tp; in rack_set_dgp_hybrid_mode()
11391 if ((rack->r_ctl.rc_last_sft != NULL) && in rack_set_dgp_hybrid_mode()
11392 (rack->r_ctl.rc_last_sft == rc_cur)) { in rack_set_dgp_hybrid_mode()
11394 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11398 if (rack->rc_hybrid_mode == 0) { in rack_set_dgp_hybrid_mode()
11399 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11401 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11402 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11403 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11408 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) { in rack_set_dgp_hybrid_mode()
11410 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11411 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11413 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11415 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11416 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11417 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11419 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11422 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) in rack_set_dgp_hybrid_mode()
11423 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; in rack_set_dgp_hybrid_mode()
11425 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11426 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { in rack_set_dgp_hybrid_mode()
11430 * sendtime not arrival time for catch-up mode. in rack_set_dgp_hybrid_mode()
11432 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; in rack_set_dgp_hybrid_mode()
11434 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && in rack_set_dgp_hybrid_mode()
11435 (rc_cur->cspr > 0)) { in rack_set_dgp_hybrid_mode()
11438 rack->rc_catch_up = 1; in rack_set_dgp_hybrid_mode()
11443 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { in rack_set_dgp_hybrid_mode()
11449 rc_cur->deadline = cts; in rack_set_dgp_hybrid_mode()
11455 rc_cur->deadline = rc_cur->localtime; in rack_set_dgp_hybrid_mode()
11461 len = rc_cur->end - rc_cur->start; in rack_set_dgp_hybrid_mode()
11462 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { in rack_set_dgp_hybrid_mode()
11467 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); in rack_set_dgp_hybrid_mode()
11477 len /= rc_cur->cspr; in rack_set_dgp_hybrid_mode()
11478 rc_cur->deadline += len; in rack_set_dgp_hybrid_mode()
11480 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11481 rc_cur->deadline = 0; in rack_set_dgp_hybrid_mode()
11483 if (rack->r_ctl.client_suggested_maxseg != 0) { in rack_set_dgp_hybrid_mode()
11491 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11492 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11493 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11497 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11498 rack->r_ctl.last_tm_mark = rc_cur->timestamp; in rack_set_dgp_hybrid_mode()
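/*
 * Sketch of the catch-up deadline math above, assuming cspr is the
 * client-suggested rate in bytes/second and times are in microseconds:
 * the deadline is the start time plus range-bytes / rate (the listing
 * also inflates the range for TLS record overhead before dividing).
 */
#include <stdint.h>

static uint64_t
catchup_deadline(uint64_t start_usec, uint64_t range_bytes,
    uint64_t cspr_bps)
{
    if (cspr_bps == 0)
        return (start_usec);    /* no rate hint, no catch-up window */
    return (start_usec + (range_bytes * 1000000ULL) / cspr_bps);
}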
11508 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11510 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || in rack_chk_req_and_hybrid_on_out()
11511 (SEQ_GEQ(seq, ent->end_seq))) { in rack_chk_req_and_hybrid_on_out()
11514 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11520 if (SEQ_LT(ent->end_seq, (seq + len))) { in rack_chk_req_and_hybrid_on_out()
11531 ent->end_seq = (seq + len); in rack_chk_req_and_hybrid_on_out()
11532 if (rack->rc_hybrid_mode) in rack_chk_req_and_hybrid_on_out()
11536 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_chk_req_and_hybrid_on_out()
11537 ent->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_chk_req_and_hybrid_on_out()
11538 ent->first_send = cts; in rack_chk_req_and_hybrid_on_out()
11539 ent->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_chk_req_and_hybrid_on_out()
11540 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_chk_req_and_hybrid_on_out()
11569 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; in rack_gain_for_fastoutput()
11570 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), in rack_gain_for_fastoutput()
11571 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11575 rack->r_ctl.fsb.left_to_send = new_total; in rack_gain_for_fastoutput()
11576 …KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_ma… in rack_gain_for_fastoutput()
11578 rack, rack->r_ctl.fsb.left_to_send, in rack_gain_for_fastoutput()
11579 sbavail(&rack->rc_inp->inp_socket->so_snd), in rack_gain_for_fastoutput()
11580 (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
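/*
 * Sketch of the fast-output gating above: the bytes the fast path may
 * claim are limited both by what the socket buffer holds beyond what
 * is already in flight and by the unused part of the peer's window.
 */
#include <stdint.h>

static uint32_t
fast_output_room(uint32_t sb_avail, uint32_t snd_wnd,
    uint32_t snd_max, uint32_t snd_una)
{
    uint32_t inflight = snd_max - snd_una;
    uint32_t sb_room, wnd_room;

    sb_room = (sb_avail > inflight) ? sb_avail - inflight : 0;
    wnd_room = (snd_wnd > inflight) ? snd_wnd - inflight : 0;
    return ((sb_room < wnd_room) ? sb_room : wnd_room);
}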
11619 snd_una = rack->rc_tp->snd_una; in rack_adjust_sendmap_head()
11621 m = sb->sb_mb; in rack_adjust_sendmap_head()
11622 rsm = tqhash_min(rack->r_ctl.tqh); in rack_adjust_sendmap_head()
11628 KASSERT((rsm->m == m), in rack_adjust_sendmap_head()
11629 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", in rack_adjust_sendmap_head()
11631 while (rsm->m && (rsm->m == m)) { in rack_adjust_sendmap_head()
11637 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); in rack_adjust_sendmap_head()
11638 if ((rsm->orig_m_len != m->m_len) || in rack_adjust_sendmap_head()
11639 (rsm->orig_t_space != M_TRAILINGROOM(m))){ in rack_adjust_sendmap_head()
11643 KASSERT((rsm->soff == 0), in rack_adjust_sendmap_head()
11644 ("Rack:%p rsm:%p -- rsm at head but soff not zero", in rack_adjust_sendmap_head()
11648 if ((rsm->soff != soff) || (rsm->m != tm)) { in rack_adjust_sendmap_head()
11657 rsm->m = tm; in rack_adjust_sendmap_head()
11658 rsm->soff = soff; in rack_adjust_sendmap_head()
11660 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11661 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11663 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11664 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11667 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); in rack_adjust_sendmap_head()
11668 if (rsm->m) { in rack_adjust_sendmap_head()
11669 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11670 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11672 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11673 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11676 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_adjust_sendmap_head()
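/*
 * Sketch of the sendmap re-alignment above: after snd_una advances and
 * the acked mbufs are cut from the socket buffer, each map entry's
 * cached (mbuf, offset) pair must be recomputed as the position of
 * (r_start - snd_una) within the remaining chain. Simplified types:
 */
#include <stddef.h>
#include <stdint.h>

struct sk_mbuf {                /* stand-in for struct mbuf */
    struct sk_mbuf *next;
    uint32_t len;
};

/* Locate the mbuf and intra-mbuf offset holding stream offset 'off'. */
static struct sk_mbuf *
chain_locate(struct sk_mbuf *m, uint32_t off, uint32_t *soff)
{
    while (m != NULL && off >= m->len) {
        off -= m->len;
        m = m->next;
    }
    *soff = off;                /* leftover offset into returned mbuf */
    return (m);
}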
11689 if ((rack->rc_hybrid_mode == 0) && in rack_req_check_for_comp()
11690 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { in rack_req_check_for_comp()
11695 tcp_req_check_for_comp(rack->rc_tp, th_ack); in rack_req_check_for_comp()
11705 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11719 data = ent->end - ent->start; in rack_req_check_for_comp()
11720 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_req_check_for_comp()
11721 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { in rack_req_check_for_comp()
11722 if (ent->first_send > ent->localtime) in rack_req_check_for_comp()
11723 ftim = ent->first_send; in rack_req_check_for_comp()
11725 ftim = ent->localtime; in rack_req_check_for_comp()
11728 ftim = ent->localtime; in rack_req_check_for_comp()
11730 if (laa > ent->localtime) in rack_req_check_for_comp()
11731 tim = laa - ftim; in rack_req_check_for_comp()
11745 if (ent == rack->r_ctl.rc_last_sft) { in rack_req_check_for_comp()
11746 rack->r_ctl.rc_last_sft = NULL; in rack_req_check_for_comp()
11747 if (rack->rc_hybrid_mode) { in rack_req_check_for_comp()
11748 rack->rc_catch_up = 0; in rack_req_check_for_comp()
11749 if (rack->cspr_is_fcc == 0) in rack_req_check_for_comp()
11750 rack->r_ctl.bw_rate_cap = 0; in rack_req_check_for_comp()
11752 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_req_check_for_comp()
11753 rack->r_ctl.client_suggested_maxseg = 0; in rack_req_check_for_comp()
11757 tcp_req_log_req_info(rack->rc_tp, ent, in rack_req_check_for_comp()
11760 tcp_req_free_a_slot(rack->rc_tp, ent); in rack_req_check_for_comp()
11761 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
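/*
 * Sketch of the request-completion accounting above: once the last byte
 * of a tracked request is cumulatively acked, the achieved rate is the
 * request's byte range divided by the time from first send (or arrival,
 * whichever is later) to the ack's receive time. Illustrative only.
 */
#include <stdint.h>

static uint64_t
req_achieved_bps(uint64_t start, uint64_t end,
    uint64_t first_usec, uint64_t acked_usec)
{
    uint64_t tim = (acked_usec > first_usec) ? acked_usec - first_usec : 0;

    if (tim == 0)
        return (0);
    return (((end - start) * 1000000ULL) / tim);
}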
11770 * For ret_val: if it's 0 the TCP is locked; if it's non-zero
11790 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_ack()
11791 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { in rack_process_ack()
11793 tp->t_flags2 |= TF2_NO_ISS_CHECK; in rack_process_ack()
11799 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { in rack_process_ack()
11801 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11804 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { in rack_process_ack()
11806 seq_min = tp->iss + 1; in rack_process_ack()
11813 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11817 if (SEQ_LT(th->th_ack, seq_min)) { in rack_process_ack()
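/*
 * Sketch of the "too old ACK" floor computed above: an acceptable ACK
 * may not sit more than one maximum send window below snd_una, and
 * until snd_una has advanced 64K (window-scaled) past the ISS it must
 * also lie above the ISS. A plain modular compare stands in for SEQ_LT.
 */
#include <stdint.h>
#include <stdbool.h>

static inline bool
seq_before(uint32_t a, uint32_t b)
{
    return ((int32_t)(a - b) < 0);
}

static bool
ack_too_old(uint32_t th_ack, uint32_t snd_una, uint32_t max_sndwnd,
    uint32_t iss, bool iss_check_done)
{
    uint32_t seq_min = snd_una - max_sndwnd;

    if (!iss_check_done && seq_before(seq_min, iss + 1))
        seq_min = iss + 1;      /* nothing before the ISS is valid */
    return (seq_before(th_ack, seq_min));
}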
11824 rack->r_wanted_output = 1; in rack_process_ack()
11828 if (SEQ_GT(th->th_ack, tp->snd_max)) { in rack_process_ack()
11830 rack->r_wanted_output = 1; in rack_process_ack()
11833 if (rack->gp_ready && in rack_process_ack()
11834 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_process_ack()
11837 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { in rack_process_ack()
11841 in_rec = IN_FASTRECOVERY(tp->t_flags); in rack_process_ack()
11842 if (rack->rc_in_persist) { in rack_process_ack()
11843 tp->t_rxtshift = 0; in rack_process_ack()
11844 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11845 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11848 if ((th->th_ack == tp->snd_una) && in rack_process_ack()
11849 (tiwin == tp->snd_wnd) && in rack_process_ack()
11851 ((to->to_flags & TOF_SACK) == 0)) { in rack_process_ack()
11852 rack_strike_dupack(rack, th->th_ack); in rack_process_ack()
11855 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), in rack_process_ack()
11859 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_process_ack()
11865 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { in rack_process_ack()
11866 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_process_ack()
11867 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_ack()
11868 rack->r_ctl.rc_reorder_ts = 1; in rack_process_ack()
11876 if (tp->t_flags & TF_NEEDSYN) { in rack_process_ack()
11878 * T/TCP: Connection was half-synchronized, and our SYN has in rack_process_ack()
11880 * to non-starred state, increment snd_una for ACK of SYN, in rack_process_ack()
11883 tp->t_flags &= ~TF_NEEDSYN; in rack_process_ack()
11884 tp->snd_una++; in rack_process_ack()
11886 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_process_ack()
11888 tp->rcv_scale = tp->request_r_scale; in rack_process_ack()
11892 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_ack()
11897 * Any time we move the cum-ack forward, clear in rack_process_ack()
11898 * keep-alive tied probe-not-answered. The in rack_process_ack()
11901 rack->probe_not_answered = 0; in rack_process_ack()
11911 if ((tp->t_flags & TF_PREVVALID) && in rack_process_ack()
11912 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_process_ack()
11913 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
11914 if (tp->t_rxtshift == 1 && in rack_process_ack()
11915 (int)(ticks - tp->t_badrxtwin) < 0) in rack_process_ack()
11916 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_process_ack()
11920 tp->t_rxtshift = 0; in rack_process_ack()
11921 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11922 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11923 rack->rc_tlp_in_progress = 0; in rack_process_ack()
11924 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_process_ack()
11926 * If it is the RXT timer we want to in rack_process_ack()
11929 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_process_ack()
11930 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11932 rack_req_check_for_comp(rack, th->th_ack); in rack_process_ack()
11937 * no timestamp is present but transmit timer is running and timed in rack_process_ack()
11939 * we now have an rtt measurement, cancel the timer backoff (cf., in rack_process_ack()
11941 * timer. in rack_process_ack()
11944 * phase, ignore timestamps of 0 or we could calculate a huge RTT in rack_process_ack()
11945 * and blow up the retransmit timer. in rack_process_ack()
11948 * If all outstanding data is acked, stop retransmit timer and in rack_process_ack()
11950 * data to be acked, restart retransmit timer, using current in rack_process_ack()
11951 * (possibly backed-off) value. in rack_process_ack()
11958 if (IN_RECOVERY(tp->t_flags)) { in rack_process_ack()
11959 if (SEQ_LT(th->th_ack, tp->snd_recover) && in rack_process_ack()
11960 (SEQ_LT(th->th_ack, tp->snd_max))) { in rack_process_ack()
11963 rack_post_recovery(tp, th->th_ack); in rack_process_ack()
11970 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_process_ack()
11972 p_cwnd += tp->snd_cwnd; in rack_process_ack()
11974 } else if ((rack->rto_from_rec == 1) && in rack_process_ack()
11975 SEQ_GEQ(th->th_ack, tp->snd_recover)) { in rack_process_ack()
11978 * and never re-entered recovery. The timeout(s) in rack_process_ack()
11982 rack->rto_from_rec = 0; in rack_process_ack()
11989 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); in rack_process_ack()
11991 (tp->snd_cwnd > p_cwnd)) { in rack_process_ack()
11992 /* Must be non-newreno (cubic) getting too far ahead of itself */ in rack_process_ack()
11993 tp->snd_cwnd = p_cwnd; in rack_process_ack()
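/*
 * Sketch of the post-recovery cwnd clamp above: on the ACK that exits
 * recovery the congestion control may still grow cwnd, but the growth
 * is capped at the pre-ACK cwnd plus one (minimum pacing) segment so a
 * non-newreno CC such as cubic cannot leap ahead on that single ACK.
 */
#include <stdint.h>

static uint32_t
post_recovery_clamp(uint32_t cwnd_after_cc, uint32_t cwnd_before,
    uint32_t maxseg, uint32_t pace_min)
{
    uint32_t cap = cwnd_before + ((maxseg < pace_min) ? maxseg : pace_min);

    return ((cwnd_after_cc > cap) ? cap : cwnd_after_cc);
}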
11996 acked_amount = min(acked, (int)sbavail(&so->so_snd)); in rack_process_ack()
11997 tp->snd_wnd -= acked_amount; in rack_process_ack()
11998 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_process_ack()
11999 if ((sbused(&so->so_snd) == 0) && in rack_process_ack()
12001 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
12002 (tp->t_flags & TF_SENTFIN)) { in rack_process_ack()
12011 tp->snd_una = th->th_ack; in rack_process_ack()
12013 if (acked_amount && sbavail(&so->so_snd)) in rack_process_ack()
12014 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_process_ack()
12015 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_process_ack()
12019 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_process_ack()
12020 tp->snd_recover = tp->snd_una; in rack_process_ack()
12022 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { in rack_process_ack()
12023 tp->snd_nxt = tp->snd_max; in rack_process_ack()
12026 (rack->use_fixed_rate == 0) && in rack_process_ack()
12027 (rack->in_probe_rtt == 0) && in rack_process_ack()
12028 rack->rc_gp_dyn_mul && in rack_process_ack()
12029 rack->rc_always_pace) { in rack_process_ack()
12033 if (tp->snd_una == tp->snd_max) { in rack_process_ack()
12035 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
12036 if (rack->r_ctl.rc_went_idle_time == 0) in rack_process_ack()
12037 rack->r_ctl.rc_went_idle_time = 1; in rack_process_ack()
12038 rack->r_ctl.retran_during_recovery = 0; in rack_process_ack()
12039 rack->r_ctl.dsack_byte_cnt = 0; in rack_process_ack()
12041 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_process_ack()
12042 tp->t_acktime = 0; in rack_process_ack()
12043 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
12044 rack->rc_suspicious = 0; in rack_process_ack()
12046 rack->r_wanted_output = 1; in rack_process_ack()
12047 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_process_ack()
12048 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
12049 (sbavail(&so->so_snd) == 0) && in rack_process_ack()
12050 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_process_ack()
12057 /* tcp_close will kill the inp; pre-log the Reset */ in rack_process_ack()
12074 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_collapse()
12083 log.u_bbr.flex5 = rack->r_must_retran; in rack_log_collapse()
12085 log.u_bbr.flex7 = rack->rc_has_collapsed; in rack_log_collapse()
12095 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_collapse()
12096 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_collapse()
12097 &rack->rc_inp->inp_socket->so_rcv, in rack_log_collapse()
12098 &rack->rc_inp->inp_socket->so_snd, in rack_log_collapse()
12113 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_collapsed_window()
12114 if ((rack->rc_has_collapsed == 0) || in rack_collapsed_window()
12115 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) in rack_collapsed_window()
12117 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; in rack_collapsed_window()
12118 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; in rack_collapsed_window()
12119 rack->rc_has_collapsed = 1; in rack_collapsed_window()
12120 rack->r_collapse_point_valid = 1; in rack_collapsed_window()
12121 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); in rack_collapsed_window()
12132 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_un_collapse_window()
12133 rack->rc_has_collapsed = 0; in rack_un_collapse_window()
12134 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12137 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
12141 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { in rack_un_collapse_window()
12142 rack_log_collapse(rack, rsm->r_start, rsm->r_end, in rack_un_collapse_window()
12143 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); in rack_un_collapse_window()
12152 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12154 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_un_collapse_window()
12156 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_un_collapse_window()
12161 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, in rack_un_collapse_window()
12162 rack->r_ctl.last_collapse_point, __LINE__); in rack_un_collapse_window()
12163 if (rsm->r_in_tmap) { in rack_un_collapse_window()
12164 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_un_collapse_window()
12165 nrsm->r_in_tmap = 1; in rack_un_collapse_window()
12175 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { in rack_un_collapse_window()
12177 nrsm->r_flags |= RACK_RWND_COLLAPSED; in rack_un_collapse_window()
12178 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); in rack_un_collapse_window()
12184 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
12193 rack->r_ctl.rc_rcvtime, __LINE__); in rack_handle_delayed_ack()
12194 tp->t_flags |= TF_DELACK; in rack_handle_delayed_ack()
12196 rack->r_wanted_output = 1; in rack_handle_delayed_ack()
12197 tp->t_flags |= TF_ACKNOW; in rack_handle_delayed_ack()
12209 if (rack->r_fast_output) { in rack_validate_fo_sendwin_up()
12217 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12219 if (out >= tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12221 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
12224 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; in rack_validate_fo_sendwin_up()
12225 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { in rack_validate_fo_sendwin_up()
12227 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
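/*
 * Sketch of the fast-output revalidation above: when the advertised
 * window changes, the precomputed left_to_send must be re-fit under
 * snd_wnd, and the fast path is abandoned if the window is already
 * consumed or less than one full segment of room remains.
 */
#include <stdint.h>
#include <stdbool.h>

static bool                     /* false: fast output must be disabled */
refit_fast_output(uint32_t out, uint32_t snd_wnd, uint32_t maxseg,
    uint32_t *left_to_send)
{
    if (out + *left_to_send <= snd_wnd)
        return (true);          /* still fits, nothing to do */
    if (out >= snd_wnd)
        return (false);         /* window entirely consumed */
    *left_to_send = snd_wnd - out;
    return (*left_to_send >= maxseg);
}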
12254 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_data()
12255 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_data()
12257 (SEQ_LT(tp->snd_wl1, th->th_seq) || in rack_process_data()
12258 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || in rack_process_data()
12259 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { in rack_process_data()
12262 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) in rack_process_data()
12264 tp->snd_wnd = tiwin; in rack_process_data()
12266 tp->snd_wl1 = th->th_seq; in rack_process_data()
12267 tp->snd_wl2 = th->th_ack; in rack_process_data()
12268 if (tp->snd_wnd > tp->max_sndwnd) in rack_process_data()
12269 tp->max_sndwnd = tp->snd_wnd; in rack_process_data()
12270 rack->r_wanted_output = 1; in rack_process_data()
12272 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { in rack_process_data()
12273 tp->snd_wnd = tiwin; in rack_process_data()
12275 tp->snd_wl1 = th->th_seq; in rack_process_data()
12276 tp->snd_wl2 = th->th_ack; in rack_process_data()
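/*
 * Sketch of the classic window-update test above (RFC 793 SND.WL1/WL2,
 * the ACK-bit check omitted): accept the advertised window only from a
 * segment newer than the last one used, i.e. a strictly newer SEQ, the
 * same SEQ with a newer ACK, or the same SEQ and ACK but a larger window.
 */
#include <stdint.h>
#include <stdbool.h>

static inline bool
seq_newer(uint32_t a, uint32_t b)   /* true when b is strictly after a */
{
    return ((int32_t)(a - b) < 0);
}

static bool
window_update_ok(uint32_t seq, uint32_t ack, uint32_t tiwin,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
    return (seq_newer(snd_wl1, seq) ||
        (snd_wl1 == seq && (seq_newer(snd_wl2, ack) ||
        (snd_wl2 == ack && tiwin > snd_wnd))));
}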
12279 if (tp->snd_wnd < ctf_outstanding(tp)) in rack_process_data()
12281 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_process_data()
12282 else if (rack->rc_has_collapsed) in rack_process_data()
12284 if ((rack->r_collapse_point_valid) && in rack_process_data()
12285 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) in rack_process_data()
12286 rack->r_collapse_point_valid = 0; in rack_process_data()
12287 /* Was persist timer active and now we have window space? */ in rack_process_data()
12288 if ((rack->rc_in_persist != 0) && in rack_process_data()
12289 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_process_data()
12290 rack->r_ctl.rc_pace_min_segs))) { in rack_process_data()
12291 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); in rack_process_data()
12292 tp->snd_nxt = tp->snd_max; in rack_process_data()
12293 /* Make sure we output to start the timer */ in rack_process_data()
12294 rack->r_wanted_output = 1; in rack_process_data()
12297 if ((rack->rc_in_persist == 0) && in rack_process_data()
12298 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_process_data()
12299 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_process_data()
12300 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_process_data()
12301 sbavail(&tptosocket(tp)->so_snd) && in rack_process_data()
12302 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_process_data()
12309 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_process_data()
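/*
 * Sketch of the persist-entry test above (the established-state and
 * collapsed-window details omitted): drop into persist when the peer's
 * window has shrunk below half the largest window it ever advertised
 * (further capped by the minimum pacing segment), everything sent has
 * been acked, and buffered data no longer fits the window.
 */
#include <stdint.h>
#include <stdbool.h>

static bool
should_enter_persist(uint32_t snd_wnd, uint32_t high_rwnd,
    uint32_t pace_min, uint32_t snd_max, uint32_t snd_una,
    uint32_t sb_avail)
{
    uint32_t thresh = high_rwnd / 2;

    if (pace_min < thresh)
        thresh = pace_min;
    return (snd_wnd < thresh && snd_max == snd_una &&
        sb_avail > 0 && sb_avail > snd_wnd);
}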
12311 if (tp->t_flags2 & TF2_DROP_AF_DATA) { in rack_process_data()
12319 tp->rcv_up = tp->rcv_nxt; in rack_process_data()
12324 * This process logically involves adjusting tp->rcv_wnd as data is in rack_process_data()
12329 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_process_data()
12330 (tp->t_flags & TF_FASTOPEN)); in rack_process_data()
12332 TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12333 tcp_seq save_start = th->th_seq; in rack_process_data()
12334 tcp_seq save_rnxt = tp->rcv_nxt; in rack_process_data()
12349 if (th->th_seq == tp->rcv_nxt && in rack_process_data()
12351 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_process_data()
12356 if (so->so_rcv.sb_shlim) { in rack_process_data()
12359 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_process_data()
12368 tp->rcv_nxt += tlen; in rack_process_data()
12370 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_process_data()
12371 (tp->t_fbyte_in == 0)) { in rack_process_data()
12372 tp->t_fbyte_in = ticks; in rack_process_data()
12373 if (tp->t_fbyte_in == 0) in rack_process_data()
12374 tp->t_fbyte_in = 1; in rack_process_data()
12375 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_process_data()
12376 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_process_data()
12382 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_process_data()
12391 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_process_data()
12396 sbappendstream_locked(&so->so_rcv, m, 0); in rack_process_data()
12398 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_process_data()
12402 if (so->so_rcv.sb_shlim && appended != mcnt) in rack_process_data()
12403 counter_fo_release(so->so_rcv.sb_shlim, in rack_process_data()
12404 mcnt - appended); in rack_process_data()
12416 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12417 if (tp->t_flags & TF_WAKESOR) { in rack_process_data()
12418 tp->t_flags &= ~TF_WAKESOR; in rack_process_data()
12423 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_process_data()
12425 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_process_data()
12433 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { in rack_process_data()
12434 if ((tp->rcv_numsacks >= 1) && in rack_process_data()
12435 (tp->sackblks[0].end == save_start)) { in rack_process_data()
12441 tp->sackblks[0].start, in rack_process_data()
12442 tp->sackblks[0].end); in rack_process_data()
12466 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12470 * If connection is half-synchronized (ie NEEDSYN in rack_process_data()
12476 if (tp->t_flags & TF_NEEDSYN) { in rack_process_data()
12478 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12479 tp->t_flags |= TF_DELACK; in rack_process_data()
12481 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12483 tp->rcv_nxt++; in rack_process_data()
12485 switch (tp->t_state) { in rack_process_data()
12491 tp->t_starttime = ticks; in rack_process_data()
12495 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12505 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12511 * starting the time-wait timer, turning off the in rack_process_data()
12516 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12524 if ((tp->t_flags & TF_ACKNOW) || in rack_process_data()
12525 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { in rack_process_data()
12526 rack->r_wanted_output = 1; in rack_process_data()
12533 * have also broken out the fast-data path, just like
12534 * the fast-ack.
12553 if (__predict_false(th->th_seq != tp->rcv_nxt)) { in rack_do_fastnewdata()
12556 if (tiwin && tiwin != tp->snd_wnd) { in rack_do_fastnewdata()
12559 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { in rack_do_fastnewdata()
12562 if (__predict_false((to->to_flags & TOF_TS) && in rack_do_fastnewdata()
12563 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { in rack_do_fastnewdata()
12566 if (__predict_false((th->th_ack != tp->snd_una))) { in rack_do_fastnewdata()
12569 if (__predict_false(tlen > sbspace(&so->so_rcv))) { in rack_do_fastnewdata()
12572 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fastnewdata()
12573 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_do_fastnewdata()
12574 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fastnewdata()
12575 tp->ts_recent = to->to_tsval; in rack_do_fastnewdata()
12577 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_fastnewdata()
12579 * This is a pure, in-sequence data packet with nothing on the in rack_do_fastnewdata()
12582 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_do_fastnewdata()
12585 if (so->so_rcv.sb_shlim) { in rack_do_fastnewdata()
12588 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_do_fastnewdata()
12597 if (tp->rcv_numsacks) in rack_do_fastnewdata()
12600 tp->rcv_nxt += tlen; in rack_do_fastnewdata()
12602 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_do_fastnewdata()
12603 (tp->t_fbyte_in == 0)) { in rack_do_fastnewdata()
12604 tp->t_fbyte_in = ticks; in rack_do_fastnewdata()
12605 if (tp->t_fbyte_in == 0) in rack_do_fastnewdata()
12606 tp->t_fbyte_in = 1; in rack_do_fastnewdata()
12607 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_do_fastnewdata()
12608 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_do_fastnewdata()
12613 tp->snd_wl1 = th->th_seq; in rack_do_fastnewdata()
12617 tp->rcv_up = tp->rcv_nxt; in rack_do_fastnewdata()
12624 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fastnewdata()
12633 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_do_fastnewdata()
12638 sbappendstream_locked(&so->so_rcv, m, 0); in rack_do_fastnewdata()
12641 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_do_fastnewdata()
12645 if (so->so_rcv.sb_shlim && mcnt != appended) in rack_do_fastnewdata()
12646 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); in rack_do_fastnewdata()
12649 if (tp->snd_una == tp->snd_max) in rack_do_fastnewdata()
12650 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_fastnewdata()
12657 * in sequence to remain in the fast-path. We also add
12661 * slow-path.
12673 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_fastack()
12677 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { in rack_fastack()
12685 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { in rack_fastack()
12689 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { in rack_fastack()
12693 if (__predict_false(IN_RECOVERY(tp->t_flags))) { in rack_fastack()
12697 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fastack()
12698 if (rack->r_ctl.rc_sacked) { in rack_fastack()
12702 /* Ok if we reach here, we can process a fast-ack */ in rack_fastack()
12703 if (rack->gp_ready && in rack_fastack()
12704 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_fastack()
12707 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_fastack()
12710 if (tiwin != tp->snd_wnd) { in rack_fastack()
12711 tp->snd_wnd = tiwin; in rack_fastack()
12713 tp->snd_wl1 = th->th_seq; in rack_fastack()
12714 if (tp->snd_wnd > tp->max_sndwnd) in rack_fastack()
12715 tp->max_sndwnd = tp->snd_wnd; in rack_fastack()
12718 if ((rack->rc_in_persist != 0) && in rack_fastack()
12719 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_fastack()
12720 rack->r_ctl.rc_pace_min_segs))) { in rack_fastack()
12724 if ((rack->rc_in_persist == 0) && in rack_fastack()
12725 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_fastack()
12726 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_fastack()
12727 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_fastack()
12728 sbavail(&tptosocket(tp)->so_snd) && in rack_fastack()
12729 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_fastack()
12736 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); in rack_fastack()
12743 if ((to->to_flags & TOF_TS) != 0 && in rack_fastack()
12744 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_fastack()
12745 tp->ts_recent_age = tcp_ts_getticks(); in rack_fastack()
12746 tp->ts_recent = to->to_tsval; in rack_fastack()
12756 if ((tp->t_flags & TF_PREVVALID) && in rack_fastack()
12757 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_fastack()
12758 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12759 if (tp->t_rxtshift == 1 && in rack_fastack()
12760 (int)(ticks - tp->t_badrxtwin) < 0) in rack_fastack()
12761 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_fastack()
12764 * Recalculate the transmit timer / rtt. in rack_fastack()
12767 * phase, ignore timestamps of 0 or we could calculate a huge RTT in rack_fastack()
12768 * and blow up the retransmit timer. in rack_fastack()
12781 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); in rack_fastack()
12783 mfree = sbcut_locked(&so->so_snd, acked); in rack_fastack()
12784 tp->snd_una = th->th_ack; in rack_fastack()
12786 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_fastack()
12788 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_fastack()
12791 tp->t_rxtshift = 0; in rack_fastack()
12792 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_fastack()
12793 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_fastack()
12794 rack->rc_tlp_in_progress = 0; in rack_fastack()
12795 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_fastack()
12797 * If it is the RXT timer we want to in rack_fastack()
12800 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_fastack()
12801 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12804 rack_req_check_for_comp(rack, th->th_ack); in rack_fastack()
12812 if (tp->snd_wnd < ctf_outstanding(tp)) { in rack_fastack()
12814 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_fastack()
12815 } else if (rack->rc_has_collapsed) in rack_fastack()
12817 if ((rack->r_collapse_point_valid) && in rack_fastack()
12818 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) in rack_fastack()
12819 rack->r_collapse_point_valid = 0; in rack_fastack()
12823 tp->snd_wl2 = th->th_ack; in rack_fastack()
12824 tp->t_dupacks = 0; in rack_fastack()
12829 * If all outstanding data are acked, stop retransmit timer, in rack_fastack()
12830 * otherwise restart timer using current (possibly backed-off) in rack_fastack()
12831 * value. If the process is waiting for space, wakeup/selwakeup/signal. in rack_fastack()
12836 (rack->use_fixed_rate == 0) && in rack_fastack()
12837 (rack->in_probe_rtt == 0) && in rack_fastack()
12838 rack->rc_gp_dyn_mul && in rack_fastack()
12839 rack->rc_always_pace) { in rack_fastack()
12843 if (tp->snd_una == tp->snd_max) { in rack_fastack()
12844 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12845 rack->r_ctl.retran_during_recovery = 0; in rack_fastack()
12846 rack->rc_suspicious = 0; in rack_fastack()
12847 rack->r_ctl.dsack_byte_cnt = 0; in rack_fastack()
12848 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_fastack()
12849 if (rack->r_ctl.rc_went_idle_time == 0) in rack_fastack()
12850 rack->r_ctl.rc_went_idle_time = 1; in rack_fastack()
12852 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_fastack()
12853 tp->t_acktime = 0; in rack_fastack()
12854 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12856 if (acked && rack->r_fast_output) in rack_fastack()
12858 if (sbavail(&so->so_snd)) { in rack_fastack()
12859 rack->r_wanted_output = 1; in rack_fastack()
12887 * this is an acceptable SYN segment: initialize tp->rcv_nxt and in rack_do_syn_sent()
12888 * tp->irs; if seg contains ack then advance tp->snd_una; if seg in rack_do_syn_sent()
12895 (SEQ_LEQ(th->th_ack, tp->iss) || in rack_do_syn_sent()
12896 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_sent()
12916 tp->irs = th->th_seq; in rack_do_syn_sent()
12918 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_sent()
12928 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_sent()
12930 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_sent()
12932 tp->rcv_adv += min(tp->rcv_wnd, in rack_do_syn_sent()
12933 TCP_MAXWIN << tp->rcv_scale); in rack_do_syn_sent()
12938 if ((tp->t_flags & TF_FASTOPEN) && in rack_do_syn_sent()
12939 (tp->snd_una != tp->snd_max)) { in rack_do_syn_sent()
12941 if (SEQ_LT(th->th_ack, tp->snd_max)) in rack_do_syn_sent()
12950 rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_syn_sent()
12951 tp->t_flags |= TF_DELACK; in rack_do_syn_sent()
12953 rack->r_wanted_output = 1; in rack_do_syn_sent()
12954 tp->t_flags |= TF_ACKNOW; in rack_do_syn_sent()
12959 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_syn_sent()
12965 * ack-processing since the in rack_do_syn_sent()
12966 * data stream in our send-map in rack_do_syn_sent()
12972 tp->snd_una++; in rack_do_syn_sent()
12973 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { in rack_do_syn_sent()
12982 rsm = tqhash_min(rack->r_ctl.tqh); in rack_do_syn_sent()
12984 if (rsm->r_flags & RACK_HAS_SYN) { in rack_do_syn_sent()
12985 rsm->r_flags &= ~RACK_HAS_SYN; in rack_do_syn_sent()
12986 rsm->r_start++; in rack_do_syn_sent()
12988 rack->r_ctl.rc_resend = rsm; in rack_do_syn_sent()
12994 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 in rack_do_syn_sent()
12996 tp->t_starttime = ticks; in rack_do_syn_sent()
12997 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_sent()
12999 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_sent()
13009 * Received initial SYN in SYN-SENT[*] state => simultaneous in rack_do_syn_sent()
13012 * half-synchronized. Otherwise, do 3-way handshake: in rack_do_syn_sent()
13013 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If in rack_do_syn_sent()
13016 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); in rack_do_syn_sent()
13020 * Advance th->th_seq to correspond to first data byte. If data, in rack_do_syn_sent()
13023 th->th_seq++; in rack_do_syn_sent()
13024 if (tlen > tp->rcv_wnd) { in rack_do_syn_sent()
13025 todrop = tlen - tp->rcv_wnd; in rack_do_syn_sent()
13026 m_adj(m, -todrop); in rack_do_syn_sent()
13027 tlen = tp->rcv_wnd; in rack_do_syn_sent()
13032 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_sent()
13033 tp->rcv_up = th->th_seq; in rack_do_syn_sent()
13041 /* For syn-sent we may need to update the rtt */ in rack_do_syn_sent()
13042 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_sent()
13046 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_sent()
13047 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_sent()
13048 tp->t_rttlow = t; in rack_do_syn_sent()
13049 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); in rack_do_syn_sent()
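/*
 * Sketch of the timestamp-echo RTT sample above, assuming the TCP
 * timestamp clock ticks in milliseconds and internal RTTs are kept in
 * microseconds (HPTS_USEC_IN_MSEC == 1000): the sample is simply
 * (now - echoed value) scaled up, with the lowest sample tracked.
 */
#include <stdint.h>

static uint32_t
tsecr_rtt_usec(uint32_t now_msec, uint32_t tsecr_msec, uint32_t *rttlow)
{
    uint32_t t = (now_msec - tsecr_msec) * 1000;

    if (*rttlow == 0 || *rttlow > t)
        *rttlow = t;            /* remember the smallest RTT seen */
    return (t);
}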
13056 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_sent()
13066 * timer is contrary to the specification, in rack_do_syn_sent()
13073 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_sent()
13103 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_recv()
13106 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_syn_recv()
13109 (SEQ_LEQ(th->th_ack, tp->snd_una) || in rack_do_syn_recv()
13110 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_recv()
13115 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13128 /* non-initial SYN is ignored */ in rack_do_syn_recv()
13129 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || in rack_do_syn_recv()
13130 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || in rack_do_syn_recv()
13131 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { in rack_do_syn_recv()
13145 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_syn_recv()
13146 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_syn_recv()
13151 * In the SYN-RECEIVED state, validate that the packet belongs to in rack_do_syn_recv()
13157 if (SEQ_LT(th->th_seq, tp->irs)) { in rack_do_syn_recv()
13176 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_syn_recv()
13179 if ((to->to_flags & TOF_TS) != 0 && in rack_do_syn_recv()
13180 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_syn_recv()
13181 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_syn_recv()
13183 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_syn_recv()
13184 tp->ts_recent = to->to_tsval; in rack_do_syn_recv()
13186 tp->snd_wnd = tiwin; in rack_do_syn_recv()
13189 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_syn_recv()
13190 * is on (half-synchronized state), then queue data for later in rack_do_syn_recv()
13194 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13201 if (tp->t_flags & TF_SONOTCONN) { in rack_do_syn_recv()
13202 tp->t_flags &= ~TF_SONOTCONN; in rack_do_syn_recv()
13206 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_recv()
13208 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_recv()
13211 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> in rack_do_syn_recv()
13212 * FIN-WAIT-1 in rack_do_syn_recv()
13214 tp->t_starttime = ticks; in rack_do_syn_recv()
13215 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { in rack_do_syn_recv()
13216 tcp_fastopen_decrement_counter(tp->t_tfo_pending); in rack_do_syn_recv()
13217 tp->t_tfo_pending = NULL; in rack_do_syn_recv()
13219 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_recv()
13221 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_recv()
13232 if (!(tp->t_flags & TF_FASTOPEN)) in rack_do_syn_recv()
13240 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) in rack_do_syn_recv()
13241 tp->snd_una++; in rack_do_syn_recv()
13249 if (tp->t_flags & TF_WAKESOR) { in rack_do_syn_recv()
13250 tp->t_flags &= ~TF_WAKESOR; in rack_do_syn_recv()
13255 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_recv()
13256 /* For syn-recv we may need to update the rtt */ in rack_do_syn_recv()
13257 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_recv()
13261 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_recv()
13262 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_recv()
13263 tp->t_rttlow = t; in rack_do_syn_recv()
13264 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); in rack_do_syn_recv()
13271 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_recv()
13281 * user can proceed. Starting the timer is contrary in rack_do_syn_recv()
13288 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_recv()
13318 * uni-directional data xfer. If the packet has no control flags, in rack_do_established()
13319 * is in-sequence, the window didn't change and we're not in rack_do_established()
13323 * waiting for space. If the length is non-zero and the ack didn't in rack_do_established()
13324 * move, we're the receiver side. If we're getting packets in-order in rack_do_established()
13327 * hidden state-flags are also off. Since we check for in rack_do_established()
13330 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_established()
13331 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && in rack_do_established()
13334 __predict_true(th->th_seq == tp->rcv_nxt)) { in rack_do_established()
13337 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { in rack_do_established()
13350 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_established()
13365 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_established()
13366 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_established()
13384 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_established()
13387 if ((to->to_flags & TOF_TS) != 0 && in rack_do_established()
13388 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_established()
13389 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_established()
13391 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_established()
13392 tp->ts_recent = to->to_tsval; in rack_do_established()
13395 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_established()
13396 * is on (half-synchronized state), then queue data for later in rack_do_established()
13400 if (tp->t_flags & TF_NEEDSYN) { in rack_do_established()
13404 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_established()
13406 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_established()
13419 if (sbavail(&so->so_snd)) { in rack_do_established()
13446 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_close_wait()
13460 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_close_wait()
13461 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_close_wait()
13479 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_close_wait()
13482 if ((to->to_flags & TOF_TS) != 0 && in rack_do_close_wait()
13483 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_close_wait()
13484 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_close_wait()
13486 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_close_wait()
13487 tp->ts_recent = to->to_tsval; in rack_do_close_wait()
13490 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_close_wait()
13491 * is on (half-synchronized state), then queue data for later in rack_do_close_wait()
13495 if (tp->t_flags & TF_NEEDSYN) { in rack_do_close_wait()
13499 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_close_wait()
13501 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_close_wait()
13514 if (sbavail(&so->so_snd)) { in rack_do_close_wait()
13516 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_close_wait()
13532 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_data_after_close()
13533 if (rack->rc_allow_data_af_clo == 0) { in rack_check_data_after_close()
13536 /* tcp_close will kill the inp; pre-log the Reset */ in rack_check_data_after_close()
13543 if (sbavail(&so->so_snd) == 0) in rack_check_data_after_close()
13547 tp->rcv_nxt = th->th_seq + *tlen; in rack_check_data_after_close()
13548 tp->t_flags2 |= TF2_DROP_AF_DATA; in rack_check_data_after_close()
13549 rack->r_wanted_output = 1; in rack_check_data_after_close()
13571 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_1()
13585 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_1()
13586 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_1()
13597 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_1()
13611 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_fin_wait_1()
13614 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_1()
13615 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_1()
13616 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_1()
13618 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_1()
13619 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_1()
13622 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_1()
13623 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_1()
13627 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_1()
13630 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_1()
13632 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_1()
13648 * proceed. Starting the timer is contrary to the in rack_do_fin_wait_1()
13655 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fin_wait_1()
13664 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_1()
13666 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_1()
13693 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_closing()
13707 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_closing()
13708 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_closing()
13726 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_closing()
13729 if ((to->to_flags & TOF_TS) != 0 && in rack_do_closing()
13730 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_closing()
13731 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_closing()
13733 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_closing()
13734 tp->ts_recent = to->to_tsval; in rack_do_closing()
13737 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_closing()
13738 * is on (half-synchronized state), then queue data for later in rack_do_closing()
13742 if (tp->t_flags & TF_NEEDSYN) { in rack_do_closing()
13745 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_closing()
13747 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_closing()
13765 if (sbavail(&so->so_snd)) { in rack_do_closing()
13767 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_closing()
13794 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_lastack()
13808 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_lastack()
13809 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_lastack()
13828 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_lastack()
13831 if ((to->to_flags & TOF_TS) != 0 && in rack_do_lastack()
13832 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_lastack()
13833 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_lastack()
13835 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_lastack()
13836 tp->ts_recent = to->to_tsval; in rack_do_lastack()
13839 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_lastack()
13840 * is on (half-synchronized state), then queue data for later in rack_do_lastack()
13844 if (tp->t_flags & TF_NEEDSYN) { in rack_do_lastack()
13847 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_lastack()
13849 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_lastack()
13867 if (sbavail(&so->so_snd)) { in rack_do_lastack()
13869 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_lastack()
13897 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_2()
13911 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_2()
13912 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_2()
13923 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_2()
13937 * p.869. In such cases, we can still calculate the RTT correctly in rack_do_fin_wait_2()
13940 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_2()
13941 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_2()
13942 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_2()
13944 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_2()
13945 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_2()
13948 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_2()
13949 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_2()
13953 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_2()
13956 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_2()
13958 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_2()
13971 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_2()
13973 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_2()
13986 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; in rack_clear_rate_sample()
13987 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; in rack_clear_rate_sample()
13988 rack->r_ctl.rack_rs.rs_rtt_tot = 0; in rack_clear_rate_sample()
13999 if (rack->rc_hybrid_mode && in rack_set_pace_segments()
14000 (rack->r_ctl.rc_pace_max_segs != 0) && in rack_set_pace_segments()
14002 (rack->r_ctl.rc_last_sft != NULL)) { in rack_set_pace_segments()
14003 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; in rack_set_pace_segments()
14007 orig_min = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
14008 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
14009 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; in rack_set_pace_segments()
14010 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) in rack_set_pace_segments()
14012 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); in rack_set_pace_segments()
14013 if (rack->use_fixed_rate || rack->rc_force_max_seg) { in rack_set_pace_segments()
14014 if (user_max != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
14017 if (rack->rc_force_max_seg) { in rack_set_pace_segments()
14018 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
14019 } else if (rack->use_fixed_rate) { in rack_set_pace_segments()
14021 if ((rack->r_ctl.crte == NULL) || in rack_set_pace_segments()
14022 (bw_est != rack->r_ctl.crte->rate)) { in rack_set_pace_segments()
14023 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
14029 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_set_pace_segments()
14034 rack->r_ctl.rc_pace_min_segs); in rack_set_pace_segments()
14035 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( in rack_set_pace_segments()
14037 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_set_pace_segments()
14039 } else if (rack->rc_always_pace) { in rack_set_pace_segments()
14040 if (rack->r_ctl.gp_bw || in rack_set_pace_segments()
14041 rack->r_ctl.init_rate) { in rack_set_pace_segments()
14046 orig = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
14053 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, in rack_set_pace_segments()
14055 ctf_fixed_maxseg(rack->rc_tp)); in rack_set_pace_segments()
14057 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
14058 if (orig != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
14060 } else if ((rack->r_ctl.gp_bw == 0) && in rack_set_pace_segments()
14061 (rack->r_ctl.rc_pace_max_segs == 0)) { in rack_set_pace_segments()
14067 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); in rack_set_pace_segments()
14070 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { in rack_set_pace_segments()
14072 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; in rack_set_pace_segments()
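/*
 * Sketch of the pacing burst bounds enforced above: whichever policy
 * sizes the burst (user cap, fixed rate, or goodput-derived), the
 * result stays within [one pacing segment, IP's maximum packet size].
 * The 64KB constant is an illustrative stand-in for PACE_MAX_IP_BYTES.
 */
#include <stdint.h>

#define SK_PACE_MAX_IP_BYTES 65535  /* assumed cap, see above */

static uint32_t
clamp_pace_burst(uint32_t burst, uint32_t min_bytes)
{
    if (burst < min_bytes)
        burst = min_bytes;
    if (burst > SK_PACE_MAX_IP_BYTES)
        burst = SK_PACE_MAX_IP_BYTES;
    return (burst);
}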
14092 if (rack->r_is_v6) { in rack_init_fsb_block()
14093 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in rack_init_fsb_block()
14094 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14095 if (tp->t_port) { in rack_init_fsb_block()
14096 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14098 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14099 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14100 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14101 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14104 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); in rack_init_fsb_block()
14105 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14107 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14108 tp->t_port, in rack_init_fsb_block()
14109 ip6, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14110 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); in rack_init_fsb_block()
14115 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); in rack_init_fsb_block()
14116 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14117 if (tp->t_port) { in rack_init_fsb_block()
14118 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14120 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14121 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14122 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14123 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14126 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14127 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); in rack_init_fsb_block()
14129 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14130 tp->t_port, in rack_init_fsb_block()
14131 ip, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14132 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; in rack_init_fsb_block()
14135 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), in rack_init_fsb_block()
14136 (long)TCP_MAXWIN << tp->rcv_scale); in rack_init_fsb_block()
14137 rack->r_fsb_inited = 1; in rack_init_fsb_block()
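/*
 * Worked example of the pre-built header length chosen above, assuming
 * standard header sizes with no IP options: ip6_hdr = 40, ip = 20,
 * tcphdr = 20, udphdr = 8; the tcpiphdr overlay used for v4 is 40 bytes.
 */
#include <stdint.h>

static uint32_t
ex_fsb_hdr_len(int is_v6, int udp_tunneled)
{
	uint32_t len;

	len = is_v6 ? (40 + 20) : (20 + 20);	/* network + transport */
	if (udp_tunneled)
		len += 8;			/* udphdr between them */
	/* v6+UDP = 68, v6 = 60, v4+UDP = 48, v4 = 40. rack_init_fsb()
	 * below sizes the allocation for the tunneled worst case of the
	 * address family up front. */
	return (len);
}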
14148 …rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct ud… in rack_init_fsb()
14150 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); in rack_init_fsb()
14152 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, in rack_init_fsb()
14154 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { in rack_init_fsb()
14157 rack->r_fsb_inited = 0; in rack_init_fsb()
14166 * 20 - Initial round setup in rack_log_hystart_event()
14167 * 21 - Rack declares a new round. in rack_log_hystart_event()
14171 tp = rack->rc_tp; in rack_log_hystart_event()
14177 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_log_hystart_event()
14178 log.u_bbr.flex2 = rack->r_ctl.roundends; in rack_log_hystart_event()
14180 log.u_bbr.flex4 = tp->snd_max; in rack_log_hystart_event()
14183 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hystart_event()
14184 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hystart_event()
14186 &tptosocket(tp)->so_rcv, in rack_log_hystart_event()
14187 &tptosocket(tp)->so_snd, in rack_log_hystart_event()
14196 rack->rack_deferred_inited = 1; in rack_deferred_init()
14197 rack->r_ctl.roundends = tp->snd_max; in rack_deferred_init()
14198 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; in rack_deferred_init()
14199 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_deferred_init()
14213 * 1 - Use full sized retransmits i.e. limit in rack_init_retransmit_value()
14217 * 2 - Use pacer min granularity as a guide to in rack_init_retransmit_value()
14225 * 0 - The rack default 1 MSS (anything not 0/1/2 in rack_init_retransmit_value()
14230 rack->full_size_rxt = 1; in rack_init_retransmit_value()
14231 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
14233 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14234 rack->shape_rxt_to_pacing_min = 1; in rack_init_retransmit_value()
14236 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14237 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
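/*
 * The three retransmit policies above reduce to a two-flag truth table.
 * A minimal sketch; the two flag names mirror the rack fields, the enum
 * is local to this example.
 */
enum ex_rxt_policy {
	EX_RXT_DEFAULT = 0,	/* 1-MSS retransmits */
	EX_RXT_FULL_SIZE = 1,	/* limit rxt to the pacer max */
	EX_RXT_PACING_MIN = 2	/* shape rxt to pacer min granularity */
};

static void
ex_set_rxt_policy(int val, int *full_size_rxt, int *shape_rxt_to_pacing_min)
{
	*full_size_rxt = (val == EX_RXT_FULL_SIZE);
	*shape_rxt_to_pacing_min = (val == EX_RXT_PACING_MIN);
	/* Anything not 0/1/2 falls back to the default in the code above. */
}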
14247 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_chg_info()
14270 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_chg_query()
14271 switch (reqr->req) { in rack_chg_query()
14273 if ((reqr->req_param == tp->snd_max) || in rack_chg_query()
14274 (tp->snd_max == tp->snd_una)){ in rack_chg_query()
14278 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); in rack_chg_query()
14280 /* Can't find that seq -- unlikely */ in rack_chg_query()
14283 reqr->sendmap_start = rsm->r_start; in rack_chg_query()
14284 reqr->sendmap_end = rsm->r_end; in rack_chg_query()
14285 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; in rack_chg_query()
14286 reqr->sendmap_fas = rsm->r_fas; in rack_chg_query()
14287 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) in rack_chg_query()
14288 reqr->sendmap_send_cnt = SNDMAP_NRTX; in rack_chg_query()
14289 for(i=0; i<reqr->sendmap_send_cnt; i++) in rack_chg_query()
14290 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; in rack_chg_query()
14291 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; in rack_chg_query()
14292 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; in rack_chg_query()
14293 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; in rack_chg_query()
14294 reqr->sendmap_dupacks = rsm->r_dupack; in rack_chg_query()
14296 rsm->r_start, in rack_chg_query()
14297 rsm->r_end, in rack_chg_query()
14298 rsm->r_flags); in rack_chg_query()
14302 if (rack->r_ctl.rc_hpts_flags == 0) { in rack_chg_query()
14306 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; in rack_chg_query()
14307 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_chg_query()
14308 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; in rack_chg_query()
14310 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_chg_query()
14311 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; in rack_chg_query()
14314 rack->r_ctl.rc_hpts_flags, in rack_chg_query()
14315 rack->r_ctl.rc_last_output_to, in rack_chg_query()
14316 rack->r_ctl.rc_timer_exp); in rack_chg_query()
14321 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; in rack_chg_query()
14322 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; in rack_chg_query()
14324 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; in rack_chg_query()
14325 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; in rack_chg_query()
14326 reqr->rack_rtt = rack->rc_rack_rtt; in rack_chg_query()
14327 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; in rack_chg_query()
14328 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; in rack_chg_query()
14330 reqr->rack_sacked = rack->r_ctl.rc_sacked; in rack_chg_query()
14331 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; in rack_chg_query()
14332 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; in rack_chg_query()
14333 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; in rack_chg_query()
14334 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; in rack_chg_query()
14335 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; in rack_chg_query()
14337 reqr->rack_tlp_out = rack->rc_tlp_in_progress; in rack_chg_query()
14338 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; in rack_chg_query()
14339 if (rack->rc_in_persist) { in rack_chg_query()
14340 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; in rack_chg_query()
14341 reqr->rack_in_persist = 1; in rack_chg_query()
14343 reqr->rack_time_went_idle = 0; in rack_chg_query()
14344 reqr->rack_in_persist = 0; in rack_chg_query()
14346 if (rack->r_wanted_output) in rack_chg_query()
14347 reqr->rack_wanted_output = 1; in rack_chg_query()
14349 reqr->rack_wanted_output = 0; in rack_chg_query()
14353 return (-EINVAL); in rack_chg_query()
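/*
 * Usage sketch for the query interface above: a stack taking over a
 * connection asks the outgoing stack for state it cannot reconstruct.
 * The struct and names here are stand-ins; only the call shape, and the
 * convention that non-zero means the request was answered while 0 or
 * -EINVAL means it was not, mirrors rack_chg_query() and its caller
 * rack_init_outstanding() below.
 */
#include <stdint.h>
#include <stddef.h>

struct ex_tcpcb;			/* opaque stand-in */
struct ex_query {
	int	 req;			/* which state block is wanted */
	uint32_t req_param;		/* e.g. start seq of a sendmap walk */
	/* response fields omitted */
};

/* Function-block hook, mirroring the tfb_chg_query shape. */
typedef int (*ex_chg_query_t)(struct ex_tcpcb *, struct ex_query *);

static int
ex_pull_old_stack_state(struct ex_tcpcb *tp, ex_chg_query_t query,
    struct ex_query *qr)
{
	if (query == NULL)
		return (0);		/* old stack cannot answer queries */
	return (query(tp, qr));
}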
14372 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_switch_failed()
14374 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_switch_failed()
14375 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14377 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14378 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_switch_failed()
14379 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_switch_failed()
14380 if (tp->t_in_hpts > IHPTS_NONE) { in rack_switch_failed()
14385 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_switch_failed()
14386 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_switch_failed()
14387 toval = rack->r_ctl.rc_last_output_to - cts; in rack_switch_failed()
14392 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_switch_failed()
14393 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_switch_failed()
14394 toval = rack->r_ctl.rc_timer_exp - cts; in rack_switch_failed()
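/*
 * Sketch of the deadline-to-timeout conversion used here and again in the
 * rack_init() restore path below: a saved absolute time (pacer deadline or
 * timer expiry) only becomes a positive hpts timeout while it is still in
 * the future; otherwise the timer is due immediately. The TSTMP_GT-style
 * wrap-safe comparison reduces to a signed subtraction.
 */
#include <stdint.h>

static uint32_t
ex_deadline_to_timeout(uint32_t deadline_us, uint32_t now_us)
{
	/* Wrap-safe "deadline > now": the difference, read as signed,
	 * is positive while the deadline lies ahead of now. */
	if ((int32_t)(deadline_us - now_us) > 0)
		return (deadline_us - now_us);
	return (0);	/* already due: schedule with no delay */
}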
14413 * to not refer to tp->t_fb_ptr. This has the old rack in rack_init_outstanding()
14419 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init_outstanding()
14427 rsm->r_no_rtt_allowed = 1; in rack_init_outstanding()
14428 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14429 rsm->r_rtr_cnt = 1; in rack_init_outstanding()
14430 rsm->r_rtr_bytes = 0; in rack_init_outstanding()
14431 if (tp->t_flags & TF_SENTFIN) in rack_init_outstanding()
14432 rsm->r_flags |= RACK_HAS_FIN; in rack_init_outstanding()
14433 rsm->r_end = tp->snd_max; in rack_init_outstanding()
14434 if (tp->snd_una == tp->iss) { in rack_init_outstanding()
14436 rsm->r_flags |= RACK_HAS_SYN; in rack_init_outstanding()
14437 rsm->r_start = tp->iss; in rack_init_outstanding()
14438 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); in rack_init_outstanding()
14440 rsm->r_start = tp->snd_una; in rack_init_outstanding()
14441 rsm->r_dupack = 0; in rack_init_outstanding()
14442 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14443 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14444 if (rsm->m) { in rack_init_outstanding()
14445 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14446 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14448 rsm->orig_m_len = 0; in rack_init_outstanding()
14449 rsm->orig_t_space = 0; in rack_init_outstanding()
14453 * This can happen if we have a stand-alone FIN or in rack_init_outstanding()
14456 rsm->m = NULL; in rack_init_outstanding()
14457 rsm->orig_m_len = 0; in rack_init_outstanding()
14458 rsm->orig_t_space = 0; in rack_init_outstanding()
14459 rsm->soff = 0; in rack_init_outstanding()
14462 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14467 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14469 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14470 rsm->r_in_tmap = 1; in rack_init_outstanding()
14477 at = tp->snd_una; in rack_init_outstanding()
14478 while (at != tp->snd_max) { in rack_init_outstanding()
14482 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) in rack_init_outstanding()
14494 rsm->r_dupack = qr.sendmap_dupacks; in rack_init_outstanding()
14495 rsm->r_start = qr.sendmap_start; in rack_init_outstanding()
14496 rsm->r_end = qr.sendmap_end; in rack_init_outstanding()
14498 rsm->r_fas = qr.sendmap_end; in rack_init_outstanding()
14500 rsm->r_fas = rsm->r_start - tp->snd_una; in rack_init_outstanding()
14506 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; in rack_init_outstanding()
14507 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; in rack_init_outstanding()
14508 rsm->r_rtr_cnt = qr.sendmap_send_cnt; in rack_init_outstanding()
14509 rsm->r_ack_arrival = qr.sendmap_ack_arrival; in rack_init_outstanding()
14510 for (i=0 ; i<rsm->r_rtr_cnt; i++) in rack_init_outstanding()
14511 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; in rack_init_outstanding()
14512 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14513 (rsm->r_start - tp->snd_una), &rsm->soff); in rack_init_outstanding()
14514 if (rsm->m) { in rack_init_outstanding()
14515 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14516 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14518 rsm->orig_m_len = 0; in rack_init_outstanding()
14519 rsm->orig_t_space = 0; in rack_init_outstanding()
14522 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14527 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14529 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_init_outstanding()
14530 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_init_outstanding()
14531 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > in rack_init_outstanding()
14532 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { in rack_init_outstanding()
14539 rsm->r_in_tmap = 1; in rack_init_outstanding()
14544 if (rsm->r_in_tmap == 0) { in rack_init_outstanding()
14548 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14549 rsm->r_in_tmap = 1; in rack_init_outstanding()
14552 if ((rack->r_ctl.rc_sacklast == NULL) || in rack_init_outstanding()
14553 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { in rack_init_outstanding()
14554 rack->r_ctl.rc_sacklast = rsm; in rack_init_outstanding()
14558 rsm->r_start, in rack_init_outstanding()
14559 rsm->r_end, in rack_init_outstanding()
14560 rsm->r_flags); in rack_init_outstanding()
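/*
 * Model of the time-ordered tmap insertion above: the TAILQ_FOREACH walks
 * until it finds an entry whose last transmit time exceeds the new
 * segment's and inserts before it, else appends at the tail, keeping
 * rc_tmap sorted oldest-send-first. Same invariant here on a plain
 * singly-linked list instead of the kernel TAILQ.
 */
#include <stdint.h>
#include <stddef.h>

struct ex_rsm {
	uint64_t last_sent;	/* r_tim_lastsent[r_rtr_cnt - 1] */
	struct ex_rsm *next;
};

static void
ex_tmap_insert(struct ex_rsm **head, struct ex_rsm *rsm)
{
	struct ex_rsm **pp = head;

	while (*pp != NULL && (*pp)->last_sent <= rsm->last_sent)
		pp = &(*pp)->next;
	rsm->next = *pp;
	*pp = rsm;
}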
14581 * will be tp->t_fb_ptr. If its a stack switch that in rack_init()
14585 if (ptr == &tp->t_fb_ptr) in rack_init()
14601 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); in rack_init()
14602 if (rack->r_ctl.tqh == NULL) { in rack_init()
14606 tqhash_init(rack->r_ctl.tqh); in rack_init()
14607 TAILQ_INIT(&rack->r_ctl.rc_free); in rack_init()
14608 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_init()
14609 rack->rc_tp = tp; in rack_init()
14610 rack->rc_inp = inp; in rack_init()
14612 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; in rack_init()
14629 rack->rc_new_rnd_needed = 1; in rack_init()
14630 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; in rack_init()
14633 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; in rack_init()
14634 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; in rack_init()
14635 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; in rack_init()
14636 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; in rack_init()
14638 rack->rc_pace_to_cwnd = 1; in rack_init()
14640 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; in rack_init()
14642 rack->use_rack_rr = 1; in rack_init()
14644 rack->rc_pace_dnd = 1; in rack_init()
14647 tp->t_delayed_ack = 1; in rack_init()
14649 tp->t_delayed_ack = 0; in rack_init()
14652 tp->t_flags2 |= TF2_TCP_ACCOUNTING; in rack_init()
14655 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; in rack_init()
14656 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); in rack_init()
14657 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); in rack_init()
14658 if (rack->r_ctl.pcm_s == NULL) { in rack_init()
14659 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_init()
14662 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; in rack_init()
14664 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; in rack_init()
14665 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; in rack_init()
14667 rack->rack_enable_scwnd = 1; in rack_init()
14668 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_init()
14669 rack->rc_user_set_max_segs = rack_hptsi_segments; in rack_init()
14670 rack->r_ctl.max_reduction = rack_max_reduce; in rack_init()
14671 rack->rc_force_max_seg = 0; in rack_init()
14672 TAILQ_INIT(&rack->r_ctl.opt_list); in rack_init()
14673 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; in rack_init()
14674 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; in rack_init()
14676 rack->rack_hibeta = 1; in rack_init()
14679 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; in rack_init()
14680 rack->r_ctl.saved_hibeta = rack_hibeta_setting; in rack_init()
14683 rack->r_ctl.saved_hibeta = 50; in rack_init()
14688 * will never have all 1's in ms :-) in rack_init()
14690 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; in rack_init()
14691 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; in rack_init()
14692 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; in rack_init()
14693 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; in rack_init()
14694 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; in rack_init()
14695 rack->r_ctl.rc_highest_us_rtt = 0; in rack_init()
14696 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; in rack_init()
14697 rack->pcm_enabled = rack_pcm_is_enabled; in rack_init()
14699 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_init()
14700 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); in rack_init()
14702 rack->r_use_cmp_ack = 1; in rack_init()
14704 rack->rack_no_prr = 1; in rack_init()
14706 rack->rc_gp_no_rec_chg = 1; in rack_init()
14708 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_init()
14709 rack->rc_always_pace = 1; in rack_init()
14710 if (rack->rack_hibeta) in rack_init()
14713 rack->rc_always_pace = 0; in rack_init()
14714 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) in rack_init()
14715 rack->r_mbuf_queue = 1; in rack_init()
14717 rack->r_mbuf_queue = 0; in rack_init()
14720 rack->r_limit_scw = 1; in rack_init()
14722 rack->r_limit_scw = 0; in rack_init()
14724 rack->rc_labc = V_tcp_abc_l_var; in rack_init()
14726 rack->r_use_hpts_min = 1; in rack_init()
14727 if (tp->snd_una != 0) { in rack_init()
14728 rack->rc_sendvars_notset = 0; in rack_init()
14736 * syn-cache. This means none of the in rack_init()
14740 rack->rc_sendvars_notset = 1; in rack_init()
14743 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; in rack_init()
14744 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; in rack_init()
14745 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; in rack_init()
14746 rack->r_ctl.rc_min_to = rack_min_to; in rack_init()
14747 microuptime(&rack->r_ctl.act_rcv_time); in rack_init()
14748 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; in rack_init()
14749 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; in rack_init()
14751 rack->r_up_only = 1; in rack_init()
14754 rack->rc_gp_dyn_mul = 1; in rack_init()
14756 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_init()
14758 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_init()
14759 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; in rack_init()
14761 rack->rc_skip_timely = 1; in rack_init()
14763 if (rack->rc_skip_timely) { in rack_init()
14764 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_init()
14765 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_init()
14766 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_init()
14768 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_init()
14769 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14770 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14772 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, in rack_init()
14774 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_init()
14775 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_init()
14776 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_init()
14777 rack->r_ctl.rc_went_idle_time = us_cts; in rack_init()
14778 rack->r_ctl.rc_time_probertt_starts = 0; in rack_init()
14780 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; in rack_init()
14782 rack->r_ctl.gate_to_fs = 1; in rack_init()
14783 rack->r_ctl.gp_gain_req = rack_gp_gain_req; in rack_init()
14789 rack->rc_rack_tmr_std_based = 1; in rack_init()
14793 rack->rc_rack_use_dsack = 1; in rack_init()
14797 rack->r_ctl.req_measurements = rack_req_measurements; in rack_init()
14799 rack->r_ctl.req_measurements = 1; in rack_init()
14801 rack->rack_hdw_pace_ena = 1; in rack_init()
14803 rack->r_rack_hw_rate_caps = 1; in rack_init()
14805 rack->rack_rec_nonrxt_use_cr = 1; in rack_init()
14814 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_init()
14816 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_init()
14818 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_init()
14826 tp->t_flags &= ~TF_GPUTINPROG; in rack_init()
14827 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14828 (tp->t_state != TCPS_TIME_WAIT)) { in rack_init()
14833 if (SEQ_GT(tp->snd_max, tp->iss)) in rack_init()
14834 snt = tp->snd_max - tp->iss; in rack_init()
14845 if (tp->snd_cwnd < iwin) in rack_init()
14846 tp->snd_cwnd = iwin; in rack_init()
14867 tp->snd_ssthresh = 0xffffffff; in rack_init()
14878 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14879 (tp->t_state != TCPS_TIME_WAIT) && in rack_init()
14881 (tp->snd_una != tp->snd_max)) { in rack_init()
14890 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_init()
14891 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_init()
14893 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_init()
14894 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_init()
14895 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_init()
14901 * they are non-zero. They are kept with a 5 in rack_init()
14906 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); in rack_init()
14907 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { in rack_init()
14909 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init()
14919 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14921 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; in rack_init()
14922 rack->r_ctl.num_dsack = qr.rack_num_dsacks; in rack_init()
14923 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; in rack_init()
14924 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; in rack_init()
14925 rack->rc_rack_rtt = qr.rack_rtt; in rack_init()
14926 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; in rack_init()
14927 rack->r_ctl.rc_sacked = qr.rack_sacked; in rack_init()
14928 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; in rack_init()
14929 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; in rack_init()
14930 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; in rack_init()
14931 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; in rack_init()
14932 rack->r_ctl.rc_prr_out = qr.rack_prr_out; in rack_init()
14934 rack->rc_tlp_in_progress = 1; in rack_init()
14935 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; in rack_init()
14937 rack->rc_tlp_in_progress = 0; in rack_init()
14938 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_init()
14941 rack->rc_srtt_measure_made = 1; in rack_init()
14943 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; in rack_init()
14945 if (rack->r_ctl.rc_scw) { in rack_init()
14946 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_init()
14947 rack->rack_scwnd_is_idle = 1; in rack_init()
14950 rack->r_ctl.persist_lost_ends = 0; in rack_init()
14951 rack->probe_not_answered = 0; in rack_init()
14952 rack->forced_ack = 0; in rack_init()
14953 tp->t_rxtshift = 0; in rack_init()
14954 rack->rc_in_persist = 1; in rack_init()
14955 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_init()
14956 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_init()
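/*
 * A sketch of what RACK_TCPT_RANGESET above does: clamp the computed RTO
 * into [rack_rto_min, rack_rto_max]. Exactly where the timer slop is
 * folded in is an assumption here (added before clamping); the macro body
 * is not part of this listing.
 */
static uint32_t
ex_rangeset(uint32_t value, uint32_t tvmin, uint32_t tvmax, uint32_t slop)
{
	uint32_t tv = value + slop;	/* assumed: slop applied first */

	if (tv < tvmin)
		tv = tvmin;
	else if (tv > tvmax)
		tv = tvmax;
	return (tv);
}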
14959 rack->r_wanted_output = 1; in rack_init()
14968 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14971 * non-zero return means we have a timer('s) in rack_init()
14972 * to start. Zero means no timer (no keepalive in rack_init()
14977 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; in rack_init()
14979 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; in rack_init()
14981 tov = qr.timer_pacing_to - us_cts; in rack_init()
14986 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; in rack_init()
14989 tov = qr.timer_timer_exp - us_cts; in rack_init()
14995 rack->r_ctl.rc_hpts_flags, in rack_init()
14996 rack->r_ctl.rc_last_output_to, in rack_init()
14997 rack->r_ctl.rc_timer_exp); in rack_init()
15003 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); in rack_init()
15007 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, in rack_init()
15016 if ((tp->t_state == TCPS_CLOSED) || in rack_handoff_ok()
15017 (tp->t_state == TCPS_LISTEN)) { in rack_handoff_ok()
15021 if ((tp->t_state == TCPS_SYN_SENT) || in rack_handoff_ok()
15022 (tp->t_state == TCPS_SYN_RECEIVED)) { in rack_handoff_ok()
15029 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { in rack_handoff_ok()
15042 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ in rack_handoff_ok()
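/*
 * Condensed sketch of the handoff test above. The branch bodies are elided
 * in this listing, so the verdicts are a reconstruction under the usual
 * convention that 0 means "ok to switch" and an errno means "not now".
 * What is visible: a sent FIN with more than one byte still unacked blocks
 * the switch, and without SACK the rack_sack_not_required override is the
 * only way in.
 */
#include <errno.h>
#include <stdint.h>

static int
ex_handoff_ok(int closed_or_listen, int syn_state, int sentfin,
    uint32_t unacked, int sack_permit, int sack_not_required)
{
	if (closed_or_listen)
		return (0);	/* nothing in flight, switch freely */
	if (syn_state)
		return (EAGAIN); /* assumption: SACK support unknown yet */
	if (sentfin && unacked > 1)
		return (EAGAIN); /* FIN plus unacked data: not yet */
	if (sack_permit || sack_not_required)
		return (0);
	return (EINVAL);	/* no SACK on this connection */
}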
15056 if (tp->t_fb_ptr) { in rack_fini()
15062 tp->t_flags &= ~TF_FORCEDATA; in rack_fini()
15063 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fini()
15072 if (rack->r_ctl.rc_scw) { in rack_fini()
15075 if (rack->r_limit_scw) in rack_fini()
15076 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); in rack_fini()
15079 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, in rack_fini()
15080 rack->r_ctl.rc_scw_index, in rack_fini()
15082 rack->r_ctl.rc_scw = NULL; in rack_fini()
15085 if (rack->r_ctl.fsb.tcp_ip_hdr) { in rack_fini()
15086 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); in rack_fini()
15087 rack->r_ctl.fsb.tcp_ip_hdr = NULL; in rack_fini()
15088 rack->r_ctl.fsb.th = NULL; in rack_fini()
15090 if (rack->rc_always_pace == 1) { in rack_fini()
15094 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { in rack_fini()
15097 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); in rack_fini()
15098 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_fini()
15102 if (rack->r_ctl.crte != NULL) { in rack_fini()
15103 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_fini()
15104 rack->rack_hdrw_pacing = 0; in rack_fini()
15105 rack->r_ctl.crte = NULL; in rack_fini()
15112 * get each one and free it like a cum-ack would and in rack_fini()
15115 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15117 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_fini()
15118 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15120 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15122 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15124 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_fini()
15125 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15126 rack->rc_free_cnt--; in rack_fini()
15129 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15131 if (rack->r_ctl.pcm_s != NULL) { in rack_fini()
15132 free(rack->r_ctl.pcm_s, M_TCPPCM); in rack_fini()
15133 rack->r_ctl.pcm_s = NULL; in rack_fini()
15134 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_fini()
15135 rack->r_ctl.pcm_i.cnt = 0; in rack_fini()
15137 if ((rack->r_ctl.rc_num_maps_alloced > 0) && in rack_fini()
15144 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; in rack_fini()
15145 log.u_bbr.flex2 = rack->rc_free_cnt; in rack_fini()
15147 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fini()
15148 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15150 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15157 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), in rack_fini()
15160 rack->r_ctl.rc_num_maps_alloced)); in rack_fini()
15161 rack->rc_free_cnt = 0; in rack_fini()
15162 free(rack->r_ctl.tqh, M_TCPFSB); in rack_fini()
15163 rack->r_ctl.tqh = NULL; in rack_fini()
15164 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); in rack_fini()
15165 tp->t_fb_ptr = NULL; in rack_fini()
15168 tp->snd_nxt = tp->snd_max; in rack_fini()
15174 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { in rack_set_state()
15175 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; in rack_set_state()
15177 switch (tp->t_state) { in rack_set_state()
15179 rack->r_state = TCPS_SYN_SENT; in rack_set_state()
15180 rack->r_substate = rack_do_syn_sent; in rack_set_state()
15183 rack->r_state = TCPS_SYN_RECEIVED; in rack_set_state()
15184 rack->r_substate = rack_do_syn_recv; in rack_set_state()
15188 rack->r_state = TCPS_ESTABLISHED; in rack_set_state()
15189 rack->r_substate = rack_do_established; in rack_set_state()
15192 rack->r_state = TCPS_CLOSE_WAIT; in rack_set_state()
15193 rack->r_substate = rack_do_close_wait; in rack_set_state()
15197 rack->r_state = TCPS_FIN_WAIT_1; in rack_set_state()
15198 rack->r_substate = rack_do_fin_wait_1; in rack_set_state()
15202 rack->r_state = TCPS_CLOSING; in rack_set_state()
15203 rack->r_substate = rack_do_closing; in rack_set_state()
15207 rack->r_state = TCPS_LAST_ACK; in rack_set_state()
15208 rack->r_substate = rack_do_lastack; in rack_set_state()
15211 rack->r_state = TCPS_FIN_WAIT_2; in rack_set_state()
15212 rack->r_substate = rack_do_fin_wait_2; in rack_set_state()
15220 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_set_state()
15221 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_state()
15231 * hpts was running. Now a timer is up as well, is in rack_timer_audit()
15232 * it the right timer? in rack_timer_audit()
15237 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_audit()
15238 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_timer_audit()
15240 * Ok we probably need some timer up, but no in rack_timer_audit()
15244 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15248 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) in rack_timer_audit()
15250 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_audit()
15251 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && in rack_timer_audit()
15258 if (tp->t_flags & TF_DELACK) { in rack_timer_audit()
15262 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { in rack_timer_audit()
15265 * of nothing outstanding and the RXT up (and the hptsi timer). in rack_timer_audit()
15269 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timer_audit()
15270 (tp->t_state <= TCPS_CLOSING)) && in rack_timer_audit()
15272 (tp->snd_max == tp->snd_una)) { in rack_timer_audit()
15277 if (SEQ_GT(tp->snd_max, tp->snd_una) && in rack_timer_audit()
15289 * before the rtx/tlp/rack timer were going to in rack_timer_audit()
15290 * expire, then that would be the timer in control. in rack_timer_audit()
15297 * Ok the timer originally started is not what we want now. in rack_timer_audit()
15301 if (tcp_in_hpts(rack->rc_tp)) { in rack_timer_audit()
15302 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_timer_audit()
15306 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_timer_audit()
15307 rack->r_early = 1; in rack_timer_audit()
15308 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_timer_audit()
15310 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_audit()
15312 tcp_hpts_remove(rack->rc_tp); in rack_timer_audit()
15314 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15322 if ((SEQ_LT(tp->snd_wl1, seq) || in rack_do_win_updates()
15323 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || in rack_do_win_updates()
15324 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { in rack_do_win_updates()
15326 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) in rack_do_win_updates()
15328 tp->snd_wnd = tiwin; in rack_do_win_updates()
15330 tp->snd_wl1 = seq; in rack_do_win_updates()
15331 tp->snd_wl2 = ack; in rack_do_win_updates()
15332 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15333 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15334 rack->r_wanted_output = 1; in rack_do_win_updates()
15335 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { in rack_do_win_updates()
15336 tp->snd_wnd = tiwin; in rack_do_win_updates()
15338 tp->snd_wl1 = seq; in rack_do_win_updates()
15339 tp->snd_wl2 = ack; in rack_do_win_updates()
15344 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15345 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15347 if ((rack->rc_in_persist != 0) && in rack_do_win_updates()
15348 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_do_win_updates()
15349 rack->r_ctl.rc_pace_min_segs))) { in rack_do_win_updates()
15353 if ((rack->rc_in_persist == 0) && in rack_do_win_updates()
15354 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_do_win_updates()
15355 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_win_updates()
15356 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_do_win_updates()
15357 sbavail(&tptosocket(tp)->so_snd) && in rack_do_win_updates()
15358 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_do_win_updates()
15365 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); in rack_do_win_updates()
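/*
 * The acceptance test at the top of rack_do_win_updates() is the classic
 * RFC 793 rule: take the advertised window when the segment is newer
 * (wl1 < seq), or the same segment carries a newer ack (wl2 < ack), or the
 * same seq/ack pair advertises a larger window. Minimal predicate, with
 * the wrap-safe SEQ_LT compares reduced to signed 32-bit deltas.
 */
#include <stdint.h>

static int
ex_accept_window_update(uint32_t seq, uint32_t ack, uint32_t tiwin,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
	if ((int32_t)(snd_wl1 - seq) < 0)
		return (1);		/* newer segment */
	if (snd_wl1 == seq && (int32_t)(snd_wl2 - ack) < 0)
		return (1);		/* same segment, newer ack */
	if (snd_wl1 == seq && snd_wl2 == ack && tiwin > snd_wnd)
		return (1);		/* pure window increase */
	return (0);
}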
15373 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_input_packet()
15386 if (SEQ_GT(ae->ack, tp->snd_una)) { in rack_log_input_packet()
15387 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); in rack_log_input_packet()
15389 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); in rack_log_input_packet()
15393 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_input_packet()
15394 if (rack->rack_no_prr == 0) in rack_log_input_packet()
15395 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_log_input_packet()
15398 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_input_packet()
15400 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_input_packet()
15401 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_log_input_packet()
15402 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_log_input_packet()
15403 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_log_input_packet()
15404 log.u_bbr.pkts_out = tp->t_maxseg; in rack_log_input_packet()
15405 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_input_packet()
15407 log.u_bbr.lost = ae->flags; in rack_log_input_packet()
15410 if (ae->flags & TSTMP_HDWR) { in rack_log_input_packet()
15413 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15414 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15418 } else if (ae->flags & TSTMP_LRO) { in rack_log_input_packet()
15421 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15422 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15429 log.u_bbr.delRate = ae->timestamp; in rack_log_input_packet()
15431 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_log_input_packet()
15433 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_log_input_packet()
15435 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_log_input_packet()
15439 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_log_input_packet()
15441 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_log_input_packet()
15442 log.u_bbr.rttProp = tcp_req->timestamp; in rack_log_input_packet()
15443 log.u_bbr.cur_del_rate = tcp_req->start; in rack_log_input_packet()
15444 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_log_input_packet()
15448 log.u_bbr.bw_inuse = tcp_req->end; in rack_log_input_packet()
15450 log.u_bbr.flex6 = tcp_req->start_seq; in rack_log_input_packet()
15451 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_log_input_packet()
15453 log.u_bbr.epoch = tcp_req->end_seq; in rack_log_input_packet()
15459 th->th_seq = ae->seq; in rack_log_input_packet()
15460 th->th_ack = ae->ack; in rack_log_input_packet()
15461 th->th_win = ae->win; in rack_log_input_packet()
15463 th->th_sport = inp->inp_fport; in rack_log_input_packet()
15464 th->th_dport = inp->inp_lport; in rack_log_input_packet()
15465 tcp_set_flags(th, ae->flags); in rack_log_input_packet()
15467 if (ae->flags & HAS_TSTMP) { in rack_log_input_packet()
15471 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); in rack_log_input_packet()
15481 val = htonl(ae->ts_value); in rack_log_input_packet()
15484 val = htonl(ae->ts_echo); in rack_log_input_packet()
15488 th->th_off = (sizeof(struct tcphdr) >> 2); in rack_log_input_packet()
15497 * snd_una was advanced and then un-advancing it so that the in rack_log_input_packet()
15500 if (tp->snd_una != high_seq) { in rack_log_input_packet()
15501 orig_snd_una = tp->snd_una; in rack_log_input_packet()
15502 tp->snd_una = high_seq; in rack_log_input_packet()
15507 &tptosocket(tp)->so_rcv, in rack_log_input_packet()
15508 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, in rack_log_input_packet()
15511 tp->snd_una = orig_snd_una; in rack_log_input_packet()
15522 * A persist or keep-alive was forced out, update our in rack_handle_probe_response()
15523 * min rtt time. Note we now worry about lost responses. in rack_handle_probe_response()
15524 * When a subsequent keep-alive or persist times out in rack_handle_probe_response()
15528 * the rtt but with reduced confidence (0). Or we just in rack_handle_probe_response()
15529 * plain don't apply the rtt estimate. Having data flow in rack_handle_probe_response()
15530 * will clear the probe_not_answered flag i.e. cum-ack in rack_handle_probe_response()
15534 rack->forced_ack = 0; in rack_handle_probe_response()
15535 rack->rc_tp->t_rxtshift = 0; in rack_handle_probe_response()
15536 if ((rack->rc_in_persist && in rack_handle_probe_response()
15537 (tiwin == rack->rc_tp->snd_wnd)) || in rack_handle_probe_response()
15538 (rack->rc_in_persist == 0)) { in rack_handle_probe_response()
15540 * In persists only apply the RTT update if this is in rack_handle_probe_response()
15550 * us that if we do calculate an RTT it is longer not in rack_handle_probe_response()
15553 if (rack->rc_in_persist) in rack_handle_probe_response()
15555 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; in rack_handle_probe_response()
15558 if (rack->probe_not_answered == 0) { in rack_handle_probe_response()
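/*
 * Sketch of the forced-ack RTT sample above: the probe's send time was
 * stashed in forced_ack_ts, so response time minus that is an RTT. If an
 * earlier probe went unanswered (probe_not_answered) the sample may span
 * several probes; per the comment above, the stack then either applies it
 * with confidence 0 or skips it, a policy this sketch leaves to the caller.
 */
#include <stdint.h>

static int
ex_forced_ack_rtt(uint32_t us_cts, uint32_t forced_ack_ts,
    int probe_not_answered, uint32_t *rtt_out, int *confidence)
{
	uint32_t us_rtt = us_cts - forced_ack_ts;

	if (us_rtt == 0)
		return (0);		/* clock too coarse, no sample */
	*confidence = (probe_not_answered == 0) ? 1 : 0;
	*rtt_out = us_rtt;
	return (1);
}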
15580 rack->r_ctl.roundends = tp->snd_max; in rack_new_round_starts()
15581 rack->rc_new_rnd_needed = 0; in rack_new_round_starts()
15582 rack_log_hystart_event(rack, tp->snd_max, 4); in rack_new_round_starts()
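/*
 * Round accounting in one predicate: a round's end marker is the snd_max
 * captured when it starts (roundends above), and the next cum-ack at or
 * past that mark, with no new round already pending, triggers the
 * new-round setup. This is the same SEQ_GEQ(high_seq, roundends) test the
 * compressed-ack path applies further below.
 */
#include <stdint.h>

static int
ex_round_complete(uint32_t high_seq, uint32_t roundends, int new_rnd_needed)
{
	/* wrap-safe SEQ_GEQ(high_seq, roundends) */
	return (new_rnd_needed == 0 && (int32_t)(high_seq - roundends) >= 0);
}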
15590 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pcm()
15597 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pcm()
15603 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; in rack_log_pcm()
15604 log.u_bbr.bbr_substate = rack->pcm_needed; in rack_log_pcm()
15606 log.u_bbr.bbr_substate |= rack->pcm_in_progress; in rack_log_pcm()
15608 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ in rack_log_pcm()
15609 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, in rack_log_pcm()
15624 rack->r_ctl.current_round++; in rack_new_round_setup()
15626 rack->rc_new_rnd_needed = 1; in rack_new_round_setup()
15627 if ((rack->pcm_enabled == 1) && in rack_new_round_setup()
15628 (rack->pcm_needed == 0) && in rack_new_round_setup()
15629 (rack->pcm_in_progress == 0)) { in rack_new_round_setup()
15637 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_new_round_setup()
15638 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_new_round_setup()
15639 rack->pcm_needed = 1; in rack_new_round_setup()
15640 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15642 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15645 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_new_round_setup()
15647 if (CC_ALGO(tp)->newround != NULL) { in rack_new_round_setup()
15648 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); in rack_new_round_setup()
15653 * that we are not just pushing on slow-start and just in rack_new_round_setup()
15655 * boost in b/w during the inital slow-start. in rack_new_round_setup()
15657 if (rack->dgp_on && in rack_new_round_setup()
15658 (rack->rc_initial_ss_comp == 0) && in rack_new_round_setup()
15659 (tp->snd_cwnd < tp->snd_ssthresh) && in rack_new_round_setup()
15660 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && in rack_new_round_setup()
15661 (rack->r_ctl.gp_rnd_thresh > 0) && in rack_new_round_setup()
15662 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { in rack_new_round_setup()
15672 rack->rc_initial_ss_comp = 1; in rack_new_round_setup()
15674 if (tcp_bblogging_on(rack->rc_tp)) { in rack_new_round_setup()
15680 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_new_round_setup()
15681 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_new_round_setup()
15682 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; in rack_new_round_setup()
15683 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; in rack_new_round_setup()
15684 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15689 if ((rack->r_ctl.gate_to_fs == 1) && in rack_new_round_setup()
15690 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { in rack_new_round_setup()
15691 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15693 tp->snd_ssthresh = tp->snd_cwnd - 1; in rack_new_round_setup()
15695 rack->r_fast_output = 0; in rack_new_round_setup()
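/*
 * Sketch of the slow-start exit gate above: still in DGP initial slow
 * start with enough goodput measurements, and gp_rnd_thresh rounds have
 * passed since the last round where b/w rose, so slow start is declared
 * complete; with gate_to_fs set, cwnd is pulled back to the highest
 * slow-start flight size seen (ss_hi_fs), and ssthresh is set just below
 * cwnd so congestion avoidance takes over.
 */
#include <stdint.h>

static void
ex_ss_exit_gate(uint32_t *cwnd, uint32_t *ssthresh,
    uint32_t rounds_since_rise, uint32_t gp_rnd_thresh, int gate_to_fs,
    uint32_t ss_hi_fs, int *initial_ss_comp)
{
	if (gp_rnd_thresh == 0 || rounds_since_rise < gp_rnd_thresh)
		return;			/* b/w still rose recently */
	*initial_ss_comp = 1;		/* leave the initial slow start */
	if (gate_to_fs && *cwnd > ss_hi_fs)
		*cwnd = ss_hi_fs;	/* clamp to highest SS flight size */
	*ssthresh = *cwnd - 1;		/* force CA from here on */
}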
15706 * A) It moves the cum-ack forward in rack_do_compressed_ack_processing()
15707 * B) It is behind the cum-ack. in rack_do_compressed_ack_processing()
15708 * C) It is a window-update ack. in rack_do_compressed_ack_processing()
15709 * D) It is a dup-ack. in rack_do_compressed_ack_processing()
15711 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES in rack_do_compressed_ack_processing()
15736 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_compressed_ack_processing()
15737 if (rack->gp_ready && in rack_do_compressed_ack_processing()
15738 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) in rack_do_compressed_ack_processing()
15741 if (rack->r_state != tp->t_state) in rack_do_compressed_ack_processing()
15743 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15744 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_compressed_ack_processing()
15753 bytes = tp->gput_ack - tp->gput_seq; in rack_do_compressed_ack_processing()
15754 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_compressed_ack_processing()
15755 bytes += tp->gput_seq - tp->snd_una; in rack_do_compressed_ack_processing()
15756 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_compressed_ack_processing()
15762 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_compressed_ack_processing()
15763 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_compressed_ack_processing()
15764 tp->gput_seq, in rack_do_compressed_ack_processing()
15766 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_compressed_ack_processing()
15770 to->to_flags = 0; in rack_do_compressed_ack_processing()
15771 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), in rack_do_compressed_ack_processing()
15772 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); in rack_do_compressed_ack_processing()
15773 cnt = m->m_len / sizeof(struct tcp_ackent); in rack_do_compressed_ack_processing()
15775 high_seq = tp->snd_una; in rack_do_compressed_ack_processing()
15776 the_win = tp->snd_wnd; in rack_do_compressed_ack_processing()
15777 win_seq = tp->snd_wl1; in rack_do_compressed_ack_processing()
15778 win_upd_ack = tp->snd_wl2; in rack_do_compressed_ack_processing()
15781 rack->r_ctl.rc_rcvtime = cts; in rack_do_compressed_ack_processing()
15783 if ((rack->rc_gp_dyn_mul) && in rack_do_compressed_ack_processing()
15784 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15785 (rack->rc_always_pace)) { in rack_do_compressed_ack_processing()
15795 if (ae->flags & TH_FIN) in rack_do_compressed_ack_processing()
15804 tiwin = ae->win << tp->snd_scale; in rack_do_compressed_ack_processing()
15805 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_compressed_ack_processing()
15806 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_compressed_ack_processing()
15808 if (SEQ_LT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15810 ae->ack_val_set = ACK_BEHIND; in rack_do_compressed_ack_processing()
15811 } else if (SEQ_GT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15813 ae->ack_val_set = ACK_CUMACK; in rack_do_compressed_ack_processing()
15814 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ in rack_do_compressed_ack_processing()
15816 ae->ack_val_set = ACK_DUPACK; in rack_do_compressed_ack_processing()
15819 ae->ack_val_set = ACK_RWND; in rack_do_compressed_ack_processing()
15822 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); in rack_do_compressed_ack_processing()
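/*
 * The classification just above, as a pure function over the ack, the
 * current cum-ack point, and the advertised window. The EX_ACK_* names
 * mirror the ACK_* cases A-D described at the top of this function; the
 * values are illustrative.
 */
enum { EX_ACK_BEHIND, EX_ACK_CUMACK, EX_ACK_DUPACK, EX_ACK_RWND };

static int
ex_classify_ack(uint32_t ack, uint32_t high_seq, uint32_t tiwin,
    uint32_t the_win, int in_persist)
{
	if ((int32_t)(ack - high_seq) < 0)
		return (EX_ACK_BEHIND);		/* case B */
	if ((int32_t)(ack - high_seq) > 0)
		return (EX_ACK_CUMACK);		/* case A */
	if (tiwin == the_win && in_persist == 0)
		return (EX_ACK_DUPACK);		/* case D */
	return (EX_ACK_RWND);			/* case C */
}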
15824 if (ae->flags & HAS_TSTMP) { in rack_do_compressed_ack_processing()
15826 to->to_flags = TOF_TS; in rack_do_compressed_ack_processing()
15827 ae->ts_echo -= tp->ts_offset; in rack_do_compressed_ack_processing()
15828 to->to_tsecr = ae->ts_echo; in rack_do_compressed_ack_processing()
15829 to->to_tsval = ae->ts_value; in rack_do_compressed_ack_processing()
15832 * non RFC1323 RTT calculation. Normalize timestamp if syncookies in rack_do_compressed_ack_processing()
15835 if (TSTMP_GT(ae->ts_echo, ms_cts)) in rack_do_compressed_ack_processing()
15836 to->to_tsecr = 0; in rack_do_compressed_ack_processing()
15837 if (tp->ts_recent && in rack_do_compressed_ack_processing()
15838 TSTMP_LT(ae->ts_value, tp->ts_recent)) { in rack_do_compressed_ack_processing()
15839 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { in rack_do_compressed_ack_processing()
15843 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15844 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15851 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && in rack_do_compressed_ack_processing()
15852 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { in rack_do_compressed_ack_processing()
15853 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_compressed_ack_processing()
15854 tp->ts_recent = ae->ts_value; in rack_do_compressed_ack_processing()
15858 to->to_flags = 0; in rack_do_compressed_ack_processing()
15861 if (tp->t_idle_reduce && in rack_do_compressed_ack_processing()
15862 (tp->snd_max == tp->snd_una) && in rack_do_compressed_ack_processing()
15863 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_compressed_ack_processing()
15867 tp->t_rcvtime = ticks; in rack_do_compressed_ack_processing()
15869 if (tcp_ecn_input_segment(tp, ae->flags, 0, in rack_do_compressed_ack_processing()
15870 tcp_packets_this_ack(tp, ae->ack), in rack_do_compressed_ack_processing()
15871 ae->codepoint)) in rack_do_compressed_ack_processing()
15872 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); in rack_do_compressed_ack_processing()
15875 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15876 tp->tcp_cnt_counters[ae->ack_val_set]++; in rack_do_compressed_ack_processing()
15883 * The non-compressed path through the code has this in rack_do_compressed_ack_processing()
15890 if (ae->ack_val_set == ACK_BEHIND) { in rack_do_compressed_ack_processing()
15893 * or it could be a keep-alive or persists in rack_do_compressed_ack_processing()
15895 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { in rack_do_compressed_ack_processing()
15896 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_compressed_ack_processing()
15897 if (rack->r_ctl.rc_reorder_ts == 0) in rack_do_compressed_ack_processing()
15898 rack->r_ctl.rc_reorder_ts = 1; in rack_do_compressed_ack_processing()
15900 } else if (ae->ack_val_set == ACK_DUPACK) { in rack_do_compressed_ack_processing()
15902 rack_strike_dupack(rack, ae->ack); in rack_do_compressed_ack_processing()
15903 } else if (ae->ack_val_set == ACK_RWND) { in rack_do_compressed_ack_processing()
15905 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15906 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15907 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15908 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15909 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15911 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15913 if (rack->forced_ack) { in rack_do_compressed_ack_processing()
15915 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15920 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15921 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15926 if (SEQ_GT(ae->ack, tp->snd_max)) { in rack_do_compressed_ack_processing()
15931 if ((tp->t_flags & TF_ACKNOW) == 0) { in rack_do_compressed_ack_processing()
15933 if (tp->t_flags & TF_ACKNOW) in rack_do_compressed_ack_processing()
15934 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15939 if (tiwin != tp->snd_wnd) { in rack_do_compressed_ack_processing()
15940 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15941 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15947 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15948 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); in rack_do_compressed_ack_processing()
15951 high_seq = ae->ack; in rack_do_compressed_ack_processing()
15953 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15954 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15955 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15956 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15957 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15959 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15961 rack_process_to_cumack(tp, rack, ae->ack, cts, to, in rack_do_compressed_ack_processing()
15962 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15966 if (rack->rc_dsack_round_seen) { in rack_do_compressed_ack_processing()
15968 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { in rack_do_compressed_ack_processing()
15970 rack->rc_dsack_round_seen = 0; in rack_do_compressed_ack_processing()
15976 /* And lets be sure to commit the rtt measurements for this ack */ in rack_do_compressed_ack_processing()
15981 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15982 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15983 if (ae->ack_val_set == ACK_CUMACK) in rack_do_compressed_ack_processing()
15984 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15993 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { in rack_do_compressed_ack_processing()
15995 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); in rack_do_compressed_ack_processing()
15996 } else if (rack->rc_has_collapsed) in rack_do_compressed_ack_processing()
15998 if ((rack->r_collapse_point_valid) && in rack_do_compressed_ack_processing()
15999 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) in rack_do_compressed_ack_processing()
16000 rack->r_collapse_point_valid = 0; in rack_do_compressed_ack_processing()
16001 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
16014 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && in rack_do_compressed_ack_processing()
16015 (rack->rc_new_rnd_needed == 0) && in rack_do_compressed_ack_processing()
16025 * since cum-ack moved forward. in rack_do_compressed_ack_processing()
16027 rack->probe_not_answered = 0; in rack_do_compressed_ack_processing()
16028 if (tp->t_flags & TF_NEEDSYN) { in rack_do_compressed_ack_processing()
16030 * T/TCP: Connection was half-synchronized, and our SYN has in rack_do_compressed_ack_processing()
16032 * to non-starred state, increment snd_una for ACK of SYN, in rack_do_compressed_ack_processing()
16035 tp->t_flags &= ~TF_NEEDSYN; in rack_do_compressed_ack_processing()
16036 tp->snd_una++; in rack_do_compressed_ack_processing()
16037 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
16039 if (acked > sbavail(&so->so_snd)) in rack_do_compressed_ack_processing()
16040 acked_amount = sbavail(&so->so_snd); in rack_do_compressed_ack_processing()
16041 if (IN_FASTRECOVERY(tp->t_flags) && in rack_do_compressed_ack_processing()
16042 (rack->rack_no_prr == 0)) in rack_do_compressed_ack_processing()
16044 if (IN_RECOVERY(tp->t_flags)) { in rack_do_compressed_ack_processing()
16045 if (SEQ_LT(high_seq, tp->snd_recover) && in rack_do_compressed_ack_processing()
16046 (SEQ_LT(high_seq, tp->snd_max))) { in rack_do_compressed_ack_processing()
16052 } else if ((rack->rto_from_rec == 1) && in rack_do_compressed_ack_processing()
16053 SEQ_GEQ(high_seq, tp->snd_recover)) { in rack_do_compressed_ack_processing()
16056 * and never re-entered recovery. The timeout(s) in rack_do_compressed_ack_processing()
16060 rack->rto_from_rec = 0; in rack_do_compressed_ack_processing()
16062 /* Handle the rack-log-ack part (sendmap) */ in rack_do_compressed_ack_processing()
16063 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16065 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16066 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
16079 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16082 if ((tp->t_flags & TF_PREVVALID) && in rack_do_compressed_ack_processing()
16083 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_do_compressed_ack_processing()
16084 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
16085 if (tp->t_rxtshift == 1 && in rack_do_compressed_ack_processing()
16086 (int)(ticks - tp->t_badrxtwin) < 0) in rack_do_compressed_ack_processing()
16102 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_compressed_ack_processing()
16104 p_cwnd += tp->snd_cwnd; in rack_do_compressed_ack_processing()
16107 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { in rack_do_compressed_ack_processing()
16108 /* Must be non-newreno (cubic) getting too ahead of itself */ in rack_do_compressed_ack_processing()
16109 tp->snd_cwnd = p_cwnd; in rack_do_compressed_ack_processing()
16112 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_do_compressed_ack_processing()
16113 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16115 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_do_compressed_ack_processing()
16117 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); in rack_do_compressed_ack_processing()
16122 tp->t_acktime = ticks; in rack_do_compressed_ack_processing()
16123 rack_log_progress_event(rack, tp, tp->t_acktime, in rack_do_compressed_ack_processing()
16126 tp->t_rxtshift = 0; in rack_do_compressed_ack_processing()
16127 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_do_compressed_ack_processing()
16128 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_do_compressed_ack_processing()
rack->rc_tlp_in_progress = 0;
rack->r_ctl.rc_tlp_cnt_out = 0;
if (SEQ_GT(tp->snd_una, tp->snd_recover))
tp->snd_recover = tp->snd_una;
if (SEQ_LT(tp->snd_nxt, tp->snd_max))
tp->snd_nxt = tp->snd_max;
/* If the RXT timer is running we want to stop it. */
if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
tp->snd_wl2 = high_seq;
tp->t_dupacks = 0;
(rack->use_fixed_rate == 0) &&
(rack->in_probe_rtt == 0) &&
rack->rc_gp_dyn_mul &&
rack->rc_always_pace) {
if (tp->snd_una == tp->snd_max) {
tp->t_flags &= ~TF_PREVVALID;
rack->r_ctl.retran_during_recovery = 0;
rack->rc_suspicious = 0;
rack->r_ctl.dsack_byte_cnt = 0;
rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
if (rack->r_ctl.rc_went_idle_time == 0)
rack->r_ctl.rc_went_idle_time = 1;
if (sbavail(&tptosocket(tp)->so_snd) == 0)
tp->t_acktime = 0;
rack->r_wanted_output = 1;
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
    (sbavail(&so->so_snd) == 0) &&
    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
/* tcp_close will kill the inp pre-log the Reset */
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
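/*
 * TF2_TCP_ACCOUNTING note: ts_val is a cycle counter sampled on
 * entry to this path, so rdstc - ts_val charges the CPU cycles
 * spent handling this ack to the ACK_CUMACK / CYC_HANDLE_ACK
 * buckets; the same pattern repeats at each early return below.
 */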
/* We would normally do drop-with-reset which would ... */
if ((sbused(&so->so_snd) == 0) &&
    (tp->t_state >= TCPS_FIN_WAIT_1) &&
    (tp->t_flags & TF_SENTFIN)) {
/*
 * If we can't receive any more data, then closing user can
 * proceed. Starting the timer is contrary to the
 * specification, but if we don't get a FIN we'll hang
 * forever.
 */
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
/* We don't change to fin-wait-2 if we have our fin acked. */
if (sbavail(&so->so_snd)) {
rack->r_wanted_output = 1;
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
switch (tp->t_state) {
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
if (rack->r_fast_output) {
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
if ((rack->r_wanted_output != 0) ||
    (rack->r_fast_output != 0) ||
    (tp->t_flags & TF_ACKNOW)) {
if (tp->t_flags2 & TF2_HPTS_CALLS)
tp->t_flags2 &= ~TF2_HPTS_CALLS;
rack_timer_audit(tp, rack, &so->so_snd);
/* In rack_do_segment_nounlock(): */
/*
 * cts - is the current time from tv (caller gets ts) in microseconds.
 * ms_cts - is the current time from tv in milliseconds.
 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
 */
rack = (struct tcp_rack *)tp->t_fb_ptr;
if (rack->rack_deferred_inited == 0) {
/*
 * This can happen in the non-LRO path where we are pacing and
 * must not send anything because a pacing timer is running.
 */
if (m->m_flags & M_ACKCMP) {
rack->rc_ack_required = 0;
if ((rack->rc_always_pace == 1) &&
    (rack->rc_ack_can_sendout_data == 0) &&
    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
    (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) {
slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
optlen = (th->th_off << 2) - sizeof(struct tcphdr);
rack->r_ctl.gp_bw,
if (m->m_flags & M_ACKCMP) {
nsegs = m->m_pkthdr.lro_nsegs;
if ((m->m_flags & M_TSTMP) ||
    (m->m_flags & M_TSTMP_LRO)) {
rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
rack->r_ctl.act_rcv_time = *tv;
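/*
 * Note on the block above: when the mbuf carries a hardware
 * (M_TSTMP) or LRO (M_TSTMP_LRO) arrival time we convert it and
 * use that as act_rcv_time instead of the current wall clock,
 * since queueing between the NIC and the stack would otherwise
 * skew the RTT and pacing math.
 */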
/*
 * Unscale the window into a 32-bit value. For the SYN_SENT state
 * the scale is zero.
 */
tiwin = th->th_win << tp->snd_scale;
(th->th_off << 2) - sizeof(struct tcphdr),
KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
    (tp->t_flags & TF_GPUTINPROG)) {
bytes = tp->gput_ack - tp->gput_seq;
if (SEQ_GT(tp->gput_seq, tp->snd_una))
bytes += tp->gput_seq - tp->snd_una;
if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
    rack->r_ctl.rc_gp_srtt /*flex1*/,
    tp->gput_seq,
tp->t_flags &= ~TF_GPUTINPROG;
if (tcp_bblogging_on(rack->rc_tp)) {
if (SEQ_GT(th->th_ack, tp->snd_una)) {
tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr == 0)
log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
log.u_bbr.use_lt_bw |= rack->r_might_revert;
log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
log.u_bbr.bbr_state = rack->rc_free_cnt;
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
log.u_bbr.flex3 = m->m_flags;
log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
if (m->m_flags & M_TSTMP) {
} else if (m->m_flags & M_TSTMP_LRO) {
log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
log.u_bbr.applimited = tp->t_tcpreq_closed;
log.u_bbr.applimited |= tp->t_tcpreq_open;
log.u_bbr.applimited |= tp->t_tcpreq_req;
log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
log.u_bbr.rttProp = tcp_req->timestamp;
log.u_bbr.cur_del_rate = tcp_req->start;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
log.u_bbr.bw_inuse = tcp_req->end;
log.u_bbr.flex6 = tcp_req->start_seq;
if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
log.u_bbr.epoch = tcp_req->end_seq;
TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
rack->rc_ack_required = 0;
/* If a segment with the ACK-bit set arrives in the SYN-SENT state ... */
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
/*
 * Segment received on connection. Reset idle time and keep-alive
 * timer. XXX: This should be done after segment validation to
 * ignore broken/spoofed segs.
 */
if (tp->t_idle_reduce &&
    (tp->snd_max == tp->snd_una) &&
    (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
tp->t_rcvtime = ticks;
stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
if (tiwin > rack->r_ctl.rc_high_rwnd)
rack->r_ctl.rc_high_rwnd = tiwin;
tcp_packets_this_ack(tp, th->th_ack),
rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
/*
 * ... non-RFC1323 RTT calculation. Normalize timestamp if
 * syncookies were used when this connection was established.
 */
to.to_tsecr -= tp->ts_offset;
if ((rack->r_rcvpath_rtt_up == 1) &&
    (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) {
uint32_t rtt = 0;
/*
 * ... data to do an RTT. We set a flag when we first
 * ... and have an RTT to share. We log it as a conf ...
 */
if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv))
rtt = (cts - rack->r_ctl.last_time_of_arm_rcv);
rack->r_rcvpath_rtt_up = 0;
/* Submit and commit the timer */
if (rtt > 0) {
tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1);
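/*
 * Illustrative numbers (not from a real trace): if the receive
 * path was armed at cts = 1,000,000us and the echoed timestamp
 * arrives at cts = 1,040,000us, the code above submits
 * rtt = 40,000us to tcp_rack_xmit_timer() with a confidence of 4.
 */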
if (rack->r_state == 0) {
KASSERT(rack->rc_inp != NULL,
    ("%s: rack->rc_inp unexpectedly NULL", __func__));
if (rack->rc_inp == NULL) {
rack->rc_inp = inp;
if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
(tp->t_flags & TF_REQ_SCALE)) {
tp->t_flags |= TF_RCVD_SCALE;
tp->snd_scale = to.to_wscale;
tp->t_flags &= ~TF_REQ_SCALE;
tp->snd_wnd = th->th_win;
(tp->t_flags & TF_REQ_TSTMP)) {
tp->t_flags |= TF_RCVD_TSTMP;
tp->ts_recent = to.to_tsval;
tp->ts_recent_age = cts;
tp->t_flags &= ~TF_REQ_TSTMP;
if ((tp->t_flags & TF_SACK_PERMIT) &&
tp->t_flags &= ~TF_SACK_PERMIT;
if (tp->t_flags & TF_FASTOPEN) {
if ((inp->inp_vflag & INP_IPV6) != 0)
/*
 * ... TF_SACK_PERMIT is set and the sack-not-required knob is clear.
 * The code now does dup-ack counting, so if you don't ...
 */
((tp->t_flags & TF_SACK_PERMIT) == 0)) {
(*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen,
sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
if ((rack->rc_gp_dyn_mul) &&
    (rack->use_fixed_rate == 0) &&
    (rack->rc_always_pace)) {
if ((rack->forced_ack) &&
/*
 * ... always. All other times (timers etc) we must have a
 * rack-state ...
 */
rack->r_ctl.rc_rcvtime = cts;
if (rack->r_state != tp->t_state)
if (SEQ_GT(th->th_ack, tp->snd_una) &&
    (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL)
prev_state = rack->r_state;
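/*
 * The check below appears to gate inbound RST handling: the
 * reset is honored only when its sequence number lands inside
 * the receive window (or matches last_ack_sent exactly when the
 * window is zero), in the spirit of RFC 5961's blind-reset
 * mitigations.
 */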
((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
    (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) {
tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV);
retval = (*rack->r_substate) (m, th, so,
if ((rack->rc_gp_dyn_mul) &&
    (rack->rc_always_pace) &&
    (rack->use_fixed_rate == 0) &&
    rack->in_probe_rtt &&
    (rack->r_ctl.rc_time_probertt_starts == 0)) {
if (rack->set_pacing_done_a_iw == 0) {
if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
rack->set_pacing_done_a_iw = 1;
/* ... use of 0xf here since we only have 11 counters (0 - 0xa) and ... */
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
if ((rack->r_wanted_output != 0) ||
    (tp->t_flags & TF_ACKNOW) ||
    (rack->r_fast_output != 0)) {
} else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) {
(tcp_in_hpts(rack->rc_tp) == 0)) {
/*
 * We are not in hpts and we had a pacing timer up. Use
 * the remaining time (slot_remaining) to restart the timer.
 */
if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
tp->t_flags2 &= ~TF2_HPTS_CALLS;
if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) &&
    (rack->rc_new_rnd_needed == 0) &&
rack_new_round_setup(tp, rack, tp->snd_una);
((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
(SEQ_GT(tp->snd_max, tp->snd_una) ||
    (tp->t_flags & TF_DELACK) ||
    ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
    (tp->t_state <= TCPS_CLOSING)))) {
/* We could not send (probably in the hpts but stopped the timer earlier)? */
if ((tp->snd_max == tp->snd_una) &&
    ((tp->t_flags & TF_DELACK) == 0) &&
    (tcp_in_hpts(rack->rc_tp)) &&
    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
rack->r_early = 1;
rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
/* Do we have the correct timer running? */
rack_timer_audit(tp, rack, &so->so_snd);
rack->r_wanted_output = 0;
/* In rack_do_segment(): */
if (!STAILQ_EMPTY(&tp->t_inqueue)) {
if (m->m_flags & M_TSTMP_LRO) {
/* In tcp_rack_output(): */
/* Return the next guy to be re-transmitted */
if (tqhash_empty(rack->r_ctl.tqh)) {
if (tp->t_flags & TF_SENTFIN) {
rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
    (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
/* ... retransmit (no rack timer would be started). */
if (rsm->r_flags & RACK_ACKED) {
if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
    (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
idx = rsm->r_rtr_cnt - 1;
ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
if ((tsused - ts_low) < thresh) {
if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
    ((rsm->r_flags & RACK_SACK_PASSED))) {
/*
 * We have passed the dup-ack threshold <or>
 * ... it is only the dup-ack threshold that ...
 */
rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
rack->r_fast_output = 0;
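/*
 * Net effect of the checks above: a segment is handed back for
 * retransmission once enough time has elapsed since its last
 * send (tsused - ts_low >= thresh, the RACK reordering window)
 * and it has either been passed over by a SACK or reached the
 * dup-ack threshold; otherwise NULL is returned and the timers
 * decide.
 */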
/* In rack_log_pacing_delay_calc(): */
if (tcp_bblogging_on(rack->rc_tp)) {
log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
log.u_bbr.use_lt_bw |= rack->r_late;
log.u_bbr.use_lt_bw |= rack->r_early;
log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
log.u_bbr.use_lt_bw |= rack->gp_ready;
log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
if (rack->r_ctl.gp_bw == 0)
log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
log.u_bbr.bbr_state = rack->dgp_on;
log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd;
TCP_LOG_EVENTP(rack->rc_tp, NULL,
    &rack->rc_inp->inp_socket->so_rcv,
    &rack->rc_inp->inp_socket->so_snd,
/* In rack_get_pacing_len(): */
user_max = rack->rc_user_set_max_segs * mss;
if (rack->rc_force_max_seg) {
if (rack->use_fixed_rate &&
    ((rack->r_ctl.crte == NULL) ||
    (bw != rack->r_ctl.crte->rate))) {
(rack->r_ctl.rc_user_set_min_segs == 1))
new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss,
    pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) {
if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso)
new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss;
if (rack->r_ctl.rc_user_set_min_segs &&
    ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso))
new_tso = rack->r_ctl.rc_user_set_min_segs * mss;
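/*
 * Summary: the pacing burst (new_tso) starts from the
 * b/w-derived size out of tcp_get_pacing_burst_size_w_divisor(),
 * may be raised to a hybrid client's suggested maxseg, and is
 * floored at the user-configured minimum segment count times mss.
 */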
/* In rack_arrive_at_discounted_rate(): */
/*
 * ... nearly zero, maybe because of a time-out?
 * Lets drop back to the lt-bw.
 */
} else if (IN_RECOVERY(rack->rc_tp->t_flags)) {
if (rack->rack_hibeta == 0) {
reduced_win = window_input * rack->r_ctl.saved_hibeta;
gain = rack->r_ctl.saved_hibeta;
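/*
 * Sketch, assuming saved_hibeta is a percentage: in recovery the
 * committed window is discounted to roughly
 * window_input * saved_hibeta / 100, so the pacing rate tracks
 * what congestion control will actually let us send.
 */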
/* In pace_to_fill_cwnd(): */
rack->r_via_fill_cw = 0;
if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
if (rack->r_ctl.rc_last_us_rtt == 0)
if (rack->rc_pace_fill_if_rttin_range &&
    (rack->r_ctl.rc_last_us_rtt >=
    (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
/* The rtt is huge, N * smallest, lets not fill */
if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap)
/* first lets calculate the b/w based on the last us-rtt ... */
fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
if (rack->rc_fillcw_apply_discount) {
if (fill_bw > rack->rc_tp->snd_wnd)
fill_bw = rack->rc_tp->snd_wnd;
fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
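/*
 * Back-of-envelope example with illustrative values: if the
 * window chosen above is 1,000,000 bytes and rc_last_us_rtt is
 * 20,000us, fill_bw = 1e6 * 1e6 / 2e4 = 50,000,000 bytes/sec,
 * i.e. a rate just fast enough to keep a full window in flight
 * each RTT.
 */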
if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap)
fill_bw = rack->r_ctl.fillcw_cap;
/* We want to limit fill-cw to some multiplier ... */
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
tcp_log_event(rack->rc_tp, NULL, NULL, NULL,
rack->r_via_fill_cw = 1;
if (rack->r_rack_hw_rate_caps &&
    (rack->r_ctl.crte != NULL)) {
high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
rack->r_via_fill_cw = 0;
} else if ((rack->r_ctl.crte == NULL) &&
    (rack->rack_hdrw_pacing == 0) &&
    (rack->rack_hdw_pace_ena) &&
    rack->r_rack_hw_rate_caps &&
    (rack->rack_attempt_hdwr_pace == 0) &&
    (rack->rc_inp->inp_route.ro_nh != NULL) &&
    (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) {
rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
fill_bw = rack->r_ctl.bw_rate_cap;
/* ... in an rtt (unless it was capped), what does that ... */
/* In rack_get_pacing_delay(): */
(rack->r_ctl.rc_user_set_min_segs == 1))
if (rack->rc_always_pace == 0) {
if (rack->r_ctl.rc_rack_min_rtt)
srtt = rack->r_ctl.rc_rack_min_rtt;
else
srtt = max(tp->t_srtt, 1);
if (rack->r_ctl.rc_rack_largest_cwnd)
cwnd = rack->r_ctl.rc_rack_largest_cwnd;
else
cwnd = rack->r_ctl.cwnd_to_use;
slot -= reduce;
if (rack->rc_pace_to_cwnd) {
rack->rc_ack_can_sendout_data = 1;
/* RRS: We insert non-paced call to stats here for len */
if ((rack->r_rr_config == 1) && rsm) {
return (rack->r_ctl.rc_min_to);
if (rack->use_fixed_rate) {
} else if ((rack->r_ctl.init_rate == 0) &&
    (rack->r_ctl.gp_bw == 0)) {
} else if (rack->dgp_on) {
rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
if (rate_wanted > rack->rc_tp->snd_wnd)
rate_wanted = rack->rc_tp->snd_wnd;
rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC;
rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd,
    rack->r_ctl.cwnd_to_use,
    rack->r_ctl.rc_last_us_rtt,
segs = (len + segsiz - 1) / segsiz;
/* We need the diff between 1514 bytes (e-mtu with e-hdr) ... */
oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr);
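/*
 * Sketch of what this block is building, assuming the elided
 * lines follow the usual pattern: the per-segment wire overhead
 * 'oh' (headers beyond the payload MSS) is added for each of the
 * 'segs' segments, and the resulting wire bytes divided by
 * rate_wanted give the pacing slot in microseconds.
 */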
if (rack->r_is_v6) {
if (rack->r_ctl.crte) {
tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
rack->r_ctl.crte = NULL;
rack->rack_attempt_hdwr_pace = 0;
rack->rack_hdrw_pacing = 0;
if (rack->r_ctl.crte &&
    (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
if (rack->r_rack_hw_rate_caps == 0) {
tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
rack->r_ctl.crte = NULL;
rack->rack_attempt_hdwr_pace = 0;
rack->rack_hdrw_pacing = 0;
if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) {
tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
rack->r_ctl.crte = NULL;
/* Lets re-allow attempting to setup pacing */
rack->rack_hdrw_pacing = 0;
rack->rack_attempt_hdwr_pace = 0;
prev_fill = rack->r_via_fill_cw;
if ((rack->rc_pace_to_cwnd) &&
    (rack->dgp_on == 1) &&
    (rack->use_fixed_rate == 0) &&
    (rack->in_probe_rtt == 0) &&
    (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
/* Re-check to make sure we are not exceeding our max b/w */
if ((rack->r_ctl.crte != NULL) &&
    (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
if (rack->r_rack_hw_rate_caps == 0) {
tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
rack->r_ctl.crte = NULL;
rack->rack_attempt_hdwr_pace = 0;
rack->rack_hdrw_pacing = 0;
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
    (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
if ((rack->rack_hdw_pace_ena) &&
    (rack->rack_hdrw_pacing == 0) &&
    (rack->rack_attempt_hdwr_pace == 0)) {
rack->rack_attempt_hdwr_pace = 1;
rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
    rack->rc_inp->inp_route.ro_nh->nh_ifp,
    &err, &rack->r_ctl.crte_prev_rate);
if (rack->r_ctl.crte) {
rack->rack_hdrw_pacing = 1;
rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz,
    pace_one, rack->r_ctl.crte,
    NULL, rack->r_ctl.pace_len_divisor);
rate_wanted, rack->r_ctl.crte->rate, __LINE__,
rack->r_ctl.last_hw_bw_req = rate_wanted;
} else if (rack->rack_hdrw_pacing &&
    (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
if (rack->r_up_only &&
    (rate_wanted < rack->r_ctl.crte->rate)) {
/*
 * previous  |  this-time
 * A)    0       |    0   -- fill_cw not in the picture
 * B)    1       |    0   -- we were doing a fill-cw but now are not
 * C)    1       |    1   -- all rates from fill_cw
 * D)    0       |    1   -- we were doing non-fill and now we are filling
 */
if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
if ((rate_wanted > rack->r_ctl.crte->rate) ||
    (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
bw_est, rack->r_ctl.crte->rate, __LINE__,
tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
rack->r_ctl.crte = NULL;
rack->rack_attempt_hdwr_pace = 0;
rack->rack_hdrw_pacing = 0;
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
    rack->rc_tp,
    rack->rc_inp->inp_route.ro_nh->nh_ifp,
    &err, &rack->r_ctl.crte_prev_rate);
rack->rack_hdrw_pacing = 0;
rack->r_ctl.crte = NULL;
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
} else if (nrte != rack->r_ctl.crte) {
rack->r_ctl.crte = nrte;
rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted,
    segsiz, pace_one, rack->r_ctl.crte,
    NULL, rack->r_ctl.pace_len_divisor);
rate_wanted, rack->r_ctl.crte->rate, __LINE__,
rack->r_ctl.last_hw_bw_req = rate_wanted;
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
rate_wanted, rack->r_ctl.crte->rate, __LINE__,
rack->r_ctl.last_hw_bw_req = rate_wanted;
rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim,
(rack->use_fixed_rate == 0) &&
(rack->rack_hdrw_pacing == 0)) {
/* ... the RTT and compensate for this i.e. the srtt will ... */
if (rack->rc_tp->t_srtt)
srtt = rack->rc_tp->t_srtt;
if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
/* ... of gas or we are mis-estimating the time ... */
hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
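/*
 * Illustrative effect: if the NIC's rate-table packet spacing
 * (time_between) is 50us and rack_enobuf_hw_boost_mult is 2,
 * hw_boost_delay adds 100us of extra spacing while the hardware
 * queue keeps reporting ENOBUFs.
 */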
/* In rack_start_gp_measurement(): */
if (tp->t_state < TCPS_ESTABLISHED) {
if (tp->t_state >= TCPS_FIN_WAIT_1) {
if (sbavail(&tptosocket(tp)->so_snd) <
tp->t_flags |= TF_GPUTINPROG;
rack->r_ctl.rc_gp_cumack_ts = 0;
rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
tp->gput_seq = startseq;
rack->app_limited_needs_set = 0;
if (rack->in_probe_rtt)
rack->measure_saw_probe_rtt = 1;
else if ((rack->measure_saw_probe_rtt) &&
    (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
rack->measure_saw_probe_rtt = 0;
if (rack->rc_gp_filled)
tp->gput_ts = rack->r_ctl.last_cumack_advance;
tp->gput_ts = tcp_get_usecs(&tv);
rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
/* ... initial-windows worth of data to ... */
if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
rack->app_limited_needs_set = 1;
tp->gput_ack = startseq + max(rc_init_window(rack),
    tp->gput_seq,
    tp->gput_ack,
    tp->gput_ts,
    (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
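/*
 * Sketch of the idea above: with no b/w estimate yet
 * (rc_gp_filled == 0 and no configured init_rate) the first
 * goodput measurement is stretched to cover at least an initial
 * window of data, so a single small, possibly app-limited send
 * cannot seed gp_bw with a bogus sample.
 */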
if (rack->r_ctl.rc_app_limited_cnt == 0) {
/* ... the tp->gput_ts is correctly set based on ... */
my_rsm = tqhash_min(rack->r_ctl.tqh);
(my_rsm->r_rtr_cnt != 1)) {
if (rack->r_ctl.rc_first_appl == NULL) {
rack->app_limited_needs_set = 1;
/* ... after that (after the app-limited). */
my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl);
if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
(my_rsm->r_rtr_cnt != 1)) {
/* ... the last is the app-limited one. */
tp->gput_seq = my_rsm->r_start;
if (my_rsm->r_flags & RACK_ACKED) {
tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
rack->app_limited_needs_set = 0;
tp->gput_seq = my_rsm->r_end;
nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
rack->r_ctl.rc_gp_cumack_ts = 0;
if ((rack->r_ctl.cleared_app_ack == 1) &&
    (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) {
rack->app_limited_needs_set = 1;
rack->r_ctl.cleared_app_ack = 0;
    tp->gput_seq,
    tp->gput_ack,
    tp->gput_ts,
    (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
/* ... idle or if this is the first-send. Lets ... */
rack->app_limited_needs_set = 1;
tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
rack->r_ctl.rc_gp_cumack_ts = 0;
my_rsm = tqhash_find(rack->r_ctl.tqh, startseq);
rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
if (my_rsm->r_flags & RACK_ACKED) {
tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
rack->app_limited_needs_set = 0;
if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
tp->gput_seq = my_rsm->r_start;
/* TSNH unless we have some send-map limit, ... */
rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
    tp->gput_seq,
    tp->gput_ack,
    tp->gput_ts,
    (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
/* In rack_what_can_we_send(): */
if (tp->snd_wnd > cwnd_to_use)
sendwin = tp->snd_wnd;
if (ctf_outstanding(tp) >= tp->snd_wnd) {
/* We never want to go over our peer's rcv-window */
flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
/* ... >= tp->snd_wnd). */
len = sendwin - flight;
if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
len = tp->snd_wnd - ctf_outstanding(tp);
len = avail - sb_offset;
/* In rack_log_fsb(): */
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex5 = tp->rcv_numsacks;
log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
log.u_bbr.flex8 = rack->r_fsb_inited;
log.u_bbr.applimited = rack->r_fast_output;
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
/* In rack_fo_base_copym(): */
if (hw_tls && (m->m_flags & M_EXTPG))
tls = m->m_epg_tls;
if (m->m_flags & M_EXTPG)
ntls = m->m_epg_tls;
mlen = min(len, m->m_len - off);
if (m->m_flags & M_EXTPG) {
mlen = (seglimit - frags - 1) * fragsize;
seglimit -= frags;
n = m_get(M_NOWAIT, m->m_type);
n->m_len = mlen;
len_cp += n->m_len;
if (m->m_flags & (M_EXT | M_EXTPG)) {
n->m_data = m->m_data + off;
(u_int)n->m_len);
len -= n->m_len;
m = m->m_next;
np = &n->m_next;
if (len || (soff == smb->m_len)) {
fsb->m = smb;
fsb->off = soff;
fsb->o_m_len = smb->m_len;
fsb->o_t_len = M_TRAILINGROOM(smb);
fsb->o_m_len = 0;
fsb->o_t_len = 0;
/* In rack_fo_m_copym(): */
m = rack->r_ctl.fsb.m;
if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) {
KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)),
    rack->r_ctl.fsb.o_t_len,
    rack->r_ctl.fsb.o_m_len,
    m->m_len));
rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m));
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m);
if (m->m_len < rack->r_ctl.fsb.o_m_len) {
KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)),
    m, m->m_len,
    rack, rack->r_ctl.fsb.o_m_len,
    rack->r_ctl.fsb.off));
if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len))
rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len);
rack->r_ctl.fsb.off = 0;
rack->r_ctl.fsb.o_m_len = m->m_len;
} else if (m->m_len > rack->r_ctl.fsb.o_m_len) {
soff = rack->r_ctl.fsb.off;
KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
    rack, *plen, m, m->m_len));
*s_mb = rack->r_ctl.fsb.m;
&rack->r_ctl.fsb,
    seglimit, segsize, rack->r_ctl.fsb.hw_tls);
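/*
 * The dance above keeps the cached fast-send-block pointer and
 * offset coherent when the socket buffer changed between sends:
 * shrunken trailing room means data was appended to the same
 * mbuf (grow o_m_len), while a shrunken m_len means acked bytes
 * were trimmed from the front, so the cached offset must shift
 * (or reset) before we copy.
 */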
/* In rack_log_queue_level(): */
err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
log.u_bbr.delRate = rack->r_ctl.crte->rate;
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
/* In rack_check_queue_level(): */
err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
/* TSNH -- KASSERT? */
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
log.u_bbr.delRate = rack->r_ctl.crte->rate;
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
/* In rack_fast_rsm_output(): */
if (rack->r_is_v6) {
ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
rsm->r_flags |= RACK_TLP;
rsm->r_flags &= ~RACK_TLP;
startseq = rsm->r_start;
segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
inp = rack->rc_inp;
flags = tcp_outflags[tp->t_state];
if (rsm->r_flags & RACK_HAS_FIN) {
if (tp->t_flags & TF_RCVD_TSTMP) {
to.to_tsval = ms_cts + tp->ts_offset;
to.to_tsecr = tp->ts_recent;
/* TCP-MD5 (RFC2385). */
if (tp->t_flags & TF_SIGNATURE)
udp = rack->r_ctl.fsb.udp;
if (rack->r_ctl.rc_pace_max_segs)
max_val = rack->r_ctl.rc_pace_max_segs;
else if (rack->rc_user_set_max_segs)
max_val = rack->rc_user_set_max_segs * segsiz;
if ((tp->t_flags & TF_TSO) &&
    (tp->t_port == 0))
m->m_data += max_linkhdr;
m->m_len = hdrlen;
th = rack->r_ctl.fsb.th;
if_hw_tsomax = tp->t_tsomax;
if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
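/*
 * These caps come from the interface via the tcpcb (t_tsomax and
 * friends) and bound how large a TSO burst this fast path may
 * hand to the hardware in one go.
 */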
max_len = (if_hw_tsomax - hdrlen -
(len <= MHLEN - hdrlen - max_linkhdr)) {
th->th_seq = htonl(rsm->r_start);
th->th_ack = htonl(tp->rcv_nxt);
if ((rsm->r_flags & RACK_HAD_PUSH) &&
    (len == (rsm->r_end - rsm->r_start)))
th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
if (th->th_win == 0) {
tp->t_sndzerowin++;
tp->t_flags |= TF_RXWIN0SENT;
tp->t_flags &= ~TF_RXWIN0SENT;
if (rsm->r_flags & RACK_TLP) {
tp->t_sndrexmitpack++;
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
if (rsm->m == NULL)
if (rsm->m &&
    ((rsm->orig_m_len != rsm->m->m_len) ||
    (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
…m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxse…
if ((m->m_next == NULL) || (len <= 0)) {
if (rack->r_is_v6)
ulen = hdrlen + len - sizeof(struct ip6_hdr);
ulen = hdrlen + len - sizeof(struct ip);
udp->uh_ulen = htons(ulen);
m->m_pkthdr.rcvif = (struct ifnet *)0;
if (TCPS_HAVERCVDSYN(tp->t_state) &&
    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
if ((tp->t_state == TCPS_SYN_RECEIVED) &&
    (tp->t_flags2 & TF2_ECN_SND_ECE))
tp->t_flags2 &= ~TF2_ECN_SND_ECE;
if (rack->r_is_v6) {
ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
ip6->ip6_flow |= htonl(ect << 20);
ip->ip_tos &= ~IPTOS_ECN_MASK;
ip->ip_tos |= ect;
if (rack->r_ctl.crte != NULL) {
m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
(u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
if (rack->r_is_v6) {
if (tp->t_port) {
m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
th->th_sum = htons(0);
m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
th->th_sum = in6_cksum_pseudo(ip6,
if (tp->t_port) {
m->m_pkthdr.csum_flags = CSUM_UDP;
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
th->th_sum = htons(0);
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
th->th_sum = in_pseudo(ip->ip_src.s_addr,
    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
KASSERT(ip->ip_v == IPVERSION,
    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
18791 * via either fast-path). in rack_fast_rsm_output()
18795 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_rsm_output()
18796 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_rsm_output()
18799 if (rack->r_is_v6) { in rack_fast_rsm_output()
18800 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18801 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_rsm_output()
18802 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_rsm_output()
18803 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18805 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18813 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_rsm_output()
18814 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18815 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_rsm_output()
18816 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18817 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_rsm_output()
18818 ip->ip_off |= htons(IP_DF); in rack_fast_rsm_output()
18821 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18827 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18830 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_rsm_output()
18832 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18835 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18840 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18841 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18844 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_rsm_output()
18846 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_rsm_output()
18848 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18851 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_fast_rsm_output()
18852 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18854 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_fast_rsm_output()
18857 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18858 if (rack->rack_no_prr) in rack_fast_rsm_output()
18861 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18862 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18863 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18866 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18867 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18869 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18876 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_rsm_output()
18878 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18879 if (rsm && (rsm->r_rtr_cnt > 0)) { in rack_fast_rsm_output()
18884 log.u_bbr.flex5 = rsm->r_fas; in rack_fast_rsm_output()
18885 log.u_bbr.bbr_substate = rsm->r_bas; in rack_fast_rsm_output()
18892 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
18894 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18897 log.u_bbr.delRate = rsm->r_flags; in rack_fast_rsm_output()
18899 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18907 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18912 if (rack->r_is_v6) { in rack_fast_rsm_output()
18913 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_rsm_output()
18914 &inp->inp_route6, in rack_fast_rsm_output()
18922 &inp->inp_route, in rack_fast_rsm_output()
18928 lgb->tlb_errno = error; in rack_fast_rsm_output()
18932 tp->snd_nxt = tp->snd_max; in rack_fast_rsm_output()
18935 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18936 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18937 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18938 rack->r_early = 0; in rack_fast_rsm_output()
18939 rack->r_late = 0; in rack_fast_rsm_output()
18940 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18942 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), in rack_fast_rsm_output()
18943 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); in rack_fast_rsm_output()
18945 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18946 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18950 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); in rack_fast_rsm_output()
18952 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18953 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18954 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18955 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18957 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18958 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18960 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
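/*
 * Charge this retransmission against the PRR send credit,
 * saturating at zero rather than letting the counter wrap.
 */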
18962 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_rsm_output()
18963 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18964 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_fast_rsm_output()
18965 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18971 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_rsm_output()
18975 if (tp->t_rtttime == 0) { in rack_fast_rsm_output()
18976 tp->t_rtttime = ticks; in rack_fast_rsm_output()
18977 tp->t_rtseq = startseq; in rack_fast_rsm_output()
18982 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18983 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18984 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18987 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18988 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18989 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18990 rack->rc_enobuf++; in rack_fast_rsm_output()
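/*
 * ENOBUFS backoff: delay the next attempt by one extra millisecond
 * per ENOBUFS seen so far (1ms, 2ms, 3ms, ...), with the counter
 * saturating at 127 so the delay stays bounded.
 */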
18993 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18995 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
19004 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
19005 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_rsm_output()
19007 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
19008 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_rsm_output()
19010 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
19011 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
19019 return (-1); in rack_fast_rsm_output()
19030 * delay (e.g. trans-continental/oceanic links). Setting the in rack_sndbuf_autoscale()
19052 tp = rack->rc_tp; in rack_sndbuf_autoscale()
19053 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
19054 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
19055 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { in rack_sndbuf_autoscale()
19056 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && in rack_sndbuf_autoscale()
19057 sbused(&so->so_snd) >= in rack_sndbuf_autoscale()
19058 (so->so_snd.sb_hiwat / 8 * 7) && in rack_sndbuf_autoscale()
19059 sbused(&so->so_snd) < V_tcp_autosndbuf_max && in rack_sndbuf_autoscale()
19060 sendwin >= (sbused(&so->so_snd) - in rack_sndbuf_autoscale()
19061 (tp->snd_max - tp->snd_una))) { in rack_sndbuf_autoscale()
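/*
 * All four growth tests held: the buffer is no larger than 125% of
 * the peer's window, it is at least 7/8 full, we are under the
 * autosndbuf cap, and the send window covers the unsent backlog --
 * so grow sb_hiwat by rack_autosndbuf_inc percent.
 */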
19063 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; in rack_sndbuf_autoscale()
19068 scaleup += so->so_snd.sb_hiwat; in rack_sndbuf_autoscale()
19072 so->so_snd.sb_flags &= ~SB_AUTOSIZE; in rack_sndbuf_autoscale()
19087 * the max-burst). We have how much to send and all the info we in rack_fast_output()
19117 if (rack->r_is_v6) { in rack_fast_output()
19118 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19124 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19128 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_output()
19132 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
19133 startseq = tp->snd_max; in rack_fast_output()
19134 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
19135 inp = rack->rc_inp; in rack_fast_output()
19136 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19138 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
19139 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_output()
19140 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_output()
19141 to.to_tsecr = tp->ts_recent; in rack_fast_output()
19145 /* TCP-MD5 (RFC2385). */ in rack_fast_output()
19146 if (tp->t_flags & TF_SIGNATURE) in rack_fast_output()
19151 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
19154 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
19155 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19156 else if (rack->rc_user_set_max_segs) in rack_fast_output()
19157 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
19160 if ((tp->t_flags & TF_TSO) && in rack_fast_output()
19163 (tp->t_port == 0)) in rack_fast_output()
19174 m->m_data += max_linkhdr; in rack_fast_output()
19175 m->m_len = hdrlen; in rack_fast_output()
19176 th = rack->r_ctl.fsb.th; in rack_fast_output()
19185 if_hw_tsomax = tp->t_tsomax; in rack_fast_output()
19186 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_output()
19187 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_output()
19194 max_len = (if_hw_tsomax - hdrlen - in rack_fast_output()
19216 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_output()
19219 sb_offset = tp->snd_max - tp->snd_una; in rack_fast_output()
19220 th->th_seq = htonl(tp->snd_max); in rack_fast_output()
19221 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_output()
19222 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
19223 if (th->th_win == 0) { in rack_fast_output()
19224 tp->t_sndzerowin++; in rack_fast_output()
19225 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_output()
19227 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_output()
19228 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_fast_output()
19232 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_fast_output()
19235 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
19239 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19250 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19251 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19255 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_output()
19259 if (rack->r_is_v6) in rack_fast_output()
19260 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_output()
19262 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_output()
19263 udp->uh_ulen = htons(ulen); in rack_fast_output()
19265 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_output()
19266 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_output()
19267 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_output()
19269 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_output()
19270 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_output()
19271 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_output()
19273 if (rack->r_is_v6) { in rack_fast_output()
19274 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_output()
19275 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_output()
19281 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_output()
19282 ip->ip_tos |= ect; in rack_fast_output()
19287 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_fast_output()
19297 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_output()
19307 if (rack->r_is_v6) { in rack_fast_output()
19308 if (tp->t_port) { in rack_fast_output()
19309 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_output()
19310 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19311 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_output()
19312 th->th_sum = htons(0); in rack_fast_output()
19315 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_output()
19316 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19317 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_output()
19328 if (tp->t_port) { in rack_fast_output()
19329 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_output()
19330 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19331 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19332 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_output()
19333 th->th_sum = htons(0); in rack_fast_output()
19336 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_output()
19337 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19338 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19339 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_output()
19343 KASSERT(ip->ip_v == IPVERSION, in rack_fast_output()
19344 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_output()
19351 * via either fast-path). in rack_fast_output()
19355 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_output()
19356 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_output()
19359 if (rack->r_is_v6) { in rack_fast_output()
19360 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19361 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_output()
19362 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_output()
19363 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19365 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19373 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_output()
19374 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19375 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_output()
19376 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19377 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_output()
19378 ip->ip_off |= htons(IP_DF); in rack_fast_output()
19381 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19385 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_output()
19387 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19390 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19394 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19395 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19398 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_output()
19400 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_output()
19402 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19406 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19410 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19411 if (rack->rack_no_prr) in rack_fast_output()
19414 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19415 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19416 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19419 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19420 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19422 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19426 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_output()
19428 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19430 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19433 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19438 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1) / segsiz); in rack_fast_output()
19444 if (rack->r_is_v6) { in rack_fast_output()
19445 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_output()
19446 &inp->inp_route6, in rack_fast_output()
19456 &inp->inp_route, in rack_fast_output()
19461 lgb->tlb_errno = error; in rack_fast_output()
19468 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19469 rack->rc_hw_nobuf = 0; in rack_fast_output()
19470 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19471 rack->r_early = 0; in rack_fast_output()
19472 rack->r_late = 0; in rack_fast_output()
19473 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19475 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19477 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); in rack_fast_output()
19478 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19479 rack->lt_bw_up = 1; in rack_fast_output()
19481 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19489 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19490 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19492 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19493 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19494 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
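/*
 * Fold the bytes and time accumulated since lt_seq into the
 * long-term b/w totals before the 32-bit sequence delta can exceed
 * 2^31 and be misread as negative.
 */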
19497 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), in rack_fast_output()
19498 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); in rack_fast_output()
19500 if (tp->snd_una == tp->snd_max) { in rack_fast_output()
19501 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_fast_output()
19503 tp->t_acktime = ticks; in rack_fast_output()
19506 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); in rack_fast_output()
19508 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_output()
19510 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_fast_output()
19511 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); in rack_fast_output()
19512 tp->snd_max += len; in rack_fast_output()
19513 tp->snd_nxt = tp->snd_max; in rack_fast_output()
19514 if (rack->rc_new_rnd_needed) { in rack_fast_output()
19515 rack_new_round_starts(tp, rack, tp->snd_max); in rack_fast_output()
19522 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_output()
19526 if (len <= rack->r_ctl.fsb.left_to_send) in rack_fast_output()
19527 rack->r_ctl.fsb.left_to_send -= len; in rack_fast_output()
19529 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19530 if (rack->r_ctl.fsb.left_to_send < segsiz) { in rack_fast_output()
19531 rack->r_fast_output = 0; in rack_fast_output()
19532 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19534 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19536 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19538 if (tp->t_rtttime == 0) { in rack_fast_output()
19539 tp->t_rtttime = ticks; in rack_fast_output()
19540 tp->t_rtseq = startseq; in rack_fast_output()
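/*
 * Classic ticks-based RTT sampling: arm a new measurement only when
 * none is in flight (t_rtttime == 0), timing startseq until acked.
 */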
19543 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && in rack_fast_output()
19546 max_val -= len; in rack_fast_output()
19548 th = rack->r_ctl.fsb.th; in rack_fast_output()
19554 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_output()
19560 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19561 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_output()
19563 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19564 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_output()
19566 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19567 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); in rack_fast_output()
19575 rack->r_fast_output = 0; in rack_fast_output()
19576 return (-1); in rack_fast_output()
19586 rack->r_fast_output = 1; in rack_setup_fast_output()
19587 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_setup_fast_output()
19588 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_setup_fast_output()
19589 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_setup_fast_output()
19590 rack->r_ctl.fsb.tcp_flags = flags; in rack_setup_fast_output()
19591 rack->r_ctl.fsb.left_to_send = orig_len - len; in rack_setup_fast_output()
19592 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { in rack_setup_fast_output()
19594 rack->r_fast_output = 0; in rack_setup_fast_output()
19598 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); in rack_setup_fast_output()
19601 rack->r_ctl.fsb.hw_tls = 1; in rack_setup_fast_output()
19603 rack->r_ctl.fsb.hw_tls = 0; in rack_setup_fast_output()
19604 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), in rack_setup_fast_output()
19606 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), in rack_setup_fast_output()
19607 (tp->snd_max - tp->snd_una))); in rack_setup_fast_output()
19608 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_setup_fast_output()
19609 rack->r_fast_output = 0; in rack_setup_fast_output()
19611 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) in rack_setup_fast_output()
19612 rack->r_ctl.fsb.rfo_apply_push = 1; in rack_setup_fast_output()
19614 rack->r_ctl.fsb.rfo_apply_push = 0; in rack_setup_fast_output()
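/*
 * rfo_apply_push arranges for TH_PUSH on the fast-output send that
 * drains everything queued; when more data will remain in the
 * socket buffer, no PUSH is applied yet.
 */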
19625 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); in rack_get_hpts_pacing_min_for_bw()
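/*
 * Arithmetic sketch: with gp_bw in bytes/sec and min_time in usec,
 * gp_bw * min_time / HPTS_USEC_IN_SEC is the byte count the
 * estimated b/w can move in the minimum hpts sleep, e.g.
 * 12,500,000 B/s over 250 usec yields ~3125 bytes.
 */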
19637 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_check_collapsed()
19638 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_check_collapsed()
19640 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19644 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { in rack_check_collapsed()
19651 if (rsm->r_flags & RACK_ACKED) { in rack_check_collapsed()
19656 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_check_collapsed()
19658 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_check_collapsed()
19659 rack->r_ctl.high_collapse_point)) { in rack_check_collapsed()
19660 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19666 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); in rack_check_collapsed()
19667 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { in rack_check_collapsed()
19668 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19669 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19670 thresh, __LINE__, 6, rsm->r_flags, rsm); in rack_check_collapsed()
19674 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19675 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19676 thresh, __LINE__, 7, rsm->r_flags, rsm); in rack_check_collapsed()
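/*
 * A segment beyond the collapse point only becomes eligible for
 * retransmission once it has aged past the RACK threshold (logged
 * as case 6 above); otherwise we log case 7 and keep waiting.
 */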
19683 if ((rack->full_size_rxt == 0) && in rack_validate_sizes()
19684 (rack->shape_rxt_to_pacing_min == 0) && in rack_validate_sizes()
19687 } else if (rack->shape_rxt_to_pacing_min && in rack_validate_sizes()
19688 rack->gp_ready) { in rack_validate_sizes()
19777 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_output()
19782 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); in rack_output()
19783 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_output()
19785 if (tp->t_flags & TF_TOE) { in rack_output()
19792 if (rack->rack_deferred_inited == 0) { in rack_output()
19803 * SYN|ACK and those sent by the retransmit timer. in rack_output()
19805 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19806 (tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
19807 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ in rack_output()
19808 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ in rack_output()
19815 if (rack->r_state) { in rack_output()
19817 isipv6 = rack->r_is_v6; in rack_output()
19819 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; in rack_output()
19825 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && in rack_output()
19826 tcp_in_hpts(rack->rc_tp)) { in rack_output()
19828 * We are on the hpts for some timer but not hptsi output. in rack_output()
19834 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19835 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { in rack_output()
19837 delayed = cts - rack->r_ctl.rc_last_output_to; in rack_output()
19842 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_output()
19859 if (rack->rc_in_persist) { in rack_output()
19860 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19861 /* Timer is not running */ in rack_output()
19869 if ((rack->rc_ack_required == 1) && in rack_output()
19870 (rack->r_timer_override == 0)){ in rack_output()
19872 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19873 /* Timer is not running */ in rack_output()
19881 if ((rack->r_timer_override) || in rack_output()
19882 (rack->rc_ack_can_sendout_data) || in rack_output()
19884 (tp->t_state < TCPS_ESTABLISHED)) { in rack_output()
19885 rack->rc_ack_can_sendout_data = 0; in rack_output()
19886 if (tcp_in_hpts(rack->rc_tp)) in rack_output()
19887 tcp_hpts_remove(rack->rc_tp); in rack_output()
19888 } else if (tcp_in_hpts(rack->rc_tp)) { in rack_output()
19895 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
19896 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); in rack_output()
19898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
19899 tp->tcp_cnt_counters[SND_BLOCKED]++; in rack_output()
19907 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19908 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_output()
19909 early = rack->r_ctl.rc_last_output_to - cts; in rack_output()
19912 if (delayed && (rack->rc_always_pace == 1)) { in rack_output()
19913 rack->r_ctl.rc_agg_delayed += delayed; in rack_output()
19914 rack->r_late = 1; in rack_output()
19915 } else if (early && (rack->rc_always_pace == 1)) { in rack_output()
19916 rack->r_ctl.rc_agg_early += early; in rack_output()
19917 rack->r_early = 1; in rack_output()
19918 } else if (rack->rc_always_pace == 0) { in rack_output()
19919 /* Non-paced: we are neither early nor late */ in rack_output()
19920 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; in rack_output()
19921 rack->r_early = rack->r_late = 0; in rack_output()
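/*
 * rc_agg_delayed/rc_agg_early accumulate how far the pacer ran
 * behind or ahead of rc_last_output_to; later slot calculations can
 * draw these totals back down to smooth the effective pacing rate.
 */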
19924 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_output()
19925 rack->r_wanted_output = 0; in rack_output()
19926 rack->r_timer_override = 0; in rack_output()
19927 if ((tp->t_state != rack->r_state) && in rack_output()
19928 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
19931 if ((rack->r_fast_output) && in rack_output()
19933 (tp->rcv_numsacks == 0)) { in rack_output()
19941 inp = rack->rc_inp; in rack_output()
19942 so = inp->inp_socket; in rack_output()
19943 sb = &so->so_snd; in rack_output()
19947 inp = rack->rc_inp; in rack_output()
19951 * by the retransmit timer. in rack_output()
19953 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19954 ((tp->t_state == TCPS_SYN_RECEIVED) || in rack_output()
19955 (tp->t_state == TCPS_SYN_SENT)) && in rack_output()
19956 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ in rack_output()
19957 (tp->t_rxtshift == 0)) { /* not a retransmit */ in rack_output()
19958 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_output()
19959 so = inp->inp_socket; in rack_output()
19960 sb = &so->so_snd; in rack_output()
19969 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); in rack_output()
19970 if (tp->t_idle_reduce) { in rack_output()
19971 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) in rack_output()
19974 tp->t_flags &= ~TF_LASTIDLE; in rack_output()
19976 if (tp->t_flags & TF_MORETOCOME) { in rack_output()
19977 tp->t_flags |= TF_LASTIDLE; in rack_output()
19981 if ((tp->snd_una == tp->snd_max) && in rack_output()
19982 rack->r_ctl.rc_went_idle_time && in rack_output()
19983 (cts > rack->r_ctl.rc_went_idle_time)) { in rack_output()
19984 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); in rack_output()
19986 /* Count as a probe rtt */ in rack_output()
19987 if (rack->in_probe_rtt == 0) { in rack_output()
19988 rack->r_ctl.rc_lower_rtt_us_cts = cts; in rack_output()
19989 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19990 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19991 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
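/*
 * Credit a sufficiently long idle period as a probe-rtt by stamping
 * all the probertt timestamps with "now", so a forced probe-rtt is
 * not scheduled right after the connection wakes back up.
 */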
19998 (rack->r_ctl.fsb.tcp_ip_hdr) && in rack_output()
19999 (rack->r_fsb_inited == 0) && in rack_output()
20000 (rack->r_state != TCPS_CLOSED)) in rack_output()
20001 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); in rack_output()
20002 if (rack->rc_sendvars_notset == 1) { in rack_output()
20003 rack->rc_sendvars_notset = 0; in rack_output()
20005 * Make sure any TCP timers (keep-alive) is not running. in rack_output()
20009 if ((rack->rack_no_prr == 1) && in rack_output()
20010 (rack->rc_always_pace == 0)) { in rack_output()
20013 * no-pacing enabled and prr is turned off that in rack_output()
20021 rack->rack_no_prr = 0; in rack_output()
20023 if ((rack->pcm_enabled == 1) && in rack_output()
20024 (rack->pcm_needed == 0) && in rack_output()
20032 if (tp->t_srtt) in rack_output()
20033 rtts_idle = tot_idle / tp->t_srtt; in rack_output()
20036 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_output()
20037 rack->r_ctl.pcm_idle_rounds += rtts_idle; in rack_output()
20038 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_output()
20039 rack->pcm_needed = 1; in rack_output()
20040 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round); in rack_output()
20049 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_output()
20051 if (rack->r_ctl.rc_pace_max_segs == 0) in rack_output()
20052 pace_max_seg = rack->rc_user_set_max_segs * segsiz; in rack_output()
20054 pace_max_seg = rack->r_ctl.rc_pace_max_segs; in rack_output()
20055 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20056 (rack->r_ctl.pcm_max_seg == 0)) { in rack_output()
20062 rack->r_ctl.pcm_max_seg = rc_init_window(rack); in rack_output()
20063 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { in rack_output()
20067 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; in rack_output()
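/*
 * A path-capacity measurement (PCM) needs a burst large enough to
 * be meaningful, so it is floored at 10 segments when the initial
 * window would be smaller.
 */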
20070 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { in rack_output()
20073 if (tp->snd_wnd > ctf_outstanding(tp)) in rack_output()
20074 rw_avail = tp->snd_wnd - ctf_outstanding(tp); in rack_output()
20077 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) in rack_output()
20078 cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
20081 if ((cwa >= rack->r_ctl.pcm_max_seg) && in rack_output()
20082 (rw_avail > rack->r_ctl.pcm_max_seg)) { in rack_output()
20084 pace_max_seg = rack->r_ctl.pcm_max_seg; in rack_output()
20086 rack->r_fast_output = 0; in rack_output()
20090 cwa, rack->r_ctl.pcm_max_seg, rw_avail); in rack_output()
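/*
 * A PCM round is started only when both the cwnd headroom (cwa) and
 * the receive-window headroom (rw_avail) can absorb a full
 * pcm_max_seg burst; the log calls above record which case was hit.
 */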
20093 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20094 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_output()
20095 flags = tcp_outflags[tp->t_state]; in rack_output()
20096 while (rack->rc_free_cnt < rack_free_cache) { in rack_output()
20102 so = inp->inp_socket; in rack_output()
20103 sb = &so->so_snd; in rack_output()
20106 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_output()
20107 rack->rc_free_cnt++; in rack_output()
20114 SOCK_SENDBUF_LOCK(inp->inp_socket); in rack_output()
20115 so = inp->inp_socket; in rack_output()
20116 sb = &so->so_snd; in rack_output()
20119 if (rack->r_ctl.rc_resend) { in rack_output()
20120 /* Retransmit timer */ in rack_output()
20121 rsm = rack->r_ctl.rc_resend; in rack_output()
20122 rack->r_ctl.rc_resend = NULL; in rack_output()
20123 len = rsm->r_end - rsm->r_start; in rack_output()
20126 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20129 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20130 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20132 } else if (rack->r_collapse_point_valid && in rack_output()
20139 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); in rack_output()
20140 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_output()
20142 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_output()
20143 rack->r_ctl.high_collapse_point)) in rack_output()
20144 rack->r_collapse_point_valid = 0; in rack_output()
20148 len = rsm->r_end - rsm->r_start; in rack_output()
20149 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20154 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_output()
20155 ((rsm->r_flags & RACK_MUST_RXT) == 0) && in rack_output()
20156 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { in rack_output()
20157 /* Enter recovery if not induced by a time-out */ in rack_output()
20158 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_output()
20161 if (SEQ_LT(rsm->r_start, tp->snd_una)) { in rack_output()
20163 tp, rack, rsm, rsm->r_start, tp->snd_una); in rack_output()
20166 len = rsm->r_end - rsm->r_start; in rack_output()
20167 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20170 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20171 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20180 } else if (rack->r_ctl.rc_tlpsend) { in rack_output()
20191 rsm = rack->r_ctl.rc_tlpsend; in rack_output()
20193 rsm->r_flags |= RACK_TLP; in rack_output()
20194 rack->r_ctl.rc_tlpsend = NULL; in rack_output()
20196 tlen = rsm->r_end - rsm->r_start; in rack_output()
20199 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20202 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20203 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20204 cwin = min(tp->snd_wnd, tlen); in rack_output()
20207 if (rack->r_must_retran && in rack_output()
20209 (SEQ_GT(tp->snd_max, tp->snd_una)) && in rack_output()
20214 * a) This is a non-sack connection, we had a time-out in rack_output()
20228 sendwin = min(tp->snd_wnd, tp->snd_cwnd); in rack_output()
20229 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); in rack_output()
20234 so = inp->inp_socket; in rack_output()
20235 sb = &so->so_snd; in rack_output()
20240 * outstanding/not-acked should be marked. in rack_output()
20243 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_output()
20246 rack->r_must_retran = 0; in rack_output()
20247 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20248 so = inp->inp_socket; in rack_output()
20249 sb = &so->so_snd; in rack_output()
20252 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { in rack_output()
20257 rack->r_must_retran = 0; in rack_output()
20258 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20263 len = rsm->r_end - rsm->r_start; in rack_output()
20264 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20266 if ((rack->full_size_rxt == 0) && in rack_output()
20267 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20270 else if (rack->shape_rxt_to_pacing_min && in rack_output()
20271 rack->gp_ready) { in rack_output()
20292 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_output()
20294 if (!rack->alloc_limit_reported) { in rack_output()
20295 rack->alloc_limit_reported = 1; in rack_output()
20298 so = inp->inp_socket; in rack_output()
20299 sb = &so->so_snd; in rack_output()
20302 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { in rack_output()
20304 len--; in rack_output()
20313 if (rsm && rack->r_fsb_inited && in rack_output()
20315 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { in rack_output()
20322 so = inp->inp_socket; in rack_output()
20323 sb = &so->so_snd; in rack_output()
20329 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && in rack_output()
20330 rack->rack_enable_scwnd) { in rack_output()
20332 if (rack->gp_ready && in rack_output()
20333 (rack->rack_attempted_scwnd == 0) && in rack_output()
20334 (rack->r_ctl.rc_scw == NULL) && in rack_output()
20335 tp->t_lib) { in rack_output()
20338 rack->rack_attempted_scwnd = 1; in rack_output()
20339 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, in rack_output()
20340 &rack->r_ctl.rc_scw_index, in rack_output()
20343 if (rack->r_ctl.rc_scw && in rack_output()
20344 (rack->rack_scwnd_is_idle == 1) && in rack_output()
20345 sbavail(&so->so_snd)) { in rack_output()
20347 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
20348 rack->rack_scwnd_is_idle = 0; in rack_output()
20350 if (rack->r_ctl.rc_scw) { in rack_output()
20352 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, in rack_output()
20353 rack->r_ctl.rc_scw_index, in rack_output()
20354 tp->snd_cwnd, tp->snd_wnd, segsiz); in rack_output()
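/*
 * Shared cwnd: connections grouped under one shared-cwnd object
 * split a common congestion window, and cwnd_to_use is refreshed
 * from that shared state on every output pass.
 */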
20362 if (tp->t_flags & TF_NEEDFIN) in rack_output()
20364 if (tp->t_flags & TF_NEEDSYN) in rack_output()
20368 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_output()
20375 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_output()
20376 (tp->t_flags & TF_FASTOPEN))) { in rack_output()
20386 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) in rack_output()
20387 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20390 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { in rack_output()
20391 if (rack->r_ctl.rc_tlp_new_data) { in rack_output()
20393 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { in rack_output()
20394 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); in rack_output()
20396 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { in rack_output()
20397 if (tp->snd_wnd > sb_offset) in rack_output()
20398 len = tp->snd_wnd - sb_offset; in rack_output()
20402 len = rack->r_ctl.rc_tlp_new_data; in rack_output()
20404 rack->r_ctl.rc_tlp_new_data = 0; in rack_output()
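/*
 * TLP carrying new data: rc_tlp_new_data is clipped to what the
 * socket buffer and the peer's window allow, then consumed in one
 * shot.
 */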
20408 if ((rack->r_ctl.crte == NULL) && in rack_output()
20409 IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20410 (rack->full_size_rxt == 0) && in rack_output()
20411 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20421 } else if (rack->shape_rxt_to_pacing_min && in rack_output()
20422 rack->gp_ready) { in rack_output()
20440 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20441 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { in rack_output()
20442 if (tp->snd_wnd > outstanding) { in rack_output()
20443 len = tp->snd_wnd - outstanding; in rack_output()
20448 len = avail - sb_offset; in rack_output()
20456 len = avail - sb_offset; in rack_output()
20461 if (len > rack->r_ctl.rc_prr_sndcnt) { in rack_output()
20462 len = rack->r_ctl.rc_prr_sndcnt; in rack_output()
20474 * let us send a lot as well :-) in rack_output()
20476 if (rack->r_ctl.rc_prr_sendalot == 0) { in rack_output()
20488 leftinsb = sbavail(sb) - sb_offset; in rack_output()
20495 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
20502 !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20514 * SYN-SENT state and if segment contains data and if we don't know in rack_output()
20518 SEQ_GT(tp->snd_max, tp->snd_una) && in rack_output()
20520 (tp->t_rxtshift == 0))) { in rack_output()
20525 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20526 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_output()
20534 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { in rack_output()
20541 * - When retransmitting SYN|ACK on a passively-created socket in rack_output()
20543 * - When retransmitting SYN on an actively created socket in rack_output()
20545 * - When sending a zero-length cookie (cookie request) on an in rack_output()
20548 * - When the socket is in the CLOSED state (RST is being sent) in rack_output()
20550 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20551 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || in rack_output()
20552 ((tp->t_state == TCPS_SYN_SENT) && in rack_output()
20553 (tp->t_tfo_client_cookie_len == 0)) || in rack_output()
20558 /* Without fast-open there should never be data sent on a SYN */ in rack_output()
20559 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20573 if ((tp->snd_wnd == 0) && in rack_output()
20574 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20575 (tp->snd_una == tp->snd_max) && in rack_output()
20577 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
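/*
 * Zero send window with nothing in flight: move to the persist
 * timer instead of arming a retransmit timer that would have
 * nothing to retransmit.
 */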
20587 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20588 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20590 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20600 if (tp->snd_max == tp->snd_una) { in rack_output()
20605 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20608 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20609 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20614 * not send at least a min size (rxt timer in rack_output()
20622 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < in rack_output()
20623 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20624 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20625 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20626 (TCPS_HAVEESTABLISHED(tp->t_state))) { in rack_output()
20636 } else if ((rack->r_ctl.crte != NULL) && in rack_output()
20637 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && in rack_output()
20639 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && in rack_output()
20640 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20660 * defeats the point of hw-pacing (i.e. to help us get in rack_output()
20675 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP in rack_output()
20689 * Pre-calculate here as we save another lookup into the darknesses in rack_output()
20708 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && in rack_output()
20709 (tp->t_port == 0) && in rack_output()
20710 ((tp->t_flags & TF_SIGNATURE) == 0) && in rack_output()
20717 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20718 if (tp->t_flags & TF_SENTFIN) { in rack_output()
20723 outstanding--; in rack_output()
20726 if ((rsm->r_flags & RACK_HAS_FIN) == 0) in rack_output()
20730 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), in rack_output()
20731 (long)TCP_MAXWIN << tp->rcv_scale); in rack_output()
20735 * conditions when len is non-zero: in rack_output()
20737 * - We have a full segment (or more with TSO) in rack_output()
20738 * - This is the last buffer in a write()/send() and we are either idle or running NODELAY in rack_output()
20739 * - we've timed out (e.g. persist timer) in rack_output()
20740 * - we have more than 1/2 the maximum send window's worth of data (the receiver may have limited the window size) in rack_output()
20741 * - we need to retransmit in rack_output()
20753 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ in rack_output()
20754 (idle || (tp->t_flags & TF_NODELAY)) && in rack_output()
20756 (tp->t_flags & TF_NOPUSH) == 0) { in rack_output()
20760 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ in rack_output()
20764 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { in rack_output()
20772 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && in rack_output()
20809 * pending (it will get piggy-backed on it) or the remote side in rack_output()
20810 * already has done a half-close and won't send more data. Skip in rack_output()
20811 * this if the connection is in T/TCP half-open state. in rack_output()
20813 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && in rack_output()
20814 !(tp->t_flags & TF_DELACK) && in rack_output()
20815 !TCPS_HAVERCVDFIN(tp->t_state)) { in rack_output()
20819 * tp->rcv_scale. in rack_output()
20825 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { in rack_output()
20826 oldwin = (tp->rcv_adv - tp->rcv_nxt); in rack_output()
20828 adv -= oldwin; in rack_output()
20841 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) in rack_output()
20845 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || in rack_output()
20846 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || in rack_output()
20847 so->so_rcv.sb_hiwat <= 8 * segsiz)) { in rack_output()
20851 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { in rack_output()
20860 * is also a catch-all for the retransmit timer timeout case. in rack_output()
20862 if (tp->t_flags & TF_ACKNOW) { in rack_output()
20866 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { in rack_output()
20875 (tp->snd_max == tp->snd_una)) { in rack_output()
20888 if ((tp->t_flags & TF_FASTOPEN) == 0 && in rack_output()
20891 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
20892 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
20901 * the peer wait for the delayed-ack timer to run off in rack_output()
20907 rack->r_ctl.fsb.recwin = recwin; in rack_output()
20913 rack->r_fsb_inited && in rack_output()
20914 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20915 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
20916 (rack->r_must_retran == 0) && in rack_output()
20917 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
20920 ((orig_len - len) >= segsiz) && in rack_output()
20927 rack->r_fast_output = 0; in rack_output()
20932 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
20933 tp->snd_nxt = tp->snd_max; in rack_output()
20936 uint32_t seq = tp->gput_ack; in rack_output()
20938 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
20941 * Mark the last-sent rsm as just-returned (hinting in rack_output()
20942 * that delayed ack may play a role in any rtt measurement). in rack_output()
20944 rsm->r_just_ret = 1; in rack_output()
20947 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
20948 rack->r_early = 0; in rack_output()
20949 rack->r_late = 0; in rack_output()
20950 rack->r_ctl.rc_agg_early = 0; in rack_output()
20952 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), in rack_output()
20953 minseg)) >= tp->snd_wnd) { in rack_output()
20956 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20957 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20959 /* We are limited by what's available -- app limited */ in rack_output()
20961 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20962 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20964 ((tp->t_flags & TF_NODELAY) == 0) && in rack_output()
20971 * don't send. Another app-limited case. in rack_output()
20974 } else if (tp->t_flags & TF_NOPUSH) { in rack_output()
20985 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20986 (rack->rack_no_prr == 0) && in rack_output()
20987 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { in rack_output()
21042 if ((tp->t_flags & TF_GPUTINPROG) && in rack_output()
21043 SEQ_GT(tp->gput_ack, tp->snd_max)) { in rack_output()
21044 tp->gput_ack = tp->snd_max; in rack_output()
21045 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_output()
21049 tp->t_flags &= ~TF_GPUTINPROG; in rack_output()
21050 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_output()
21051 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_output()
21052 tp->gput_seq, in rack_output()
21058 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
21059 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_output()
21060 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_output()
21061 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_output()
21068 if (rack->r_ctl.rc_end_appl) in rack_output()
21069 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_output()
21070 rack->r_ctl.rc_end_appl = rsm; in rack_output()
21072 rsm->r_flags |= RACK_APP_LIMITED; in rack_output()
21073 rack->r_ctl.rc_app_limited_cnt++; in rack_output()
21077 rack->r_ctl.rc_app_limited_cnt, seq, in rack_output()
21078 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); in rack_output()
21082 if ((tp->snd_max == tp->snd_una) && in rack_output()
21083 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21085 (sbavail(sb) > tp->snd_wnd) && in rack_output()
21086 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { in rack_output()
21087 /* Yes, let's make sure to move to persist before timer-start */ in rack_output()
21088 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_output()
21095 rack->r_ctl.rc_scw) { in rack_output()
21096 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
21097 rack->rack_scwnd_is_idle = 1; in rack_output()
21103 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21104 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
21106 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21107 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_output()
21109 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21110 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
21114 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21115 tp->tcp_cnt_counters[SND_LIMITED]++; in rack_output()
21117 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21118 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); in rack_output()
21126 if ((rack->r_ctl.crte != NULL) && in rack_output()
21128 ((rack->rc_hw_nobuf == 1) || in rack_output()
21138 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
21139 rack->r_ctl.rc_agg_early = 0; in rack_output()
21140 rack->r_early = 0; in rack_output()
21141 rack->r_late = 0; in rack_output()
21159 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21160 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21161 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21170 * the peer wait for the delayed-ack timer to run off in rack_output()
21183 (rack->pcm_in_progress == 0) && in rack_output()
21184 (rack->r_ctl.pcm_max_seg > 0) && in rack_output()
21185 (len >= rack->r_ctl.pcm_max_seg)) { in rack_output()
21188 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21190 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21196 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; in rack_output()
21198 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; in rack_output()
21220 * be snd_max-1, else it's snd_max. in rack_output()
21224 rack_seq = tp->iss; in rack_output()
21226 (tp->t_flags & TF_SENTFIN)) in rack_output()
21227 rack_seq = tp->snd_max - 1; in rack_output()
21229 rack_seq = tp->snd_max; in rack_output()
21231 rack_seq = rsm->r_start; in rack_output()
21235 * established connection segments. Options for SYN-ACK segments in rack_output()
21239 if ((tp->t_flags & TF_NOOPT) == 0) { in rack_output()
21242 to.to_mss = tcp_mssopt(&inp->inp_inc); in rack_output()
21243 if (tp->t_port) in rack_output()
21244 to.to_mss -= V_tcp_udp_tunneling_overhead; in rack_output()
21254 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
21255 (tp->t_rxtshift == 0)) { in rack_output()
21256 if (tp->t_state == TCPS_SYN_RECEIVED) { in rack_output()
21259 (u_int8_t *)&tp->t_tfo_cookie.server; in rack_output()
21262 } else if (tp->t_state == TCPS_SYN_SENT) { in rack_output()
21264 tp->t_tfo_client_cookie_len; in rack_output()
21266 tp->t_tfo_cookie.client; in rack_output()
21281 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { in rack_output()
21282 to.to_wscale = tp->request_r_scale; in rack_output()
21286 if ((tp->t_flags & TF_RCVD_TSTMP) || in rack_output()
21287 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { in rack_output()
21290 if ((rack->r_rcvpath_rtt_up == 1) && in rack_output()
21291 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { in rack_output()
21299 * our ack-probe. in rack_output()
21305 to.to_tsval = ts_to_use + tp->ts_offset; in rack_output()
21306 to.to_tsecr = tp->ts_recent; in rack_output()
21309 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
21310 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && in rack_output()
21311 (tp->snd_una == tp->snd_max) && in rack_output()
21314 (rack->r_ctl.current_round != 0) && in rack_output()
21316 (rack->r_rcvpath_rtt_up == 0)) { in rack_output()
21317 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; in rack_output()
21318 rack->r_ctl.last_time_of_arm_rcv = cts; in rack_output()
21319 rack->r_rcvpath_rtt_up = 1; in rack_output()
21321 rack_seq--; in rack_output()
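/*
 * Receive-path RTT probe (sketch of the idea): pulling rack_seq
 * back one byte makes this segment look like a duplicate the peer
 * must ack immediately, and the echoed timestamp then times the
 * receive path independently of delayed-ack behavior.
 */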
21325 if (tp->rfbuf_ts == 0 && in rack_output()
21326 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { in rack_output()
21327 tp->rfbuf_ts = ms_cts; in rack_output()
21330 if (tp->t_flags & TF_SACK_PERMIT) { in rack_output()
21333 else if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21334 tp->rcv_numsacks > 0) { in rack_output()
21336 to.to_nsacks = tp->rcv_numsacks; in rack_output()
21337 to.to_sacks = (u_char *)tp->sackblks; in rack_output()
21341 /* TCP-MD5 (RFC2385). */ in rack_output()
21342 if (tp->t_flags & TF_SIGNATURE) in rack_output()
21352 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && in rack_output()
21356 if (tp->t_port) { in rack_output()
21362 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21363 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
21365 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21366 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
21379 if (inp->inp_options) in rack_output()
21380 ipoptlen = inp->inp_options->m_len - in rack_output()
21393 if (len + optlen + ipoptlen > tp->t_maxseg) { in rack_output()
21400 if_hw_tsomax = tp->t_tsomax; in rack_output()
21401 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_output()
21402 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_output()
21412 max_len = (if_hw_tsomax - hdrlen - in rack_output()
21426 max_len = (tp->t_maxseg - optlen); in rack_output()
21431 len -= moff; in rack_output()
21448 if (tp->t_flags & TF_NEEDFIN) { in rack_output()
21453 if (optlen + ipoptlen >= tp->t_maxseg) { in rack_output()
21467 len = tp->t_maxseg - optlen - ipoptlen; in rack_output()
21499 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21500 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21509 * the peer wait for the delayed-ack timer to run off in rack_output()
21521 hw_tls = tp->t_nic_ktls_xmit != 0; in rack_output()
21550 m->m_data += max_linkhdr; in rack_output()
21551 m->m_len = hdrlen; in rack_output()
21560 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { in rack_output()
21570 m->m_len += len; in rack_output()
21585 m->m_next = tcp_m_copym( in rack_output()
21593 if (len <= (tp->t_maxseg - optlen)) { in rack_output()
21602 if (m->m_next == NULL) { in rack_output()
21611 if (rsm && (rsm->r_flags & RACK_TLP)) { in rack_output()
21619 tp->t_sndrexmitpack++; in rack_output()
21624 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_output()
21631 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_output()
21650 if (tp->t_flags & TF_ACKNOW) in rack_output()
21669 m->m_data += max_linkhdr; in rack_output()
21670 m->m_len = hdrlen; in rack_output()
21673 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_output()
21677 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21680 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21684 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21686 th = rack->r_ctl.fsb.th; in rack_output()
21687 udp = rack->r_ctl.fsb.udp; in rack_output()
21691 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21694 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21695 udp->uh_ulen = htons(ulen); in rack_output()
21701 if (tp->t_port) { in rack_output()
21703 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21704 udp->uh_dport = tp->t_port; in rack_output()
21705 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21706 udp->uh_ulen = htons(ulen); in rack_output()
21710 tcpip_fillheaders(inp, tp->t_port, ip6, th); in rack_output()
21716 if (tp->t_port) { in rack_output()
21718 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21719 udp->uh_dport = tp->t_port; in rack_output()
21720 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21721 udp->uh_ulen = htons(ulen); in rack_output()
21725 tcpip_fillheaders(inp, tp->t_port, ip, th); in rack_output()
21734 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { in rack_output()
21738 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_output()
21739 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_output()
21741 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
21742 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_output()
21743 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_output()
21746 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_output()
21747 ip6->ip6_flow |= htonl(ect << 20); in rack_output()
21753 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_output()
21754 ip->ip_tos |= ect; in rack_output()
21758 th->th_seq = htonl(rack_seq); in rack_output()
21759 th->th_ack = htonl(tp->rcv_nxt); in rack_output()
21769 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && in rack_output()
21773 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && in rack_output()
21774 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) in rack_output()
21775 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); in rack_output()
21784 th->th_win = htons((u_short) in rack_output()
21785 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); in rack_output()
21788 recwin = roundup2(recwin, 1 << tp->rcv_scale); in rack_output()
21789 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); in rack_output()
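/*
 * Round the advertised window up to the window-scale granularity
 * first, so the right shift cannot silently shrink what we
 * advertise.
 */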
21792 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 in rack_output()
21799 if (th->th_win == 0) { in rack_output()
21800 tp->t_sndzerowin++; in rack_output()
21801 tp->t_flags |= TF_RXWIN0SENT; in rack_output()
21803 tp->t_flags &= ~TF_RXWIN0SENT; in rack_output()
21804 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_output()
21806 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21810 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_output()
21830 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21833 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21837 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_output()
21843 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_output()
21853 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_output()
21868 if (tp->t_port) { in rack_output()
21869 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_output()
21870 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21871 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_output()
21872 th->th_sum = htons(0); in rack_output()
21875 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_output()
21876 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21877 th->th_sum = in6_cksum_pseudo(ip6, in rack_output()
21888 if (tp->t_port) { in rack_output()
21889 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_output()
21890 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21891 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21892 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_output()
21893 th->th_sum = htons(0); in rack_output()
21896 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_output()
21897 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21898 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21899 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_output()
21903 KASSERT(ip->ip_v == IPVERSION, in rack_output()
21904 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_output()
21917 KASSERT(len > tp->t_maxseg - optlen, in rack_output()
21919 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_output()
21920 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; in rack_output()
21930 if ((rack->r_ctl.crte != NULL) && in rack_output()
21931 (rack->rc_hw_nobuf == 0) && in rack_output()
21936 if (tcp_bblogging_on(rack->rc_tp)) { in rack_output()
21940 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_output()
21941 if (rack->rack_no_prr) in rack_output()
21944 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_output()
21945 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_output()
21946 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_output()
21949 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_output()
21950 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_output()
21952 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_output()
21955 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_output()
21956 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_output()
21958 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_output()
21972 log.u_bbr.pkts_out = tp->t_maxseg; in rack_output()
21974 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
21975 if (rsm && (rsm->r_rtr_cnt > 0)) { in rack_output()
21980 log.u_bbr.flex5 = rsm->r_fas; in rack_output()
21981 log.u_bbr.bbr_substate = rsm->r_bas; in rack_output()
21989 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_output()
21996 log.u_bbr.delRate = rsm->r_flags; in rack_output()
21998 log.u_bbr.delRate |= rack->r_must_retran; in rack_output()
22002 log.u_bbr.delRate = rack->r_must_retran; in rack_output()
22006 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, in rack_output()
22017 	 * m->m_pkthdr.len should have been set before cksum calculation, in rack_output()
22028 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); in rack_output()
22035 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_output()
22037 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_output()
22038 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22040 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22042 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22048 inp->in6p_outputopts, in rack_output()
22049 &inp->inp_route6, in rack_output()
22053 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) in rack_output()
22054 mtu = inp->inp_route6.ro_nh->nh_mtu; in rack_output()
22062 ip->ip_len = htons(m->m_pkthdr.len); in rack_output()
22064 if (inp->inp_vflag & INP_IPV6PROTO) in rack_output()
22065 ip->ip_ttl = in6_selecthlim(inp, NULL); in rack_output()
22067 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; in rack_output()
22078 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_output()
22079 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22080 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_output()
22081 ip->ip_off |= htons(IP_DF); in rack_output()
22084 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22087 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22094 inp->inp_options, in rack_output()
22098 &inp->inp_route, in rack_output()
22101 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) in rack_output()
22102 mtu = inp->inp_route.ro_nh->nh_mtu; in rack_output()
22106 lgb->tlb_errno = error; in rack_output()
22122 rack->pcm_in_progress = 1; in rack_output()
22123 rack->pcm_needed = 0; in rack_output()
22124 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
22127 if (rack->lt_bw_up == 0) { in rack_output()
22128 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); in rack_output()
22129 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22130 rack->lt_bw_up = 1; in rack_output()
22131 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { in rack_output()
22138 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_output()
22139 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22141 if (tmark > rack->r_ctl.lt_timemark) { in rack_output()
22142 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_output()
22143 rack->r_ctl.lt_timemark = tmark; in rack_output()
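The lt_bw bookkeeping above accumulates delta-bytes and delta-time whenever the unsampled span would approach 2^31; long-term bandwidth is then simply bytes over time. A minimal sketch of the same accounting, with illustrative field names of my own:

#include <stdint.h>

/* Long-term bandwidth bookkeeping (names are illustrative). */
struct lt_bw {
        uint64_t bytes;         /* delivered bytes accumulated */
        uint64_t time;          /* usecs those bytes spanned */
        uint32_t seq;           /* snd_una at the last sample */
        uint64_t timemark;      /* usec stamp of the last sample */
};

static void
lt_bw_sample(struct lt_bw *lt, uint32_t snd_una, uint64_t now)
{
        lt->bytes += (uint32_t)(snd_una - lt->seq);     /* serial math */
        lt->seq = snd_una;
        if (now > lt->timemark) {
                lt->time += now - lt->timemark;
                lt->timemark = now;
        }
}

/* Bytes per second, or 0 until any time has accumulated. */
static uint64_t
lt_bw_bps(const struct lt_bw *lt)
{
        return (lt->time ? (lt->bytes * 1000000) / lt->time : 0);
}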
22147 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_output()
22151 rack->rc_last_sent_tlp_past_cumack = 0; in rack_output()
22152 rack->rc_last_sent_tlp_seq_valid = 1; in rack_output()
22153 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_output()
22154 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_output()
22156 if (rack->rc_hw_nobuf) { in rack_output()
22157 rack->rc_hw_nobuf = 0; in rack_output()
22158 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22159 rack->r_early = 0; in rack_output()
22160 rack->r_late = 0; in rack_output()
22161 rack->r_ctl.rc_agg_early = 0; in rack_output()
22165 rack->rc_gp_saw_rec = 1; in rack_output()
22167 if (cwnd_to_use > tp->snd_ssthresh) { in rack_output()
22169 rack->rc_gp_saw_ca = 1; in rack_output()
22172 rack->rc_gp_saw_ss = 1; in rack_output()
22175 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22176 (tp->t_flags & TF_SACK_PERMIT) && in rack_output()
22177 tp->rcv_numsacks > 0) in rack_output()
22187 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_output()
22192 if ((rack->rack_no_prr == 0) && in rack_output()
22195 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_output()
22196 rack->r_ctl.rc_prr_sndcnt -= len; in rack_output()
22198 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
22206 rsm->r_flags &= ~RACK_TLP; in rack_output()
22212 (tp->snd_una == tp->snd_max)) in rack_output()
22213 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_output()
22220 tcp_seq startseq = tp->snd_max; in rack_output()
22224 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; in rack_output()
22235 rack->rc_tlp_in_progress = 0; in rack_output()
22236 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_output()
22244 rack->rc_tlp_in_progress = 1; in rack_output()
22245 rack->r_ctl.rc_tlp_cnt_out++; in rack_output()
22253 if ((tp->snd_una == tp->snd_max) && (len > 0)) { in rack_output()
22259 tp->t_acktime = ticks; in rack_output()
22266 ((tp->t_flags & TF_SENTSYN) == 0)) { in rack_output()
22267 tp->snd_max++; in rack_output()
22268 tp->t_flags |= TF_SENTSYN; in rack_output()
22271 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_output()
22272 tp->snd_max++; in rack_output()
22273 tp->t_flags |= TF_SENTFIN; in rack_output()
22276 tp->snd_max += len; in rack_output()
22277 if (rack->rc_new_rnd_needed) { in rack_output()
22278 rack_new_round_starts(tp, rack, tp->snd_max); in rack_output()
22286 if (tp->t_rtttime == 0) { in rack_output()
22287 tp->t_rtttime = ticks; in rack_output()
22288 tp->t_rtseq = startseq; in rack_output()
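t_rtttime/t_rtseq implement the classic single-sample RTT timer: arm it on the first untimed transmission, take a sample only when the ACK moves past the timed sequence, and never time a retransmission (Karn's rule). A sketch under those assumptions, with names of my own:

#include <stdint.h>

struct rtt_timer {
        uint32_t start;         /* 0 == no measurement in flight */
        uint32_t seq;           /* first sequence being timed */
};

/* Arm only if idle: one sample outstanding at a time. */
static void
rtt_arm(struct rtt_timer *t, uint32_t now_ticks, uint32_t startseq)
{
        if (t->start == 0) {
                t->start = now_ticks;
                t->seq = startseq;
        }
}

/* On ACK: the sample is valid only once the ACK covers data beyond
 * the timed sequence (a wrap-safe SEQ_GT comparison). */
static uint32_t
rtt_sample(struct rtt_timer *t, uint32_t now_ticks, uint32_t ack)
{
        uint32_t rtt = 0;

        if (t->start != 0 && (int32_t)(ack - t->seq) > 0) {
                rtt = now_ticks - t->start;
                t->start = 0;
        }
        return (rtt);
}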
22292 ((tp->t_flags & TF_GPUTINPROG) == 0)) in rack_output()
22303 if (rack->r_fast_output && len) { in rack_output()
22304 if (rack->r_ctl.fsb.left_to_send > len) in rack_output()
22305 rack->r_ctl.fsb.left_to_send -= len; in rack_output()
22307 rack->r_ctl.fsb.left_to_send = 0; in rack_output()
22308 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_output()
22309 rack->r_fast_output = 0; in rack_output()
22310 if (rack->r_fast_output) { in rack_output()
22311 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_output()
22312 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_output()
22313 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_output()
22320 ((pace_max_seg - len) > segsiz)) { in rack_output()
22328 n_len = (orig_len - len); in rack_output()
22329 orig_len -= len; in rack_output()
22330 pace_max_seg -= len; in rack_output()
22332 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22333 /* Re-lock for the next spin */ in rack_output()
22340 ((orig_len - len) > segsiz)) { in rack_output()
22348 n_len = (orig_len - len); in rack_output()
22349 orig_len -= len; in rack_output()
22351 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22352 /* Re-lock for the next spin */ in rack_output()
22360 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22361 rack->r_early = 0; in rack_output()
22362 rack->r_late = 0; in rack_output()
22363 rack->r_ctl.rc_agg_early = 0; in rack_output()
22369 * with the timer. in rack_output()
22378 tp->t_softerror = error; in rack_output()
22381 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22382 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22384 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22385 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22395 if (rack->r_ctl.crte != NULL) { in rack_output()
22396 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_output()
22397 if (tcp_bblogging_on(rack->rc_tp)) in rack_output()
22400 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_output()
22401 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_output()
22402 if (rack->rc_enobuf < 0x7f) in rack_output()
22403 rack->rc_enobuf++; in rack_output()
22406 if (rack->r_ctl.crte != NULL) { in rack_output()
22408 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_output()
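On ENOBUFS the pacer's next slot is pushed out linearly, one extra millisecond per consecutive failure, with the counter capped at 0x7f so the delay never exceeds 128 ms. As a tiny sketch:

#include <stdint.h>

#define HPTS_USEC_IN_MSEC       1000

/* Linear ENOBUFS backoff: wait 1ms, then 2ms, 3ms, and so on. */
static uint32_t
enobuf_backoff(uint8_t *cnt)
{
        uint32_t slot = (1 + *cnt) * HPTS_USEC_IN_MSEC;

        if (*cnt < 0x7f)
                (*cnt)++;
        return (slot);
}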
22422 tp->t_flags &= ~TF_TSO; in rack_output()
22426 saved_mtu = tp->t_maxseg; in rack_output()
22427 tcp_mss_update(tp, -1, mtu, NULL, NULL); in rack_output()
22428 if (saved_mtu > tp->t_maxseg) { in rack_output()
22436 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22437 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22439 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22440 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22450 if (TCPS_HAVERCVDSYN(tp->t_state)) { in rack_output()
22451 tp->t_softerror = error; in rack_output()
22460 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22461 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22463 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22464 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22471 rack->rc_enobuf = 0; in rack_output()
22472 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_output()
22473 rack->r_ctl.retran_during_recovery += len; in rack_output()
22482 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) in rack_output()
22483 tp->rcv_adv = tp->rcv_nxt + recwin; in rack_output()
22485 tp->last_ack_sent = tp->rcv_nxt; in rack_output()
22486 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_output()
22516 rack->r_ent_rec_ns = 0; in rack_output()
22517 if (rack->r_must_retran) { in rack_output()
22519 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_output()
22520 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22524 rack->r_must_retran = 0; in rack_output()
22525 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22527 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22532 rack->r_must_retran = 0; in rack_output()
22533 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22536 rack->r_ctl.fsb.recwin = recwin; in rack_output()
22537 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && in rack_output()
22538 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22543 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); in rack_output()
22552 rack->r_fsb_inited && in rack_output()
22553 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22554 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22555 (rack->r_must_retran == 0) && in rack_output()
22556 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22559 ((orig_len - len) >= segsiz) && in rack_output()
22566 rack->r_fast_output = 0; in rack_output()
22579 (rack->r_must_retran == 0) && in rack_output()
22580 rack->r_fsb_inited && in rack_output()
22581 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22582 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22583 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22586 ((orig_len - len) >= segsiz) && in rack_output()
22592 if (rack->r_fast_output) { in rack_output()
22606 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
22607 tp->snd_nxt = tp->snd_max; in rack_output()
22610 crtsc = get_cyclecount() - ts_val; in rack_output()
22612 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22613 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
22615 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22616 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; in rack_output()
22618 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22619 	tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
22622 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22623 tp->tcp_cnt_counters[SND_OUT_ACK]++; in rack_output()
22625 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22626 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; in rack_output()
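The TCP_ACCOUNTING block charges this call's elapsed CPU cycles to either a data-send or pure-ACK bucket and counts outgoing MSS-sized chunks with the usual integer ceiling division. A stand-alone sketch of that bookkeeping (bucket names mine):

#include <stdint.h>

/* Per-connection send accounting (bucket names illustrative). */
struct snd_acct {
        uint64_t cnt_data, cnt_ack;     /* sends per bucket */
        uint64_t cyc_data, cyc_ack;     /* CPU cycles per bucket */
        uint64_t mss_out;               /* MSS-sized chunks sent */
};

/*
 * ts_val is a cycle-counter sample taken on entry to the output
 * path, now one taken on exit; (x + d - 1) / d is the standard
 * integer ceiling division used for the MSS chunk count.
 */
static void
acct_send(struct snd_acct *a, int pure_ack, uint64_t ts_val,
    uint64_t now, uint32_t tot_len, uint32_t segsiz)
{
        uint64_t crtsc = now - ts_val;

        if (pure_ack == 0) {
                a->cnt_data++;
                a->cyc_data += crtsc;
                a->mss_out += (tot_len + segsiz - 1) / segsiz;
        } else {
                a->cnt_ack++;
                a->cyc_ack += crtsc;
        }
}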
22641 orig_val = rack->r_ctl.rc_pace_max_segs; in rack_update_seg()
22642 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_update_seg()
22643 if (orig_val != rack->r_ctl.rc_pace_max_segs) in rack_update_seg()
22656 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_mtu_change()
22657 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { in rack_mtu_change()
22666 rack->r_fast_output = 0; in rack_mtu_change()
22667 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, in rack_mtu_change()
22668 rack->r_ctl.rc_sacked); in rack_mtu_change()
22669 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_mtu_change()
22670 rack->r_must_retran = 1; in rack_mtu_change()
22672 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mtu_change()
22673 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); in rack_mtu_change()
22676 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_mtu_change()
22678 tp->snd_nxt = tp->snd_max; in rack_mtu_change()
22684 if (rack->dgp_on == 1) in rack_set_dgp()
22686 if ((rack->use_fixed_rate == 1) && in rack_set_dgp()
22687 (rack->rc_always_pace == 1)) { in rack_set_dgp()
22694 if (rack->rc_always_pace == 1) { in rack_set_dgp()
22699 rack->r_ctl.pacing_method |= RACK_DGP_PACING; in rack_set_dgp()
22700 rack->rc_fillcw_apply_discount = 0; in rack_set_dgp()
22701 rack->dgp_on = 1; in rack_set_dgp()
22702 rack->rc_always_pace = 1; in rack_set_dgp()
22703 rack->rc_pace_dnd = 1; in rack_set_dgp()
22704 rack->use_fixed_rate = 0; in rack_set_dgp()
22705 if (rack->gp_ready) in rack_set_dgp()
22707 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_dgp()
22708 rack->rack_attempt_hdwr_pace = 0; in rack_set_dgp()
22710 rack->full_size_rxt = 1; in rack_set_dgp()
22711 rack->shape_rxt_to_pacing_min = 0; in rack_set_dgp()
22713 rack->r_use_cmp_ack = 1; in rack_set_dgp()
22714 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && in rack_set_dgp()
22715 rack->r_use_cmp_ack) in rack_set_dgp()
22716 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_dgp()
22718 rack->rack_enable_scwnd = 1; in rack_set_dgp()
22720 rack->rc_gp_dyn_mul = 1; in rack_set_dgp()
22722 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_set_dgp()
22724 rack->r_rr_config = 3; in rack_set_dgp()
22726 rack->r_ctl.rc_no_push_at_mrtt = 2; in rack_set_dgp()
22728 rack->rc_pace_to_cwnd = 1; in rack_set_dgp()
22729 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_dgp()
22730 rack->rtt_limit_mul = 0; in rack_set_dgp()
22732 rack->rack_no_prr = 1; in rack_set_dgp()
22734 rack->r_limit_scw = 1; in rack_set_dgp()
22736 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_set_dgp()
22758 * fill-cw the same settings that profile5 does in rack_set_profile()
22759 	 * to replace DGP. It then gets the max(dgp-rate, fillcw(discounted)). in rack_set_profile()
22761 rack->rc_fillcw_apply_discount = 1; in rack_set_profile()
22764 if (rack->rc_always_pace == 1) { in rack_set_profile()
22768 rack->dgp_on = 0; in rack_set_profile()
22769 rack->rc_hybrid_mode = 0; in rack_set_profile()
22770 rack->use_fixed_rate = 0; in rack_set_profile()
22774 rack->rc_pace_to_cwnd = 1; in rack_set_profile()
22776 rack->rc_pace_to_cwnd = 0; in rack_set_profile()
22779 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_set_profile()
22780 rack->rc_always_pace = 1; in rack_set_profile()
22781 if (rack->rack_hibeta) in rack_set_profile()
22784 rack->rc_always_pace = 0; in rack_set_profile()
22787 rack->rc_rack_tmr_std_based = 1; in rack_set_profile()
22791 rack->rc_rack_use_dsack = 1; in rack_set_profile()
22794 rack->r_use_cmp_ack = 1; in rack_set_profile()
22796 rack->r_use_cmp_ack = 0; in rack_set_profile()
22798 rack->rack_no_prr = 1; in rack_set_profile()
22800 rack->rack_no_prr = 0; in rack_set_profile()
22802 rack->rc_gp_no_rec_chg = 1; in rack_set_profile()
22804 rack->rc_gp_no_rec_chg = 0; in rack_set_profile()
22805 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { in rack_set_profile()
22806 rack->r_mbuf_queue = 1; in rack_set_profile()
22807 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) in rack_set_profile()
22808 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_profile()
22809 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22811 rack->r_mbuf_queue = 0; in rack_set_profile()
22812 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22815 rack->rack_enable_scwnd = 1; in rack_set_profile()
22817 rack->rack_enable_scwnd = 0; in rack_set_profile()
22820 rack->rc_gp_dyn_mul = 1; in rack_set_profile()
22822 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_set_profile()
22824 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_set_profile()
22825 rack->rc_gp_dyn_mul = 0; in rack_set_profile()
22827 rack->r_rr_config = 0; in rack_set_profile()
22828 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_set_profile()
22829 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_profile()
22830 rack->rtt_limit_mul = 0; in rack_set_profile()
22833 rack->rack_hdw_pace_ena = 1; in rack_set_profile()
22835 rack->rack_hdw_pace_ena = 0; in rack_set_profile()
22837 rack->rack_no_prr = 1; in rack_set_profile()
22839 rack->rack_no_prr = 0; in rack_set_profile()
22841 rack->r_limit_scw = 1; in rack_set_profile()
22843 rack->r_limit_scw = 0; in rack_set_profile()
22859 		 * No space, yikes -- fail out. in rack_add_deferred_option()
22863 dol->optname = sopt_name; in rack_add_deferred_option()
22864 dol->optval = loptval; in rack_add_deferred_option()
22865 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); in rack_add_deferred_option()
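Deferred options are queued on a TAILQ here and drained later by rack_apply_deferred_options() (shown further down), which walks the list with TAILQ_FOREACH_SAFE so entries can be removed mid-iteration. A self-contained sketch of the same queue/drain pattern, assuming FreeBSD's queue(3) macros:

#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct def_opt {
        TAILQ_ENTRY(def_opt) next;
        int optname;
        uint64_t optval;
};
TAILQ_HEAD(def_opt_head, def_opt);

/* Queue an option for later; returns non-zero when out of memory. */
static int
defer_opt(struct def_opt_head *head, int name, uint64_t val)
{
        struct def_opt *dol;

        dol = malloc(sizeof(*dol));
        if (dol == NULL)
                return (-1);
        dol->optname = name;
        dol->optval = val;
        TAILQ_INSERT_TAIL(head, dol, next);
        return (0);
}

/* Drain the queue; the _SAFE variant tolerates removing the
 * current element while iterating. */
static void
apply_deferred(struct def_opt_head *head, void (*apply)(int, uint64_t))
{
        struct def_opt *dol, *sdol;

        TAILQ_FOREACH_SAFE(dol, head, next, sdol) {
                TAILQ_REMOVE(head, dol, next);
                apply(dol->optname, dol->optval);
                free(dol);
        }
}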
22881 rack->use_fixed_rate = 0; in process_hybrid_pacing()
22882 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; in process_hybrid_pacing()
22883 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; in process_hybrid_pacing()
22884 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; in process_hybrid_pacing()
22886 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); in process_hybrid_pacing()
22888 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22890 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; in process_hybrid_pacing()
22895 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; in process_hybrid_pacing()
22897 seq = sft->start_seq; in process_hybrid_pacing()
22898 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { in process_hybrid_pacing()
22900 if (rack->rc_hybrid_mode) { in process_hybrid_pacing()
22902 rack->rc_tp->tcp_hybrid_stop++; in process_hybrid_pacing()
22907 if (rack->dgp_on == 0) { in process_hybrid_pacing()
22915 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22924 if (rack->rc_hybrid_mode == 0) { in process_hybrid_pacing()
22927 rack->r_ctl.pacing_method |= RACK_REG_PACING; in process_hybrid_pacing()
22928 rack->rc_hybrid_mode = 1; in process_hybrid_pacing()
22932 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { in process_hybrid_pacing()
22937 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in process_hybrid_pacing()
22941 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; in process_hybrid_pacing()
22942 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) in process_hybrid_pacing()
22943 sft->cspr = hybrid->cspr; in process_hybrid_pacing()
22945 sft->cspr = 0; in process_hybrid_pacing()
22946 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) in process_hybrid_pacing()
22947 sft->hint_maxseg = hybrid->hint_maxseg; in process_hybrid_pacing()
22949 sft->hint_maxseg = 0; in process_hybrid_pacing()
22950 rack->rc_tp->tcp_hybrid_start++; in process_hybrid_pacing()
22962 si->bytes_transmitted = tp->t_sndbytes; in rack_stack_information()
22963 si->bytes_retransmitted = tp->t_snd_rxt_bytes; in rack_stack_information()
22994 rack->rc_rack_tmr_std_based = 1; in rack_process_option()
22996 rack->rc_rack_tmr_std_based = 0; in rack_process_option()
22999 rack->rc_rack_use_dsack = 1; in rack_process_option()
23001 rack->rc_rack_use_dsack = 0; in rack_process_option()
23008 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_process_option()
23011 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; in rack_process_option()
23013 rack->r_ctl.pace_len_divisor = optval; in rack_process_option()
23019 rack->rack_hibeta = 1; in rack_process_option()
23025 rack->r_ctl.saved_hibeta = optval; in rack_process_option()
23026 if (rack->rc_pacing_cc_set) in rack_process_option()
23028 rack->r_ctl.rc_saved_beta.beta = optval; in rack_process_option()
23030 if (rack->rc_pacing_cc_set == 0) in rack_process_option()
23033 rack->rack_hibeta = 0; in rack_process_option()
23034 if (rack->rc_pacing_cc_set) in rack_process_option()
23043 rack->r_ctl.timer_slop = optval; in rack_process_option()
23044 if (rack->rc_tp->t_srtt) { in rack_process_option()
23049 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_option()
23051 rack->r_ctl.timer_slop); in rack_process_option()
23056 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_process_option()
23061 if (rack->rc_pacing_cc_set) { in rack_process_option()
23070 if (CC_ALGO(tp)->ctl_output != NULL) in rack_process_option()
23071 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_process_option()
23079 rack->r_ctl.rc_saved_beta.beta_ecn = optval; in rack_process_option()
23080 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; in rack_process_option()
23086 if (rack->gp_ready) { in rack_process_option()
23091 rack->defer_options = 1; in rack_process_option()
23093 rack->defer_options = 0; in rack_process_option()
23098 rack->r_ctl.req_measurements = optval; in rack_process_option()
23105 rack->r_use_labc_for_rec = 1; in rack_process_option()
23107 rack->r_use_labc_for_rec = 0; in rack_process_option()
23112 rack->rc_labc = optval; in rack_process_option()
23119 rack->r_up_only = 1; in rack_process_option()
23121 rack->r_up_only = 0; in rack_process_option()
23125 rack->r_ctl.fillcw_cap = loptval; in rack_process_option()
23129 if ((rack->dgp_on == 1) && in rack_process_option()
23130 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23142 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23144 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23146 rack->r_ctl.bw_rate_cap = loptval; in rack_process_option()
23153 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { in rack_process_option()
23161 rack->r_ctl.side_chan_dis_mask = optval; in rack_process_option()
23163 rack->r_ctl.side_chan_dis_mask = 0; in rack_process_option()
23171 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { in rack_process_option()
23174 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { in rack_process_option()
23175 rack->r_use_cmp_ack = 1; in rack_process_option()
23176 rack->r_mbuf_queue = 1; in rack_process_option()
23177 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23179 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_process_option()
23180 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_process_option()
23185 rack->r_limit_scw = 1; in rack_process_option()
23187 rack->r_limit_scw = 0; in rack_process_option()
23195 rack->rc_pace_to_cwnd = 0; in rack_process_option()
23197 rack->rc_pace_to_cwnd = 1; in rack_process_option()
23202 rack->rc_pace_fill_if_rttin_range = 1; in rack_process_option()
23203 rack->rtt_limit_mul = optval; in rack_process_option()
23205 rack->rc_pace_fill_if_rttin_range = 0; in rack_process_option()
23206 rack->rtt_limit_mul = 0; in rack_process_option()
23212 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_process_option()
23214 rack->r_ctl.rc_no_push_at_mrtt = optval; in rack_process_option()
23221 rack->rack_enable_scwnd = 0; in rack_process_option()
23223 rack->rack_enable_scwnd = 1; in rack_process_option()
23226 /* Now do we use the LRO mbuf-queue feature */ in rack_process_option()
23228 if (optval || rack->r_use_cmp_ack) in rack_process_option()
23229 rack->r_mbuf_queue = 1; in rack_process_option()
23231 rack->r_mbuf_queue = 0; in rack_process_option()
23232 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23233 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23235 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23240 rack->rack_rec_nonrxt_use_cr = 0; in rack_process_option()
23242 rack->rack_rec_nonrxt_use_cr = 1; in rack_process_option()
23247 rack->rack_no_prr = 0; in rack_process_option()
23249 rack->rack_no_prr = 1; in rack_process_option()
23251 rack->no_prr_addback = 1; in rack_process_option()
23257 rack->cspr_is_fcc = 1; in rack_process_option()
23259 rack->cspr_is_fcc = 0; in rack_process_option()
23264 rack->rc_gp_dyn_mul = 0; in rack_process_option()
23266 rack->rc_gp_dyn_mul = 1; in rack_process_option()
23272 rack->r_ctl.rack_per_of_gp_ca = optval; in rack_process_option()
23285 rack->rack_tlp_threshold_use = optval; in rack_process_option()
23290 rack->r_ctl.rc_tlp_cwnd_reduce = optval; in rack_process_option()
23299 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23304 if (rack->rc_always_pace) { in rack_process_option()
23308 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23309 rack->rc_always_pace = 1; in rack_process_option()
23310 if (rack->rack_hibeta) in rack_process_option()
23318 if (rack->rc_always_pace == 1) { in rack_process_option()
23322 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23323 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23325 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23335 rack->r_ctl.init_rate = val; in rack_process_option()
23336 if (rack->rc_always_pace) in rack_process_option()
23345 rack->rc_force_max_seg = 1; in rack_process_option()
23347 rack->rc_force_max_seg = 0; in rack_process_option()
23351 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); in rack_process_option()
23357 if ((rack->dgp_on == 1) && in rack_process_option()
23358 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23360 * If we set a max-seg and are doing DGP then in rack_process_option()
23371 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23373 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23376 rack->rc_user_set_max_segs = optval; in rack_process_option()
23378 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; in rack_process_option()
23384 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23388 if (rack->dgp_on) { in rack_process_option()
23396 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23397 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23398 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23399 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23400 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23401 rack->use_fixed_rate = 1; in rack_process_option()
23402 if (rack->rack_hibeta) in rack_process_option()
23405 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23406 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23407 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23414 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23418 if (rack->dgp_on) { in rack_process_option()
23426 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23427 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23428 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23429 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23430 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23431 rack->use_fixed_rate = 1; in rack_process_option()
23432 if (rack->rack_hibeta) in rack_process_option()
23435 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23436 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23437 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23444 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23448 if (rack->dgp_on) { in rack_process_option()
23456 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23457 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23458 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23459 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23460 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23461 rack->use_fixed_rate = 1; in rack_process_option()
23462 if (rack->rack_hibeta) in rack_process_option()
23465 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23466 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23467 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23472 rack->r_ctl.rack_per_of_gp_rec = optval; in rack_process_option()
23474 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23475 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23476 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23490 rack->r_ctl.rack_per_of_gp_ca = ca; in rack_process_option()
23492 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23493 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23494 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23508 rack->r_ctl.rack_per_of_gp_ss = ss; in rack_process_option()
23510 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23511 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23512 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23518 rack->r_rr_config = optval; in rack_process_option()
23520 rack->r_rr_config = 0; in rack_process_option()
23524 rack->rc_pace_dnd = 1; in rack_process_option()
23526 rack->rc_pace_dnd = 0; in rack_process_option()
23531 if (rack->r_rack_hw_rate_caps == 0) in rack_process_option()
23532 rack->r_rack_hw_rate_caps = 1; in rack_process_option()
23536 rack->r_rack_hw_rate_caps = 0; in rack_process_option()
23543 rack->r_ctl.rack_per_upper_bound_ca = val; in rack_process_option()
23545 rack->r_ctl.rack_per_upper_bound_ss = val; in rack_process_option()
23550 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; in rack_process_option()
23552 rack->r_ctl.gate_to_fs = 1; in rack_process_option()
23554 rack->r_ctl.gate_to_fs = 0; in rack_process_option()
23557 rack->r_ctl.use_gp_not_last = 1; in rack_process_option()
23559 rack->r_ctl.use_gp_not_last = 0; in rack_process_option()
23566 rack->r_ctl.gp_gain_req = v; in rack_process_option()
23570 rack->rc_initial_ss_comp = 1; in rack_process_option()
23571 rack->r_ctl.gp_rnd_thresh = 0; in rack_process_option()
23576 rack->r_ctl.rc_split_limit = optval; in rack_process_option()
23581 if (rack->rack_hdrw_pacing == 0) { in rack_process_option()
23582 rack->rack_hdw_pace_ena = 1; in rack_process_option()
23583 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23587 rack->rack_hdw_pace_ena = 0; in rack_process_option()
23589 if (rack->r_ctl.crte != NULL) { in rack_process_option()
23590 rack->rack_hdrw_pacing = 0; in rack_process_option()
23591 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23592 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_process_option()
23593 rack->r_ctl.crte = NULL; in rack_process_option()
23602 rack->r_ctl.rc_prr_sendalot = optval; in rack_process_option()
23605 /* Minimum time between rack t-o's in ms */ in rack_process_option()
23607 rack->r_ctl.rc_min_to = optval; in rack_process_option()
23612 rack->r_ctl.rc_early_recovery_segs = optval; in rack_process_option()
23617 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_process_option()
23619 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_process_option()
23621 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_process_option()
23623 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_process_option()
23631 rack->r_ctl.rc_reorder_shift = optval; in rack_process_option()
23638 rack->r_ctl.rc_reorder_fade = optval; in rack_process_option()
23644 rack->r_ctl.rc_tlp_threshold = optval; in rack_process_option()
23651 rack->use_rack_rr = 1; in rack_process_option()
23653 rack->use_rack_rr = 0; in rack_process_option()
23656 /* RACK added ms i.e. rack-rtt + reord + N */ in rack_process_option()
23658 rack->r_ctl.rc_pkt_delay = optval; in rack_process_option()
23663 tp->t_delayed_ack = 0; in rack_process_option()
23665 tp->t_delayed_ack = 1; in rack_process_option()
23666 if (tp->t_flags & TF_DELACK) { in rack_process_option()
23667 tp->t_flags &= ~TF_DELACK; in rack_process_option()
23668 tp->t_flags |= TF_ACKNOW; in rack_process_option()
23682 rack->r_ctl.rc_rate_sample_method = optval; in rack_process_option()
23687 rack->r_use_hpts_min = 1; in rack_process_option()
23689 * Must be between 2 - 80% to be a reduction else in rack_process_option()
23693 rack->r_ctl.max_reduction = optval; in rack_process_option()
23696 rack->r_use_hpts_min = 0; in rack_process_option()
23701 rack->rc_gp_no_rec_chg = 1; in rack_process_option()
23703 rack->rc_gp_no_rec_chg = 0; in rack_process_option()
23708 rack->rc_skip_timely = 1; in rack_process_option()
23709 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_process_option()
23710 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_process_option()
23711 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_process_option()
23713 rack->rc_skip_timely = 0; in rack_process_option()
23718 rack->use_lesser_lt_bw = 0; in rack_process_option()
23719 rack->dis_lt_bw = 1; in rack_process_option()
23721 rack->use_lesser_lt_bw = 1; in rack_process_option()
23722 rack->dis_lt_bw = 0; in rack_process_option()
23724 rack->use_lesser_lt_bw = 0; in rack_process_option()
23725 rack->dis_lt_bw = 0; in rack_process_option()
23731 rack->rc_allow_data_af_clo = 1; in rack_process_option()
23733 rack->rc_allow_data_af_clo = 0; in rack_process_option()
23748 * apply a read-lock to the parent (we are already in rack_inherit()
23759 if (par->t_fb != tp->t_fb) { in rack_inherit()
23765 dest = (struct tcp_rack *)tp->t_fb_ptr; in rack_inherit()
23766 src = (struct tcp_rack *)par->t_fb_ptr; in rack_inherit()
23772 /* Now copy out anything we wish to inherit i.e. things in socket-options */ in rack_inherit()
23774 if ((src->dgp_on) && (dest->dgp_on == 0)) { in rack_inherit()
23780 if (dest->full_size_rxt != src->full_size_rxt) { in rack_inherit()
23781 dest->full_size_rxt = src->full_size_rxt; in rack_inherit()
23784 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { in rack_inherit()
23785 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; in rack_inherit()
23789 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { in rack_inherit()
23790 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; in rack_inherit()
23793 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { in rack_inherit()
23794 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; in rack_inherit()
23798 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { in rack_inherit()
23799 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; in rack_inherit()
23803 if (src->rack_hibeta != dest->rack_hibeta) { in rack_inherit()
23805 if (src->rack_hibeta) { in rack_inherit()
23806 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; in rack_inherit()
23807 dest->rack_hibeta = 1; in rack_inherit()
23809 dest->rack_hibeta = 0; in rack_inherit()
23813 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { in rack_inherit()
23814 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; in rack_inherit()
23818 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { in rack_inherit()
23819 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; in rack_inherit()
23822 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { in rack_inherit()
23823 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; in rack_inherit()
23828 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { in rack_inherit()
23829 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; in rack_inherit()
23833 if (dest->r_up_only != src->r_up_only) { in rack_inherit()
23834 dest->r_up_only = src->r_up_only; in rack_inherit()
23838 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { in rack_inherit()
23839 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; in rack_inherit()
23843 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { in rack_inherit()
23844 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; in rack_inherit()
23849 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { in rack_inherit()
23850 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; in rack_inherit()
23854 if (dest->r_limit_scw != src->r_limit_scw) { in rack_inherit()
23855 dest->r_limit_scw = src->r_limit_scw; in rack_inherit()
23859 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { in rack_inherit()
23860 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; in rack_inherit()
23863 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { in rack_inherit()
23864 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; in rack_inherit()
23867 if (dest->rtt_limit_mul != src->rtt_limit_mul) { in rack_inherit()
23868 dest->rtt_limit_mul = src->rtt_limit_mul; in rack_inherit()
23872 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { in rack_inherit()
23873 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; in rack_inherit()
23877 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { in rack_inherit()
23878 dest->rack_enable_scwnd = src->rack_enable_scwnd; in rack_inherit()
23882 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { in rack_inherit()
23883 dest->r_use_cmp_ack = src->r_use_cmp_ack; in rack_inherit()
23887 if (dest->r_mbuf_queue != src->r_mbuf_queue) { in rack_inherit()
23888 dest->r_mbuf_queue = src->r_mbuf_queue; in rack_inherit()
23892 if (dest->r_mbuf_queue != src->r_mbuf_queue) { in rack_inherit()
23893 dest->r_mbuf_queue = src->r_mbuf_queue; in rack_inherit()
23896 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { in rack_inherit()
23897 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_inherit()
23899 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_inherit()
23901 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_inherit()
23902 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_inherit()
23905 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { in rack_inherit()
23906 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; in rack_inherit()
23910 if (dest->rack_no_prr != src->rack_no_prr) { in rack_inherit()
23911 dest->rack_no_prr = src->rack_no_prr; in rack_inherit()
23914 if (dest->no_prr_addback != src->no_prr_addback) { in rack_inherit()
23915 dest->no_prr_addback = src->no_prr_addback; in rack_inherit()
23919 if (dest->cspr_is_fcc != src->cspr_is_fcc) { in rack_inherit()
23920 dest->cspr_is_fcc = src->cspr_is_fcc; in rack_inherit()
23924 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { in rack_inherit()
23925 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; in rack_inherit()
23928 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23929 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23933 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { in rack_inherit()
23934 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; in rack_inherit()
23939 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { in rack_inherit()
23940 dest->r_ctl.init_rate = src->r_ctl.init_rate; in rack_inherit()
23944 if (dest->rc_force_max_seg != src->rc_force_max_seg) { in rack_inherit()
23945 dest->rc_force_max_seg = src->rc_force_max_seg; in rack_inherit()
23949 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { in rack_inherit()
23950 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; in rack_inherit()
23955 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { in rack_inherit()
23956 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; in rack_inherit()
23959 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { in rack_inherit()
23960 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; in rack_inherit()
23963 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { in rack_inherit()
23964 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; in rack_inherit()
23968 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { in rack_inherit()
23969 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; in rack_inherit()
23972 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23973 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23977 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { in rack_inherit()
23978 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; in rack_inherit()
23982 if (dest->r_rr_config != src->r_rr_config) { in rack_inherit()
23983 dest->r_rr_config = src->r_rr_config; in rack_inherit()
23987 if (dest->rc_pace_dnd != src->rc_pace_dnd) { in rack_inherit()
23988 dest->rc_pace_dnd = src->rc_pace_dnd; in rack_inherit()
23992 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { in rack_inherit()
23993 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; in rack_inherit()
23997 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { in rack_inherit()
23998 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; in rack_inherit()
24001 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { in rack_inherit()
24002 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; in rack_inherit()
24006 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { in rack_inherit()
24007 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; in rack_inherit()
24010 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { in rack_inherit()
24011 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; in rack_inherit()
24014 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { in rack_inherit()
24015 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; in rack_inherit()
24018 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { in rack_inherit()
24019 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; in rack_inherit()
24023 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { in rack_inherit()
24024 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; in rack_inherit()
24027 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { in rack_inherit()
24028 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; in rack_inherit()
24032 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { in rack_inherit()
24033 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; in rack_inherit()
24037 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { in rack_inherit()
24038 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; in rack_inherit()
24042 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { in rack_inherit()
24043 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; in rack_inherit()
24047 if (par->t_ccv.flags != tp->t_ccv.flags) { in rack_inherit()
24049 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_inherit()
24050 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_inherit()
24052 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_inherit()
24054 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_inherit()
24056 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_inherit()
24060 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { in rack_inherit()
24061 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; in rack_inherit()
24065 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { in rack_inherit()
24066 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; in rack_inherit()
24070 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { in rack_inherit()
24071 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; in rack_inherit()
24075 if (dest->use_rack_rr != src->use_rack_rr) { in rack_inherit()
24076 dest->use_rack_rr = src->use_rack_rr; in rack_inherit()
24080 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { in rack_inherit()
24081 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; in rack_inherit()
24086 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { in rack_inherit()
24087 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; in rack_inherit()
24091 if (dest->r_use_hpts_min != src->r_use_hpts_min) { in rack_inherit()
24092 dest->r_use_hpts_min = src->r_use_hpts_min; in rack_inherit()
24095 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { in rack_inherit()
24096 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; in rack_inherit()
24100 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { in rack_inherit()
24101 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; in rack_inherit()
24104 if (dest->rc_skip_timely != src->rc_skip_timely) { in rack_inherit()
24105 dest->rc_skip_timely = src->rc_skip_timely; in rack_inherit()
24109 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { in rack_inherit()
24110 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; in rack_inherit()
24114 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { in rack_inherit()
24115 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; in rack_inherit()
24118 if (dest->dis_lt_bw != src->dis_lt_bw) { in rack_inherit()
24119 dest->dis_lt_bw = src->dis_lt_bw; in rack_inherit()
24132 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { in rack_apply_deferred_options()
24133 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_apply_deferred_options()
24135 s_optval = (uint32_t)dol->optval; in rack_apply_deferred_options()
24136 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); in rack_apply_deferred_options()
24147 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_hw_tls_change()
24149 rack->r_ctl.fsb.hw_tls = 1; in rack_hw_tls_change()
24151 rack->r_ctl.fsb.hw_tls = 0; in rack_hw_tls_change()
24169 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_wake_check()
24170 if (rack->r_ctl.rc_hpts_flags) { in rack_wake_check()
24172 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ in rack_wake_check()
24174 * Pacing timer is up, check if we are ready. in rack_wake_check()
24176 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) in rack_wake_check()
24178 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { in rack_wake_check()
24180 * A timer is up, check if we are ready. in rack_wake_check()
24182 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) in rack_wake_check()
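Both readiness checks rely on TSTMP_GEQ, the wrap-safe comparison for 32-bit timestamps; it is conventionally defined via a signed difference and stays correct as long as the two stamps are within 2^31 of each other. A sketch:

#include <stdint.h>

/* Wrap-safe "a >= b" for 32-bit timestamps. */
#define TSTMP_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)

static int
timer_ready(uint32_t cts, uint32_t expiry)
{
        return (TSTMP_GEQ(cts, expiry));
}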
24216 * socket option arguments. When it re-acquires the lock after the copy, it
24232 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_set_sockopt()
24238 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_set_sockopt()
24241 switch (sopt->sopt_level) { in rack_set_sockopt()
24244 MPASS(inp->inp_vflag & INP_IPV6PROTO); in rack_set_sockopt()
24245 switch (sopt->sopt_name) { in rack_set_sockopt()
24255 switch (sopt->sopt_name) { in rack_set_sockopt()
24260 ip->ip_tos = rack->rc_inp->inp_ip_tos; in rack_set_sockopt()
24266 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; in rack_set_sockopt()
24274 switch (sopt->sopt_name) { in rack_set_sockopt()
24275 case SO_PEERPRIO: /* SC-URL:bs */ in rack_set_sockopt()
24277 if (inp->inp_socket) { in rack_set_sockopt()
24278 rack->client_bufferlvl = inp->inp_socket->so_peerprio; in rack_set_sockopt()
24286 switch (sopt->sopt_name) { in rack_set_sockopt()
24303 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ in rack_set_sockopt()
24304 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ in rack_set_sockopt()
24364 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_set_sockopt()
24365 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { in rack_set_sockopt()
24368 * We truncate it down to 32 bits for the socket-option trace this in rack_set_sockopt()
24372 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { in rack_set_sockopt()
24382 if (tp->t_fb != &__tcp_rack) { in rack_set_sockopt()
24386 if (rack->defer_options && (rack->gp_ready == 0) && in rack_set_sockopt()
24387 (sopt->sopt_name != TCP_DEFER_OPTIONS) && in rack_set_sockopt()
24388 (sopt->sopt_name != TCP_HYBRID_PACING) && in rack_set_sockopt()
24389 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && in rack_set_sockopt()
24390 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && in rack_set_sockopt()
24391 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { in rack_set_sockopt()
24393 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { in rack_set_sockopt()
24402 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); in rack_set_sockopt()
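From user space these knobs arrive through setsockopt() at IPPROTO_TCP level. A hedged sketch using one of the option names visible above (TCP_RACK_MEASURE_CNT); it assumes a FreeBSD netinet/tcp.h that exposes the RACK options and a socket already attached to the rack stack:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/*
 * Require 'cnt' goodput measurements before RACK trusts its b/w
 * estimate.  Returns -1 with errno set on failure (e.g. the rack
 * stack is not selected for this connection), like any setsockopt().
 */
static int
set_measure_cnt(int fd, int cnt)
{
        return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_MEASURE_CNT,
            &cnt, sizeof(cnt)));
}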
24414 ti->tcpi_state = tp->t_state; in rack_fill_info()
24415 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) in rack_fill_info()
24416 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; in rack_fill_info()
24417 if (tp->t_flags & TF_SACK_PERMIT) in rack_fill_info()
24418 ti->tcpi_options |= TCPI_OPT_SACK; in rack_fill_info()
24419 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { in rack_fill_info()
24420 ti->tcpi_options |= TCPI_OPT_WSCALE; in rack_fill_info()
24421 ti->tcpi_snd_wscale = tp->snd_scale; in rack_fill_info()
24422 ti->tcpi_rcv_wscale = tp->rcv_scale; in rack_fill_info()
24424 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) in rack_fill_info()
24425 ti->tcpi_options |= TCPI_OPT_ECN; in rack_fill_info()
24426 if (tp->t_flags & TF_FASTOPEN) in rack_fill_info()
24427 ti->tcpi_options |= TCPI_OPT_TFO; in rack_fill_info()
24429 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; in rack_fill_info()
24431 ti->tcpi_rtt = tp->t_srtt; in rack_fill_info()
24432 ti->tcpi_rttvar = tp->t_rttvar; in rack_fill_info()
24433 ti->tcpi_rto = tp->t_rxtcur; in rack_fill_info()
24434 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; in rack_fill_info()
24435 ti->tcpi_snd_cwnd = tp->snd_cwnd; in rack_fill_info()
24437 * FreeBSD-specific extension fields for tcp_info. in rack_fill_info()
24439 ti->tcpi_rcv_space = tp->rcv_wnd; in rack_fill_info()
24440 ti->tcpi_rcv_nxt = tp->rcv_nxt; in rack_fill_info()
24441 ti->tcpi_snd_wnd = tp->snd_wnd; in rack_fill_info()
24442 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */ in rack_fill_info()
24443 ti->tcpi_snd_nxt = tp->snd_nxt; in rack_fill_info()
24444 ti->tcpi_snd_mss = tp->t_maxseg; in rack_fill_info()
24445 ti->tcpi_rcv_mss = tp->t_maxseg; in rack_fill_info()
24446 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; in rack_fill_info()
24447 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; in rack_fill_info()
24448 ti->tcpi_snd_zerowin = tp->t_sndzerowin; in rack_fill_info()
24449 ti->tcpi_total_tlp = tp->t_sndtlppack; in rack_fill_info()
24450 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; in rack_fill_info()
24451 ti->tcpi_rttmin = tp->t_rttlow; in rack_fill_info()
24453 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); in rack_fill_info()
24456 if (tp->t_flags & TF_TOE) { in rack_fill_info()
24457 ti->tcpi_options |= TCPI_OPT_TOE; in rack_fill_info()
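User space reads the fields filled in above through the standard TCP_INFO socket option. A minimal consumer, assuming a FreeBSD netinet/tcp.h that provides struct tcp_info:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Print a few of the fields rack_fill_info() populates. */
static int
dump_tcp_info(int fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        memset(&ti, 0, sizeof(ti));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == -1)
                return (-1);
        printf("state=%u srtt=%u rttvar=%u rto=%u cwnd=%u\n",
            ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
            ti.tcpi_rto, ti.tcpi_snd_cwnd);
        return (0);
}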
24478 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_get_sockopt()
24483 switch (sopt->sopt_name) { in rack_get_sockopt()
24487 /* Fix up the rtt related fields if needed */ in rack_get_sockopt()
24508 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) in rack_get_sockopt()
24510 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24511 optval = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_get_sockopt()
24518 if (tp->t_ccv.cc_data) in rack_get_sockopt()
24519 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; in rack_get_sockopt()
24526 if (rack->rc_rack_tmr_std_based) { in rack_get_sockopt()
24529 if (rack->rc_rack_use_dsack) { in rack_get_sockopt()
24535 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_get_sockopt()
24537 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) in rack_get_sockopt()
24539 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) in rack_get_sockopt()
24550 optval = rack->rack_hibeta; in rack_get_sockopt()
24553 optval = rack->defer_options; in rack_get_sockopt()
24556 optval = rack->r_ctl.req_measurements; in rack_get_sockopt()
24559 optval = rack->r_use_labc_for_rec; in rack_get_sockopt()
24562 optval = rack->rc_labc; in rack_get_sockopt()
24565 		optval = rack->r_up_only; in rack_get_sockopt()
24568 loptval = rack->r_ctl.fillcw_cap; in rack_get_sockopt()
24571 loptval = rack->r_ctl.bw_rate_cap; in rack_get_sockopt()
24578 optval = rack->r_ctl.side_chan_dis_mask; in rack_get_sockopt()
24585 optval = rack->r_use_cmp_ack; in rack_get_sockopt()
24588 optval = rack->rc_pace_to_cwnd; in rack_get_sockopt()
24591 optval = rack->r_ctl.rc_no_push_at_mrtt; in rack_get_sockopt()
24594 optval = rack->rack_enable_scwnd; in rack_get_sockopt()
24597 optval = rack->rack_rec_nonrxt_use_cr; in rack_get_sockopt()
24600 if (rack->rack_no_prr == 1) in rack_get_sockopt()
24602 else if (rack->no_prr_addback == 1) in rack_get_sockopt()
24608 if (rack->dis_lt_bw) { in rack_get_sockopt()
24611 } else if (rack->use_lesser_lt_bw) { in rack_get_sockopt()
24623 /* Now do we use the LRO mbuf-queue feature */ in rack_get_sockopt()
24624 optval = rack->r_mbuf_queue; in rack_get_sockopt()
24627 optval = rack->cspr_is_fcc; in rack_get_sockopt()
24630 optval = rack->rc_gp_dyn_mul; in rack_get_sockopt()
24637 optval = rack->r_ctl.rc_tlp_cwnd_reduce; in rack_get_sockopt()
24640 val = rack->r_ctl.init_rate; in rack_get_sockopt()
24647 optval = rack->rc_force_max_seg; in rack_get_sockopt()
24650 optval = rack->r_ctl.rc_user_set_min_segs; in rack_get_sockopt()
24654 optval = rack->rc_user_set_max_segs; in rack_get_sockopt()
24658 optval = rack->rc_always_pace; in rack_get_sockopt()
24662 optval = rack->r_ctl.rc_prr_sendalot; in rack_get_sockopt()
24665 /* Minimum time between rack t-o's in ms */ in rack_get_sockopt()
24666 optval = rack->r_ctl.rc_min_to; in rack_get_sockopt()
24669 optval = rack->r_ctl.rc_split_limit; in rack_get_sockopt()
24673 optval = rack->r_ctl.rc_early_recovery_segs; in rack_get_sockopt()
24677 optval = rack->r_ctl.rc_reorder_shift; in rack_get_sockopt()
24680 if (rack->r_ctl.gp_rnd_thresh) { in rack_get_sockopt()
24683 v = rack->r_ctl.gp_gain_req; in rack_get_sockopt()
24685 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); in rack_get_sockopt()
24686 if (rack->r_ctl.gate_to_fs == 1) in rack_get_sockopt()
24693 optval = rack->r_ctl.rc_reorder_fade; in rack_get_sockopt()
24697 optval = rack->use_rack_rr; in rack_get_sockopt()
24700 optval = rack->r_rr_config; in rack_get_sockopt()
24703 optval = rack->r_rack_hw_rate_caps; in rack_get_sockopt()
24706 optval = rack->rack_hdw_pace_ena; in rack_get_sockopt()
24710 optval = rack->r_ctl.rc_tlp_threshold; in rack_get_sockopt()
24713 /* RACK added ms i.e. rack-rtt + reord + N */ in rack_get_sockopt()
24714 optval = rack->r_ctl.rc_pkt_delay; in rack_get_sockopt()
24717 optval = rack->rack_tlp_threshold_use; in rack_get_sockopt()
24720 optval = rack->rc_pace_dnd; in rack_get_sockopt()
24723 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; in rack_get_sockopt()
24726 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; in rack_get_sockopt()
24729 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; in rack_get_sockopt()
24732 optval = rack->r_ctl.rack_per_upper_bound_ss; in rack_get_sockopt()
24734 optval |= rack->r_ctl.rack_per_upper_bound_ca; in rack_get_sockopt()
24737 optval = rack->r_ctl.rack_per_of_gp_ca; in rack_get_sockopt()
24740 optval = rack->r_ctl.rack_per_of_gp_ss; in rack_get_sockopt()
24743 optval = rack->r_ctl.pace_len_divisor; in rack_get_sockopt()
24746 optval = rack->r_ctl.rc_rate_sample_method; in rack_get_sockopt()
24749 optval = tp->t_delayed_ack; in rack_get_sockopt()
24752 optval = rack->rc_allow_data_af_clo; in rack_get_sockopt()
24755 optval = rack->r_limit_scw; in rack_get_sockopt()
24758 if (rack->r_use_hpts_min) in rack_get_sockopt()
24759 optval = rack->r_ctl.max_reduction; in rack_get_sockopt()
24764 optval = rack->rc_gp_no_rec_chg; in rack_get_sockopt()
24767 optval = rack->rc_skip_timely; in rack_get_sockopt()
24770 optval = rack->r_ctl.timer_slop; in rack_get_sockopt()
24778 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_get_sockopt()
24779 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) in rack_get_sockopt()
24790 if (sopt->sopt_dir == SOPT_SET) { in rack_ctloutput()
24792 } else if (sopt->sopt_dir == SOPT_GET) { in rack_ctloutput()
24795 		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); in rack_ctloutput()
24866 printf("Failed to register rack module -- err:%d\n", err); in tcp_addrack()