Lines Matching +full:counts +full:- +full:beta +full:- +full:np
1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
162 * - Matt Mathis's Rate Halving which slowly drops
165 * - Yuchung Cheng's RACK TCP (for which it's named) that
168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
186 * TCP output is also over-written with a new version since it
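The header fragment above names the techniques RACK merges: Mathis-style rate halving, RACK reordering/loss detection, RFC 4737 reorder detection, and the tail-loss probe. As orientation only, here is a minimal sketch of the core RACK rule from RFC 8985; the names are illustrative, not this file's sendmap fields:

	/*
	 * Hedged sketch of the RACK loss rule (RFC 8985): xmit_ts is the
	 * segment's last transmit time, acked_ts the transmit time of the
	 * most recently delivered segment. Illustrative, not kernel API.
	 */
	static int
	rack_sketch_lost(uint64_t xmit_ts, uint64_t acked_ts,
	    uint64_t now, uint64_t rtt, uint64_t reo_wnd)
	{
		if (acked_ts <= xmit_ts)
			return (0);	/* nothing sent later was delivered */
		return (now >= xmit_ts + rtt + reo_wnd);
	}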
191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
195 * - 60 seconds */
199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto ->…
216 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
252 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/c…
257 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
264 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
294 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
295 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
310 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
312 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top …
313 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bott…
332 * the way fill-cw interacts with timely and caps how much
333 * timely can boost the fill-cw b/w.
339 * probeRTT as well as fixed-rate-pacing.
347 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */
431 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
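RACK_REXMTVAL is the classic retransmit-timeout estimate, srtt + 4 * rttvar, floored at rack_rto_min. A made-up worked instance, assuming the microsecond units rack keeps these values in: with t_srtt = 40000 and t_rttvar = 5000, the macro yields max(rack_rto_min, 40000 + (5000 << 2)) = max(rack_rto_min, 60000) usecs.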
602 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
603 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
604 if (rack->lt_bw_up) { in rack_get_lt_bw()
607 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
608 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
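The accumulators above (bytes sent and microseconds elapsed, both extended by the still-open interval when lt_bw_up is set) reduce to a long-term rate. A sketch of the likely tail of this function, which the search elided:

	/* bytes over microseconds, scaled to bytes per second */
	if ((bytes != 0) && (tim != 0))
		return ((bytes * (uint64_t)1000000) / tim);
	return (0);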
626 tp = rack->rc_tp; in rack_swap_beta_values()
627 if (tp->t_cc == NULL) { in rack_swap_beta_values()
631 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
632 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_swap_beta_values()
633 /* Not new-reno; we can't play games with beta! */ in rack_swap_beta_values()
638 if (CC_ALGO(tp)->ctl_output == NULL) { in rack_swap_beta_values()
639 /* Huh, not using new-reno so no swaps? */ in rack_swap_beta_values()
647 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
654 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
664 opt.val = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
665 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
671 opt.val = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
672 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
678 rack->r_ctl.rc_saved_beta = old_beta; in rack_swap_beta_values()
679 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; in rack_swap_beta_values()
681 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
686 ptr = ((struct newreno *)tp->t_ccv.cc_data); in rack_swap_beta_values()
689 log.u_bbr.flex1 = ptr->beta; in rack_swap_beta_values()
690 log.u_bbr.flex2 = ptr->beta_ecn; in rack_swap_beta_values()
691 log.u_bbr.flex3 = ptr->newreno_flags; in rack_swap_beta_values()
692 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
693 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
695 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
697 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
699 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
700 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
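rack_swap_beta_values exchanges rack's saved beta/beta_ecn with whatever New Reno currently holds, issuing get-then-set requests through the CC module's ctl_output hook and restoring the old values if any step errors out. A hedged skeleton of the swap, where query_cc/set_cc are hypothetical stand-ins for the sopt plumbing above:

	/* Hypothetical helpers standing in for CC_ALGO(tp)->ctl_output(). */
	old_beta = query_cc(tp, CC_NEWRENO_BETA);
	old_beta_ecn = query_cc(tp, CC_NEWRENO_BETA_ECN);
	set_cc(tp, CC_NEWRENO_BETA, rack->r_ctl.rc_saved_beta);
	set_cc(tp, CC_NEWRENO_BETA_ECN, rack->r_ctl.rc_saved_beta_ecn);
	/* Keep the displaced values so the next swap puts them back. */
	rack->r_ctl.rc_saved_beta = old_beta;
	rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn;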
710 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
716 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
723 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
729 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
736 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
738 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
740 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
742 rack->rc_always_pace = 0; in rack_remove_pacing()
743 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
744 rack->dgp_on = 0; in rack_remove_pacing()
745 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
746 rack->use_fixed_rate = 0; in rack_remove_pacing()
753 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
759 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
761 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
763 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
766 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
767 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
769 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
770 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
771 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
772 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
774 log.u_bbr.applimited = rsm->r_start; in rack_log_gpset()
775 log.u_bbr.delivered = rsm->r_end; in rack_log_gpset()
776 log.u_bbr.epoch = rsm->r_flags; in rack_log_gpset()
779 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
780 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
781 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
794 if (error || req->newptr == NULL) in sysctl_rack_clear()
917 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); in rack_init_sysctls()
922 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); in rack_init_sysctls()
967 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); in rack_init_sysctls()
972 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); in rack_init_sysctls()
997 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); in rack_init_sysctls()
1007 "Do we clear I/S counts on exiting probe-rtt"); in rack_init_sysctls()
1017 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); in rack_init_sysctls()
1191 "If we fall below this rate, dis-engage hw pacing?"); in rack_init_sysctls()
1233 "Rack timely Beta value 80 = .8 (scaled by 100)"); in rack_init_sysctls()
1332 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); in rack_init_sysctls()
1352 "Should we always send the oldest TLP and RACK-TLP"); in rack_init_sysctls()
1390 "When doing recovery -> rto -> recovery do we reset SSthresh?"); in rack_init_sysctls()
1425 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); in rack_init_sysctls()
1430 "Maximum RTO in microseconds -- should be at least as large as min_rto"); in rack_init_sysctls()
1452 "Does a cwnd just-return end the measurement window (app limited)"); in rack_init_sysctls()
1457 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); in rack_init_sysctls()
1514 "Should RACK use mbuf queuing for non-paced connections"); in rack_init_sysctls()
1553 "Do we ue a high beta (80 instead of 50)?"); in rack_init_sysctls()
1558 … "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); in rack_init_sysctls()
1642 "Highest move to non-move ratio seen"); in rack_init_sysctls()
1783 "Total number of times a sends returned enobuf for non-hdwr paced connections"); in rack_init_sysctls()
1980 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
1987 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
1988 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
1989 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
1990 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
1992 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
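rack_get_fixed_pacing_bw picks the operator-configured rate for the current phase: recovery first, then slow start while cwnd sits below ssthresh, else congestion avoidance. A rate in bytes per second becomes an inter-burst gap in the obvious way; a minimal sketch (pace_delay_usec is hypothetical, not this file's pacing-time code):

	static uint64_t
	pace_delay_usec(uint64_t len, uint64_t rate)	/* rate in bytes/sec */
	{
		if (rate == 0)
			return (0);	/* caller must fall back to unpaced */
		return ((len * (uint64_t)1000000) / rate);
	}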
2014 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2022 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2024 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
2049 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
2051 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
2052 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
2054 /* Use the last known rtt i.e. the rack-rtt */ in rack_log_hybrid_bw()
2055 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
2060 log.u_bbr.cur_del_rate = cur->deadline; in rack_log_hybrid_bw()
2063 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2064 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2065 log.u_bbr.flex6 = cur->start_seq; in rack_log_hybrid_bw()
2066 log.u_bbr.pkts_out = cur->end_seq; in rack_log_hybrid_bw()
2069 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2070 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2072 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_bw()
2073 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2076 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); in rack_log_hybrid_bw()
2077 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2079 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_bw()
2080 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2082 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
2086 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
2087 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
2088 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; in rack_log_hybrid_bw()
2100 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
2102 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
2104 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
2106 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
2108 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
2109 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
2110 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
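A convention worth noting in these log routines: 64-bit values (cur->start, cur->end, first_send, localtime) are split across two 32-bit BBlog fields, low half and high half. A reader reassembles them as the mirror image; in sketch form:

	/* split, as the log writers above do ... */
	uint32_t lo = (uint32_t)(val & 0x00000000ffffffff);
	uint32_t hi = (uint32_t)((val >> 32) & 0x00000000ffffffff);
	/* ... and rejoin on the consumer side */
	uint64_t back = ((uint64_t)hi << 32) | (uint64_t)lo;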
2122 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
2131 log.u_bbr.delRate = cur->sent_at_fs; in rack_log_hybrid_sends()
2133 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { in rack_log_hybrid_sends()
2137 * current byte counts are correct. in rack_log_hybrid_sends()
2139 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2140 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2146 log.u_bbr.cur_del_rate = cur->sent_at_ls; in rack_log_hybrid_sends()
2147 log.u_bbr.rttProp = cur->rxt_at_ls; in rack_log_hybrid_sends()
2149 log.u_bbr.bw_inuse = cur->rxt_at_fs; in rack_log_hybrid_sends()
2151 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2154 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_sends()
2155 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2157 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_sends()
2158 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2161 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_sends()
2162 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2164 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); in rack_log_hybrid_sends()
2165 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2167 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid_sends()
2168 log.u_bbr.lost = cur->playout_ms; in rack_log_hybrid_sends()
2169 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid_sends()
2172 * where a false retransmit occurred so first_send <-> lastsend may in rack_log_hybrid_sends()
2175 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2176 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2184 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2186 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2188 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2190 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2193 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2194 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2195 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2208 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2209 if (rack->r_is_v6){ in rack_compensate_for_linerate()
2220 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
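rack_compensate_for_linerate inflates a goal payload bandwidth by the per-segment header overhead (TCP header, v4 or v6 IP header, 14-byte Ethernet header) so that hardware pacing at wire speed still delivers the goal rate. A hedged sketch of the arithmetic, assuming bw is payload bytes per second:

	/* wire bytes carried per u_segsiz payload bytes */
	wire_per_seg = u_segsiz + ether_overhead;
	/* scale the requested rate up by the same ratio */
	wire_bw = (bw * wire_per_seg) / u_segsiz;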
2235 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2238 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2239 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2247 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2250 if (timenow >= ent->deadline) { in rack_rate_cap_bw()
2252 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2254 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2258 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2261 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2263 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2272 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_rate_cap_bw()
2273 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2274 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2277 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2279 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2288 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2289 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2292 if (lengone < (ent->end - ent->start)) in rack_rate_cap_bw()
2293 lenleft = (ent->end - ent->start) - lengone; in rack_rate_cap_bw()
2296 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2298 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2304 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2306 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2316 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2317 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2319 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2320 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2323 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2324 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2325 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2326 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2328 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2331 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2332 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); in rack_rate_cap_bw()
2340 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2342 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2343 rack->rc_catch_up && in rack_rate_cap_bw()
2344 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2345 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2347 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2348 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2351 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2352 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2353 …rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2354 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2358 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2359 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
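Two distinct capping behaviors show up above. In hybrid catch-up mode the cap is recomputed from what remains of the request: the bytes still owed (lenleft) spread over the microseconds left before the deadline (timeleft), with the cap cleared once the deadline passes or the request is finished. Outside catch-up, any bw above a nonzero bw_rate_cap is simply clamped to it. The catch-up rate, in sketch form:

	/* bytes/sec needed to finish lenleft bytes by the deadline */
	calcbw = (lenleft * (uint64_t)1000000) / timeleft;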
2370 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2384 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2390 * No goodput bw but a long-term b/w does exist in rack_get_gp_est()
2396 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2397 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2400 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2408 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2409 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2416 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2418 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2421 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
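Two estimate paths appear above: once num_measurements reaches RACK_REQ_AVG, gp_bw is maintained directly as the estimate; before that it holds a running sum, hence the division by the measurement count. With no measurements and no configured init_rate at all, the fallback is the initial window paced over one srtt. A hedged sketch of that arithmetic (rack keeps t_srtt in microseconds):

	bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));	/* IW bytes */
	srtt = (uint64_t)rack->rc_tp->t_srtt;
	bw *= (uint64_t)1000000;	/* scale to bytes per second */
	bw /= srtt;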
2423 if (rack->dis_lt_bw) { in rack_get_gp_est()
2424 /* We are not using lt-bw */ in rack_get_gp_est()
2431 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2433 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
2465 if (rack->use_fixed_rate) { in rack_get_bw()
2476 if (rack->use_fixed_rate) { in rack_get_output_gain()
2478 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2479 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2480 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2481 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2484 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2485 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2488 } else if (rack->rack_no_prr && in rack_get_output_gain()
2489 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2494 * Here we may have a non-retransmit but we in rack_get_output_gain()
2498 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2503 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2504 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2506 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
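The gain returned here is a percentage of the goodput estimate: 250 in slow start and 200 in congestion avoidance by the defaults above, or the probe-rtt/recovery multipliers when those states apply. Applying it is a straight scale; in sketch form:

	/* e.g. gain = 250 paces at 2.5x the goodput b/w estimate */
	pace_bw = (gp_est * (uint64_t)gain) / 100;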
2514 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. in rack_log_dsack_event()
2521 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2526 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2528 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2530 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2531 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2532 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2536 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2539 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2540 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2541 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2542 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2543 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2554 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2563 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2564 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2565 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2566 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2567 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2580 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2581 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2583 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2584 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2585 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2586 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2587 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2590 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2591 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2592 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2593 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2594 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2595 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2616 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2618 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2620 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2625 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2635 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2636 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2637 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2638 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2639 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2647 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
2665 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2672 * 1 - We are retransmitting and this tells the reason. in rack_log_retran_reason()
2673 * 2 - We are clearing a dup-ack count. in rack_log_retran_reason()
2674 * 3 - We are incrementing a dup-ack count. in rack_log_retran_reason()
2684 log.u_bbr.flex3 = rsm->r_flags; in rack_log_retran_reason()
2685 log.u_bbr.flex4 = rsm->r_dupack; in rack_log_retran_reason()
2686 log.u_bbr.flex5 = rsm->r_start; in rack_log_retran_reason()
2687 log.u_bbr.flex6 = rsm->r_end; in rack_log_retran_reason()
2689 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2691 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2692 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2693 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2694 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2695 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2696 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2697 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2698 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2699 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2708 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2713 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2715 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2717 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2718 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2719 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2721 if (rack->rack_no_prr) in rack_log_to_start()
2724 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2725 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2727 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2728 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2729 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2730 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2731 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2732 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2733 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2735 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2736 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2738 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2739 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2740 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2741 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2742 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2751 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2756 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2758 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2759 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2763 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; in rack_log_to_event()
2764 if (rack->rack_no_prr) in rack_log_to_event()
2767 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2769 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2770 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2771 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2772 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2773 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2775 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2776 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2777 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2778 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2791 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2797 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2803 log.u_bbr.flex1 = prev->r_start; in rack_log_map_chg()
2804 log.u_bbr.flex2 = prev->r_end; in rack_log_map_chg()
2808 log.u_bbr.flex3 = rsm->r_start; in rack_log_map_chg()
2809 log.u_bbr.flex4 = rsm->r_end; in rack_log_map_chg()
2813 log.u_bbr.flex5 = next->r_start; in rack_log_map_chg()
2814 log.u_bbr.flex6 = next->r_end; in rack_log_map_chg()
2820 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2821 if (rack->rack_no_prr) in rack_log_map_chg()
2824 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2825 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2827 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2828 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2829 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2830 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2844 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2847 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2848 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2849 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2850 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2852 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2853 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2855 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2856 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2859 log.u_bbr.pkt_epoch = rsm->r_start; in rack_log_rtt_upd()
2860 log.u_bbr.lost = rsm->r_end; in rack_log_rtt_upd()
2861 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; in rack_log_rtt_upd()
2863 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; in rack_log_rtt_upd()
2866 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2872 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2874 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2876 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2878 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2880 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2882 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2884 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2886 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2887 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2888 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2889 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2890 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2891 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2892 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2895 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); in rack_log_rtt_upd()
2897 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2898 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2914 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2921 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2924 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2925 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2926 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2927 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2934 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2936 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2940 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2943 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2944 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2946 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2947 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2948 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2949 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2958 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2970 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2972 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2973 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2974 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2975 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
2985 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
2997 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
2999 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
3000 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
3001 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
3002 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
3012 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
3017 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
3020 log.u_bbr.flex3 = tp->t_maxunacktime; in rack_log_progress_event()
3021 log.u_bbr.flex4 = tp->t_acktime; in rack_log_progress_event()
3024 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
3025 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
3026 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
3027 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
3028 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
3030 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
3032 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
3033 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
3042 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
3046 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
3048 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
3051 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
3052 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
3054 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
3055 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
3057 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
3058 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
3059 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
3060 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
3061 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
3062 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
3063 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
3072 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
3080 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
3081 if (rack->rack_no_prr) in rack_log_doseg_done()
3084 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
3086 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
3087 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
3089 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
3091 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
3092 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
3093 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
3095 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
3096 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
3098 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
3099 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
3100 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
3101 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
3102 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
3104 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
3105 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
3106 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
3107 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
3108 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
3109 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
3110 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
3111 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
3120 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
3125 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
3126 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
3129 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
3133 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
3134 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3135 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3136 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3137 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3138 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, in rack_log_type_pacing_sizes()
3139 &tptosocket(tp)->so_snd, in rack_log_type_pacing_sizes()
3148 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3153 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3155 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3157 if (rack->rack_no_prr) in rack_log_type_just_return()
3160 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3162 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3165 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3166 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3167 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3168 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3169 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3170 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3172 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3173 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3174 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3175 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3185 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3189 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3191 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3194 if (rack->rack_no_prr) in rack_log_to_cancel()
3197 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3198 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3201 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3203 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3204 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3205 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3206 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3207 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3209 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3210 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3211 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3212 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3225 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3243 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3244 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3245 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3254 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3261 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3262 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3264 if (rack->rack_no_prr) in rack_log_to_processing()
3267 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3268 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3269 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3270 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3272 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3273 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3274 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3275 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3284 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3289 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3290 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3291 if (rack->rack_no_prr) in rack_log_to_prr()
3294 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3295 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3296 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3297 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3302 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3303 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3305 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3306 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3307 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3308 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3386 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3387 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3388 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3390 rack->rc_free_cnt--; in rack_alloc()
3400 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3408 if (rack->rc_free_cnt) { in rack_alloc()
3410 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3411 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3412 rack->rc_free_cnt--; in rack_alloc()
3422 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3424 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3425 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3441 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3442 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3444 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3445 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3455 rsm->r_limit_type = limit_type; in rack_alloc_limit()
3456 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3470 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3471 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3472 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3473 rack->rc_free_cnt--; in rack_free_trim()
3474 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
3482 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_free()
3483 if (rack->r_ctl.rc_app_limited_cnt > 0) { in rack_free()
3484 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3487 if (rsm->r_limit_type) { in rack_free()
3489 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3491 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3492 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; in rack_free()
3493 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3494 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3495 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3497 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3499 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3500 rack->r_ctl.rc_resend = NULL; in rack_free()
3501 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3502 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3503 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3504 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3505 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3506 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3509 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3512 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3513 rack->rc_free_cnt++; in rack_free()
3522 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3524 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3561 srtt = (uint64_t)tp->t_srtt; in rack_get_measure_window()
3603 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_enough_for_measurement()
3607 if ((tp->snd_max == tp->snd_una) || in rack_enough_for_measurement()
3608 (th_ack == tp->snd_max)){ in rack_enough_for_measurement()
3622 if (SEQ_GEQ(th_ack, tp->gput_ack)) { in rack_enough_for_measurement()
3632 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3633 if (SEQ_LT(th_ack, tp->gput_ack) && in rack_enough_for_measurement()
3634 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3638 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3639 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3648 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3649 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3650 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
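Putting the fragments of rack_enough_for_measurement together: a measurement is usable if everything sent has been acked, if th_ack reached gput_ack with at least max(init window, MIN_GP_WIN * segsiz) of data covered, if the first app-limited point was crossed, or, failing those, if rack_min_srtts worth of gp_srtt has elapsed outside recovery. That last test, restated as a sketch:

	srtts = rack->r_ctl.rc_gp_srtt * rack_min_srtts;
	tim = now_usec - tp->gput_ts;	/* time since measurement start */
	ok = (tim >= srtts) && (IN_RECOVERY(tp->t_flags) == 0);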
3669 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3675 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3677 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3679 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3681 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3682 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3683 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3684 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3685 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3686 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3693 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3695 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3696 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3697 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3698 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3704 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3705 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3706 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3707 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3708 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3795 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3797 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3799 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3800 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3802 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3803 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3810 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3811 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3813 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3814 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3825 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3832 * to a new-reno flow. in rack_increase_bw_mul()
3837 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3838 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3845 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3850 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3852 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3853 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3855 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3857 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3861 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3862 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3863 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3864 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3865 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3867 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3868 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3870 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3872 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3876 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3877 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3878 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3879 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3880 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3882 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3884 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3886 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3889 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3890 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3891 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3892 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3893 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3897 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3899 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3900 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3902 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3904 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3905 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
3914 /*- in rack_get_decrease()
3916 * new_per = curper * (1 - B * norm_grad) in rack_get_decrease()
3919 * rtt_diff = input var current rtt-diff in rack_get_decrease()
3932 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_get_decrease()
3935 * reduce_by = (1000000 - inverse); in rack_get_decrease()
3941 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_get_decrease()
3944 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3949 perf = curper - 1; in rack_get_decrease()
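A made-up worked instance of the fixed-point math above: with curper = 200, B = rack_gp_decrease_per = 80 (i.e. .8), rtt_diff = 10000 usec, and a filtered min rtt of 40000 usec, norm_grad = .25 and new_per = 200 * (1 - .8 * .25) = 160. The perf = curper - 1 fallback guarantees at least some reduction if rounding in the scaled arithmetic would otherwise yield a value at or above the current percentage.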
3959 * result = curper * (1 - (B * (1 - highrttthresh / gp_srtt))) in rack_decrease_highrtt()
3968 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3970 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_decrease_highrtt()
3971 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - in rack_decrease_highrtt()
3974 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
3997 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
3999 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
4001 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
4002 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
4008 rtt_diff *= -1; in rack_decrease_bw_mul()
4011 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
4014 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
4015 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4021 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4022 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
4023 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
4024 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
4026 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4029 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
4030 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4033 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
4036 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
4037 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4043 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4044 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
4045 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
4046 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
4049 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4058 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4063 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
4064 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4066 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
4069 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
4070 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4076 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4077 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
4078 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
4079 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
4081 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4091 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4096 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
4097 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4100 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
4101 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
4103 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
4104 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
/* rack_log_rtt_shrinks() */
    if (tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
        log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
        log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
        log.u_bbr.flex6 = rack->rc_highly_buffered;
        log.u_bbr.flex6 |= rack->forced_ack;
        log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
        log.u_bbr.flex6 |= rack->in_probe_rtt;
        log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
        log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
        log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
        log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
        log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
        log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
        log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
        log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
        log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
        log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
        log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
        log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
        log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
        TCP_LOG_EVENTP(rack->rc_tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
            0, &log, false, &rack->r_ctl.act_rcv_time);
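    /*
     * rack_set_prtt_target() sizes the probe-rtt flight target from
     * the bandwidth-delay product: bwdp rounded up to a pacing
     * segment, floored at rack_timely_min_segs segments. E.g. with
     * segsiz = 1448 and bwdp = 10000 bytes the target becomes
     * roundup(10000, 1448) = 10136 bytes (7 segments).
     */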
    rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
    if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
        rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
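    /*
     * rack_enter_probertt() (below) stamps rc_time_probertt_entered,
     * finishes any goodput measurement in progress, sets in_probe_rtt
     * and measure_saw_probe_rtt, and derives the flight target from
     * the filtered gp min-rtt (falling back to rc_gp_srtt).
     */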
     * counts from there for how long between. But it is
    rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
    if (rack->rc_gp_dyn_mul == 0)
    if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
    if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
        SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
        rack_do_goodput_measurement(rack->rc_tp, rack,
            rack->rc_tp->snd_una, __LINE__,
    rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
    rack->r_ctl.rc_time_probertt_entered = us_cts;
    segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
        rack->r_ctl.rc_pace_min_segs);
    rack->in_probe_rtt = 1;
    rack->measure_saw_probe_rtt = 1;
    rack->r_ctl.rc_time_probertt_starts = 0;
    rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
        rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
        rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
    rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
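    /*
     * rack_exit_probertt() (below) undoes the above: it closes or
     * discards the in-flight goodput measurement, marks the tail rsm
     * RACK_APP_LIMITED so probe-rtt data does not pollute the b/w
     * estimate, resets the timely counters, and rebuilds cwnd from
     * rc_target_probertt_flight with ssthresh pinned one byte below.
     */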
    segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
        rack->r_ctl.rc_pace_min_segs);
    rack->in_probe_rtt = 0;
    if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
        SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
        rack_do_goodput_measurement(rack->rc_tp, rack,
            rack->rc_tp->snd_una, __LINE__,
    } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
         * probe-rtt. We probably are not interested in
        rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
     * We need to mark these as app-limited so we
    rsm = tqhash_max(rack->r_ctl.tqh);
    if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
        if (rack->r_ctl.rc_app_limited_cnt == 0)
            rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
            if (rack->r_ctl.rc_end_appl)
                rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
            rack->r_ctl.rc_end_appl = rsm;
        rsm->r_flags |= RACK_APP_LIMITED;
        rack->r_ctl.rc_app_limited_cnt++;
    rack->rc_gp_incr = 0;
    rack->rc_gp_bwred = 0;
    rack->rc_gp_timely_inc_cnt = 0;
    rack->rc_gp_timely_dec_cnt = 0;
    if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
        rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
        rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
    if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
        rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
        rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
    rack->r_ctl.rc_rtt_diff = 0;
    rack->rc_tp->t_bytes_acked = 0;
    rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
        get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
        rack->r_ctl.rc_gp_srtt);
        rack->r_ctl.rc_entry_gp_rtt);
    sum = rack->r_ctl.rc_entry_gp_rtt;
    sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
        setval = rack->r_ctl.rc_entry_gp_rtt;
        setval = rack->r_ctl.rc_gp_srtt;
        if (setval > rack->r_ctl.rc_entry_gp_rtt)
            setval = rack->r_ctl.rc_entry_gp_rtt;
        setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
    ebdp = rack->r_ctl.rc_target_probertt_flight;
        setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
        setto = rack->r_ctl.rc_target_probertt_flight;
    rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
    if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
        rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
    rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
        get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
    rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
    rack->r_ctl.rc_time_probertt_entered = us_cts;
    rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
    rack->r_ctl.rc_time_of_last_probertt = us_cts;
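    /*
     * rack_check_probe_rtt() runs on the ack path. While in probe-rtt
     * it enforces the rack_probe_rtt_safety_val cap, walks
     * rack_per_of_gp_probertt down toward rack_per_of_gp_lowthresh as
     * srtt periods elapse, and arms rc_time_probertt_starts once the
     * flight drains to target; outside probe-rtt it re-enters after
     * rack_time_between_probertt has passed.
     */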
    /* Check in on probe-rtt */
    if (rack->rc_gp_filled == 0) {
        /* We do not do p-rtt unless we have gp measurements */
    if (rack->in_probe_rtt) {
        if (rack->r_ctl.rc_went_idle_time &&
            ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
            TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
            ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
                get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
        endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
        if (rack->rc_highly_buffered)
            endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
        must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
        if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) ...
            if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
                calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
            calc /= max(rack->r_ctl.rc_gp_srtt, 1);
                rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
                rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
                if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
                    rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
        if (rack->r_ctl.rc_time_probertt_starts == 0) {
                rack->rc_highly_buffered) ||
                (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
                rack->r_ctl.rc_target_probertt_flight)) {
                    get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
                rack->r_ctl.rc_time_probertt_starts = us_cts;
                if (rack->r_ctl.rc_time_probertt_starts == 0)
                    rack->r_ctl.rc_time_probertt_starts = 1;
                rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
        no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
        endtime += rack->r_ctl.rc_time_probertt_starts;
    } else if ((rack->rc_skip_timely == 0) &&
        (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) &&
        ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) {
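    /*
     * rack_update_multiplier() compares the new goodput sample
     * (cur_bw) to a band around the previous one: up_bnd adds a
     * rack_gp_per_bw_mul_up share of last_gp_comp_bw and low_bnd
     * subtracts a rack_gp_per_bw_mul_down share (the scaling divide is
     * not shown in this fragment). Samples below low_bnd count as a
     * drop, above up_bnd as a rise, each limited to
     * rack_timely_max_push_drop / rack_timely_max_push_rise
     * consecutive moves.
     */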
    if ((rack->rc_gp_dyn_mul == 0) ||
        (rack->use_fixed_rate) ||
        (rack->in_probe_rtt) ||
        (rack->rc_always_pace == 0)) {
    losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
    up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
    up_bnd += rack->r_ctl.last_gp_comp_bw;
    subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
    low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
    if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
        if (rack->r_ctl.rc_no_push_at_mrtt > 1)
        rack->r_ctl.last_gp_comp_bw = cur_bw;
        if (rack->rc_gp_bwred == 0) {
            rack->rc_gp_bwred = 1;
            rack->rc_gp_timely_dec_cnt = 0;
        if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) {
            if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
                (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
            rack->rc_gp_timely_dec_cnt++;
            /* We are not incrementing really no-count */
            rack->rc_gp_incr = 0;
            rack->rc_gp_timely_inc_cnt = 0;
        rack->r_ctl.last_gp_comp_bw = cur_bw;
        if (rack->rc_gp_saw_ss &&
            rack->r_ctl.rack_per_upper_bound_ss &&
            (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) {
        if (rack->rc_gp_saw_ca &&
            rack->r_ctl.rack_per_upper_bound_ca &&
            (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) {
        rack->rc_gp_bwred = 0;
        rack->rc_gp_timely_dec_cnt = 0;
        if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
        rack->rc_gp_incr = 0;
        rack->rc_gp_timely_inc_cnt = 0;
        if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
            rack->rc_gp_timely_dec_cnt++;
            /* We are not incrementing really no-count */
            rack->rc_gp_incr = 0;
            rack->rc_gp_timely_inc_cnt = 0;
        rack->rc_gp_bwred = 0;
        rack->rc_gp_timely_dec_cnt = 0;
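    /*
     * rack_make_timely_judgement() classifies the sample rtt against
     * the filtered gp min-rtt: a vote to decrease when
     * rtt >= min_rtt * rack_gp_rtt_maxmul, a vote to increase when
     * rtt <= min_rtt + (min_rtt * rack_gp_rtt_minmul) / 100. E.g. with
     * min_rtt = 10ms, maxmul = 3 and minmul = 25, anything at or above
     * 30ms votes decrease and anything at or below 12.5ms votes
     * increase.
     */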
    if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
        log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
            get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
    } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
               ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
        log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
            ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
            get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
            get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
            get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
/* rack_in_gp_window() */
    if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
        SEQ_LEQ(rsm->r_end, tp->gput_ack)) {
        /*
         * |----------------|
         *   |-----| <or>
         *         |----|
         *           <or>   |---|
         */
    } else if (SEQ_LT(rsm->r_start, tp->gput_seq) &&
               SEQ_GT(rsm->r_end, tp->gput_seq)) {
        /*
         *     |--------------|
         * |-------->|
         */
    } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
               SEQ_LT(rsm->r_start, tp->gput_ack) &&
               SEQ_GEQ(rsm->r_end, tp->gput_ack)) {
        /*
         * |--------------|
         *         |-------->|
         */
/* rack_mark_in_gp_win() */
    if ((tp->t_flags & TF_GPUTINPROG) == 0)
        rsm->r_flags |= RACK_IN_GP_WIN;
        rsm->r_flags &= ~RACK_IN_GP_WIN;

/* rack_clear_gp_marks() */
    rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
        rsm = tqhash_min(rack->r_ctl.tqh);
    while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))) {
        rsm->r_flags &= ~RACK_IN_GP_WIN;
        rsm = tqhash_next(rack->r_ctl.tqh, rsm);
/* rack_tend_gp_marks() */
    if (tp->snd_una == tp->snd_max) {
    if (SEQ_GT(tp->gput_seq, tp->snd_una)) {
        rsm = tqhash_min(rack->r_ctl.tqh);
            if (SEQ_GEQ(rsm->r_end, tp->gput_seq))
            rsm = tqhash_next(rack->r_ctl.tqh, rsm);
        rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
     * *before* we started our measurement. The rsm, if non-null
        rsm = tqhash_next(rack->r_ctl.tqh, rsm);
        if (SEQ_GT(rsm->r_end, tp->gput_ack))
        rsm = tqhash_next(rack->r_ctl.tqh, rsm);
/* rack_log_gp_calc() */
    if (tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.delRate = rack->r_ctl.gp_bw;
        TCP_LOG_EVENTP(rack->rc_tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
            0, &log, false, &rack->r_ctl.act_rcv_time);
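    /*
     * rack_do_goodput_measurement() closes out a sample: tim is the
     * ack-clock interval since gput_ts, stim the send-clock interval,
     * and bytes_ps the bytes acked over it, capped at last_max_bw =
     * rwnd * HPTS_USEC_IN_SEC / lowest-rtt so a stretch ack cannot
     * manufacture impossible bandwidth. The first sample seeds gp_bw
     * outright; samples up to RACK_REQ_AVG build a plain average.
     */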
    us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
    segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
    if (TSTMP_GEQ(us_cts, tp->gput_ts))
        tim = us_cts - tp->gput_ts;
    if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
        stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
    rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL);
    if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
    rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
    rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
    rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
    if (SEQ_LT(th_ack, tp->gput_seq)) {
    bytes = (th_ack - tp->gput_seq);
    if (rack->rc_gp_filled == 0) {
         * IW - 2MSS.
        reqbytes -= (2 * segsiz);
    rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
    if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
            rack->r_ctl.rc_app_limited_cnt,
    new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
    if (rack->rc_gp_filled == 0) {
        rack->r_ctl.rc_rtt_diff = new_rtt_diff;
        if (rack->measure_saw_probe_rtt == 0) {
            rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
            rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
        rack->r_ctl.rc_gp_srtt,
        rack->r_ctl.rc_rtt_diff,
        rack->r_ctl.rc_prev_gp_srtt
    if (bytes_ps > rack->r_ctl.last_max_bw) {
            bytes_ps, rack->r_ctl.last_max_bw, 0,
        bytes_ps = rack->r_ctl.last_max_bw;
    if (rack->rc_gp_filled == 0) {
        rack->r_ctl.gp_bw = bytes_ps;
        rack->rc_gp_filled = 1;
        rack->r_ctl.num_measurements = 1;
        rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
            rack->r_ctl.rc_app_limited_cnt,
        if (tcp_in_hpts(rack->rc_tp) &&
            (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
             * where we transition from un-paced to paced.
            tcp_hpts_remove(rack->rc_tp);
            rack->r_ctl.rc_hpts_flags = 0;
            rack->r_ctl.rc_last_output_to = 0;
    } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
        rack->r_ctl.gp_bw += bytes_ps;
        addpart = rack->r_ctl.num_measurements;
        rack->r_ctl.num_measurements++;
        if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
            rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
        if (rack->r_ctl.num_measurements < 0xff) {
            rack->r_ctl.num_measurements++;
        srtt = (uint64_t)tp->t_srtt;
        if (rack->r_ctl.rc_rack_min_rtt)
            srtt = rack->r_ctl.rc_rack_min_rtt;
         * and non-dynamic... but considering lots of folks
        if (rack->rc_gp_dyn_mul == 0) {
            subpart = rack->r_ctl.gp_bw * utim;
            if (subpart < (rack->r_ctl.gp_bw / 2)) {
                subpart = rack->r_ctl.gp_bw / 2;
            resid_bw = rack->r_ctl.gp_bw - subpart;
            rack->r_ctl.gp_bw = resid_bw + addpart;
            subpart = rack->r_ctl.gp_bw * utim;
            subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
            if ((rack->measure_saw_probe_rtt == 0) ||
                (bytes_ps > rack->r_ctl.gp_bw)) {
                 * For probe-rtt we only add it in
                resid_bw = rack->r_ctl.gp_bw - subpart;
                rack->r_ctl.gp_bw = resid_bw + addpart;
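    /*
     * The steady-state update above behaves like a time-weighted
     * moving average: the old gp_bw gives up a share proportional to
     * utim (the measured interval) relative to srtt, with
     * gp_bw / rack_wma_divisor as a fallback share, and the new
     * sample fills that share back in. Probe-rtt samples are only
     * blended in when they would raise the estimate.
     */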
     * or first-slowstart that ensues. If we ever needed to watch
    if ((rack->rc_initial_ss_comp == 0) &&
        (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) {
        if (tcp_bblogging_on(rack->rc_tp)) {
            log.u_bbr.flex1 = rack->r_ctl.current_round;
            log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
            log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest;
        if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) ||
            (rack->r_ctl.last_gpest == 0)) {
            rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round;
            rack->r_ctl.last_gpest = rack->r_ctl.gp_bw;
        } else if (gp_est >= rack->r_ctl.last_gpest) {
            gp_est /= rack->r_ctl.last_gpest;
            if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) {
                if (tcp_bblogging_on(rack->rc_tp)) {
                    log.u_bbr.flex1 = rack->r_ctl.current_round;
                    log.u_bbr.flex3 = rack->r_ctl.gp_gain_req;
                    log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest;
                rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round;
                if (rack->r_ctl.use_gp_not_last == 1)
                    rack->r_ctl.last_gpest = rack->r_ctl.gp_bw;
                    rack->r_ctl.last_gpest = bytes_ps;
    if ((rack->gp_ready == 0) &&
        (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
        rack->gp_ready = 1;
        if (rack->dgp_on ||
            rack->rack_hibeta)
        if (rack->defer_options)
    /* We do not update any multipliers if we are in or have seen a probe-rtt */
    if ((rack->measure_saw_probe_rtt == 0) &&
        rack->rc_gp_rtt_set) {
        if (rack->rc_skip_timely == 0) {
                rack->r_ctl.rc_gp_srtt,
                rack->r_ctl.rc_rtt_diff);
        rack->r_ctl.gp_bw, /* delRate */
    rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
    rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
    rack->rc_gp_rtt_set = 0;
    rack->rc_gp_saw_rec = 0;
    rack->rc_gp_saw_ca = 0;
    rack->rc_gp_saw_ss = 0;
    rack->rc_dragged_bottom = 0;
    stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
    if (tp->t_stats_gput_prev > 0)
        stats_voi_update_abs_s32(tp->t_stats,
            ((gput - tp->t_stats_gput_prev) * 100) /
            tp->t_stats_gput_prev);
    tp->t_stats_gput_prev = gput;
    tp->t_flags &= ~TF_GPUTINPROG;
     * We don't do the other case i.e. non-applimited here since
    if (rack->r_ctl.rc_first_appl &&
        TCPS_HAVEESTABLISHED(tp->t_state) &&
        rack->r_ctl.rc_app_limited_cnt &&
        (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
        ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
        rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
        rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
        tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
        rack->app_limited_needs_set = 0;
        tp->gput_seq = th_ack;
        if (rack->in_probe_rtt)
            rack->measure_saw_probe_rtt = 1;
        else if ((rack->measure_saw_probe_rtt) &&
                 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
            rack->measure_saw_probe_rtt = 0;
        if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
            tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
            tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
        if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
            tp->t_flags &= ~TF_GPUTINPROG;
            rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
        if (tp->t_state >= TCPS_FIN_WAIT_1) {
        if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
        tp->t_flags |= TF_GPUTINPROG;
         * Now we need to find the timestamp of the send at tp->gput_seq
        rack->r_ctl.rc_gp_cumack_ts = 0;
        rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
            /* Ok send-based limit is set */
            if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
                tp->gput_seq = rsm->r_start;
            if (rsm->r_flags & RACK_ACKED) {
                tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
                tp->gput_seq = rsm->r_end;
                nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
                    rack->app_limited_needs_set = 1;
                rack->app_limited_needs_set = 1;
            rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
             * send-limit set the current time, which
             * basically disables the send-limit.
            rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
            tp->gput_seq,
            tp->gput_ack,
            tp->gput_ts,
            (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
        rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
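    /*
     * rack_ack_received() does the per-ack congestion bookkeeping:
     * clamp bytes_this_ack in early recovery, fold a fully-caught-up
     * ack into the long-term b/w accumulator (lt_bw_bytes /
     * lt_bw_time), maintain the ABC byte counter, then hand the ack to
     * the CC module with a local labc when post-recovery or a low
     * client buffer level calls for it.
     */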
    tp->t_ccv.nsegs = nsegs;
    acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una);
    if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
        max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
        if (tp->t_ccv.bytes_this_ack > max) {
            tp->t_ccv.bytes_this_ack = max;
    stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
        ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
    if ((th_ack == tp->snd_max) && rack->lt_bw_up) {
        rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq);
        rack->r_ctl.lt_seq = tp->snd_max;
        tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
        if (tmark >= rack->r_ctl.lt_timemark) {
            rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
        rack->r_ctl.lt_timemark = tmark;
        rack->lt_bw_up = 0;
    if ((tp->t_flags & TF_GPUTINPROG) &&
    if (tp->snd_cwnd <= tp->snd_wnd)
        tp->t_ccv.flags |= CCF_CWND_LIMITED;
        tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
    if (tp->snd_cwnd > tp->snd_ssthresh) {
        tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack,
        if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
            tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
            tp->t_ccv.flags |= CCF_ABC_SENTAWND;
        tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
        tp->t_bytes_acked = 0;
    prior_cwnd = tp->snd_cwnd;
    if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
        (rack_client_low_buf && rack->client_bufferlvl &&
        (rack->client_bufferlvl < rack_client_low_buf)))
        labc_to_use = rack->rc_labc;
    if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.flex2 = tp->t_ccv.flags;
        log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
        log.u_bbr.flex4 = tp->t_ccv.nsegs;
    if (CC_ALGO(tp)->ack_received != NULL) {
        tp->t_ccv.curack = th_ack;
        tp->t_ccv.labc = labc_to_use;
        tp->t_ccv.flags |= CCF_USE_LOCAL_ABC;
        CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
        lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
    if (rack->r_must_retran) {
        if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
            rack->r_ctl.rc_out_at_rto = 0;
            rack->r_must_retran = 0;
        } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
            if (acked <= rack->r_ctl.rc_out_at_rto) {
                rack->r_ctl.rc_out_at_rto -= acked;
                rack->r_ctl.rc_out_at_rto = 0;
    stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
    if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
        rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
    if ((rack->rc_initial_ss_comp == 0) &&
        (tp->snd_cwnd >= tp->snd_ssthresh)) {
        rack->rc_initial_ss_comp = 1;
/* tcp_rack_partialack() */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
        rack->rack_no_prr)
        rack->r_wanted_output = 1;
/* rack_exit_recovery() */
    EXIT_RECOVERY(tp->t_flags);
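    /*
     * rack_post_recovery() (below): after the CC module's
     * post_recovery hook runs, cwnd is pulled up to ssthresh per
     * RFC 6582 (option 2) if the hook left it lower, unused PRR send
     * credit may be added back (capped at rack_prr_addbackmax
     * segments), and an ssthresh saved across a recovery-to-RTO
     * transition is restored.
     */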
    orig_cwnd = tp->snd_cwnd;
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (CC_ALGO(tp)->post_recovery != NULL) {
        tp->t_ccv.curack = th_ack;
        CC_ALGO(tp)->post_recovery(&tp->t_ccv);
        if (tp->snd_cwnd < tp->snd_ssthresh) {
             * snd_ssthresh per RFC-6582 (option 2).
            tp->snd_cwnd = tp->snd_ssthresh;
    if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.flex2 = tp->t_ccv.flags;
        log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
        log.u_bbr.flex4 = tp->t_ccv.nsegs;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
    if ((rack->rack_no_prr == 0) &&
        (rack->no_prr_addback == 0) &&
        (rack->r_ctl.rc_prr_sndcnt > 0)) {
        if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
            tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
                rack->r_ctl.rc_prr_sndcnt);
        rack->r_ctl.rc_prr_sndcnt = 0;
    tp->snd_recover = tp->snd_una;
    if (rack->r_ctl.dsack_persist) {
        rack->r_ctl.dsack_persist--;
        if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
            rack->r_ctl.num_dsack = 0;
    if (rack->rto_from_rec == 1) {
        rack->rto_from_rec = 0;
        if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh)
            tp->snd_ssthresh = rack->r_ctl.rto_ssthresh;
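    /*
     * rack_cong_signal() is the central entry for congestion events:
     * on first entry it snapshots cwnd/ssthresh, seeds PRR
     * (rc_prr_sndcnt = one segment, recovery flight = snd_max -
     * snd_una) and arms CWR; the RTO leg falls back to ssthresh =
     * half of min(snd_wnd, cwnd_to_use) when the CC module has no
     * cong_signal hook; the bad-retransmit leg restores the saved
     * snd_cwnd/snd_ssthresh/snd_recover snapshot.
     */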
    stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
    if (IN_RECOVERY(tp->t_flags) == 0) {
        ssthresh_enter = tp->snd_ssthresh;
        cwnd_enter = tp->snd_cwnd;
    rack = (struct tcp_rack *)tp->t_fb_ptr;
        tp->t_flags &= ~TF_WASFRECOVERY;
        tp->t_flags &= ~TF_WASCRECOVERY;
        if (!IN_FASTRECOVERY(tp->t_flags)) {
            /* Check if this is the end of the initial Start-up i.e. initial slow-start */
            if (rack->rc_initial_ss_comp == 0) {
                rack->rc_initial_ss_comp = 1;
            rack->r_ctl.rc_prr_delivered = 0;
            rack->r_ctl.rc_prr_out = 0;
            rack->r_fast_output = 0;
            if (rack->rack_no_prr == 0) {
                rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
            rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
            tp->snd_recover = tp->snd_max;
            if (tp->t_flags2 & TF2_ECN_PERMIT)
                tp->t_flags2 |= TF2_ECN_SND_CWR;
        if (!IN_CONGRECOVERY(tp->t_flags) ||
            SEQ_GEQ(ack, tp->snd_recover)) {
            EXIT_CONGRECOVERY(tp->t_flags);
            rack->r_fast_output = 0;
            tp->snd_recover = tp->snd_max + 1;
            if (tp->t_flags2 & TF2_ECN_PERMIT)
                tp->t_flags2 |= TF2_ECN_SND_CWR;
        tp->t_dupacks = 0;
        tp->t_bytes_acked = 0;
        rack->r_fast_output = 0;
        if (IN_RECOVERY(tp->t_flags))
        orig_cwnd = tp->snd_cwnd;
        if (CC_ALGO(tp)->cong_signal == NULL) {
            tp->snd_ssthresh = max(2,
                min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
            tp->snd_cwnd = ctf_fixed_maxseg(tp);
        if (tp->t_flags2 & TF2_ECN_PERMIT)
            tp->t_flags2 |= TF2_ECN_SND_CWR;
        tp->snd_cwnd = tp->snd_cwnd_prev;
        tp->snd_ssthresh = tp->snd_ssthresh_prev;
        tp->snd_recover = tp->snd_recover_prev;
        if (tp->t_flags & TF_WASFRECOVERY) {
            ENTER_FASTRECOVERY(tp->t_flags);
            tp->t_flags &= ~TF_WASFRECOVERY;
        if (tp->t_flags & TF_WASCRECOVERY) {
            ENTER_CONGRECOVERY(tp->t_flags);
            tp->t_flags &= ~TF_WASCRECOVERY;
        tp->snd_nxt = tp->snd_max;
        tp->t_badrxtwin = 0;
    if ((CC_ALGO(tp)->cong_signal != NULL) &&
        tp->t_ccv.curack = ack;
        CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
    if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
        rack->r_ctl.dsack_byte_cnt = 0;
        rack->r_ctl.retran_during_recovery = 0;
        rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
        rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
        rack->r_ent_rec_ns = 1;
/* rack_cc_after_idle() */
    if (CC_ALGO(tp)->after_idle != NULL)
        CC_ALGO(tp)->after_idle(&tp->t_ccv);
    if (tp->snd_cwnd == 1)
        i_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
    if (tp->snd_cwnd < i_cwnd) {
        tp->snd_cwnd = i_cwnd;
 * - There is no delayed ack timer in progress.
 * - Our last ack wasn't a 0-sized window. We never want to delay
 *   the ack that opens up a 0-sized window.
 * - LRO wasn't used for this segment. We make sure by checking that the
 * - Delayed acks are enabled or this is a half-synchronized T/TCP
 */
    (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
     ((tp->t_flags & TF_DELACK) == 0) &&	 \
     (tlen <= tp->t_maxseg) &&		 \
     (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
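/*
 * Taken together, the predicate above delays the ack only for a
 * segment no larger than one MSS (tlen <= t_maxseg) when no delayed
 * ack is already pending and the last ack did not advertise a zero
 * window. E.g. a single 1448-byte in-order segment arriving on an
 * idle connection with delayed acks enabled would be held for the
 * delack timer rather than acked immediately.
 */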
/* rack_find_lowest_rsm() */
     * Walk the time-order transmitted list looking for an rsm that is
    TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
        if (rsm->r_flags & RACK_ACKED) {
/* rack_find_high_nonack() */
    TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
        if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
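    /*
     * rack_calc_thresh_rack() builds the reordering threshold: a base
     * of srtt + rc_pkt_delay (or srtt + srtt/4 in the standards-based
     * mode), plus srtt >> rc_reorder_shift while reordering is
     * considered live, plus num_dsack * (srtt >> 2) when DSACK
     * feedback is in use. E.g. srtt = 40ms, rc_pkt_delay = 0,
     * shift = 2 and one DSACK gives 40 + 10 + 10 = 60ms.
     */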
     * If reorder-fade is configured, then we track the last time we saw
     * re-ordering occur. If we reach the point where enough time has
     * Or if reorder-fade is 0, then once we see reordering we consider
     * In the end if lro is non-zero we add the extra time for
    if (rack->r_ctl.rc_reorder_ts) {
        if (rack->r_ctl.rc_reorder_fade) {
            if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
                lro = cts - rack->r_ctl.rc_reorder_ts;
            if (lro > rack->r_ctl.rc_reorder_fade) {
                rack->r_ctl.rc_reorder_ts = 0;
    if (rack->rc_rack_tmr_std_based == 0) {
        thresh = srtt + rack->r_ctl.rc_pkt_delay;
        /* Standards based pkt-delay is 1/4 srtt */
    if (lro && (rack->rc_rack_tmr_std_based == 0)) {
        if (rack->r_ctl.rc_reorder_shift)
            thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
    if (rack->rc_rack_use_dsack &&
        (rack->r_ctl.num_dsack > 0)) {
        thresh += rack->r_ctl.num_dsack * (srtt >> 2);
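    /*
     * rack_calc_thresh_tlp() picks the tail-loss-probe wait: srtt plus
     * srtt / rc_tlp_threshold when that knob is set, otherwise per the
     * configured mode -- TLP_USE_ID pads a last-segment probe with the
     * delayed-ack time, TLP_USE_TWO_ONE also adds the observed
     * inter-packet send gap, and TLP_USE_TWO_TWO appears to be the
     * related two-srtt variant -- with the result always capped at
     * t_rxtcur so a probe never outwaits the RTO.
     */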
    if (rack->r_ctl.rc_tlp_threshold)
        thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
    segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
    len = rsm->r_end - rsm->r_start;
    if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
        if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
             * Compensate for delayed-ack with the d-ack time.
    } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
         * possible inter-packet delay (if any).
        idx = rsm->r_rtr_cnt - 1;
        nidx = prsm->r_rtr_cnt - 1;
        if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
            inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
         * Possibly compensate for delayed-ack.
    } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
         * Compensate for delayed-ack with the d-ack time.
    if (thresh > tp->t_rxtcur) {
        thresh = tp->t_rxtcur;
/* rack_grab_rtt() */
    if (rack->rc_rack_rtt)
        return (rack->rc_rack_rtt);
    else if (tp->t_srtt == 0)
    return (tp->t_srtt);
/* rack_check_recovery_mode() */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (tqhash_empty(rack->r_ctl.tqh)) {
    rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
    if (rsm->r_flags & RACK_ACKED) {
    idx = rsm->r_rtr_cnt - 1;
    if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
    if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
    /* Ok if we reach here we are over-due and this guy can be sent */
    rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
/* rack_get_persists_timer_val() */
    t = (tp->t_srtt + (tp->t_rttvar << 2));
    RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
        rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
    rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
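    /*
     * rack_timer_start() chooses the next timer: persist when idle,
     * RXT when nothing qualifies for RACK recovery (discounted by the
     * time since the last (re)transmission and floored at rc_min_to),
     * a RACK early-retransmit timer when a SACK-passed or dup-acked
     * rsm exists, else a TLP -- skipped once rack_tlp_limit probes are
     * outstanding without new data.
     */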
    if (rack->t_timers_stopped) {
    if (rack->rc_in_persist) {
    rack->rc_on_min_to = 0;
    if ((tp->t_state < TCPS_ESTABLISHED) ||
        ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
    rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
    rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
     * recently that's the discount we want to use (now - timer time).
     * we want to use that (now - oldest-packet-last_transmit_time).
    idx = rsm->r_rtr_cnt - 1;
    if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
        tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
        tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
    time_since_sent = cts - tstmp_touse;
    if (SEQ_LT(tp->snd_una, tp->snd_max) ||
        sbavail(&tptosocket(tp)->so_snd)) {
        rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
        to = tp->t_rxtcur;
            to -= time_since_sent;
            to = rack->r_ctl.rc_min_to;
    if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
     * of the keep-init timeout.
    if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
        red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
        max_time -= red;
    if (rsm->r_flags & RACK_ACKED) {
    if ((rsm->r_flags & RACK_SACK_PASSED) ||
        (rsm->r_flags & RACK_RWND_COLLAPSED) ||
        (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
        if ((tp->t_flags & TF_SENTFIN) &&
            ((tp->snd_max - tp->snd_una) == 1) &&
            (rsm->r_flags & RACK_HAS_FIN)) {
        if ((rack->use_rack_rr == 0) &&
            (IN_FASTRECOVERY(tp->t_flags)) &&
            (rack->rack_no_prr == 0) &&
            (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
             * get to use the rack-cheat.
        idx = rsm->r_rtr_cnt - 1;
        exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
            to = exp - cts;
            if (to < rack->r_ctl.rc_min_to) {
                to = rack->r_ctl.rc_min_to;
                if (rack->r_rr_config == 3)
                    rack->rc_on_min_to = 1;
            to = rack->r_ctl.rc_min_to;
            if (rack->r_rr_config == 3)
                rack->rc_on_min_to = 1;
    if ((rack->rc_tlp_in_progress != 0) &&
        (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
    rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
    if (rsm->r_flags & RACK_HAS_FIN) {
    idx = rsm->r_rtr_cnt - 1;
    if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
        tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
        tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
    time_since_sent = cts - tstmp_touse;
    if (tp->t_srtt) {
        if ((rack->rc_srtt_measure_made == 0) &&
            (tp->t_srtt == 1)) {
        srtt_cur = tp->t_srtt;
        tp->t_srtt &&
        to = thresh - time_since_sent;
        to = rack->r_ctl.rc_min_to;
        rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
        (uint32_t)rsm->r_tim_lastsent[idx],
        rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
        rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
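    /*
     * rack_enter_persist() (below): on entering persist any running
     * goodput measurement is completed, the shared cwnd slot is marked
     * idle, the long-term b/w accumulator is closed out
     * (r_persist_lt_bw_off), rc_went_idle_time is stamped (forced
     * nonzero so 0 can mean "not idle"), and t_rxtcur is re-derived
     * from RACK_REXMTVAL with t_rxtshift reset.
     */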
    if (rack->rc_in_persist == 0) {
        if (tp->t_flags & TF_GPUTINPROG) {
            rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
        if (rack->r_ctl.rc_scw) {
            tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
            rack->rack_scwnd_is_idle = 1;
        rack->r_ctl.rc_went_idle_time = cts;
        if (rack->r_ctl.rc_went_idle_time == 0)
            rack->r_ctl.rc_went_idle_time = 1;
        if (rack->lt_bw_up) {
            rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq);
            rack->r_ctl.lt_seq = snd_una;
            tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
            if (tmark >= rack->r_ctl.lt_timemark) {
                rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
            rack->r_ctl.lt_timemark = tmark;
            rack->lt_bw_up = 0;
            rack->r_persist_lt_bw_off = 1;
        rack->r_ctl.persist_lost_ends = 0;
        rack->probe_not_answered = 0;
        rack->forced_ack = 0;
        tp->t_rxtshift = 0;
        RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
            rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
        rack->rc_in_persist = 1;
/* rack_exit_persist() */
    if (tcp_in_hpts(rack->rc_tp)) {
        tcp_hpts_remove(rack->rc_tp);
        rack->r_ctl.rc_hpts_flags = 0;
    if (rack->r_ctl.rc_scw) {
        tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
        rack->rack_scwnd_is_idle = 0;
    if (rack->rc_gp_dyn_mul &&
        (rack->use_fixed_rate == 0) &&
        (rack->rc_always_pace)) {
         * Do we count this as if a probe-rtt just
        time_idle = cts - rack->r_ctl.rc_went_idle_time;
        extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
            /* Yes, we count it as a probe-rtt. */
            if (rack->in_probe_rtt == 0) {
                rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
                rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
                rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
                rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
    if (rack->r_persist_lt_bw_off) {
        rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL);
        rack->lt_bw_up = 1;
        rack->r_persist_lt_bw_off = 0;
    rack->rc_in_persist = 0;
    rack->r_ctl.rc_went_idle_time = 0;
    tp->t_rxtshift = 0;
    RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
        rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
    rack->r_ctl.rc_agg_delayed = 0;
    rack->r_early = 0;
    rack->r_late = 0;
    rack->r_ctl.rc_agg_early = 0;
/* rack_log_hpts_diag() */
    if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.flex1 = diag->p_nxt_slot;
        log.u_bbr.flex2 = diag->p_cur_slot;
        log.u_bbr.flex3 = diag->slot_req;
        log.u_bbr.flex4 = diag->inp_hptsslot;
        log.u_bbr.flex5 = diag->slot_remaining;
        log.u_bbr.flex6 = diag->need_new_to;
        log.u_bbr.flex7 = diag->p_hpts_active;
        log.u_bbr.flex8 = diag->p_on_min_sleep;
        log.u_bbr.epoch = diag->have_slept;
        log.u_bbr.lt_epoch = diag->yet_to_sleep;
        log.u_bbr.pkts_out = diag->co_ret;
        log.u_bbr.applimited = diag->hpts_sleep_time;
        log.u_bbr.delivered = diag->p_prev_slot;
        log.u_bbr.inflight = diag->p_runningslot;
        log.u_bbr.bw_inuse = diag->wheel_slot;
        log.u_bbr.rttProp = diag->wheel_cts;
        log.u_bbr.delRate = diag->maxslots;
        log.u_bbr.cur_del_rate = diag->p_curtick;
        log.u_bbr.cur_del_rate |= diag->p_lasttick;
        TCP_LOG_EVENTP(rack->rc_tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
/* rack_log_wakeup() */
    if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
        log.u_bbr.flex1 = sb->sb_flags;
        log.u_bbr.flex3 = sb->sb_state;
        TCP_LOG_EVENTP(rack->rc_tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
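    /*
     * rack_start_hpts_timer() merges pacing and timers onto the hpts
     * wheel: banked earliness (rc_agg_early) stretches the pacing
     * slot, accumulated lateness (rc_agg_delayed) shrinks it --
     * bounded by max_reduction percent in the non-hpts-min case --
     * and a delayed-ack or keep-alive deadline is folded in when no
     * data is being paced. TF2_MBUF_QUEUE_READY and
     * TF2_DONT_SACK_QUEUE then gate which inbound events may wake the
     * connection before the slot expires.
     */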
/* in rack_start_hpts_timer() */
	if ((tp->t_state == TCPS_CLOSED) ||
	    (tp->t_state == TCPS_LISTEN)) {
	stopped = rack->rc_tmr_stopped;
	if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
		left = rack->r_ctl.rc_timer_exp - cts;
	rack->r_ctl.rc_timer_exp = 0;
	rack->r_ctl.rc_hpts_flags = 0;
	if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
		 * by an ack aka the rc_agg_early (non-paced mode).
		slot += rack->r_ctl.rc_agg_early;
		rack->r_early = 0;
		rack->r_ctl.rc_agg_early = 0;
	if ((rack->r_late) &&
	    ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) {
		if (rack->r_ctl.rc_agg_delayed >= slot) {
			rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
			rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
			slot -= rack->r_ctl.rc_agg_delayed;
			rack->r_ctl.rc_agg_delayed = 0;
			rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
		if (rack->r_ctl.rc_agg_delayed == 0)
			rack->r_late = 0;
	} else if (rack->r_late) {
		max_red = (slot * rack->r_ctl.max_reduction) / 100;
		if (max_red >= rack->r_ctl.rc_agg_delayed) {
			slot -= rack->r_ctl.rc_agg_delayed;
			rack->r_ctl.rc_agg_delayed = 0;
			slot -= max_red;
			rack->r_ctl.rc_agg_delayed -= max_red;
	if ((rack->r_use_hpts_min == 1) &&
	    (rack->dgp_on == 1)) {
	if (tp->t_flags & TF_DELACK) {
		rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
		rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
	 * wheel, we resort to a keep-alive timer if it's configured.
	if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    (tp->t_state <= TCPS_CLOSING)) {
		 * del-ack), we don't have segments being paced. So
		if (TCPS_HAVEESTABLISHED(tp->t_state)) {
			/* Get the established keep-alive time */
			 * Get the initial setup keep-alive time,
		rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
		if (rack->in_probe_rtt) {
			 * exit probe-rtt and initiate a keep-alive ack.
			 * This will get us out of probe-rtt and update
			 * our min-rtt.
	    (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
	 * keep-alive, delayed_ack we keep track of what was left
	 * Hack alert for now we can't time-out over 2,147,483
	rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
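	/*
	 * Illustration (not from the original source): timer deadlines here
	 * live in 32-bit microsecond fields, so the largest signed value is
	 * 2,147,483,647 usec, roughly 35.8 minutes.  A default established
	 * keep-alive of 2 hours would be 7,200,000,000 usec and cannot be
	 * represented, which is why hpts_timeout must be clamped before
	 * rc_timer_exp is computed as cts + hpts_timeout.
	 */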
	if ((rack->gp_ready == 0) &&
	    (rack->use_fixed_rate == 0) &&
	    (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
	 * TF2_MBUF_QUEUE_READY - This flag says that I am busy
	 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction
	tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
	rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
	rack->r_ctl.rc_last_output_to = us_cts + slot;
	tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
	if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) ||
	    (IN_RECOVERY(tp->t_flags))) {
		if (rack->r_rr_config != 3)
			tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
		else if (rack->rc_pace_dnd) {
			tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
	if (rack->rc_ack_can_sendout_data) {
		 * backout the changes (used for non-paced
		tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE |
	if ((rack->use_rack_rr) &&
	    (rack->r_rr_config < 2) &&
	 * t-o if the t-o does not cause a send.
	rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
	if (SEQ_GT(tp->snd_max, tp->snd_una)) {
		panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
	rack->rc_tmr_stopped = 0;
/* in rack_mark_lost() */
	TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) {
		if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) {
			/* Got up to all that were marked sack-passed */
		if ((nrsm->r_flags & RACK_WAS_LOST) == 0) {
			exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
			nrsm->r_flags |= RACK_WAS_LOST;
			rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start;
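	/*
	 * Illustration (hypothetical numbers, not from the original source):
	 * with a last (re)transmit time of 1,000,000 usec and a reordering
	 * threshold of thresh = 125,000 usec, exp = 1,125,000 usec.  A
	 * sack-passed segment is declared RACK_WAS_LOST once the current
	 * time passes exp, and its bytes are added to rc_considered_lost.
	 */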
/* in rack_timeout_rack() */
	 * retransmissions, if so we will enter fast-recovery. The output
	if (rack->r_state && (rack->r_state != tp->t_state))
	rack->rc_on_min_to = 0;
	rack->r_ctl.rc_resend = rsm;
	rack->r_timer_override = 1;
	if (rack->use_rack_rr) {
		 * over-ride pacing i.e. rrr takes precedence
		rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
/* in rack_adjust_orig_mlen() */
	if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
		KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
		    rsm->m,
		    (intmax_t)M_TRAILINGROOM(rsm->m),
		    rsm->orig_t_space,
		    rsm->orig_m_len,
		    rsm->m->m_len));
		rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
		rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
	if (rsm->m->m_len < rsm->orig_m_len) {
		KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
		    rsm->m, rsm->m->m_len,
		    rsm, rsm->orig_m_len,
		    rsm->soff));
		if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
			rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
			rsm->soff = 0;
		rsm->orig_m_len = rsm->m->m_len;
	} else if (rsm->m->m_len > rsm->orig_m_len) {
	    rsm, rsm->m);
/* in rack_setup_offset_for_rsm() */
	if (src_rsm->m &&
	    ((src_rsm->orig_m_len != src_rsm->m->m_len) ||
	     (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) {
	m = src_rsm->m;
	soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
	while (soff >= m->m_len) {
		soff -= m->m_len;
		m = m->m_next;
		src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
		    (src_rsm->r_start - rack->rc_tp->snd_una),
		    &src_rsm->soff);
		src_rsm->orig_m_len = src_rsm->m->m_len;
		src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m);
		rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
		    (rsm->r_start - rack->rc_tp->snd_una),
		    &rsm->soff);
		rsm->orig_m_len = rsm->m->m_len;
		rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
	rsm->m = m;
	rsm->soff = soff;
	rsm->orig_m_len = m->m_len;
	rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
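	/*
	 * Illustration (hypothetical numbers, not from the original source):
	 * if src_rsm covers sequence space [1000, 2448) starting at soff 100
	 * in an mbuf with m_len 1000, the new rsm that begins where src_rsm
	 * ends starts at soff = 100 + 1448 = 1548.  The while loop walks the
	 * chain: 1548 >= 1000, so soff becomes 548 in the next mbuf, which
	 * is where rsm->m and rsm->soff end up pointing.
	 */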
/* in rack_clone_rsm() */
	nrsm->r_start = start;
	nrsm->r_end = rsm->r_end;
	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
	nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt;
	nrsm->r_flags = rsm->r_flags;
	nrsm->r_dupack = rsm->r_dupack;
	nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
	nrsm->r_rtr_bytes = 0;
	nrsm->r_fas = rsm->r_fas;
	nrsm->r_bas = rsm->r_bas;
	tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start);
	nrsm->r_just_ret = rsm->r_just_ret;
	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
		nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
	if (nrsm->r_flags & RACK_HAS_SYN)
		nrsm->r_flags &= ~RACK_HAS_SYN;
	if (rsm->r_flags & RACK_HAS_FIN)
		rsm->r_flags &= ~RACK_HAS_FIN;
	if (rsm->r_flags & RACK_HAD_PUSH)
		rsm->r_flags &= ~RACK_HAD_PUSH;
	nrsm->r_hw_tls = rsm->r_hw_tls;
	KASSERT(((rsm->m != NULL) ||
	    (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
	    ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
	if (rsm->m)
/* in rack_merge_rsm() */
	rack_log_map_chg(rack->rc_tp, rack, NULL,
	    l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
	tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end);
	if (l_rsm->r_dupack < r_rsm->r_dupack)
		l_rsm->r_dupack = r_rsm->r_dupack;
	if (r_rsm->r_rtr_bytes)
		l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
	if (r_rsm->r_in_tmap) {
		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
		r_rsm->r_in_tmap = 0;
	if (r_rsm->r_flags & RACK_HAS_FIN)
		l_rsm->r_flags |= RACK_HAS_FIN;
	if (r_rsm->r_flags & RACK_TLP)
		l_rsm->r_flags |= RACK_TLP;
	if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
		l_rsm->r_flags |= RACK_RWND_COLLAPSED;
	if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
	    ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
		 * If both are app-limited then let the
		l_rsm->r_flags |= RACK_APP_LIMITED;
		r_rsm->r_flags &= ~RACK_APP_LIMITED;
		if (r_rsm == rack->r_ctl.rc_first_appl)
			rack->r_ctl.rc_first_appl = l_rsm;
	tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE);
	if (l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] <
	    r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) {
		l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)];
	if (l_rsm->r_ack_arrival < r_rsm->r_ack_arrival)
		l_rsm->r_ack_arrival = r_rsm->r_ack_arrival;
	if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
		r_rsm->r_limit_type = l_rsm->r_limit_type;
		l_rsm->r_limit_type = 0;
	l_rsm->r_flags |= RACK_MERGED;
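	/*
	 * Illustration (not from the original source): merging the right map
	 * entry into the left keeps the most pessimistic view of the combined
	 * range: the larger dupack count, the later last-send and ack-arrival
	 * times, and the union of flags such as RACK_HAS_FIN and RACK_TLP, so
	 * later loss and RTT decisions over [l_rsm->r_start, r_rsm->r_end)
	 * are no more aggressive than they would have been on the two halves.
	 */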
/* in rack_timeout_tlp() */
	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
		return (-ETIMEDOUT);	/* tcp_drop() */
	rack->r_ctl.retran_during_recovery = 0;
	rack->r_might_revert = 0;
	rack->r_ctl.dsack_byte_cnt = 0;
	if (rack->r_state && (rack->r_state != tp->t_state))
	avail = sbavail(&so->so_snd);
	out = tp->snd_max - tp->snd_una;
	if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
	if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
		rack->r_ctl.dsack_persist--;
		if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
			rack->r_ctl.num_dsack = 0;
	if ((tp->t_flags & TF_GPUTINPROG) &&
	    (rack->r_ctl.rc_tlp_cnt_out == 1)) {
		tp->t_flags &= ~TF_GPUTINPROG;
		rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
		    rack->r_ctl.rc_gp_srtt /*flex1*/,
		    tp->gput_seq,
	if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
	amm = avail - out;
	if ((amm + out) > tp->snd_wnd) {
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (rack->rack_no_prr == 0) {
			if (out + amm <= tp->snd_wnd) {
				rack->r_ctl.rc_prr_sndcnt = amm;
				rack->r_ctl.rc_tlp_new_data = amm;
		/* Set the send-new override */
		if (out + amm <= tp->snd_wnd)
			rack->r_ctl.rc_tlp_new_data = amm;
	rack->r_ctl.rc_tlpsend = NULL;
	 * Ok we need to arrange the last un-acked segment to be re-sent, or
	 * optionally the first un-acked segment.
	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
	rsm = tqhash_max(rack->r_ctl.tqh);
	if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
	if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una))
		rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1));
		rsm = tqhash_min(rack->r_ctl.tqh);
	if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
		    (rsm->r_end - ctf_fixed_maxseg(tp)));
		(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
		if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
		if (rsm->r_in_tmap) {
			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
			nrsm->r_in_tmap = 1;
	rack->r_ctl.rc_tlpsend = rsm;
	rack->r_timer_override = 1;
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
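	/*
	 * Illustration (not from the original source): a tail loss probe
	 * prefers to send new data when the send buffer and snd_wnd allow
	 * (rc_tlp_new_data), and otherwise re-sends the last (or, with
	 * rack_always_send_oldest, the first) unacked segment.  If that
	 * segment is larger than one MSS it is split first, so the probe is
	 * a single ctf_fixed_maxseg(tp)-sized transmission at the tail.
	 */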
/* in rack_timeout_delack() */
	tp->t_flags &= ~TF_DELACK;
	tp->t_flags |= TF_ACKNOW;
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
/* in rack_send_ack_challange() */
	t_template = tcpip_maketemplate(rack->rc_inp);
	if (rack->forced_ack == 0) {
		rack->forced_ack = 1;
		rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
		rack->probe_not_answered = 1;
	tcp_respond(rack->rc_tp, t_template->tt_ipgen,
	    &t_template->tt_t, (struct mbuf *)NULL,
	    rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0);
	/* This does send an ack so kill any D-ack timer */
	if (rack->rc_tp->t_flags & TF_DELACK)
		rack->rc_tp->t_flags &= ~TF_DELACK;
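	/*
	 * Illustration (not from the original source): the probe above is a
	 * keep-alive style segment carrying sequence number snd_una - 1,
	 * i.e. one byte the peer has already acknowledged.  The peer must
	 * answer with a duplicate ACK, which proves liveness and, while in
	 * probe-rtt, gives us a way out of probe-rtt and a min-rtt update
	 * without sending new data.
	 */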
/* in rack_timeout_persist() */
	if (rack->rc_in_persist == 0)
		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
		return (-ETIMEDOUT);	/* tcp_drop() */
	if (tp->t_rxtshift >= V_tcp_retries &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	     TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
		retval = -ETIMEDOUT;	/* tcp_drop() */
	if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
	    tp->snd_una == tp->snd_max)
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
		retval = -ETIMEDOUT;	/* tcp_drop() */
	if (rack->probe_not_answered) {
		rack->r_ctl.persist_lost_ends++;
	if (tp->t_rxtshift < V_tcp_retries)
		tp->t_rxtshift++;
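	/*
	 * Illustration (not from the original source): persist probes back
	 * off via t_rxtshift just like retransmissions.  The connection is
	 * only dropped once the window has stayed closed long enough, e.g.
	 * t_rxtshift at V_tcp_retries with idle time beyond
	 * tcp_maxpersistidle (or RACK_REXMTVAL(tp) * tcp_totbackoff), or
	 * past TCPTV_PERSMAX in a half-closed state; an unanswered probe is
	 * also counted in persist_lost_ends for statistics.
	 */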
/* in rack_timeout_keepalive() */
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
	 * Keep-alive timer went off; send something or drop connection if
	if (tp->t_state < TCPS_ESTABLISHED)
	if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
		 * number tp->snd_una-1 causes the transmitted zero-length
	return (-ETIMEDOUT);	/* tcp_drop() */
/* in rack_remxt_tmr() */
	 * un-acked.
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	rack->r_timer_override = 1;
	rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
	rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
	rack->r_late = 0;
	rack->r_early = 0;
	rack->r_ctl.rc_agg_delayed = 0;
	rack->r_ctl.rc_agg_early = 0;
	if (rack->r_state && (rack->r_state != tp->t_state))
	if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) {
		 * more than rack_rxt_scoreboard_clear_thresh time-outs.
		rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
		if (rack->r_ctl.rc_resend != NULL)
			rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
	 * mark SACK-PASS on anything not acked here.
	 * sacks that come floating in will "re-ack" the data.
	TAILQ_INIT(&rack->r_ctl.rc_tmap);
	TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
		rsm->r_dupack = 0;
		/* We must re-add it back to the tlist */
		TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
		rsm->r_in_tmap = 1;
		if (rsm->r_flags & RACK_ACKED)
			rsm->r_flags |= RACK_WAS_ACKED;
		rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_W…
		rsm->r_flags |= RACK_MUST_RXT;
	rack->r_ctl.rc_considered_lost = 0;
	/* Clear the count (we just un-acked them) */
	rack->r_ctl.rc_sacked = 0;
	rack->r_ctl.rc_sacklast = NULL;
	rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
	if (rack->r_ctl.rc_resend != NULL)
		rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
	rack->r_ctl.rc_prr_sndcnt = 0;
	rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
	if (rack->r_ctl.rc_resend != NULL)
		rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
	if (((tp->t_flags & TF_SACK_PERMIT) == 0) &&
	    ((tp->t_flags & TF_SENTFIN) == 0)) {
		 * For non-sack customers new data
		rack->r_must_retran = 1;
		rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
		    rack->r_ctl.rc_sacked);
/* in rack_convert_rtts() */
	tp->t_rxtcur = RACK_REXMTVAL(tp);
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
	if (tp->t_rxtcur > rack_rto_max) {
		tp->t_rxtcur = rack_rto_max;
/* in rack_cc_conn_init() */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	srtt = tp->t_srtt;
	if ((srtt == 0) && (tp->t_srtt != 0))
	if (tp->snd_ssthresh < tp->snd_wnd) {
		tp->snd_ssthresh = tp->snd_wnd;
	if (rc_init_window(rack) < tp->snd_cwnd)
		tp->snd_cwnd = rc_init_window(rack);
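	/*
	 * Illustration (hypothetical numbers, not from the original source):
	 * on init the slow-start threshold is raised to at least the send
	 * window and the congestion window is clamped down to the initial
	 * window; e.g. with rc_init_window() of 10 * 1460 = 14600 bytes, a
	 * larger inherited snd_cwnd is reset to 14600 so the connection
	 * still begins in slow-start.
	 */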
/* in rack_timeout_rxt() */
 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
	if ((tp->t_flags & TF_GPUTINPROG) &&
	    (tp->t_rxtshift)) {
		tp->t_flags &= ~TF_GPUTINPROG;
		rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
		    rack->r_ctl.rc_gp_srtt /*flex1*/,
		    tp->gput_seq,
		return (-ETIMEDOUT);	/* tcp_drop() */
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
	rack->r_ctl.retran_during_recovery = 0;
	rack->rc_ack_required = 1;
	rack->r_ctl.dsack_byte_cnt = 0;
	if (IN_RECOVERY(tp->t_flags) &&
	    (rack->rto_from_rec == 0)) {
		rack->rto_from_rec = 1;
		rack->r_ctl.rto_ssthresh = tp->snd_ssthresh;
	if (IN_FASTRECOVERY(tp->t_flags))
		tp->t_flags |= TF_WASFRECOVERY;
		tp->t_flags &= ~TF_WASFRECOVERY;
	if (IN_CONGRECOVERY(tp->t_flags))
		tp->t_flags |= TF_WASCRECOVERY;
		tp->t_flags &= ~TF_WASCRECOVERY;
	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->snd_una == tp->snd_max)) {
	if (rack->r_ctl.dsack_persist) {
		rack->r_ctl.dsack_persist--;
		if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
			rack->r_ctl.num_dsack = 0;
	if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
		rsm = tqhash_min(rack->r_ctl.tqh);
		if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
		    ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
	if ((rack->r_ctl.rc_resend == NULL) ||
	    ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
		tp->t_rxtshift++;
	if (tp->t_rxtshift > V_tcp_retries) {
		tp->t_rxtshift = V_tcp_retries;
		MPASS(tp->t_softerror >= 0);
		retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
	if (tp->t_state == TCPS_SYN_SENT) {
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		 * End-to-End Network Path Properties" by Allman and Paxson
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
		tp->t_flags |= TF_PREVVALID;
	} else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
		tp->t_flags &= ~TF_PREVVALID;
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
		rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
	RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
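	/*
	 * Worked example (hypothetical numbers, not from the original
	 * source): with t_srtt = 100,000 usec and t_rttvar = 10,000 usec the
	 * base RTO is 100,000 + 4 * 10,000 = 140,000 usec.  On the third
	 * timeout (t_rxtshift == 3, tcp_backoff[3] == 8) rexmt becomes
	 * 140,000 * 8 = 1,120,000 usec, which RACK_TCPT_RANGESET then clamps
	 * into [rack_rto_min, rack_rto_max] with the configured timer slop.
	 */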
	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	     (tp->t_state == TCPS_FIN_WAIT_1))) {
		 * 1448 -> 1188 -> 524) should be given 2 chances to recover
		 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
		     tp->t_rxtshift % 2 == 0)) {
			 * Enter Path MTU Black-hole Detection mechanism: -
			 * Disable Path MTU Discovery (IP "DF" bit). -
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				tp->t_maxseg = V_tcp_v6mssdflt;
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				tp->t_maxseg = V_tcp_mssdflt;
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
		    (tp->t_rxtshift >= 6)) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
			tp->t_maxseg = tp->t_pmtud_saved_maxseg;
			if (tp->t_maxseg < V_tcp_mssdflt) {
				tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
				tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT;
	 * our third SYN to work-around some broken terminal servers
	 * unknown-to-them TCP options.
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
		if ((inp->inp_vflag & INP_IPV6) != 0)
		tp->t_rttvar += tp->t_srtt;
		tp->t_srtt = 0;
	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
	tp->snd_recover = tp->snd_max;
	tp->t_flags |= TF_ACKNOW;
	tp->t_rtttime = 0;
	rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
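	/*
	 * Illustration (not from the original source): blackhole detection
	 * steps the MSS down (e.g. 1448 -> 1188 -> 524, per the comment
	 * above) on even values of t_rxtshift between 2 and 6, and restores
	 * t_pmtud_saved_maxseg once t_rxtshift reaches 6, on the theory that
	 * at that point the loss is congestion rather than an MTU blackhole.
	 */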
/* in rack_process_timers() */
	int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
	    (tp->t_flags & TF_GPUTINPROG)) {
		bytes = tp->gput_ack - tp->gput_seq;
		if (SEQ_GT(tp->gput_seq, tp->snd_una))
			bytes += tp->gput_seq - tp->snd_una;
		if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
			    rack->r_ctl.rc_gp_srtt /*flex1*/,
			    tp->gput_seq,
			tp->t_flags &= ~TF_GPUTINPROG;
	if (tp->t_state == TCPS_LISTEN) {
	if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
	    rack->rc_on_min_to) {
		 * are on a min-timeout (which means rrr_conf = 3)
	 * If it's on a normal rack timer (non-min) then
	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
			ret = -1;
			ret = -2;
			 * no-sack wakeup on since we no longer have a PKT_OUTPUT
			rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
			ret = -3;
			left = rack->r_ctl.rc_timer_exp - cts;
	rack->rc_tmr_stopped = 0;
	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
	rack->r_ctl.rc_tlp_rxt_last_time = cts;
	rack->r_fast_output = 0;
	rack->r_ctl.rc_tlp_rxt_last_time = cts;
	rack->r_fast_output = 0;
	rack->r_ctl.rc_tlp_rxt_last_time = cts;
	rack->r_fast_output = 0;
/* in rack_timer_cancel() */
	flags_on_entry = rack->r_ctl.rc_hpts_flags;
	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
	    ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
	     ((tp->snd_max - tp->snd_una) == 0))) {
		tcp_hpts_remove(rack->rc_tp);
		if ((tp->snd_max - tp->snd_una) == 0)
			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
		rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
		if (tcp_in_hpts(rack->rc_tp) &&
		    ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
			tcp_hpts_remove(rack->rc_tp);
		rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
/* in rack_stopall() */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	rack->t_timers_stopped = 1;
/* in rack_stop_all_timers() */
	rack->rc_in_persist = 1;
	if (tcp_in_hpts(rack->rc_tp)) {
		tcp_hpts_remove(rack->rc_tp);
/* in rack_update_rsm() */
	rsm->r_rtr_cnt++;
	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
		rsm->r_flags |= RACK_OVERMAX;
	rsm->r_act_rxt_cnt++;
	rsm->r_dupack = 0;
	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
	if (rsm->r_flags & RACK_WAS_LOST) {
		rsm->r_flags &= ~RACK_WAS_LOST;
		KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
		if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
			rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
			rack->r_ctl.rc_considered_lost = 0;
	idx = rsm->r_rtr_cnt - 1;
	rsm->r_tim_lastsent[idx] = ts;
	 * in snduna <-> snd_max.
	rsm->r_fas = ctf_flight_size(rack->rc_tp,
	    rack->r_ctl.rc_sacked);
	if (rsm->r_flags & RACK_ACKED) {
		rsm->r_flags &= ~RACK_ACKED;
		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
	if (rsm->r_in_tmap) {
		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
		rsm->r_in_tmap = 0;
	TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
	rsm->r_in_tmap = 1;
	rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz);
	if (rsm->r_flags & RACK_MUST_RXT) {
		if (rack->r_must_retran)
			rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
		if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		rsm->r_flags &= ~RACK_MUST_RXT;
	rsm->r_flags &= ~RACK_RWND_COLLAPSED;
	if (rsm->r_flags & RACK_SACK_PASSED) {
		rsm->r_flags &= ~RACK_SACK_PASSED;
		rsm->r_flags |= RACK_WAS_SACKPASS;
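	/*
	 * Illustration (hypothetical numbers, not from the original source):
	 * r_bas rounds the block size up to whole segments, e.g. a 3000 byte
	 * map entry with segsiz = 1460 gives (3000 + 1459) / 1460 = 3
	 * segments; r_fas records the flight size at send so later RTT
	 * samples can be handed to the CC module with the load the path saw
	 * at transmit time.
	 */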
/* in rack_update_entry() */
	 * We (re-)transmitted starting at rsm->r_start for some length
	c_end = rsm->r_start + len;
	if (SEQ_GEQ(c_end, rsm->r_end)) {
		if (c_end == rsm->r_end) {
		act_len = rsm->r_end - rsm->r_start;
		*lenp = (len - act_len);
		return (rsm->r_end);
	nrsm->r_dupack = 0;
	(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
	if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
	if (rsm->r_in_tmap) {
		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
		nrsm->r_in_tmap = 1;
	rsm->r_flags &= (~RACK_HAS_FIN);
/* in rack_log_output() */
	 * -- i.e. return if err != 0 or should we pretend we sent it? --
	 * We don't log errors -- we could but snd_max does not
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	snd_una = tp->snd_una;
	snd_max = tp->snd_max;
	if ((th_flags & TH_SYN) && (seq_out == tp->iss))
	/* Are we sending an old segment to induce an ack (keep-alive)? */
	len = end - seq_out;
	if (IN_FASTRECOVERY(tp->t_flags)) {
		rack->r_ctl.rc_prr_out += len;
	rsm->r_flags = RACK_HAS_FIN|add_flag;
	rsm->r_flags = add_flag;
	rsm->r_hw_tls = 1;
	rsm->r_tim_lastsent[0] = cts;
	rsm->r_rtr_cnt = 1;
	rsm->r_act_rxt_cnt = 0;
	rsm->r_rtr_bytes = 0;
	rsm->r_flags |= RACK_HAS_SYN;
	rsm->r_start = seq_out;
	rsm->r_end = rsm->r_start + len;
	rsm->r_dupack = 0;
	rsm->m = s_mb;
	rsm->soff = s_moff;
	 * reflected in snd_una <-> snd_max
	rsm->r_fas = (ctf_flight_size(rack->rc_tp,
	    rack->r_ctl.rc_sacked) +
	    (rsm->r_end - rsm->r_start));
	if ((rack->rc_initial_ss_comp == 0) &&
	    (rack->r_ctl.ss_hi_fs < rsm->r_fas)) {
		rack->r_ctl.ss_hi_fs = rsm->r_fas;
	/* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
	if (rsm->m) {
		if (rsm->m->m_len <= rsm->soff) {
			 * within rsm->m. But if the sbsndptr was
			lm = rsm->m;
			while (lm->m_len <= rsm->soff) {
				rsm->soff -= lm->m_len;
				lm = lm->m_next;
				KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
				    __func__, rack, s_moff, s_mb, rsm->soff));
			rsm->m = lm;
		rsm->orig_m_len = rsm->m->m_len;
		rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
		rsm->orig_m_len = 0;
		rsm->orig_t_space = 0;
	rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
	(void)tqhash_insert(rack->r_ctl.tqh, rsm);
	if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
	TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
	rsm->r_in_tmap = 1;
	if (rsm->r_flags & RACK_IS_PCM) {
		rack->r_ctl.pcm_i.send_time = cts;
		rack->r_ctl.pcm_i.eseq = rsm->r_end;
		if (rack->pcm_in_progress == 0)
			rack->r_ctl.pcm_i.sseq = rsm->r_start;
	if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
	    (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
		prsm = tqhash_prev(rack->r_ctl.tqh, rsm);
		prsm->r_one_out_nr = 1;
	if (hintrsm && (hintrsm->r_start == seq_out)) {
	if ((rsm) && (rsm->r_start == seq_out)) {
	rsm = tqhash_find(rack->r_ctl.tqh, seq_out);
	if (rsm->r_start == seq_out) {
	if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
		(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
		if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
		if (rsm->r_in_tmap) {
			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
			nrsm->r_in_tmap = 1;
		rsm->r_flags &= (~RACK_HAS_FIN);
	if (seq_out == tp->snd_max) {
	} else if (SEQ_LT(seq_out, tp->snd_max)) {
		printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
		    seq_out, len, tp->snd_una, tp->snd_max);
		TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
			    rsm, rsm->r_start, rsm->r_end);
	 * Hmm beyond sndmax? (only if we are using the new rtt-pack
	    seq_out, len, tp->snd_max, tp);
/* in tcp_rack_xmit_timer() */
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
		if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
			rack->r_ctl.rc_gp_lowrtt = us_rtt;
		if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
			rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
	    (rsm->r_just_ret) ||
	    (rsm->r_one_out_nr &&
	     len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
		 * the r_one_out_nr. If it was a CUM-ACK and
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
		if (rack->r_ctl.rack_rs.confidence == 0) {
			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
			rack->r_ctl.rack_rs.confidence = confidence;
			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
			rack->r_ctl.rack_rs.confidence = confidence;
			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
	rack->r_ctl.rack_rs.rs_rtt_cnt++;
/*
 * Collect new round-trip time estimate
 * in tcp_rack_xmit_timer_commit()
 */
	if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
	if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
		rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
		rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
		rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
		    (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
		panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
	if (rack->rc_gp_rtt_set == 0) {
		rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
		rack->rc_gp_rtt_set = 1;
	} else if (rack->r_ctl.rack_rs.confidence) {
		rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
		rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
	if (rack->r_ctl.rack_rs.confidence) {
	if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
		rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
		if (rack->rc_highly_buffered == 0) {
			if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
				rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
				    rack->r_ctl.rc_highest_us_rtt,
				    rack->r_ctl.rc_lowest_us_rtt,
				rack->rc_highly_buffered = 1;
	if ((rack->r_ctl.rack_rs.confidence) ||
	    (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
		rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
	if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
		rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
		if (rack->r_ctl.rc_lowest_us_rtt == 0)
			rack->r_ctl.rc_lowest_us_rtt = 1;
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (tp->t_srtt != 0) {
		delta = tp->t_srtt - rtt;
		tp->t_srtt -= (tp->t_srtt >> 3);
		tp->t_srtt += (rtt >> 3);
		if (tp->t_srtt <= 0)
			tp->t_srtt = 1;
			delta = -delta;
		tp->t_rttvar -= (tp->t_rttvar >> 3);
		tp->t_rttvar += (delta >> 3);
		if (tp->t_rttvar <= 0)
			tp->t_rttvar = 1;
		 * No rtt measurement yet - use the unsmoothed rtt. Set the
		tp->t_srtt = rtt;
		tp->t_rttvar = rtt >> 1;
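	/*
	 * Worked example (hypothetical numbers, not from the original
	 * source): with t_srtt = 40,000 usec and a new sample
	 * rtt = 24,000 usec, the smoothing above computes
	 *   srtt'   = 40,000 - 40,000/8 + 24,000/8 = 38,000 usec
	 * and, with delta = |40,000 - 24,000| = 16,000 and
	 * t_rttvar = 8,000,
	 *   rttvar' = 8,000 - 8,000/8 + 16,000/8 = 9,000 usec,
	 * i.e. a gain of 1/8 toward each new sample; the first sample
	 * instead seeds srtt = rtt and rttvar = rtt / 2 directly.
	 */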
	rack->rc_srtt_measure_made = 1;
	if (tp->t_rttupdated < UCHAR_MAX)
		tp->t_rttupdated++;
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
	ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
	ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
	rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
	 * tick of rounding and 1 extra tick because of +-1/2 tick
	tp->t_rxtshift = 0;
	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
	    max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
	tp->t_softerror = 0;
/* in rack_apply_updated_usrtt() */
	 * Apply to filter the inbound us-rtt at us_cts.
	old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
	apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
	if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
	    rack->rc_gp_dyn_mul &&
	    (rack->use_fixed_rate == 0) &&
	    (rack->rc_always_pace)) {
		 * to the time that we would have entered probe-rtt.
		 * has entered probe-rtt. Let's go in now too.
		if ((rack->in_probe_rtt == 0) &&
		    (rack->rc_skip_timely == 0) &&
		    ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
			rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
/* in rack_update_rtt() */
	if ((rsm->r_flags & RACK_ACKED) ||
	    (rsm->r_flags & RACK_WAS_ACKED))
	if (rsm->r_no_rtt_allowed) {
	if (SEQ_GT(th_ack, rsm->r_end)) {
		len_acked = rsm->r_end - rsm->r_start;
		len_acked = th_ack - rsm->r_start;
		len_acked = rsm->r_end - rsm->r_start;
	if (rsm->r_rtr_cnt == 1) {
		t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
		if (!tp->t_rttlow || tp->t_rttlow > t)
			tp->t_rttlow = t;
		if (!rack->r_ctl.rc_rack_min_rtt ||
		    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
			rack->r_ctl.rc_rack_min_rtt = t;
			if (rack->r_ctl.rc_rack_min_rtt == 0) {
				rack->r_ctl.rc_rack_min_rtt = 1;
		if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
			us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
			us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
		if (CC_ALGO(tp)->rttsample != NULL) {
			CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
		rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
		rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
		tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2, rsm, rsm->r_rtr_cnt);
		 * When we are not app-limited then we see if
		if (rsm->r_flags & RACK_APP_LIMITED) {
		} else if (rack->app_limited_needs_set == 0) {
		rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
		    calc_conf, rsm, rsm->r_rtr_cnt);
	if ((rsm->r_flags & RACK_TLP) &&
	    (!IN_FASTRECOVERY(tp->t_flags))) {
		if (rack->r_ctl.rc_tlp_cwnd_reduce) {
	if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
	    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
	    (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
		rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
		if (rack->r_ctl.rc_rack_tmit_time == 0)
			rack->r_ctl.rc_rack_tmit_time = 1;
		rack->rc_rack_rtt = t;
	tp->t_rxtshift = 0;
	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
	    rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
	tp->t_softerror = 0;
	if (to && (to->to_flags & TOF_TS) &&
	    (to->to_tsecr) &&
	    ((rsm->r_flags & RACK_OVERMAX) == 0)) {
		for (i = 0; i < rsm->r_rtr_cnt; i++) {
			if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
				t = cts - (uint32_t)rsm->r_tim_lastsent[i];
				if (CC_ALGO(tp)->rttsample != NULL) {
					if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
						us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
						us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
					CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
				if ((i + 1) < rsm->r_rtr_cnt) {
				if (!tp->t_rttlow || tp->t_rttlow > t)
					tp->t_rttlow = t;
				if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
					rack->r_ctl.rc_rack_min_rtt = t;
					if (rack->r_ctl.rc_rack_min_rtt == 0) {
						rack->r_ctl.rc_rack_min_rtt = 1;
				if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
				    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
				    (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
					rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
					if (rack->r_ctl.rc_rack_tmit_time == 0)
						rack->r_ctl.rc_rack_tmit_time = 1;
					rack->rc_rack_rtt = t;
				rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
				    rsm->r_rtr_cnt);
		if (tcp_bblogging_on(rack->rc_tp)) {
			for (i = 0; i < rsm->r_rtr_cnt; i++) {
				rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr);
	 * time-stamp since it's not there or the time the peer last
	 * received a segment that moved forward its cum-ack point.
	i = rsm->r_rtr_cnt - 1;
	t = cts - (uint32_t)rsm->r_tim_lastsent[i];
	if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
		 * 6.2 Step 2 point 2 in the rack-draft so we
	} else if (rack->r_ctl.rc_rack_min_rtt) {
	if (!rack->r_ctl.rc_rack_min_rtt ||
	    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
		rack->r_ctl.rc_rack_min_rtt = t;
		if (rack->r_ctl.rc_rack_min_rtt == 0) {
			rack->r_ctl.rc_rack_min_rtt = 1;
	if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
	    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
	    (uint32_t)rsm->r_tim_lastsent[i]))) {
		rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
		if (rack->r_ctl.rc_rack_tmit_time == 0)
			rack->r_ctl.rc_rack_tmit_time = 1;
		rack->rc_rack_rtt = t;
/* in rack_log_sack_passed() */
	TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
		if (nrsm->r_flags & RACK_ACKED) {
		if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
		if ((nrsm->r_flags & RACK_WAS_LOST) == 0) {
			exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
			nrsm->r_flags |= RACK_WAS_LOST;
			rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start;
		if (nrsm->r_flags & RACK_SACK_PASSED) {
		nrsm->r_flags |= RACK_SACK_PASSED;
		nrsm->r_flags &= ~RACK_WAS_SACKPASS;
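	/*
	 * Illustration (not from the original source): when a SACK arrives
	 * for a later segment, the walk above moves backward through the
	 * transmit map marking every older, still-unacked entry
	 * RACK_SACK_PASSED; a sack-passed segment is then eligible for the
	 * loss test (last send time + thresh) used by rack_mark_lost()
	 * earlier in this file.
	 */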
/* fragment of rack_need_set_test() */
	if ((tp->t_flags & TF_GPUTINPROG) &&
	    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
		/*
		 * We are measuring goodput and this rsm overlaps
		 * the start of the measurement.
		 */
		if (rsm->r_rtr_cnt > 1) {
			/* ... */
		}
		seq = tp->gput_seq;
		ts = tp->gput_ts;
		rack->app_limited_needs_set = 0;
		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
		/* Do we move the measurement start point? */
		if ((use_which == RACK_USE_BEG) &&
		    SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
			tp->gput_seq = rsm->r_start;
		}
		if ((use_which == RACK_USE_END) &&
		    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
			tp->gput_seq = rsm->r_end;
		}
		if (use_which == RACK_USE_END_OR_THACK) {
			/*
			 * ... all the way up to where this ack cum-ack moves
			 * things.
			 */
			if (SEQ_GT(th_ack, rsm->r_end))
				tp->gput_seq = th_ack;
			else
				tp->gput_seq = rsm->r_end;
		}
		if (SEQ_LT(tp->gput_seq, tp->snd_max))
			s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
		else
			s_rsm = NULL;
		if (s_rsm != NULL) {
			rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0];
		} else {
			/* If we hit here we have to have *not* sent tp->gput_seq */
			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
			rack->app_limited_needs_set = 1;
		}
		if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
			/*
			 * We moved beyond this guy's range, re-calculate
			 * the new end point.
			 */
			if (rack->rc_gp_filled == 0) {
				tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
			} else {
				tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
			}
		}
		if ((rack->in_probe_rtt == 0) &&
		    (rack->measure_saw_probe_rtt) &&
		    (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
			rack->measure_saw_probe_rtt = 0;
		rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
		    seq, tp->gput_seq,
		    (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
		    (uint64_t)rack->r_ctl.rc_gp_output_ts),
		    /* ... */
		if (rack->rc_gp_filled &&
		    ((tp->gput_ack - tp->gput_seq) <
		    max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))))) {
			uint32_t ideal_amount;

			ideal_amount = rack_get_measure_window(tp, rack);
			if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
				/*
				 * The window left is too small to gain us
				 * anything we trust; abort the measurement.
				 */
				tp->t_flags &= ~TF_GPUTINPROG;
				rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
				    /* ... */
				    (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
				    (uint64_t)rack->r_ctl.rc_gp_output_ts),
				    /* ... */
			} else {
				/* Reset the window further out */
				tp->gput_ack = tp->gput_seq + ideal_amount;
			}
		}
		rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm);
	}
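/*
 * A minimal worked sketch of the measurement-window sizing above: until
 * the goodput filter is seeded (rc_gp_filled == 0) the window is at
 * least the initial window or MIN_GP_WIN segments. The constant value
 * and helper below are illustrative stand-ins, not rack.c's.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_GP_WIN 6	/* assumed floor, in segments */

static uint32_t
measure_end(uint32_t gput_seq, int gp_filled, uint32_t init_win,
    uint32_t maxseg, uint32_t measure_win)
{
	if (gp_filled == 0) {
		uint32_t floorv = MIN_GP_WIN * maxseg;

		return (gput_seq + ((init_win > floorv) ? init_win : floorv));
	}
	return (gput_seq + measure_win);
}

int
main(void)
{
	/* unseeded: a 10 * 1448 init window beats the 6-segment floor */
	printf("%u\n", measure_end(5000, 0, 10 * 1448, 1448, 0));
	/* seeded: use the computed measurement window instead */
	printf("%u\n", measure_end(5000, 1, 10 * 1448, 1448, 64000));
	return (0);
}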
/* fragment of is_rsm_inside_declared_tlp_block() */
	if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start))
		return (0);	/* ends before the recorded TLP block */
	if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end))
		return (0);	/* starts after the recorded TLP block */
	/* It has to be a sub-part of the original TLP recorded */
	return (1);
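/*
 * Sketch of the containment test above using plain serial-number
 * arithmetic; seq_lt/seq_gt mirror the kernel's SEQ_LT/SEQ_GT for the
 * wrapping 32-bit sequence space. Standalone, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_lt(uint32_t a, uint32_t b) { return ((int32_t)(a - b) < 0); }
static bool seq_gt(uint32_t a, uint32_t b) { return ((int32_t)(a - b) > 0); }

static bool
inside_tlp_block(uint32_t r_start, uint32_t r_end,
    uint32_t tlp_start, uint32_t tlp_end)
{
	if (seq_lt(r_end, tlp_start))
		return (false);	/* wholly before the recorded block */
	if (seq_gt(r_start, tlp_end))
		return (false);	/* wholly after it */
	return (true);		/* a sub-part of the recorded block */
}

int
main(void)
{
	printf("%d\n", inside_tlp_block(1000, 2000, 1500, 3000));	/* 1 */
	printf("%d\n", inside_tlp_block(4000, 5000, 1500, 3000));	/* 0 */
	return (0);
}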
/* fragment of rack_proc_sack_blk() */
	start = sack->start;
	end = sack->end;
	/* ... */
	if ((rsm == NULL) ||
	    (SEQ_LT(end, rsm->r_start)) ||
	    (SEQ_GEQ(start, rsm->r_end)) ||
	    (SEQ_LT(start, rsm->r_start))) {
		/* We are not at the right spot, find it in the tree */
		rsm = tqhash_find(rack->r_ctl.tqh, start);
	}
	/* ... */
	if (rsm->r_start != start) {
		if ((rsm->r_flags & RACK_ACKED) == 0) {
			/* Is this a retransmitted TLP whose range we must remember? */
			if ((rsm->r_flags & RACK_TLP) &&
			    (rsm->r_rtr_cnt > 1)) {
				if (rack->rc_last_tlp_acked_set &&
				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
					/* Already registered; just widen as needed */
					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
				} else {
					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
					rack->rc_last_tlp_past_cumack = 0;
					rack->rc_last_tlp_acked_set = 1;
					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
				}
			}
			/*
			 * A split would leave us with:
			 *  rsm     |--------------|
			 *  sackblk     |------->
			 * becoming
			 *  rsm     |---|
			 *  nrsm        |----------|
			 * but first see if the sack spans on top of
			 * the next guy and it is already sacked.
			 */
			next = tqhash_next(rack->r_ctl.tqh, rsm);
			if ((next != NULL) &&
			    (rsm->bindex == next->bindex) &&
			    ((rsm->r_flags & RACK_STRADDLE) == 0) &&
			    ((next->r_flags & RACK_STRADDLE) == 0) &&
			    ((rsm->r_flags & RACK_IS_PCM) == 0) &&
			    ((next->r_flags & RACK_IS_PCM) == 0) &&
			    (rsm->r_flags & RACK_IN_GP_WIN) &&
			    (next->r_flags & RACK_IN_GP_WIN))
				can_use_hookery = 1;
			else
				can_use_hookery = 0;
			if (next && can_use_hookery &&
			    (next->r_flags & RACK_ACKED) &&
			    SEQ_GEQ(end, next->r_start)) {
				/*
				 * The next one is already acked; slide the
				 * boundary instead of splitting:
				 *  rsm  |------------|       (not-acked)
				 *  next              |-----------| (acked)
				 *  sackblk    |-------->
				 * becomes
				 *  rsm  |------|             (not-acked)
				 *  next        |-----------------| (acked)
				 *  nrsm        |-----|
				 */
				tqhash_update_end(rack->r_ctl.tqh, rsm, start);
				next->r_start = start;
				rsm->r_flags |= RACK_SHUFFLED;
				next->r_flags |= RACK_SHUFFLED;
				/* Now we must adjust back where next->m is */
				/* ... */
				if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] <
				    nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)])
					next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
				/* Keep the freshest ack arrival time */
				if (next->r_ack_arrival <
				    rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
					next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
				/* ... */
				rsm->r_dupack = 0;
				rsm->r_just_ret = 0;
				/* ... */
				nrsm->r_start = start;
				/* Now adjust our tracking counts */
				if (rack->app_limited_needs_set)
					rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
				changed += (nrsm->r_end - nrsm->r_start);
				rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
				if (rsm->r_flags & RACK_WAS_LOST) {
					int my_chg;

					my_chg = (nrsm->r_end - nrsm->r_start);
					KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
					    ("..."));
					if (my_chg <= rack->r_ctl.rc_considered_lost)
						rack->r_ctl.rc_considered_lost -= my_chg;
					else
						rack->r_ctl.rc_considered_lost = 0;
				}
				if (nrsm->r_flags & RACK_SACK_PASSED) {
					rack->r_ctl.rc_reorder_ts = cts;
					if (rack->r_ctl.rc_reorder_ts == 0)
						rack->r_ctl.rc_reorder_ts = 1;
				}
				/*
				 * Now we want to go up from rsm (the
				 * one left un-acked) to the next one
				 * in the tmap, so that when we walk
				 * backwards we include marking
				 * sack-passed on rsm (the one passed in).
				 */
				if (rsm->r_in_tmap) {
					nrsm = TAILQ_NEXT(rsm, r_tnext);
					if (nrsm && nrsm->r_in_tmap)
						rack_log_sack_passed(tp, rack, nrsm, cts);
				}
				/* Now are we done? */
				if (SEQ_LT(end, next->r_end) ||
				    (end == next->r_end)) {
					/* Done with block */
					goto out;
				}
				/* There is more not covered by this rsm, move on */
				start = next->r_end;
				rsm = tqhash_next(rack->r_ctl.tqh, next);
			} else {
				/*
				 * No hookery possible; split:
				 *  rsm     |--------|
				 *  sackblk     |----->
				 * becomes
				 *  rsm     |----|
				 *  sackblk     |----->
				 *  nrsm         |---|
				 */
				/* ... */
				rsm->r_just_ret = 0;
#ifndef INVARIANTS
				(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
#else
				if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
					panic("...");
				}
#endif
				if (rsm->r_in_tmap) {
					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
					nrsm->r_in_tmap = 1;
				}
				rsm->r_flags &= (~RACK_HAS_FIN);
			}
		}
		if (end == rsm->r_end) {
			/* ... */
			rsm = tqhash_next(rack->r_ctl.tqh, rsm);
		} else if (SEQ_LT(end, rsm->r_end)) {
			/* ... */
			rsm = tqhash_next(rack->r_ctl.tqh, rsm);
		} else {
			/* There is more not covered by this rsm, move on */
			start = rsm->r_end;
			rsm = tqhash_next(rack->r_ctl.tqh, rsm);
		}
	}
	if (SEQ_GEQ(end, rsm->r_end)) {
		/*
		 * The end of this block is either beyond this guy or right
		 * at this guy:
		 *  rsm ---   |-----|
		 *  end       |-----|
		 *  <or>
		 *  end       |---------|
		 */
		if ((rsm->r_flags & RACK_ACKED) == 0) {
			/* Is this a retransmitted TLP whose range we must remember? */
			if ((rsm->r_flags & RACK_TLP) &&
			    (rsm->r_rtr_cnt > 1)) {
				if (rack->rc_last_tlp_acked_set &&
				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
					/* Already registered; just widen as needed */
					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
				} else {
					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
					rack->rc_last_tlp_past_cumack = 0;
					rack->rc_last_tlp_acked_set = 1;
					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
				}
			}
			changed += (rsm->r_end - rsm->r_start);
			/* Track lost bytes now being recovered */
			if (rsm->r_flags & RACK_WAS_LOST) {
				int my_chg;

				my_chg = (rsm->r_end - rsm->r_start);
				rsm->r_flags &= ~RACK_WAS_LOST;
				KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
				    ("..."));
				if (my_chg <= rack->r_ctl.rc_considered_lost)
					rack->r_ctl.rc_considered_lost -= my_chg;
				else
					rack->r_ctl.rc_considered_lost = 0;
			}
			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
			if (rsm->r_in_tmap) /* should be true */
				rack_log_sack_passed(tp, rack, rsm, cts);
			/* Is reordering occurring? */
			if (rsm->r_flags & RACK_SACK_PASSED) {
				rsm->r_flags &= ~RACK_SACK_PASSED;
				rack->r_ctl.rc_reorder_ts = cts;
				if (rack->r_ctl.rc_reorder_ts == 0)
					rack->r_ctl.rc_reorder_ts = 1;
			}
			if (rack->app_limited_needs_set)
				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
			rsm->r_flags |= RACK_ACKED;
			rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
			if (rsm->r_in_tmap) {
				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
				rsm->r_in_tmap = 0;
			}
		}
		/* ... */
		if (end == rsm->r_end) {
			/* This block only - done, setup for next */
			goto out;
		}
		/* There is more not covered by this rsm, move on */
		nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
		start = rsm->r_end;
		rsm = nrsm;
		if (rsm == NULL)
			goto out;
	} else {
		/*
		 * The block goes only part way into this guy:
		 *  rsm ---   |-----|
		 *  end       |--|
		 */
		if ((rsm->r_flags & RACK_ACKED) == 0) {
			/* Is this a retransmitted TLP whose range we must remember? */
			if ((rsm->r_flags & RACK_TLP) &&
			    (rsm->r_rtr_cnt > 1)) {
				if (rack->rc_last_tlp_acked_set &&
				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
					/* Already registered; just widen as needed */
					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
				} else {
					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
					rack->rc_last_tlp_past_cumack = 0;
					rack->rc_last_tlp_acked_set = 1;
					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
				}
			}
		}
		/* Can we use the prev for hookery? */
		prev = tqhash_prev(rack->r_ctl.tqh, rsm);
		if ((prev != NULL) &&
		    (rsm->bindex == prev->bindex) &&
		    ((rsm->r_flags & RACK_STRADDLE) == 0) &&
		    ((prev->r_flags & RACK_STRADDLE) == 0) &&
		    ((rsm->r_flags & RACK_IS_PCM) == 0) &&
		    ((prev->r_flags & RACK_IS_PCM) == 0) &&
		    (rsm->r_flags & RACK_IN_GP_WIN) &&
		    (prev->r_flags & RACK_IN_GP_WIN))
			can_use_hookery = 1;
		else
			can_use_hookery = 0;
		if (prev && can_use_hookery &&
		    (prev->r_flags & RACK_ACKED)) {
			/*
			 * Goal, we want the right remainder of rsm to shrink
			 * in place and span from (rsm->r_start = end) to rsm->r_end.
			 * We want to expand prev to go all the way
			 * to prev->r_end <- end.
			 * So in the tree we have before:
			 *  prev |--------|          (acked)
			 *  rsm           |-------|  (non-acked)
			 *  sackblk       |-|
			 * and after:
			 *  prev |----------|        (acked)
			 *  rsm             |-----|  (non-acked)
			 *  nrsm          |-|        (temporary)
			 */
			tqhash_update_end(rack->r_ctl.tqh, prev, end);
			rsm->r_start = end;
			rsm->r_flags |= RACK_SHUFFLED;
			prev->r_flags |= RACK_SHUFFLED;
			/* ... */
			nrsm->r_end = end;
			rsm->r_dupack = 0;
			/* Keep the newest transmit time on the acked piece */
			if (prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] <
			    nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) {
				prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
			}
			/* And the freshest ack arrival time */
			if (prev->r_ack_arrival <
			    rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
				prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
			/* ... */
			/* Now adjust our tracking counts */
			if (rack->app_limited_needs_set)
				rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
			changed += (nrsm->r_end - nrsm->r_start);
			rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
			if (rsm->r_flags & RACK_WAS_LOST) {
				int my_chg;

				my_chg = (nrsm->r_end - nrsm->r_start);
				KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
				    ("..."));
				if (my_chg <= rack->r_ctl.rc_considered_lost)
					rack->r_ctl.rc_considered_lost -= my_chg;
				else
					rack->r_ctl.rc_considered_lost = 0;
			}
			if (nrsm->r_flags & RACK_SACK_PASSED) {
				rack->r_ctl.rc_reorder_ts = cts;
				if (rack->r_ctl.rc_reorder_ts == 0)
					rack->r_ctl.rc_reorder_ts = 1;
			}
		} else {
			/* Is this a retransmitted TLP whose range we must remember? */
			if ((rsm->r_flags & RACK_TLP) &&
			    (rsm->r_rtr_cnt > 1)) {
				if (rack->rc_last_tlp_acked_set &&
				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
					/* Already registered; just widen as needed */
					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
						    rack->r_ctl.last_tlp_acked_end);
					}
				} else {
					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
					rack->rc_last_tlp_acked_set = 1;
					rack->rc_last_tlp_past_cumack = 0;
					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
				}
			}
			/*
			 * We have to clone away a piece:
			 * nrsm->r_start = end;
			 * nrsm->r_end = rsm->r_end;
			 * which is un-acked.
			 * <and>
			 * rsm->r_end = nrsm->r_start;
			 * i.e. the remaining un-acked
			 * piece is left on the left
			 * hand side.
			 *
			 * So we start like this
			 * rsm      |----------| (not acked)
			 * sackblk  |---|
			 * build it so we have
			 * rsm      |---|        (acked)
			 * nrsm         |------| (not acked)
			 */
			/* ... */
			rsm->r_flags &= (~RACK_HAS_FIN);
			rsm->r_just_ret = 0;
#ifndef INVARIANTS
			(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
#else
			if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
				panic("...");
			}
#endif
			if (rsm->r_in_tmap) {
				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
				nrsm->r_in_tmap = 1;
			}
			nrsm->r_dupack = 0;
			/* ... */
			changed += (rsm->r_end - rsm->r_start);
			if (rsm->r_flags & RACK_WAS_LOST) {
				int my_chg;

				my_chg = (rsm->r_end - rsm->r_start);
				rsm->r_flags &= ~RACK_WAS_LOST;
				KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
				    ("..."));
				if (my_chg <= rack->r_ctl.rc_considered_lost)
					rack->r_ctl.rc_considered_lost -= my_chg;
				else
					rack->r_ctl.rc_considered_lost = 0;
			}
			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);

			if (rsm->r_in_tmap) /* should be true */
				rack_log_sack_passed(tp, rack, rsm, cts);
			/* Is reordering occurring? */
			if (rsm->r_flags & RACK_SACK_PASSED) {
				rsm->r_flags &= ~RACK_SACK_PASSED;
				rack->r_ctl.rc_reorder_ts = cts;
				if (rack->r_ctl.rc_reorder_ts == 0)
					rack->r_ctl.rc_reorder_ts = 1;
			}
			if (rack->app_limited_needs_set)
				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
			rsm->r_flags |= RACK_ACKED;
			rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
			/* ... */
			if (rsm->r_in_tmap) {
				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
				rsm->r_in_tmap = 0;
			}
		}
	}
out:
	if (rsm &&
	    ((rsm->r_flags & RACK_TLP) == 0) &&
	    (rsm->r_flags & RACK_ACKED)) {
		/*
		 * Can we merge the just-acked block with the previous
		 * or next block?
		 */
		next = tqhash_next(rack->r_ctl.tqh, rsm);
		while (next) {
			if (next->r_flags & RACK_TLP)
				break;
			/* Only allow merges between ones in or out of GP window */
			if ((next->r_flags & RACK_IN_GP_WIN) &&
			    ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if ((rsm->r_flags & RACK_IN_GP_WIN) &&
			    ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if (rsm->bindex != next->bindex)
				break;
			if (rsm->r_flags & RACK_STRADDLE)
				break;
			if (rsm->r_flags & RACK_IS_PCM)
				break;
			if (next->r_flags & RACK_STRADDLE)
				break;
			if (next->r_flags & RACK_IS_PCM)
				break;
			if (next->r_flags & RACK_ACKED) {
				/* yep this and next can be merged */
				rsm = rack_merge_rsm(rack, rsm, next);
				next = tqhash_next(rack->r_ctl.tqh, rsm);
			} else
				break;
		}
		/* Now what about the previous? */
		prev = tqhash_prev(rack->r_ctl.tqh, rsm);
		while (prev) {
			if (prev->r_flags & RACK_TLP)
				break;
			/* Only allow merges between ones in or out of GP window */
			if ((prev->r_flags & RACK_IN_GP_WIN) &&
			    ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if ((rsm->r_flags & RACK_IN_GP_WIN) &&
			    ((prev->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if (rsm->bindex != prev->bindex)
				break;
			if (rsm->r_flags & RACK_STRADDLE)
				break;
			if (rsm->r_flags & RACK_IS_PCM)
				break;
			if (prev->r_flags & RACK_STRADDLE)
				break;
			if (prev->r_flags & RACK_IS_PCM)
				break;
			if (prev->r_flags & RACK_ACKED) {
				/* yep the previous and this can be merged */
				rsm = rack_merge_rsm(rack, prev, rsm);
				prev = tqhash_prev(rack->r_ctl.tqh, rsm);
			} else
				break;
		}
	}
	/* Save off the next one for quick reference. */
	nrsm = tqhash_find(rack->r_ctl.tqh, end);
	*prsm = rack->r_ctl.rc_sacklast = nrsm;
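/*
 * Toy model (assumption-laden, not the kernel's tqhash/sendmap) of the
 * split rack_proc_sack_blk() performs when a SACK edge falls inside a
 * map entry: the head keeps [start, at) and a cloned tail takes [at, end).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct seg {
	uint32_t start, end;
	int acked;
};

static struct seg *
split_seg(struct seg *s, uint32_t at)
{
	struct seg *tail = malloc(sizeof(*tail));

	if (tail == NULL)
		return (NULL);
	tail->start = at;	/* tail inherits [at, end) */
	tail->end = s->end;
	tail->acked = s->acked;
	s->end = at;		/* head shrinks to [start, at) */
	return (tail);
}

int
main(void)
{
	struct seg head = { 1000, 3000, 0 };
	struct seg *tail = split_seg(&head, 2000);

	if (tail != NULL) {
		tail->acked = 1;	/* the SACKed piece */
		printf("head [%u,%u) tail [%u,%u)\n",
		    head.start, head.end, tail->start, tail->end);
		free(tail);
	}
	return (0);
}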
/* fragment of rack_peer_reneges() */
	while (rsm && (rsm->r_flags & RACK_ACKED)) {
		/* The peer reneged on this one; un-sack it */
		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
#ifdef INVARIANTS
		if (rsm->r_in_tmap) {
			panic("...",
			    rack, rsm, rsm->r_flags);
		}
#endif
		rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
		if (tmap == NULL) {
			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
		} else {
			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
		}
		tmap = rsm;
		tmap->r_in_tmap = 1;
		rsm = tqhash_next(rack->r_ctl.tqh, rsm);
	}
	/*
	 * Now possibly clear the sack filter so we start
	 * recognizing sacks that cover this area again.
	 */
	sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
/* fragment of rack_rsm_sender_update() */
	/*
	 * The cum-ack is being advanced upon the sendmap.
	 * If we are not doing a measurement there is nothing to do.
	 */
	if ((tp->t_flags & TF_GPUTINPROG) == 0)
		return;
	/* If this rsm pushes past the measurement end, move the end out */
	if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
		tp->gput_ack = rsm->r_end;
	}
	/* If we are app-limited the start will be set up later */
	if (rack->app_limited_needs_set)
		return;
	/* ... */
	if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
	    rack->r_ctl.rc_gp_cumack_ts)
		return;
	rack->r_ctl.rc_gp_cumack_ts = ts;
	rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
	    /* ... */
/* fragment of rack_process_to_cumack() */
	if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
		/*
		 * If we have some sack blocks in the filter,
		 * prune them out by calling sfb with no move.
		 */
		sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack);
	}
	if (SEQ_GT(th_ack, tp->snd_una)) {
		/* Clear any app-ack remembered settings */
		rack->r_ctl.cleared_app_ack = 0;
	}
	rack->r_wanted_output = 1;
	if (SEQ_GT(th_ack, tp->snd_una))
		rack->r_ctl.last_cumack_advance = acktime;

	/* Let's consider aging old TLP markings out */
	if ((rack->rc_last_tlp_acked_set == 1) &&
	    (rack->rc_last_tlp_past_cumack == 1) &&
	    (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
		/*
		 * We have reached the point where our last rack
		 * tlp retransmit sequence is ahead of the cum-ack.
		 * This can only happen when the cum-ack moves all
		 * the way around (it has been a full 2^^31+1 bytes
		 * or more since we sent a retransmitted TLP), so
		 * turn off the valid flag since it is not really valid.
		 *
		 * Note since sack's also turn on this event we have
		 * a complication, we have to wait to age it out until
		 * the cum-ack is by the TLP before checking which is
		 * what the next else clause does.
		 */
		rack_log_dsack_event(rack, 9, __LINE__,
		    rack->r_ctl.last_tlp_acked_start,
		    rack->r_ctl.last_tlp_acked_end);
		rack->rc_last_tlp_acked_set = 0;
		rack->rc_last_tlp_past_cumack = 0;
	} else if ((rack->rc_last_tlp_acked_set == 1) &&
	    (rack->rc_last_tlp_past_cumack == 0) &&
	    (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
		/* It is now safe to start aging TLP's out */
		rack->rc_last_tlp_past_cumack = 1;
	}
	/* We do the same for the tlp send seq as well */
	if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
	    (rack->rc_last_sent_tlp_past_cumack == 1) &&
	    (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) {
		rack_log_dsack_event(rack, 9, __LINE__,
		    rack->r_ctl.last_sent_tlp_seq,
		    (rack->r_ctl.last_sent_tlp_seq +
		    rack->r_ctl.last_sent_tlp_len));
		rack->rc_last_sent_tlp_seq_valid = 0;
		rack->rc_last_sent_tlp_past_cumack = 0;
	} else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
	    (rack->rc_last_sent_tlp_past_cumack == 0) &&
	    (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
		/* It is now safe to start aging TLP's out */
		rack->rc_last_sent_tlp_past_cumack = 1;
	}
more:
	rsm = tqhash_min(rack->r_ctl.tqh);
	if (rsm == NULL) {
		if ((th_ack - 1) == tp->iss) {
			/* A SYN's sequence has no map entry; nothing to do */
			return;
		}
		if (tp->t_flags & TF_SENTFIN) {
			/* If we sent a FIN we will not have a map */
			return;
		}
#ifdef INVARIANTS
		panic("...",
		    tp->t_state, th_ack, rack,
		    tp->snd_una, tp->snd_max);
#endif
		return;
	}
	if (SEQ_LT(th_ack, rsm->r_start)) {
		/* The map is missing the acked range */
#ifdef INVARIANTS
		printf("...",
		    rsm->r_start,
		    th_ack, tp->t_state, rack->r_state);
#endif
		return;
	}
	rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);

	/* Now was it a retransmitted TLP? */
	if ((rsm->r_flags & RACK_TLP) &&
	    (rsm->r_rtr_cnt > 1)) {
		/*
		 * Yes, remember the range so a later DSACK on it
		 * is not mistaken for reordering.
		 */
		if (rack->rc_last_tlp_acked_set &&
		    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
			/* Already registered; just widen as needed */
			rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
			if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
				rack->r_ctl.last_tlp_acked_start = rsm->r_start;
				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
				    rack->r_ctl.last_tlp_acked_end);
			}
			if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
				rack->r_ctl.last_tlp_acked_end = rsm->r_end;
				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
				    rack->r_ctl.last_tlp_acked_end);
			}
		} else {
			rack->rc_last_tlp_past_cumack = 1;
			rack->r_ctl.last_tlp_acked_start = rsm->r_start;
			rack->r_ctl.last_tlp_acked_end = rsm->r_end;
			rack->rc_last_tlp_acked_set = 1;
			rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
		}
	}
	/* Now do we consume the whole thing? */
	rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
	if (SEQ_GEQ(th_ack, rsm->r_end)) {
		/* It is all consumed */
		if (rsm->r_flags & RACK_WAS_LOST) {
			/*
			 * This can happen when we marked it as lost
			 * and yet before retransmitting we get an ack,
			 * which can happen due to reordering.
			 */
			rsm->r_flags &= ~RACK_WAS_LOST;
			KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
			    ("..."));
			if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
				rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
			else
				rack->r_ctl.rc_considered_lost = 0;
		}
		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
		rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
		rsm->r_rtr_bytes = 0;
		/* ... */
		tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
		if (rsm->r_in_tmap) {
			TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
			rsm->r_in_tmap = 0;
		}
		newly_acked = 1;
		if (rsm->r_flags & RACK_ACKED) {
			/*
			 * It was acked on the scoreboard -- remove
			 * it from total
			 */
			rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
			newly_acked = 0;
		} else if (rsm->r_flags & RACK_SACK_PASSED) {
			/*
			 * There are segments ACKED on the
			 * scoreboard further up; we are seeing
			 * reordering.
			 */
			rsm->r_flags &= ~RACK_SACK_PASSED;
			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
			rsm->r_flags |= RACK_ACKED;
			rack->r_ctl.rc_reorder_ts = cts;
			if (rack->r_ctl.rc_reorder_ts == 0)
				rack->r_ctl.rc_reorder_ts = 1;
			if (rack->r_ent_rec_ns) {
				/*
				 * We have sent no more, and we saw a sack
				 * and then an ack arrive.
				 */
				rack->r_might_revert = 1;
			}
			rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
		} else {
			rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
		}
		if ((rsm->r_flags & RACK_TO_REXT) &&
		    (tp->t_flags & TF_RCVD_TSTMP) &&
		    (to->to_flags & TOF_TS) &&
		    (to->to_tsecr != 0) &&
		    (tp->t_flags & TF_PREVVALID)) {
			/*
			 * Use the timestamp to see if this
			 * retransmission was from the first transmit;
			 * if so the RTO was a mistake.
			 */
			tp->t_flags &= ~TF_PREVVALID;
			if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
				/* ... */
			}
		}
		left = th_ack - rsm->r_end;
		if (rack->app_limited_needs_set && newly_acked)
			rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
		/* ... */
		if (left) {
			/* More map entries are consumed, go around */
			goto more;
		}
		/* Check for reneging */
		rsm = tqhash_min(rack->r_ctl.tqh);
		if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
			/*
			 * The peer has moved snd_una up to
			 * the edge of this send, i.e. one
			 * that it had previously acked. The only
			 * way that can be true is if the peer threw
			 * away data (space issues) that it had
			 * previously sacked (else it would have
			 * given us snd_una up to (rsm->r_end)).
			 * We need to undo the acked markings here.
			 *
			 * Note we have to look to make sure th_ack is
			 * our rsm->r_start in case we get an old ack
			 * where th_ack is behind snd_una.
			 */
			rack_peer_reneges(rack, rsm, th_ack);
		}
		return;
	}
	if (rsm->r_flags & RACK_ACKED) {
		/*
		 * It was acked on the scoreboard -- remove it from
		 * total for the part being cum-acked.
		 */
		rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
	} else {
		rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack);
	}
	/* And what about the lost flag? */
	if (rsm->r_flags & RACK_WAS_LOST) {
		/*
		 * This can happen when we marked it as lost and yet
		 * before retransmitting we get an ack; here it is
		 * only a partial ack of the send.
		 */
		KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)),
		    ("..."));
		if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start))
			rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start;
		else
			rack->r_ctl.rc_considered_lost = 0;
	}
	/* Clear the dup ack count for the piece that remains */
	rsm->r_dupack = 0;
	if (rsm->r_rtr_bytes) {
		/*
		 * It was retransmitted; adjust the
		 * sack holes for what was acked.
		 */
		int ack_am;

		ack_am = (th_ack - rsm->r_start);
		if (ack_am >= rsm->r_rtr_bytes) {
			rack->r_ctl.rc_holes_rxt -= ack_am;
			rsm->r_rtr_bytes -= ack_am;
		}
	}
	/* Now we need to move our offset forward too */
	if (rsm->m &&
	    ((rsm->orig_m_len != rsm->m->m_len) ||
	    (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
		/* Fix up the orig_m_len and possibly the mbuf offset */
		/* ... */
	}
	rsm->soff += (th_ack - rsm->r_start);
	/* The trim will move th_ack into r_start for us */
	tqhash_trim(rack->r_ctl.tqh, th_ack);
	/* Now do we need to move the mbuf forward too? */
	m = rsm->m;
	soff = rsm->soff;
	while (soff >= m->m_len) {
		soff -= m->m_len;
		KASSERT((m->m_next != NULL),
		    ("...",
		    rsm, rsm->soff, soff, m));
		m = m->m_next;
		if (m == NULL) {
			/*
			 * This is a fall-back that prevents a panic. In reality
			 * we should be able to walk the mbuf's and find our place,
			 * ... but tqhash_trim did update rsm->r_start so the offset
			 * calculation still lands on the right byte.
			 */
			m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
			    (rsm->r_start - tp->snd_una),
			    &soff);
			break;
		}
	}
	rsm->m = m;
	rsm->soff = soff;
	rsm->orig_m_len = rsm->m->m_len;
	rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
	if (rack->app_limited_needs_set &&
	    SEQ_GEQ(th_ack, tp->gput_seq))
		rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
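/*
 * Sketch (toy list, hypothetical names) of the cum-ack advance above:
 * entries entirely below th_ack are freed; an entry straddling th_ack
 * is trimmed so its start and data offset move up to th_ack, the same
 * job tqhash_trim and the soff adjustment do for the real sendmap.
 */
#include <stdint.h>
#include <stdio.h>

struct ent {
	uint32_t start, end;
	uint32_t soff;		/* offset of start into the send buffer */
};

static void
advance_cumack(struct ent *e, int *freed, uint32_t th_ack)
{
	if (th_ack >= e->end) {
		*freed = 1;			/* wholly consumed, free it */
		return;
	}
	if (th_ack > e->start) {
		e->soff += th_ack - e->start;	/* move data offset forward */
		e->start = th_ack;		/* trim the head */
	}
	*freed = 0;
}

int
main(void)
{
	struct ent e = { 1000, 3000, 0 };
	int freed;

	advance_cumack(&e, &freed, 2500);
	printf("freed=%d start=%u soff=%u\n", freed, e.start, e.soff);
	return (0);
}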
/* fragment of rack_handle_might_revert() */
	if (rack->r_might_revert) {
		/*
		 * Before reverting, make sure nothing on the tmap is
		 * still marked sack-passed; if it is, the sacks were
		 * real and we must not revert.
		 */
		TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
			if (rsm->r_flags & RACK_SACK_PASSED) {
				/* Real sacks are still outstanding */
				return;
			}
		}
		/* Revert the congestion state back */
		rack->r_ent_rec_ns = 0;
		orig_cwnd = tp->snd_cwnd;
		tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
		tp->snd_recover = tp->snd_una;
		/* ... */
		if (IN_RECOVERY(tp->t_flags)) {
			/* ... */
			if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0)) {
				/*
				 * We were in recovery, hit an RTO,
				 * and then re-entered recovery (more sack's arrived)
				 * ... having recorded the ssthresh from
				 * the first recovery. We want to be able to slow-start
				 * back to that level; without restoring it we would
				 * ... get no slow-start after our RTO.
				 */
				rack->rto_from_rec = 0;
				if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh)
					tp->snd_ssthresh = rack->r_ctl.rto_ssthresh;
			}
		}
		rack->r_might_revert = 0;
	}
/* fragment of rack_note_dsack() */
	am = end - start;
	if ((rack->rc_last_tlp_acked_set) &&
	    (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
	    (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
		/* The DSACK covers our last TLP; this is not reordering */
		/* ... */
	}
	if (rack->rc_last_sent_tlp_seq_valid) {
		l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
		if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
		    SEQ_LEQ(end, l_end)) {
			/* ... */
		}
	}
	/* Is there a DSACK round already in progress? */
	if (rack->rc_dsack_round_seen == 0) {
		rack->rc_dsack_round_seen = 1;
		rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
		rack->r_ctl.num_dsack++;
		rack->r_ctl.dsack_persist = 16;	/* 16 is from the standard */
	}
	/* Count the bytes within the DSACK */
	rack->r_ctl.dsack_byte_cnt += am;
	if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
	    rack->r_ctl.retran_during_recovery &&
	    (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
		/*
		 * Out of recovery, and everything we retransmitted
		 * during recovery came back as a DSACK: revert.
		 */
		rack->r_might_revert = 1;
		rack_handle_might_revert(rack->rc_tp, rack);
		rack->r_might_revert = 0;
		rack->r_ctl.retran_during_recovery = 0;
		rack->r_ctl.dsack_byte_cnt = 0;
	}
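/*
 * Sketch of the DSACK round bookkeeping above: a round opens at the
 * first DSACK and runs until the cum-ack passes the snd_max recorded
 * at that moment; the persist counter (16, per the code above) decays
 * elsewhere. Structure and field names here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct dsack_state {
	int round_seen;
	uint32_t round_end;	/* snd_max when the round opened */
	uint32_t num_dsack;
	uint8_t persist;
};

static void
note_dsack(struct dsack_state *d, uint32_t snd_max, uint32_t *byte_cnt,
    uint32_t am)
{
	if (d->round_seen == 0) {
		d->round_seen = 1;	/* open a new round */
		d->round_end = snd_max;
		d->num_dsack++;
		d->persist = 16;	/* 16 is from the standard */
	}
	*byte_cnt += am;		/* total DSACKed bytes this episode */
}

int
main(void)
{
	struct dsack_state d = { 0, 0, 0, 0 };
	uint32_t byte_cnt = 0;

	note_dsack(&d, 90000, &byte_cnt, 1448);	/* opens a round */
	note_dsack(&d, 95000, &byte_cnt, 1448);	/* same round, more bytes */
	printf("rounds=%u end=%u bytes=%u\n", d.num_dsack, d.round_end, byte_cnt);
	return (0);
}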
/* fragment of do_rack_compute_pipe() */
	return (((tp->snd_max - snd_una) -
	    (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt);

/* fragment of rack_compute_pipe() */
	return (do_rack_compute_pipe(tp,
	    (struct tcp_rack *)tp->t_fb_ptr,
	    tp->snd_una));
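/*
 * Worked example of the pipe formula above:
 * pipe = (snd_max - snd_una) - (sacked + considered_lost) + holes_rxt,
 * i.e. bytes still believed to be in flight. Values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t snd_una = 1000, snd_max = 101000;	/* 100000 outstanding */
	uint32_t sacked = 30000, lost = 10000, holes_rxt = 5000;
	uint32_t pipe = (snd_max - snd_una) - (sacked + lost) + holes_rxt;

	printf("pipe = %u bytes\n", pipe);	/* 65000 still "in flight" */
	return (0);
}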
/* fragment of rack_update_prr() */
	rack->r_ctl.rc_prr_delivered += changed;

	if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
		/*
		 * It is all outstanding, nothing more to send.
		 * Note we use tp->snd_una here and not th_ack because
		 * the data has not yet been cut from the sb.
		 */
		rack->r_ctl.rc_prr_sndcnt = 0;
	} else {
		if (SEQ_GT(tp->snd_una, th_ack)) {
			snd_una = tp->snd_una;
		} else {
			snd_una = th_ack;
		}
		pipe = do_rack_compute_pipe(tp, rack, snd_una);
		if (pipe > tp->snd_ssthresh) {
			long sndcnt;

			sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
			if (rack->r_ctl.rc_prr_recovery_fs > 0)
				sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
			else {
				rack->r_ctl.rc_prr_sndcnt = 0;
				sndcnt = 0;
			}
			sndcnt++;
			if (sndcnt > (long)rack->r_ctl.rc_prr_out)
				sndcnt -= rack->r_ctl.rc_prr_out;
			else
				sndcnt = 0;
			rack->r_ctl.rc_prr_sndcnt = sndcnt;
		} else {
			uint32_t limit;

			if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
				limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
			else
				limit = 0;
			/* ... */
			if (tp->snd_ssthresh > pipe) {
				rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
			} else {
				rack->r_ctl.rc_prr_sndcnt = min(0, limit);
			}
		}
	}
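/*
 * Worked example of the RFC 6937 PRR arithmetic reflected above: while
 * pipe exceeds ssthresh, sends are rationed in proportion to delivered
 * data; once pipe falls to or below ssthresh we may rebuild it toward
 * ssthresh, bounded by a limit. A standalone sketch, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t
prr_sndcnt(uint32_t pipe, uint32_t ssthresh, int64_t delivered,
    int64_t out, int64_t recover_fs, int64_t limit)
{
	int64_t sndcnt;

	if (pipe > ssthresh) {
		/* Proportional reduction phase */
		if (recover_fs <= 0)
			return (0);
		sndcnt = (delivered * ssthresh) / recover_fs;
		sndcnt = (sndcnt > out) ? (sndcnt - out) : 0;
	} else {
		/* Rebuild pipe toward ssthresh, but no more than limit */
		sndcnt = (int64_t)ssthresh - (int64_t)pipe;
		if (sndcnt > limit)
			sndcnt = limit;
		if (sndcnt < 0)
			sndcnt = 0;
	}
	return (sndcnt);
}

int
main(void)
{
	/* 60000 delivered of a 100000-byte recovery, 20000 already re-sent */
	printf("%lld\n", (long long)prr_sndcnt(80000, 50000, 60000, 20000,
	    100000, 10000));	/* 10000 */
	printf("%lld\n", (long long)prr_sndcnt(40000, 50000, 60000, 20000,
	    100000, 10000));	/* 10000, clipped by the limit */
	return (0);
}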
/* fragment of rack_log_ack() */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	rsm = tqhash_min(rack->r_ctl.tqh);
	th_ack = th->th_ack;
	segsiz = ctf_fixed_maxseg(rack->rc_tp);
	/*
	 * You get one count per full segment acked (no
	 * credit for larger cum-ack moves).
	 */
	ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
	/* ... */
	if (SEQ_GT(th_ack, tp->snd_una)) {
		/* ... */
		tp->t_acktime = ticks;
	}
	if (rsm && SEQ_GT(th_ack, rsm->r_start))
		changed = th_ack - rsm->r_start;
	if (changed) {
		rack_process_to_cumack(tp, rack, th_ack, cts, to,
		    tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
	}
	if ((to->to_flags & TOF_SACK) == 0) {
		/*
		 * For cases where we struck a dup-ack
		 * with no SACK option ...
		 */
		changed += ctf_fixed_maxseg(rack->rc_tp);
		/* ... */
	}
	if (SEQ_GT(th_ack, tp->snd_una))
		ack_point = th_ack;
	else
		ack_point = tp->snd_una;
	for (i = 0; i < to->to_nsacks; i++) {
		bcopy((to->to_sacks + i * TCPOLEN_SACK),
		    &sack, sizeof(sack));
		if (SEQ_GT(sack.end, sack.start) &&
		    SEQ_GT(sack.start, ack_point) &&
		    SEQ_LT(sack.start, tp->snd_max) &&
		    SEQ_GT(sack.end, ack_point) &&
		    SEQ_LEQ(sack.end, tp->snd_max)) {
			sack_blocks[num_sack_blks] = sack;
			num_sack_blks++;
		} else if (SEQ_LEQ(sack.start, th_ack) &&
		    SEQ_LEQ(sack.end, th_ack)) {
			/*
			 * It is a D-SACK block.
			 */
			rack_note_dsack(rack, sack.start, sack.end);
		}
	}
	if (rack->rc_dsack_round_seen) {
		/* Is the dsack round over? */
		if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
			/* Yes it is */
			rack->rc_dsack_round_seen = 0;
			/* ... */
		}
	}
	/* Run the blocks through the sack filter */
	num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks,
	    num_sack_blks, th->th_ack);
	ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
	if (num_sack_blks == 0) {
		/* Nothing to sack, but we need to update counts */
		/* ... */
	}
	/*
	 * Now collapse out the dup-sack and
	 * lower sequence blocks, dropping num_sack_blks
	 * for each one removed:
	 */
	/* ... */
	num_sack_blks--;
	/* ... */
	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
	if (rsm &&
	    SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
	    SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
		/*
		 * The highest block covers the head of the
		 * retransmit queue ...
		 */
		/* ... */
		rack->r_wanted_output = 1;
	}
	/*
	 * ... i.e. the sack-filter pushes down
	 * the amount of new sack information.
	 */
	counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
	/* ... */
	rsm = rack->r_ctl.rc_sacklast;
	if (rsm) {
		/* ... */
		rack->r_wanted_output = 1;
	}
	/*
	 * If you have more than one sack-blk, this
	 * could be where the sack was not contiguous
	 * and the sack-filter is still working, or ...
	 */
	/* ... */
	rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
	/* ... */
	if ((!IN_FASTRECOVERY(tp->t_flags)) &&
	    rsm &&
	    ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
		/* Enter recovery */
		entered_recovery = 1;
		rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__);
		/*
		 * When we enter recovery we need to assure
		 * we send one packet.
		 */
		if (rack->rack_no_prr == 0) {
			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
			/* ... */
		}
		rack->r_timer_override = 1;
		rack->r_early = 0;
		rack->r_ctl.rc_agg_early = 0;
	} else if (IN_FASTRECOVERY(tp->t_flags) &&
	    rsm &&
	    (rack->r_rr_config == 3)) {
		/*
		 * Assure we can output and we get no
		 * remembered pace time except the retransmit.
		 */
		rack->r_timer_override = 1;
		rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
		rack->r_ctl.rc_resend = rsm;
	}
	if (IN_FASTRECOVERY(tp->t_flags) &&
	    (rack->rack_no_prr == 0) &&
	    (entered_recovery == 0)) {
		rack_update_prr(tp, rack, changed, th_ack, segsiz);
		if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
		    ((tcp_in_hpts(rack->rc_tp) == 0) &&
		    ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
			/*
			 * If we are in recovery, have enough PRR credit
			 * to send, and are not yet queued for output,
			 * get going now.
			 */
			rack->r_early = 0;
			rack->r_ctl.rc_agg_early = 0;
			rack->r_timer_override = 1;
		}
	}
/* fragment of rack_strike_dupack() */
	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
	while (rsm) {
		/* Skip anything already set to be retransmitted */
		if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
		    (rsm->r_flags & RACK_MUST_RXT)) {
			rsm = TAILQ_NEXT(rsm, r_tnext);
			continue;
		}
		break;
	}
	if (rsm && (rsm->r_dupack < 0xff)) {
		rsm->r_dupack++;
		if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
			/*
			 * Here we see if we need to retransmit. For a SACK
			 * type connection, if enough time has passed,
			 * we will get a return of the rsm. For a non-sack
			 * connection we get it once the dupack threshold
			 * is struck.
			 */
			rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
			if (rack->r_ctl.rc_resend != NULL) {
				if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
					rack_cong_signal(rack->rc_tp, CC_NDUPACK,
					    th_ack, __LINE__);
				}
				rack->r_wanted_output = 1;
				rack->r_timer_override = 1;
			}
		}
	}
/* fragment of rack_check_bottom_drag() */
	/*
	 * Are we dragging along the bottom of the socket buffer
	 * (i.e. not sending enough to properly
	 * gauge the inter-ack times)? If that occurs we have a real problem
	 * since our measurements get polluted by idle time.
	 */
	if (tp->snd_max == tp->snd_una) {
		/* We are drained and idle */
		tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM);
		rack->rc_dragged_bottom = 1;
		/* ... */
		if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
		    (rack->dis_lt_bw == 0) &&
		    (rack->use_lesser_lt_bw == 0)) {
			/*
			 * Lets use the long-term b/w we have
			 * been getting as a base.
			 */
			uint64_t lt_bw;

			lt_bw = rack_get_lt_bw(rack);
			if (rack->rc_gp_filled == 0) {
				/* No measurement yet, seed the filter */
				rack->r_ctl.rc_rtt_diff = 0;
				rack->r_ctl.gp_bw = lt_bw;
				rack->rc_gp_filled = 1;
				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
					rack->r_ctl.num_measurements = RACK_REQ_AVG;
				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
			} else if (lt_bw > rack->r_ctl.gp_bw) {
				rack->r_ctl.rc_rtt_diff = 0;
				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
					rack->r_ctl.num_measurements = RACK_REQ_AVG;
				rack->r_ctl.gp_bw = lt_bw;
				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
			} else
				rack_increase_bw_mul(rack, -1, 0, 0, 1);
			if ((rack->gp_ready == 0) &&
			    (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
				/* We have enough measurements now */
				rack->gp_ready = 1;
				if (rack->dgp_on ||
				    rack->rack_hibeta)
					rack_set_cc_pacing(rack);
				if (rack->defer_options)
					rack_apply_deferred_options(rack);
			}
		} else {
			/* No valid lt_bw to use, just nudge the multipliers */
			rack_increase_bw_mul(rack, -1, 0, 0, 1);
		}
	} else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
	    (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
	    minseg)) &&
	    (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
	    (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
	    (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
	    (segsiz * rack_req_segs))) {
		/*
		 * There is data we could send, yet almost nothing is
		 * in flight; we are pacing too slowly and inserting
		 * delayed-ack time into our measurements.
		 */
		rack->rc_dragged_bottom = 1;
		rack_increase_bw_mul(rack, -1, 0, 0, 1);
	}
/* fragment of rack_log_hybrid() */
	do_log = tcp_bblogging_on(rack->rc_tp);
	if (do_log == 0) {
		if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0)
			return;
		/* ... */
	}
	/* ... */
	log.u_bbr.flex2 = cur->start_seq;
	log.u_bbr.flex3 = cur->end_seq;
	log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
	log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff);
	log.u_bbr.flex6 = cur->flags;
	log.u_bbr.pkts_out = cur->hybrid_flags;
	log.u_bbr.rttProp = cur->timestamp;
	log.u_bbr.cur_del_rate = cur->cspr;
	log.u_bbr.bw_inuse = cur->start;
	log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff);
	log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
	log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff);
	log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff);
	/* ... */
	off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
	/* ... */
	log.u_bbr.flex7 = rack->rc_catch_up;
	log.u_bbr.flex7 <<= 1;
	log.u_bbr.flex7 |= rack->rc_hybrid_mode;
	log.u_bbr.flex7 <<= 1;
	log.u_bbr.flex7 |= rack->dgp_on;
	/* ... */
	log.u_bbr.bbr_state = rack->rc_always_pace;
	log.u_bbr.bbr_state <<= 1;
	log.u_bbr.bbr_state |= rack->dgp_on;
	log.u_bbr.bbr_state <<= 1;
	log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
	log.u_bbr.bbr_state <<= 1;
	log.u_bbr.bbr_state |= rack->use_fixed_rate;
	log.u_bbr.delRate = rack->r_ctl.bw_rate_cap;
	log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg;
	log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
	log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start;
	log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error;
	log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop;
	tcp_log_event(rack->rc_tp, NULL,
	    &rack->rc_inp->inp_socket->so_rcv,
	    &rack->rc_inp->inp_socket->so_snd,
	    /* ... */
/* fragment of rack_set_dgp_hybrid_mode() */
	orig_ent = rack->r_ctl.rc_last_sft;
	rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq);
	if (rc_cur == NULL) {
		/* If not at the beginning, what about the end piece? */
		if (rack->rc_hybrid_mode)
			rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
		rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1));
	}
	if (rc_cur == NULL) {
		/* No request being tracked for this range */
		if (rack->rc_hybrid_mode) {
			rack->r_ctl.client_suggested_maxseg = 0;
			rack->rc_catch_up = 0;
			if (rack->cspr_is_fcc == 0)
				rack->r_ctl.bw_rate_cap = 0;
			else
				rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
		}
		if (rack->rc_hybrid_mode) {
			rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
		}
		if (rack->r_ctl.rc_last_sft) {
			rack->r_ctl.rc_last_sft = NULL;
		}
		return;
	}
	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) {
		/* It was never set up as hybrid; use plain DGP */
		if (rack->rc_hybrid_mode) {
			rack->r_ctl.client_suggested_maxseg = 0;
			rack->rc_catch_up = 0;
			rack->r_ctl.bw_rate_cap = 0;
		}
		if (rack->r_ctl.rc_last_sft) {
			rack->r_ctl.rc_last_sft = NULL;
		}
		if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
			rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND;
			rc_cur->first_send = cts;
			rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes;
			rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
		}
		return;
	}
	/*
	 * If we have a new entry *or* have never set one up,
	 * proceed; if this entry is already in place just
	 * continue with what is set up.
	 */
	tp = rack->rc_tp;
	if ((rack->r_ctl.rc_last_sft != NULL) &&
	    (rack->r_ctl.rc_last_sft == rc_cur)) {
		/* It is already in place */
		if (rack->rc_hybrid_mode)
			/* ... */
		return;
	}
	if (rack->rc_hybrid_mode == 0) {
		rack->r_ctl.rc_last_sft = rc_cur;
		if (orig_ent) {
			orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes;
			orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes;
			orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND;
		}
		/* ... */
		return;
	}
	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) {
		/* Compensate for all the header overheads */
		if (rack->cspr_is_fcc == 0)
			rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr);
		else
			rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr);
	} else {
		if (rack->rc_hybrid_mode) {
			if (rack->cspr_is_fcc == 0)
				rack->r_ctl.bw_rate_cap = 0;
			else
				rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
		}
	}
	if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS)
		rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg;
	else
		rack->r_ctl.client_suggested_maxseg = 0;
	if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) {
		/*
		 * Same timestamp as the previous one: probably a
		 * repeat of the same request, so default to
		 * sendtime not arrival time for catch-up mode.
		 */
		rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME;
	}
	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) &&
	    (rc_cur->cspr > 0)) {
		uint64_t len;

		rack->rc_catch_up = 1;
		/* Set the deadline base time */
		if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) {
			/* Base the deadline on the first send time */
			rc_cur->deadline = cts;
		} else {
			/* Base the deadline on the request arrival time */
			rc_cur->deadline = rc_cur->localtime;
		}
		/* How long will this range take to send? */
		len = rc_cur->end - rc_cur->start;
		if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) {
			/*
			 * This session is doing TLS, take a swag guess
			 * at the overhead.
			 */
			len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len);
		}
		/* Convert to usecs needed at the cspr rate */
		len *= HPTS_USEC_IN_SEC;
		len /= rc_cur->cspr;
		rc_cur->deadline += len;
	} else {
		rack->rc_catch_up = 0;
		rc_cur->deadline = 0;
	}
	if (rack->r_ctl.client_suggested_maxseg != 0) {
		/* Reset the pace segments to honor the hint */
		/* ... */
	}
	if (orig_ent) {
		orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes;
		orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes;
		orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND;
	}
	/* Remember it for next time and for CU mode */
	rack->r_ctl.rc_last_sft = rc_cur;
	rack->r_ctl.last_tm_mark = rc_cur->timestamp;
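/*
 * Worked example of the catch-up deadline arithmetic above: the time to
 * deliver the remaining range at the client-suggested pacing rate
 * (cspr, bytes per second), in microseconds, added onto the base time.
 * HPTS_USEC_IN_SEC (1000000) is represented literally; values made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t start = 0, end = 2 * 1024 * 1024;	/* 2 MB range */
	uint64_t cspr = 4 * 1024 * 1024;		/* 4 MB/s suggested rate */
	uint64_t localtime = 1000000;			/* request arrival, usecs */
	uint64_t len = end - start;

	len *= 1000000;		/* bytes -> byte-microseconds */
	len /= cspr;		/* usecs needed at the cspr rate */
	printf("deadline = %llu usecs\n",
	    (unsigned long long)(localtime + len));	/* 1500000 */
	return (0);
}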
/* fragment of rack_chk_req_and_hybrid_on_out() */
	ent = rack->r_ctl.rc_last_sft;
	if ((ent == NULL) ||
	    (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) ||
	    (SEQ_GEQ(seq, ent->end_seq))) {
		/* Time to update the track */
		rack_set_dgp_hybrid_mode(rack, seq, len, cts);
		ent = rack->r_ctl.rc_last_sft;
	}
	if (ent == NULL)
		return;
	if (SEQ_LT(ent->end_seq, (seq + len))) {
		/*
		 * Our end_seq guess was short (TLS overhead, or two
		 * requests sharing the sb); extend it to cover.
		 */
		ent->end_seq = (seq + len);
		if (rack->rc_hybrid_mode)
			/* ... */
	}
	/* Have we seen the first send? */
	if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
		ent->flags |= TCP_TRK_TRACK_FLG_FSND;
		ent->first_send = cts;
		ent->sent_at_fs = rack->rc_tp->t_sndbytes;
		ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
	}
/* fragment of rack_gain_for_fastoutput() */
	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
	    (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
	if (new_total <= gating_val) {
		/* We have room to grow the fast-output send size */
		rack->r_ctl.fsb.left_to_send = new_total;
		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
		    ("...",
		    rack, rack->r_ctl.fsb.left_to_send,
		    sbavail(&rack->rc_inp->inp_socket->so_snd),
		    (tp->snd_max - tp->snd_una)));
	}
11599 snd_una = rack->rc_tp->snd_una; in rack_adjust_sendmap_head()
11601 m = sb->sb_mb; in rack_adjust_sendmap_head()
11602 rsm = tqhash_min(rack->r_ctl.tqh); in rack_adjust_sendmap_head()
11608 KASSERT((rsm->m == m), in rack_adjust_sendmap_head()
11609 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", in rack_adjust_sendmap_head()
11611 while (rsm->m && (rsm->m == m)) { in rack_adjust_sendmap_head()
11617 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); in rack_adjust_sendmap_head()
11618 if ((rsm->orig_m_len != m->m_len) || in rack_adjust_sendmap_head()
11619 (rsm->orig_t_space != M_TRAILINGROOM(m))){ in rack_adjust_sendmap_head()
11623 KASSERT((rsm->soff == 0), in rack_adjust_sendmap_head()
11624 ("Rack:%p rsm:%p -- rsm at head but soff not zero", in rack_adjust_sendmap_head()
11628 if ((rsm->soff != soff) || (rsm->m != tm)) { in rack_adjust_sendmap_head()
11637 rsm->m = tm; in rack_adjust_sendmap_head()
11638 rsm->soff = soff; in rack_adjust_sendmap_head()
11640 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11641 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11643 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11644 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11647 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); in rack_adjust_sendmap_head()
11648 if (rsm->m) { in rack_adjust_sendmap_head()
11649 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11650 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11652 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11653 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11656 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_adjust_sendmap_head()
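/*
 * Sketch (not kernel code) of the re-anchoring idea in
 * rack_adjust_sendmap_head() above: each sendmap entry caches an
 * (mbuf, offset) pair into the socket buffer, and once
 * sbcut_locked() frees the acked head those cached pointers go
 * stale.  The fix is to re-derive them from the entry's byte offset
 * (r_start - snd_una), which is what sbsndmbuf() does for the
 * kernel.  find_seg() below is a hypothetical analogue over a flat
 * segment array, not a kernel function.
 */
#include <stddef.h>

struct seg {
	const char *base;
	size_t len;
};

static const struct seg *
find_seg(const struct seg *chain, size_t nsegs, size_t off, size_t *soff)
{
	for (size_t i = 0; i < nsegs; i++) {
		if (off < chain[i].len) {
			*soff = off;	/* residual offset inside segment */
			return (&chain[i]);
		}
		off -= chain[i].len;
	}
	return (NULL);		/* offset past the end of the chain */
}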
11669 if ((rack->rc_hybrid_mode == 0) && in rack_req_check_for_comp()
11670 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { in rack_req_check_for_comp()
11675 tcp_req_check_for_comp(rack->rc_tp, th_ack); in rack_req_check_for_comp()
11685 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11699 data = ent->end - ent->start; in rack_req_check_for_comp()
11700 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_req_check_for_comp()
11701 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { in rack_req_check_for_comp()
11702 if (ent->first_send > ent->localtime) in rack_req_check_for_comp()
11703 ftim = ent->first_send; in rack_req_check_for_comp()
11705 ftim = ent->localtime; in rack_req_check_for_comp()
11708 ftim = ent->localtime; in rack_req_check_for_comp()
11710 if (laa > ent->localtime) in rack_req_check_for_comp()
11711 tim = laa - ftim; in rack_req_check_for_comp()
11725 if (ent == rack->r_ctl.rc_last_sft) { in rack_req_check_for_comp()
11726 rack->r_ctl.rc_last_sft = NULL; in rack_req_check_for_comp()
11727 if (rack->rc_hybrid_mode) { in rack_req_check_for_comp()
11728 rack->rc_catch_up = 0; in rack_req_check_for_comp()
11729 if (rack->cspr_is_fcc == 0) in rack_req_check_for_comp()
11730 rack->r_ctl.bw_rate_cap = 0; in rack_req_check_for_comp()
11732 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_req_check_for_comp()
11733 rack->r_ctl.client_suggested_maxseg = 0; in rack_req_check_for_comp()
11737 tcp_req_log_req_info(rack->rc_tp, ent, in rack_req_check_for_comp()
11740 tcp_req_free_a_slot(rack->rc_tp, ent); in rack_req_check_for_comp()
11741 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
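/*
 * Sketch (not kernel code) of the completion sweep in
 * rack_req_check_for_comp() above: every tracked request whose last
 * byte is covered by the cumulative ACK is accounted and its slot
 * freed, and the lookup repeats so several requests completed by one
 * ACK drain in a single call.  The struct and helper below are
 * hypothetical stand-ins for the kernel's tcp_req machinery.
 */
#include <stddef.h>
#include <stdint.h>

#define SEQ_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)

struct req {
	int in_use;
	uint32_t start, end;	/* sequence range of the response */
};

static void
sweep_completed(struct req *reqs, size_t n, uint32_t th_ack)
{
	for (size_t i = 0; i < n; i++) {
		if (reqs[i].in_use && SEQ_GEQ(th_ack, reqs[i].end)) {
			/* ...log timing and byte counts here... */
			reqs[i].in_use = 0;
		}
	}
}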
11750 * For ret_val: if it's 0 the TCP is locked, if it's non-zero
11770 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_ack()
11771 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { in rack_process_ack()
11773 tp->t_flags2 |= TF2_NO_ISS_CHECK; in rack_process_ack()
11779 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { in rack_process_ack()
11781 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11784 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { in rack_process_ack()
11786 seq_min = tp->iss + 1; in rack_process_ack()
11793 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11797 if (SEQ_LT(th->th_ack, seq_min)) { in rack_process_ack()
11804 rack->r_wanted_output = 1; in rack_process_ack()
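/*
 * Sketch (not kernel code) of the acceptable-ACK floor computed
 * above, using 32-bit modular sequence arithmetic.  Before
 * TF2_NO_ISS_CHECK is set, an ACK may not land more than one maximum
 * window below snd_una, nor below iss+1; afterwards only the window
 * bound applies.
 */
#include <stdint.h>

#define SEQ_GT(a, b) ((int32_t)((a) - (b)) > 0)

static uint32_t
ack_floor(uint32_t snd_una, uint32_t iss, uint32_t max_sndwnd,
    int no_iss_check)
{
	uint32_t wnd_floor = snd_una - max_sndwnd;

	if (no_iss_check)
		return (wnd_floor);
	/* Early in the connection, iss+1 may be the higher floor. */
	return (SEQ_GT(iss + 1, wnd_floor) ? iss + 1 : wnd_floor);
}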
11808 if (SEQ_GT(th->th_ack, tp->snd_max)) { in rack_process_ack()
11810 rack->r_wanted_output = 1; in rack_process_ack()
11813 if (rack->gp_ready && in rack_process_ack()
11814 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_process_ack()
11817 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { in rack_process_ack()
11821 in_rec = IN_FASTRECOVERY(tp->t_flags); in rack_process_ack()
11822 if (rack->rc_in_persist) { in rack_process_ack()
11823 tp->t_rxtshift = 0; in rack_process_ack()
11824 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11825 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11828 if ((th->th_ack == tp->snd_una) && in rack_process_ack()
11829 (tiwin == tp->snd_wnd) && in rack_process_ack()
11831 ((to->to_flags & TOF_SACK) == 0)) { in rack_process_ack()
11832 rack_strike_dupack(rack, th->th_ack); in rack_process_ack()
11835 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), in rack_process_ack()
11839 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_process_ack()
11845 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { in rack_process_ack()
11846 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_process_ack()
11847 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_ack()
11848 rack->r_ctl.rc_reorder_ts = 1; in rack_process_ack()
11856 if (tp->t_flags & TF_NEEDSYN) { in rack_process_ack()
11858 * T/TCP: Connection was half-synchronized, and our SYN has in rack_process_ack()
11860 * to non-starred state, increment snd_una for ACK of SYN, in rack_process_ack()
11863 tp->t_flags &= ~TF_NEEDSYN; in rack_process_ack()
11864 tp->snd_una++; in rack_process_ack()
11866 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_process_ack()
11868 tp->rcv_scale = tp->request_r_scale; in rack_process_ack()
11872 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_ack()
11877 * Any time we move the cum-ack forward clear in rack_process_ack()
11878 * keep-alive tied probe-not-answered. The in rack_process_ack()
11881 rack->probe_not_answered = 0; in rack_process_ack()
11891 if ((tp->t_flags & TF_PREVVALID) && in rack_process_ack()
11892 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_process_ack()
11893 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
11894 if (tp->t_rxtshift == 1 && in rack_process_ack()
11895 (int)(ticks - tp->t_badrxtwin) < 0) in rack_process_ack()
11896 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_process_ack()
11900 tp->t_rxtshift = 0; in rack_process_ack()
11901 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11902 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11903 rack->rc_tlp_in_progress = 0; in rack_process_ack()
11904 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_process_ack()
11909 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_process_ack()
11910 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11912 rack_req_check_for_comp(rack, th->th_ack); in rack_process_ack()
11931 * (possibly backed-off) value. in rack_process_ack()
11938 if (IN_RECOVERY(tp->t_flags)) { in rack_process_ack()
11939 if (SEQ_LT(th->th_ack, tp->snd_recover) && in rack_process_ack()
11940 (SEQ_LT(th->th_ack, tp->snd_max))) { in rack_process_ack()
11943 rack_post_recovery(tp, th->th_ack); in rack_process_ack()
11950 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_process_ack()
11952 p_cwnd += tp->snd_cwnd; in rack_process_ack()
11954 } else if ((rack->rto_from_rec == 1) && in rack_process_ack()
11955 SEQ_GEQ(th->th_ack, tp->snd_recover)) { in rack_process_ack()
11958 * and never re-entered recovery. The timeout(s) in rack_process_ack()
11962 rack->rto_from_rec = 0; in rack_process_ack()
11969 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); in rack_process_ack()
11971 (tp->snd_cwnd > p_cwnd)) { in rack_process_ack()
11972 /* Must be a non-newreno CC (e.g. cubic) getting ahead of itself */ in rack_process_ack()
11973 tp->snd_cwnd = p_cwnd; in rack_process_ack()
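/*
 * Sketch (not kernel code) of the exit-recovery cap applied above: a
 * non-newreno congestion module may inflate cwnd on the ACK that
 * completes recovery, so the result is capped at the post-recovery
 * cwnd plus one pacer-sized segment.  min_seg stands in for
 * min(ctf_fixed_maxseg(tp), rc_pace_min_segs).
 */
#include <stdint.h>

static uint32_t
cap_exit_recovery_cwnd(uint32_t cwnd_after_cc, uint32_t cwnd_at_exit,
    uint32_t min_seg)
{
	uint32_t p_cwnd = cwnd_at_exit + min_seg;

	return ((cwnd_after_cc > p_cwnd) ? p_cwnd : cwnd_after_cc);
}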
11976 acked_amount = min(acked, (int)sbavail(&so->so_snd)); in rack_process_ack()
11977 tp->snd_wnd -= acked_amount; in rack_process_ack()
11978 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_process_ack()
11979 if ((sbused(&so->so_snd) == 0) && in rack_process_ack()
11981 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
11982 (tp->t_flags & TF_SENTFIN)) { in rack_process_ack()
11991 tp->snd_una = th->th_ack; in rack_process_ack()
11993 if (acked_amount && sbavail(&so->so_snd)) in rack_process_ack()
11994 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_process_ack()
11995 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_process_ack()
11999 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_process_ack()
12000 tp->snd_recover = tp->snd_una; in rack_process_ack()
12002 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { in rack_process_ack()
12003 tp->snd_nxt = tp->snd_max; in rack_process_ack()
12006 (rack->use_fixed_rate == 0) && in rack_process_ack()
12007 (rack->in_probe_rtt == 0) && in rack_process_ack()
12008 rack->rc_gp_dyn_mul && in rack_process_ack()
12009 rack->rc_always_pace) { in rack_process_ack()
12013 if (tp->snd_una == tp->snd_max) { in rack_process_ack()
12015 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
12016 if (rack->r_ctl.rc_went_idle_time == 0) in rack_process_ack()
12017 rack->r_ctl.rc_went_idle_time = 1; in rack_process_ack()
12018 rack->r_ctl.retran_during_recovery = 0; in rack_process_ack()
12019 rack->r_ctl.dsack_byte_cnt = 0; in rack_process_ack()
12021 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_process_ack()
12022 tp->t_acktime = 0; in rack_process_ack()
12023 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
12024 rack->rc_suspicious = 0; in rack_process_ack()
12026 rack->r_wanted_output = 1; in rack_process_ack()
12027 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_process_ack()
12028 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
12029 (sbavail(&so->so_snd) == 0) && in rack_process_ack()
12030 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_process_ack()
12037 /* tcp_close will kill the inp pre-log the Reset */ in rack_process_ack()
12054 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_collapse()
12063 log.u_bbr.flex5 = rack->r_must_retran; in rack_log_collapse()
12065 log.u_bbr.flex7 = rack->rc_has_collapsed; in rack_log_collapse()
12075 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_collapse()
12076 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_collapse()
12077 &rack->rc_inp->inp_socket->so_rcv, in rack_log_collapse()
12078 &rack->rc_inp->inp_socket->so_snd, in rack_log_collapse()
12093 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_collapsed_window()
12094 if ((rack->rc_has_collapsed == 0) || in rack_collapsed_window()
12095 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) in rack_collapsed_window()
12097 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; in rack_collapsed_window()
12098 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; in rack_collapsed_window()
12099 rack->rc_has_collapsed = 1; in rack_collapsed_window()
12100 rack->r_collapse_point_valid = 1; in rack_collapsed_window()
12101 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); in rack_collapsed_window()
12112 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_un_collapse_window()
12113 rack->rc_has_collapsed = 0; in rack_un_collapse_window()
12114 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12117 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
12121 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { in rack_un_collapse_window()
12122 rack_log_collapse(rack, rsm->r_start, rsm->r_end, in rack_un_collapse_window()
12123 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); in rack_un_collapse_window()
12132 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12134 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_un_collapse_window()
12136 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_un_collapse_window()
12141 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, in rack_un_collapse_window()
12142 rack->r_ctl.last_collapse_point, __LINE__); in rack_un_collapse_window()
12143 if (rsm->r_in_tmap) { in rack_un_collapse_window()
12144 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_un_collapse_window()
12145 nrsm->r_in_tmap = 1; in rack_un_collapse_window()
12155 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { in rack_un_collapse_window()
12157 nrsm->r_flags |= RACK_RWND_COLLAPSED; in rack_un_collapse_window()
12158 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); in rack_un_collapse_window()
12164 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
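/*
 * Sketch (not kernel code) of the un-collapse marking above: an
 * entry straddling last_collapse_point is split so that only the
 * bytes beyond the point carry the "collapsed" tag; everything at or
 * past the point is then flagged for retransmission consideration
 * once the peer's window reopens.  The types below are illustrative
 * only.
 */
#include <stdint.h>

#define RWND_COLLAPSED	0x01

struct range {
	uint32_t start, end;
	uint32_t flags;
};

/* Caller guarantees r->start < point < r->end. */
static struct range
split_and_mark(struct range *r, uint32_t point)
{
	struct range upper = { point, r->end, r->flags | RWND_COLLAPSED };

	r->end = point;		/* lower half keeps the old flags */
	return (upper);
}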
12173 rack->r_ctl.rc_rcvtime, __LINE__); in rack_handle_delayed_ack()
12174 tp->t_flags |= TF_DELACK; in rack_handle_delayed_ack()
12176 rack->r_wanted_output = 1; in rack_handle_delayed_ack()
12177 tp->t_flags |= TF_ACKNOW; in rack_handle_delayed_ack()
12189 if (rack->r_fast_output) { in rack_validate_fo_sendwin_up()
12197 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12199 if (out >= tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12201 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
12204 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; in rack_validate_fo_sendwin_up()
12205 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { in rack_validate_fo_sendwin_up()
12207 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
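/*
 * Sketch (not kernel code) of rack_validate_fo_sendwin_up() above:
 * returns 1 when fast output may continue, shrinking the queued
 * budget to fit the window.  A window that is already full, or a
 * leftover smaller than one MSS, abandons the fast path.
 */
#include <stdint.h>

static int
validate_fast_output(uint32_t out, uint32_t snd_wnd,
    uint32_t *left_to_send, uint32_t maxseg)
{
	if ((out + *left_to_send) <= snd_wnd)
		return (1);
	if (out >= snd_wnd)
		return (0);		/* window already consumed */
	*left_to_send = snd_wnd - out;
	/* A sub-MSS runt is not worth keeping the fast path for. */
	return (*left_to_send >= maxseg);
}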
12234 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_data()
12235 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_data()
12237 (SEQ_LT(tp->snd_wl1, th->th_seq) || in rack_process_data()
12238 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || in rack_process_data()
12239 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { in rack_process_data()
12242 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) in rack_process_data()
12244 tp->snd_wnd = tiwin; in rack_process_data()
12246 tp->snd_wl1 = th->th_seq; in rack_process_data()
12247 tp->snd_wl2 = th->th_ack; in rack_process_data()
12248 if (tp->snd_wnd > tp->max_sndwnd) in rack_process_data()
12249 tp->max_sndwnd = tp->snd_wnd; in rack_process_data()
12250 rack->r_wanted_output = 1; in rack_process_data()
12252 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { in rack_process_data()
12253 tp->snd_wnd = tiwin; in rack_process_data()
12255 tp->snd_wl1 = th->th_seq; in rack_process_data()
12256 tp->snd_wl2 = th->th_ack; in rack_process_data()
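/*
 * Sketch (not kernel code) of the window-update acceptance test a
 * few lines up: a segment may raise snd_wnd only if it is strictly
 * newer than the last update -- a newer sequence, the same sequence
 * with a newer ack, or the same (seq, ack) pair with a larger
 * window.
 */
#include <stdbool.h>
#include <stdint.h>

#define SEQ_LT(a, b) ((int32_t)((a) - (b)) < 0)

static bool
win_update_ok(uint32_t seq, uint32_t ack, uint32_t tiwin,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
	return (SEQ_LT(snd_wl1, seq) ||
	    (snd_wl1 == seq &&
	     (SEQ_LT(snd_wl2, ack) ||
	      (snd_wl2 == ack && tiwin > snd_wnd))));
}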
12259 if (tp->snd_wnd < ctf_outstanding(tp)) in rack_process_data()
12261 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_process_data()
12262 else if (rack->rc_has_collapsed) in rack_process_data()
12264 if ((rack->r_collapse_point_valid) && in rack_process_data()
12265 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) in rack_process_data()
12266 rack->r_collapse_point_valid = 0; in rack_process_data()
12268 if ((rack->rc_in_persist != 0) && in rack_process_data()
12269 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_process_data()
12270 rack->r_ctl.rc_pace_min_segs))) { in rack_process_data()
12271 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); in rack_process_data()
12272 tp->snd_nxt = tp->snd_max; in rack_process_data()
12274 rack->r_wanted_output = 1; in rack_process_data()
12277 if ((rack->rc_in_persist == 0) && in rack_process_data()
12278 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_process_data()
12279 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_process_data()
12280 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_process_data()
12281 sbavail(&tptosocket(tp)->so_snd) && in rack_process_data()
12282 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_process_data()
12289 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_process_data()
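/*
 * Sketch (not kernel code) of the enter-persist predicate above: a
 * zero-or-tiny window, an established connection, everything sent
 * already acked (or the window collapsed), and more data queued than
 * the window allows.  half_rwnd stands in for
 * min(rc_high_rwnd / 2, rc_pace_min_segs).
 */
#include <stdbool.h>
#include <stdint.h>

static bool
should_enter_persist(bool in_persist, uint32_t snd_wnd,
    uint32_t half_rwnd, bool established, bool all_acked_or_collapsed,
    uint32_t sb_avail)
{
	return (!in_persist &&
	    snd_wnd < half_rwnd &&
	    established &&
	    all_acked_or_collapsed &&
	    sb_avail > 0 &&
	    sb_avail > snd_wnd);
}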
12291 if (tp->t_flags2 & TF2_DROP_AF_DATA) { in rack_process_data()
12299 tp->rcv_up = tp->rcv_nxt; in rack_process_data()
12304 * This process logically involves adjusting tp->rcv_wnd as data is in rack_process_data()
12309 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_process_data()
12310 (tp->t_flags & TF_FASTOPEN)); in rack_process_data()
12312 TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12313 tcp_seq save_start = th->th_seq; in rack_process_data()
12314 tcp_seq save_rnxt = tp->rcv_nxt; in rack_process_data()
12329 if (th->th_seq == tp->rcv_nxt && in rack_process_data()
12331 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_process_data()
12336 if (so->so_rcv.sb_shlim) { in rack_process_data()
12339 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_process_data()
12348 tp->rcv_nxt += tlen; in rack_process_data()
12350 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_process_data()
12351 (tp->t_fbyte_in == 0)) { in rack_process_data()
12352 tp->t_fbyte_in = ticks; in rack_process_data()
12353 if (tp->t_fbyte_in == 0) in rack_process_data()
12354 tp->t_fbyte_in = 1; in rack_process_data()
12355 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_process_data()
12356 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_process_data()
12362 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_process_data()
12371 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_process_data()
12376 sbappendstream_locked(&so->so_rcv, m, 0); in rack_process_data()
12378 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_process_data()
12382 if (so->so_rcv.sb_shlim && appended != mcnt) in rack_process_data()
12383 counter_fo_release(so->so_rcv.sb_shlim, in rack_process_data()
12384 mcnt - appended); in rack_process_data()
12396 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12397 if (tp->t_flags & TF_WAKESOR) { in rack_process_data()
12398 tp->t_flags &= ~TF_WAKESOR; in rack_process_data()
12403 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_process_data()
12405 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_process_data()
12413 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { in rack_process_data()
12414 if ((tp->rcv_numsacks >= 1) && in rack_process_data()
12415 (tp->sackblks[0].end == save_start)) { in rack_process_data()
12421 tp->sackblks[0].start, in rack_process_data()
12422 tp->sackblks[0].end); in rack_process_data()
12446 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12450 * If connection is half-synchronized (ie NEEDSYN in rack_process_data()
12456 if (tp->t_flags & TF_NEEDSYN) { in rack_process_data()
12458 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12459 tp->t_flags |= TF_DELACK; in rack_process_data()
12461 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12463 tp->rcv_nxt++; in rack_process_data()
12465 switch (tp->t_state) { in rack_process_data()
12471 tp->t_starttime = ticks; in rack_process_data()
12475 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12485 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12491 * starting the time-wait timer, turning off the in rack_process_data()
12496 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12504 if ((tp->t_flags & TF_ACKNOW) || in rack_process_data()
12505 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { in rack_process_data()
12506 rack->r_wanted_output = 1; in rack_process_data()
12513 * have broken out the fast-data path as well, just like
12514 * the fast-ack path.
12533 if (__predict_false(th->th_seq != tp->rcv_nxt)) { in rack_do_fastnewdata()
12536 if (tiwin && tiwin != tp->snd_wnd) { in rack_do_fastnewdata()
12539 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { in rack_do_fastnewdata()
12542 if (__predict_false((to->to_flags & TOF_TS) && in rack_do_fastnewdata()
12543 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { in rack_do_fastnewdata()
12546 if (__predict_false((th->th_ack != tp->snd_una))) { in rack_do_fastnewdata()
12549 if (__predict_false(tlen > sbspace(&so->so_rcv))) { in rack_do_fastnewdata()
12552 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fastnewdata()
12553 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_do_fastnewdata()
12554 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fastnewdata()
12555 tp->ts_recent = to->to_tsval; in rack_do_fastnewdata()
12557 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_fastnewdata()
12559 * This is a pure, in-sequence data packet with nothing on the in rack_do_fastnewdata()
12562 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_do_fastnewdata()
12565 if (so->so_rcv.sb_shlim) { in rack_do_fastnewdata()
12568 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_do_fastnewdata()
12577 if (tp->rcv_numsacks) in rack_do_fastnewdata()
12580 tp->rcv_nxt += tlen; in rack_do_fastnewdata()
12582 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_do_fastnewdata()
12583 (tp->t_fbyte_in == 0)) { in rack_do_fastnewdata()
12584 tp->t_fbyte_in = ticks; in rack_do_fastnewdata()
12585 if (tp->t_fbyte_in == 0) in rack_do_fastnewdata()
12586 tp->t_fbyte_in = 1; in rack_do_fastnewdata()
12587 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_do_fastnewdata()
12588 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_do_fastnewdata()
12593 tp->snd_wl1 = th->th_seq; in rack_do_fastnewdata()
12597 tp->rcv_up = tp->rcv_nxt; in rack_do_fastnewdata()
12604 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fastnewdata()
12613 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_do_fastnewdata()
12618 sbappendstream_locked(&so->so_rcv, m, 0); in rack_do_fastnewdata()
12621 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_do_fastnewdata()
12625 if (so->so_rcv.sb_shlim && mcnt != appended) in rack_do_fastnewdata()
12626 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); in rack_do_fastnewdata()
12629 if (tp->snd_una == tp->snd_max) in rack_do_fastnewdata()
12630 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_fastnewdata()
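/*
 * Sketch (not kernel code) of the gate into rack_do_fastnewdata()
 * above: a pure, in-sequence data segment -- next expected sequence,
 * an unchanged (or absent) window update, no half-done handshake
 * flags, a fresh timestamp, an ACK that moves nothing, and
 * receive-buffer room for the whole payload.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
fastnewdata_ok(uint32_t th_seq, uint32_t rcv_nxt, uint32_t tiwin,
    uint32_t snd_wnd, bool need_syn_or_fin, bool ts_is_old,
    uint32_t th_ack, uint32_t snd_una, long tlen, long rcv_space)
{
	return (th_seq == rcv_nxt &&
	    (tiwin == 0 || tiwin == snd_wnd) &&
	    !need_syn_or_fin &&
	    !ts_is_old &&
	    th_ack == snd_una &&
	    tlen <= rcv_space);
}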
12637 * in sequence to remain in the fast-path. We also add
12641 * slow-path.
12653 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_fastack()
12657 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { in rack_fastack()
12665 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { in rack_fastack()
12669 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { in rack_fastack()
12673 if (__predict_false(IN_RECOVERY(tp->t_flags))) { in rack_fastack()
12677 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fastack()
12678 if (rack->r_ctl.rc_sacked) { in rack_fastack()
12682 /* Ok if we reach here, we can process a fast-ack */ in rack_fastack()
12683 if (rack->gp_ready && in rack_fastack()
12684 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_fastack()
12687 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_fastack()
12690 if (tiwin != tp->snd_wnd) { in rack_fastack()
12691 tp->snd_wnd = tiwin; in rack_fastack()
12693 tp->snd_wl1 = th->th_seq; in rack_fastack()
12694 if (tp->snd_wnd > tp->max_sndwnd) in rack_fastack()
12695 tp->max_sndwnd = tp->snd_wnd; in rack_fastack()
12698 if ((rack->rc_in_persist != 0) && in rack_fastack()
12699 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_fastack()
12700 rack->r_ctl.rc_pace_min_segs))) { in rack_fastack()
12704 if ((rack->rc_in_persist == 0) && in rack_fastack()
12705 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_fastack()
12706 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_fastack()
12707 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_fastack()
12708 sbavail(&tptosocket(tp)->so_snd) && in rack_fastack()
12709 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_fastack()
12716 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); in rack_fastack()
12723 if ((to->to_flags & TOF_TS) != 0 && in rack_fastack()
12724 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_fastack()
12725 tp->ts_recent_age = tcp_ts_getticks(); in rack_fastack()
12726 tp->ts_recent = to->to_tsval; in rack_fastack()
12736 if ((tp->t_flags & TF_PREVVALID) && in rack_fastack()
12737 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_fastack()
12738 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12739 if (tp->t_rxtshift == 1 && in rack_fastack()
12740 (int)(ticks - tp->t_badrxtwin) < 0) in rack_fastack()
12741 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_fastack()
12761 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); in rack_fastack()
12763 mfree = sbcut_locked(&so->so_snd, acked); in rack_fastack()
12764 tp->snd_una = th->th_ack; in rack_fastack()
12766 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_fastack()
12768 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_fastack()
12771 tp->t_rxtshift = 0; in rack_fastack()
12772 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_fastack()
12773 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_fastack()
12774 rack->rc_tlp_in_progress = 0; in rack_fastack()
12775 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_fastack()
12780 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_fastack()
12781 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12784 rack_req_check_for_comp(rack, th->th_ack); in rack_fastack()
12792 if (tp->snd_wnd < ctf_outstanding(tp)) { in rack_fastack()
12794 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_fastack()
12795 } else if (rack->rc_has_collapsed) in rack_fastack()
12797 if ((rack->r_collapse_point_valid) && in rack_fastack()
12798 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) in rack_fastack()
12799 rack->r_collapse_point_valid = 0; in rack_fastack()
12803 tp->snd_wl2 = th->th_ack; in rack_fastack()
12804 tp->t_dupacks = 0; in rack_fastack()
12810 * otherwise restart timer using current (possibly backed-off) in rack_fastack()
12816 (rack->use_fixed_rate == 0) && in rack_fastack()
12817 (rack->in_probe_rtt == 0) && in rack_fastack()
12818 rack->rc_gp_dyn_mul && in rack_fastack()
12819 rack->rc_always_pace) { in rack_fastack()
12823 if (tp->snd_una == tp->snd_max) { in rack_fastack()
12824 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12825 rack->r_ctl.retran_during_recovery = 0; in rack_fastack()
12826 rack->rc_suspicious = 0; in rack_fastack()
12827 rack->r_ctl.dsack_byte_cnt = 0; in rack_fastack()
12828 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_fastack()
12829 if (rack->r_ctl.rc_went_idle_time == 0) in rack_fastack()
12830 rack->r_ctl.rc_went_idle_time = 1; in rack_fastack()
12832 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_fastack()
12833 tp->t_acktime = 0; in rack_fastack()
12834 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12836 if (acked && rack->r_fast_output) in rack_fastack()
12838 if (sbavail(&so->so_snd)) { in rack_fastack()
12839 rack->r_wanted_output = 1; in rack_fastack()
12867 * this is an acceptable SYN segment initialize tp->rcv_nxt and in rack_do_syn_sent()
12868 * tp->irs if seg contains ack then advance tp->snd_una if seg in rack_do_syn_sent()
12875 (SEQ_LEQ(th->th_ack, tp->iss) || in rack_do_syn_sent()
12876 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_sent()
12896 tp->irs = th->th_seq; in rack_do_syn_sent()
12898 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_sent()
12908 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_sent()
12910 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_sent()
12912 tp->rcv_adv += min(tp->rcv_wnd, in rack_do_syn_sent()
12913 TCP_MAXWIN << tp->rcv_scale); in rack_do_syn_sent()
12918 if ((tp->t_flags & TF_FASTOPEN) && in rack_do_syn_sent()
12919 (tp->snd_una != tp->snd_max)) { in rack_do_syn_sent()
12921 if (SEQ_LT(th->th_ack, tp->snd_max)) in rack_do_syn_sent()
12930 rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_syn_sent()
12931 tp->t_flags |= TF_DELACK; in rack_do_syn_sent()
12933 rack->r_wanted_output = 1; in rack_do_syn_sent()
12934 tp->t_flags |= TF_ACKNOW; in rack_do_syn_sent()
12939 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_syn_sent()
12945 * ack-processing since the in rack_do_syn_sent()
12946 * data stream in our send-map in rack_do_syn_sent()
12952 tp->snd_una++; in rack_do_syn_sent()
12953 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { in rack_do_syn_sent()
12962 rsm = tqhash_min(rack->r_ctl.tqh); in rack_do_syn_sent()
12964 if (rsm->r_flags & RACK_HAS_SYN) { in rack_do_syn_sent()
12965 rsm->r_flags &= ~RACK_HAS_SYN; in rack_do_syn_sent()
12966 rsm->r_start++; in rack_do_syn_sent()
12968 rack->r_ctl.rc_resend = rsm; in rack_do_syn_sent()
12974 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 in rack_do_syn_sent()
12976 tp->t_starttime = ticks; in rack_do_syn_sent()
12977 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_sent()
12979 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_sent()
12989 * Received initial SYN in SYN-SENT[*] state => simultaneous in rack_do_syn_sent()
12992 * half-synchronized. Otherwise, do 3-way handshake: in rack_do_syn_sent()
12993 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If in rack_do_syn_sent()
12996 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); in rack_do_syn_sent()
13000 * Advance th->th_seq to correspond to first data byte. If data, in rack_do_syn_sent()
13003 th->th_seq++; in rack_do_syn_sent()
13004 if (tlen > tp->rcv_wnd) { in rack_do_syn_sent()
13005 todrop = tlen - tp->rcv_wnd; in rack_do_syn_sent()
13006 m_adj(m, -todrop); in rack_do_syn_sent()
13007 tlen = tp->rcv_wnd; in rack_do_syn_sent()
13012 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_sent()
13013 tp->rcv_up = th->th_seq; in rack_do_syn_sent()
13021 /* For syn-sent we need to possibly update the rtt */ in rack_do_syn_sent()
13022 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_sent()
13026 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_sent()
13027 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_sent()
13028 tp->t_rttlow = t; in rack_do_syn_sent()
13029 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); in rack_do_syn_sent()
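/*
 * Sketch (not kernel code) of the handshake RTT sample above: the
 * peer echoed our millisecond timestamp in tsecr, so the current
 * millisecond tick minus the echo, scaled to microseconds, is one
 * RTT observation.
 */
#include <stdint.h>

#define HPTS_USEC_IN_MSEC 1000

static uint32_t
rtt_from_ts_echo(uint32_t now_ms_ticks, uint32_t tsecr)
{
	return ((now_ms_ticks - tsecr) * HPTS_USEC_IN_MSEC);
}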
13036 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_sent()
13053 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_sent()
13083 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_recv()
13086 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_syn_recv()
13089 (SEQ_LEQ(th->th_ack, tp->snd_una) || in rack_do_syn_recv()
13090 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_recv()
13095 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13108 /* non-initial SYN is ignored */ in rack_do_syn_recv()
13109 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || in rack_do_syn_recv()
13110 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || in rack_do_syn_recv()
13111 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { in rack_do_syn_recv()
13125 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_syn_recv()
13126 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_syn_recv()
13131 * In the SYN-RECEIVED state, validate that the packet belongs to in rack_do_syn_recv()
13137 if (SEQ_LT(th->th_seq, tp->irs)) { in rack_do_syn_recv()
13159 if ((to->to_flags & TOF_TS) != 0 && in rack_do_syn_recv()
13160 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_syn_recv()
13161 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_syn_recv()
13163 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_syn_recv()
13164 tp->ts_recent = to->to_tsval; in rack_do_syn_recv()
13166 tp->snd_wnd = tiwin; in rack_do_syn_recv()
13169 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_syn_recv()
13170 * is on (half-synchronized state), then queue data for later in rack_do_syn_recv()
13174 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13181 if (tp->t_flags & TF_SONOTCONN) { in rack_do_syn_recv()
13182 tp->t_flags &= ~TF_SONOTCONN; in rack_do_syn_recv()
13186 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_recv()
13188 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_recv()
13191 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> in rack_do_syn_recv()
13192 * FIN-WAIT-1 in rack_do_syn_recv()
13194 tp->t_starttime = ticks; in rack_do_syn_recv()
13195 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { in rack_do_syn_recv()
13196 tcp_fastopen_decrement_counter(tp->t_tfo_pending); in rack_do_syn_recv()
13197 tp->t_tfo_pending = NULL; in rack_do_syn_recv()
13199 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_recv()
13201 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_recv()
13212 if (!(tp->t_flags & TF_FASTOPEN)) in rack_do_syn_recv()
13220 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) in rack_do_syn_recv()
13221 tp->snd_una++; in rack_do_syn_recv()
13229 if (tp->t_flags & TF_WAKESOR) { in rack_do_syn_recv()
13230 tp->t_flags &= ~TF_WAKESOR; in rack_do_syn_recv()
13235 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_recv()
13236 /* For syn-recv we need to possibly update the rtt */ in rack_do_syn_recv()
13237 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_recv()
13241 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_recv()
13242 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_recv()
13243 tp->t_rttlow = t; in rack_do_syn_recv()
13244 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); in rack_do_syn_recv()
13251 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_recv()
13268 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_recv()
13298 * uni-directional data xfer. If the packet has no control flags, in rack_do_established()
13299 * is in-sequence, the window didn't change and we're not in rack_do_established()
13303 * waiting for space. If the length is non-zero and the ack didn't in rack_do_established()
13304 * move, we're the receiver side. If we're getting packets in-order in rack_do_established()
13307 * hidden state-flags are also off. Since we check for in rack_do_established()
13310 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_established()
13311 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && in rack_do_established()
13314 __predict_true(th->th_seq == tp->rcv_nxt)) { in rack_do_established()
13317 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { in rack_do_established()
13330 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_established()
13345 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_established()
13346 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_established()
13367 if ((to->to_flags & TOF_TS) != 0 && in rack_do_established()
13368 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_established()
13369 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_established()
13371 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_established()
13372 tp->ts_recent = to->to_tsval; in rack_do_established()
13375 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_established()
13376 * is on (half-synchronized state), then queue data for later in rack_do_established()
13380 if (tp->t_flags & TF_NEEDSYN) { in rack_do_established()
13384 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_established()
13386 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_established()
13399 if (sbavail(&so->so_snd)) { in rack_do_established()
13426 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_close_wait()
13440 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_close_wait()
13441 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_close_wait()
13462 if ((to->to_flags & TOF_TS) != 0 && in rack_do_close_wait()
13463 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_close_wait()
13464 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_close_wait()
13466 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_close_wait()
13467 tp->ts_recent = to->to_tsval; in rack_do_close_wait()
13470 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_close_wait()
13471 * is on (half-synchronized state), then queue data for later in rack_do_close_wait()
13475 if (tp->t_flags & TF_NEEDSYN) { in rack_do_close_wait()
13479 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_close_wait()
13481 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_close_wait()
13494 if (sbavail(&so->so_snd)) { in rack_do_close_wait()
13496 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_close_wait()
13512 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_data_after_close()
13513 if (rack->rc_allow_data_af_clo == 0) { in rack_check_data_after_close()
13516 /* tcp_close will kill the inp pre-log the Reset */ in rack_check_data_after_close()
13523 if (sbavail(&so->so_snd) == 0) in rack_check_data_after_close()
13527 tp->rcv_nxt = th->th_seq + *tlen; in rack_check_data_after_close()
13528 tp->t_flags2 |= TF2_DROP_AF_DATA; in rack_check_data_after_close()
13529 rack->r_wanted_output = 1; in rack_check_data_after_close()
13551 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_1()
13565 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_1()
13566 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_1()
13577 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_1()
13594 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_1()
13595 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_1()
13596 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_1()
13598 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_1()
13599 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_1()
13602 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_1()
13603 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_1()
13607 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_1()
13610 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_1()
13612 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_1()
13635 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fin_wait_1()
13644 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_1()
13646 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_1()
13673 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_closing()
13687 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_closing()
13688 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_closing()
13709 if ((to->to_flags & TOF_TS) != 0 && in rack_do_closing()
13710 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_closing()
13711 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_closing()
13713 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_closing()
13714 tp->ts_recent = to->to_tsval; in rack_do_closing()
13717 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_closing()
13718 * is on (half-synchronized state), then queue data for later in rack_do_closing()
13722 if (tp->t_flags & TF_NEEDSYN) { in rack_do_closing()
13725 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_closing()
13727 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_closing()
13745 if (sbavail(&so->so_snd)) { in rack_do_closing()
13747 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_closing()
13774 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_lastack()
13788 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_lastack()
13789 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_lastack()
13811 if ((to->to_flags & TOF_TS) != 0 && in rack_do_lastack()
13812 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_lastack()
13813 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_lastack()
13815 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_lastack()
13816 tp->ts_recent = to->to_tsval; in rack_do_lastack()
13819 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_lastack()
13820 * is on (half-synchronized state), then queue data for later in rack_do_lastack()
13824 if (tp->t_flags & TF_NEEDSYN) { in rack_do_lastack()
13827 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_lastack()
13829 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_lastack()
13847 if (sbavail(&so->so_snd)) { in rack_do_lastack()
13849 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_lastack()
13877 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_2()
13891 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_2()
13892 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_2()
13903 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_2()
13920 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_2()
13921 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_2()
13922 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_2()
13924 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_2()
13925 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_2()
13928 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_2()
13929 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_2()
13933 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_2()
13936 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_2()
13938 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_2()
13951 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_2()
13953 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_2()
13966 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; in rack_clear_rate_sample()
13967 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; in rack_clear_rate_sample()
13968 rack->r_ctl.rack_rs.rs_rtt_tot = 0; in rack_clear_rate_sample()
13979 if (rack->rc_hybrid_mode && in rack_set_pace_segments()
13980 (rack->r_ctl.rc_pace_max_segs != 0) && in rack_set_pace_segments()
13982 (rack->r_ctl.rc_last_sft != NULL)) { in rack_set_pace_segments()
13983 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; in rack_set_pace_segments()
13987 orig_min = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
13988 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
13989 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; in rack_set_pace_segments()
13990 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) in rack_set_pace_segments()
13992 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); in rack_set_pace_segments()
13993 if (rack->use_fixed_rate || rack->rc_force_max_seg) { in rack_set_pace_segments()
13994 if (user_max != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
13997 if (rack->rc_force_max_seg) { in rack_set_pace_segments()
13998 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
13999 } else if (rack->use_fixed_rate) { in rack_set_pace_segments()
14001 if ((rack->r_ctl.crte == NULL) || in rack_set_pace_segments()
14002 (bw_est != rack->r_ctl.crte->rate)) { in rack_set_pace_segments()
14003 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
14009 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_set_pace_segments()
14014 rack->r_ctl.rc_pace_min_segs); in rack_set_pace_segments()
14015 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( in rack_set_pace_segments()
14017 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_set_pace_segments()
14019 } else if (rack->rc_always_pace) { in rack_set_pace_segments()
14020 if (rack->r_ctl.gp_bw || in rack_set_pace_segments()
14021 rack->r_ctl.init_rate) { in rack_set_pace_segments()
14026 orig = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
14033 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, in rack_set_pace_segments()
14035 ctf_fixed_maxseg(rack->rc_tp)); in rack_set_pace_segments()
14037 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
14038 if (orig != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
14040 } else if ((rack->r_ctl.gp_bw == 0) && in rack_set_pace_segments()
14041 (rack->r_ctl.rc_pace_max_segs == 0)) { in rack_set_pace_segments()
14047 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); in rack_set_pace_segments()
14050 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { in rack_set_pace_segments()
14052 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; in rack_set_pace_segments()
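/*
 * A simplified sketch (not kernel code) of the burst-size decision
 * in rack_set_pace_segments() above: a forced user setting wins;
 * otherwise, with a bandwidth estimate the burst is derived from it
 * (e.g. via rack_get_pacing_len()); with no estimate yet the initial
 * window is used; and everything is clamped to the largest IP
 * datagram we will build.  The real function also handles fixed-rate
 * and hardware-rate entries, elided here.
 */
#include <stdint.h>

static uint32_t
pick_pace_max(int force_user, uint32_t user_max, uint64_t bw_est,
    uint32_t bw_derived_len, uint32_t init_window, uint32_t ip_max)
{
	uint32_t len;

	if (force_user)
		len = user_max;
	else if (bw_est != 0)
		len = bw_derived_len;
	else
		len = init_window;
	return ((len > ip_max) ? ip_max : len);
}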
14072 if (rack->r_is_v6) { in rack_init_fsb_block()
14073 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in rack_init_fsb_block()
14074 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14075 if (tp->t_port) { in rack_init_fsb_block()
14076 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14078 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14079 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14080 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14081 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14084 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); in rack_init_fsb_block()
14085 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14087 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14088 tp->t_port, in rack_init_fsb_block()
14089 ip6, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14090 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); in rack_init_fsb_block()
14095 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); in rack_init_fsb_block()
14096 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14097 if (tp->t_port) { in rack_init_fsb_block()
14098 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14100 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14101 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14102 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14103 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14106 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14107 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); in rack_init_fsb_block()
14109 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14110 tp->t_port, in rack_init_fsb_block()
14111 ip, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14112 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; in rack_init_fsb_block()
14115 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), in rack_init_fsb_block()
14116 (long)TCP_MAXWIN << tp->rcv_scale); in rack_init_fsb_block()
14117 rack->r_fsb_inited = 1; in rack_init_fsb_block()
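/*
 * Sketch (not kernel code) of the fast-send-block header sizing in
 * rack_init_fsb_block() above: the prebuilt header is IPv4 or IPv6
 * plus TCP, with a UDP header wedged in between when the connection
 * is tunneled (tp->t_port != 0).  The length parameters are passed
 * in rather than taken from kernel structs.
 */
#include <stddef.h>

static size_t
fsb_hdr_len(int is_v6, int tunneled, size_t ip_len, size_t ip6_len,
    size_t udp_len, size_t tcp_len)
{
	size_t len = (is_v6 ? ip6_len : ip_len) + tcp_len;

	if (tunneled)
		len += udp_len;	/* UDP sits between IP and TCP */
	return (len);
}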
14128 …rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct ud… in rack_init_fsb()
14130 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); in rack_init_fsb()
14132 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, in rack_init_fsb()
14134 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { in rack_init_fsb()
14137 rack->r_fsb_inited = 0; in rack_init_fsb()
14146 * 20 - Initial round setup in rack_log_hystart_event()
14147 * 21 - Rack declares a new round. in rack_log_hystart_event()
14151 tp = rack->rc_tp; in rack_log_hystart_event()
14157 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_log_hystart_event()
14158 log.u_bbr.flex2 = rack->r_ctl.roundends; in rack_log_hystart_event()
14160 log.u_bbr.flex4 = tp->snd_max; in rack_log_hystart_event()
14163 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hystart_event()
14164 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hystart_event()
14166 &tptosocket(tp)->so_rcv, in rack_log_hystart_event()
14167 &tptosocket(tp)->so_snd, in rack_log_hystart_event()
14176 rack->rack_deferred_inited = 1; in rack_deferred_init()
14177 rack->r_ctl.roundends = tp->snd_max; in rack_deferred_init()
14178 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; in rack_deferred_init()
14179 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_deferred_init()
14193 * 1 - Use full sized retransmits i.e. limit in rack_init_retransmit_value()
14197 * 2 - Use pacer min granularity as a guide to in rack_init_retransmit_value()
14205 * 0 - The rack default 1 MSS (anything not 0/1/2 in rack_init_retransmit_value()
14210 rack->full_size_rxt = 1; in rack_init_retransmit_value()
14211 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
14213 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14214 rack->shape_rxt_to_pacing_min = 1; in rack_init_retransmit_value()
14216 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14217 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
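/*
 * Sketch (not kernel code) of the retransmit-sizing knob decoded in
 * rack_init_retransmit_value() above: 1 selects full-sized
 * (pace-max) retransmits, 2 shapes retransmits to the pacer's
 * minimum granularity, and anything else keeps the 1-MSS default.
 */
static void
set_rxt_mode(int val, int *full_size_rxt, int *shape_to_pacing_min)
{
	*full_size_rxt = (val == 1);
	*shape_to_pacing_min = (val == 2);
}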
14227 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_chg_info()
14250 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_chg_query()
14251 switch (reqr->req) { in rack_chg_query()
14253 if ((reqr->req_param == tp->snd_max) || in rack_chg_query()
14254 (tp->snd_max == tp->snd_una)){ in rack_chg_query()
14258 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); in rack_chg_query()
14260 /* Can't find that seq -- unlikely */ in rack_chg_query()
14263 reqr->sendmap_start = rsm->r_start; in rack_chg_query()
14264 reqr->sendmap_end = rsm->r_end; in rack_chg_query()
14265 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; in rack_chg_query()
14266 reqr->sendmap_fas = rsm->r_fas; in rack_chg_query()
14267 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) in rack_chg_query()
14268 reqr->sendmap_send_cnt = SNDMAP_NRTX; in rack_chg_query()
14269 for(i=0; i<reqr->sendmap_send_cnt; i++) in rack_chg_query()
14270 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; in rack_chg_query()
14271 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; in rack_chg_query()
14272 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; in rack_chg_query()
14273 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; in rack_chg_query()
14274 reqr->sendmap_dupacks = rsm->r_dupack; in rack_chg_query()
14276 rsm->r_start, in rack_chg_query()
14277 rsm->r_end, in rack_chg_query()
14278 rsm->r_flags); in rack_chg_query()
14282 if (rack->r_ctl.rc_hpts_flags == 0) { in rack_chg_query()
14286 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; in rack_chg_query()
14287 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_chg_query()
14288 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; in rack_chg_query()
14290 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_chg_query()
14291 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; in rack_chg_query()
14294 rack->r_ctl.rc_hpts_flags, in rack_chg_query()
14295 rack->r_ctl.rc_last_output_to, in rack_chg_query()
14296 rack->r_ctl.rc_timer_exp); in rack_chg_query()
14301 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; in rack_chg_query()
14302 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; in rack_chg_query()
14304 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; in rack_chg_query()
14305 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; in rack_chg_query()
14306 reqr->rack_rtt = rack->rc_rack_rtt; in rack_chg_query()
14307 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; in rack_chg_query()
14308 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; in rack_chg_query()
14310 reqr->rack_sacked = rack->r_ctl.rc_sacked; in rack_chg_query()
14311 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; in rack_chg_query()
14312 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; in rack_chg_query()
14313 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; in rack_chg_query()
14314 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; in rack_chg_query()
14315 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; in rack_chg_query()
14317 reqr->rack_tlp_out = rack->rc_tlp_in_progress; in rack_chg_query()
14318 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; in rack_chg_query()
14319 if (rack->rc_in_persist) { in rack_chg_query()
14320 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; in rack_chg_query()
14321 reqr->rack_in_persist = 1; in rack_chg_query()
14323 reqr->rack_time_went_idle = 0; in rack_chg_query()
14324 reqr->rack_in_persist = 0; in rack_chg_query()
14326 if (rack->r_wanted_output) in rack_chg_query()
14327 reqr->rack_wanted_output = 1; in rack_chg_query()
14329 reqr->rack_wanted_output = 0; in rack_chg_query()
14333 return (-EINVAL); in rack_chg_query()
14352 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_switch_failed()
14354 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_switch_failed()
14355 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14357 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14358 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_switch_failed()
14359 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_switch_failed()
14360 if (tp->t_in_hpts > IHPTS_NONE) { in rack_switch_failed()
14365 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_switch_failed()
14366 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_switch_failed()
14367 toval = rack->r_ctl.rc_last_output_to - cts; in rack_switch_failed()
14372 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_switch_failed()
14373 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_switch_failed()
14374 toval = rack->r_ctl.rc_timer_exp - cts; in rack_switch_failed()
14393 * to not refer to tp->t_fb_ptr. This has the old rack in rack_init_outstanding()
14399 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init_outstanding()
14407 rsm->r_no_rtt_allowed = 1; in rack_init_outstanding()
14408 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14409 rsm->r_rtr_cnt = 1; in rack_init_outstanding()
14410 rsm->r_rtr_bytes = 0; in rack_init_outstanding()
14411 if (tp->t_flags & TF_SENTFIN) in rack_init_outstanding()
14412 rsm->r_flags |= RACK_HAS_FIN; in rack_init_outstanding()
14413 rsm->r_end = tp->snd_max; in rack_init_outstanding()
14414 if (tp->snd_una == tp->iss) { in rack_init_outstanding()
14416 rsm->r_flags |= RACK_HAS_SYN; in rack_init_outstanding()
14417 rsm->r_start = tp->iss; in rack_init_outstanding()
14418 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); in rack_init_outstanding()
14420 rsm->r_start = tp->snd_una; in rack_init_outstanding()
14421 rsm->r_dupack = 0; in rack_init_outstanding()
14422 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14423 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14424 if (rsm->m) { in rack_init_outstanding()
14425 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14426 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14428 rsm->orig_m_len = 0; in rack_init_outstanding()
14429 rsm->orig_t_space = 0; in rack_init_outstanding()
14433 * This can happen if we have a stand-alone FIN or in rack_init_outstanding()
14436 rsm->m = NULL; in rack_init_outstanding()
14437 rsm->orig_m_len = 0; in rack_init_outstanding()
14438 rsm->orig_t_space = 0; in rack_init_outstanding()
14439 rsm->soff = 0; in rack_init_outstanding()
14442 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14447 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14449 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14450 rsm->r_in_tmap = 1; in rack_init_outstanding()
14457 at = tp->snd_una; in rack_init_outstanding()
14458 while (at != tp->snd_max) { in rack_init_outstanding()
14462 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) in rack_init_outstanding()
14474 rsm->r_dupack = qr.sendmap_dupacks; in rack_init_outstanding()
14475 rsm->r_start = qr.sendmap_start; in rack_init_outstanding()
14476 rsm->r_end = qr.sendmap_end; in rack_init_outstanding()
14478 rsm->r_fas = qr.sendmap_end; in rack_init_outstanding()
14480 rsm->r_fas = rsm->r_start - tp->snd_una; in rack_init_outstanding()
14486 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; in rack_init_outstanding()
14487 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; in rack_init_outstanding()
14488 rsm->r_rtr_cnt = qr.sendmap_send_cnt; in rack_init_outstanding()
14489 rsm->r_ack_arrival = qr.sendmap_ack_arrival; in rack_init_outstanding()
14490 for (i=0 ; i<rsm->r_rtr_cnt; i++) in rack_init_outstanding()
14491 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; in rack_init_outstanding()
14492 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14493 (rsm->r_start - tp->snd_una), &rsm->soff); in rack_init_outstanding()
14494 if (rsm->m) { in rack_init_outstanding()
14495 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14496 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14498 rsm->orig_m_len = 0; in rack_init_outstanding()
14499 rsm->orig_t_space = 0; in rack_init_outstanding()
14502 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14507 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
        if ((rsm->r_flags & RACK_ACKED) == 0) {
            TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) {
                if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] >
                    rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) {
                    /* The new entry was sent earlier, insert ahead of ersm */
                    rsm->r_in_tmap = 1;
                    TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext);
                    break;
                }
            }
            if (rsm->r_in_tmap == 0) {
                /* Not found, shove it on the tail */
                TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
                rsm->r_in_tmap = 1;
            }
        }
        if ((rack->r_ctl.rc_sacklast == NULL) ||
            (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) {
            rack->r_ctl.rc_sacklast = rsm;
        }
        rack_log_chg_info(tp, rack, 3,
                          rsm->r_start,
                          rsm->r_end,
                          rsm->r_flags);
        at = rsm->r_end;
    }
    return (0);

/* rack_init() */
    /*
     * If we are initing via tcp_newtcppcb the ptr passed
     * will be tp->t_fb_ptr. If its a stack switch that
     * has a previous stack we can query, it will be a local
     * var that will in the end be set into t_fb_ptr.
     */
    if (ptr == &tp->t_fb_ptr)
        no_query = 1;
    else
        no_query = 0;
    rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT);
    if (rack->r_ctl.tqh == NULL) {
        uma_zfree(rack_pcb_zone, rack);
        return (ENOMEM);
    }
    tqhash_init(rack->r_ctl.tqh);
    TAILQ_INIT(&rack->r_ctl.rc_free);
    TAILQ_INIT(&rack->r_ctl.rc_tmap);
    rack->rc_tp = tp;
    rack->rc_inp = inp;
    rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
    /*
     * We specifically put into the beta the ecn value for pacing.
     */
    rack->rc_new_rnd_needed = 1;
    rack->r_ctl.rc_split_limit = V_tcp_map_split_limit;
    rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
    rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
    rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
    rack->rc_pace_to_cwnd = 1;
    rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg;
    rack->use_rack_rr = 1;
    rack->rc_pace_dnd = 1;
    if (V_tcp_delack_enabled)
        tp->t_delayed_ack = 1;
    else
        tp->t_delayed_ack = 0;
#ifdef TCP_ACCOUNTING
    if (rack_tcp_accounting)
        tp->t_flags2 |= TF2_TCP_ACCOUNTING;
#endif
    rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY;
    sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc);
    rack->r_ctl.pcm_s = malloc(sz, M_TCPPCM, M_NOWAIT);
    if (rack->r_ctl.pcm_s == NULL) {
        rack->r_ctl.pcm_i.cnt_alloc = 0;
    }
    rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask;
    rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
    rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
    if (rack_enable_shared_cwnd)
        rack->rack_enable_scwnd = 1;
    rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
    rack->rc_user_set_max_segs = rack_hptsi_segments;
    rack->r_ctl.max_reduction = rack_max_reduce;
    rack->rc_force_max_seg = 0;
    TAILQ_INIT(&rack->r_ctl.opt_list);
    rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn;
    rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn;
    if (rack_hibeta_setting) {
        rack->rack_hibeta = 1;
        if ((rack_hibeta_setting >= 50) &&
            (rack_hibeta_setting <= 100)) {
            rack->r_ctl.rc_saved_beta = rack_hibeta_setting;
            rack->r_ctl.saved_hibeta = rack_hibeta_setting;
        }
    } else {
        rack->r_ctl.saved_hibeta = 50;
    }
    /*
     * Start the last time mark at all 1's; the clock
     * will never have all 1's in ms :-)
     */
    rack->r_ctl.last_tm_mark = 0xffffffffffffffff;
    rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
    rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
    rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
    rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
    rack->r_ctl.rc_highest_us_rtt = 0;
    rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
    rack->pcm_enabled = rack_pcm_is_enabled;
    rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
    rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
    if (rack_use_cmp_acks)
        rack->r_use_cmp_ack = 1;
    if (rack_disable_prr)
        rack->rack_no_prr = 1;
    if (rack_gp_no_rec_chg)
        rack->rc_gp_no_rec_chg = 1;
    if (rack_pace_every_seg && tcp_can_enable_pacing()) {
        rack->r_ctl.pacing_method |= RACK_REG_PACING;
        rack->rc_always_pace = 1;
        if (rack->rack_hibeta)
            rack_set_cc_pacing(rack);
    } else
        rack->rc_always_pace = 0;
    if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
        rack->r_mbuf_queue = 1;
    else
        rack->r_mbuf_queue = 0;
    if (rack_limit_time_with_srtt)
        rack->r_limit_scw = 1;
    else
        rack->r_limit_scw = 0;
    rack->rc_labc = V_tcp_abc_l_var;
    rack->r_use_hpts_min = 1;
    if (tp->snd_una != 0) {
        rack->rc_sendvars_notset = 0;
    } else {
        /*
         * The connection may have come out of the
         * syn-cache. This means none of the
         * send variables are yet initialized.
         */
        rack->rc_sendvars_notset = 1;
    }
    rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
    rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
    rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
    rack->r_ctl.rc_min_to = rack_min_to;
    microuptime(&rack->r_ctl.act_rcv_time);
    rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
    rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
    rack->r_up_only = 1;
    if (rack_do_dyn_mul) {
        /* When dynamic adjustment is on, CA needs to start at 100% */
        rack->rc_gp_dyn_mul = 1;
        if (rack_do_dyn_mul >= 100)
            rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
    } else
        rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
    rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
    if (rack_timely_off)
        rack->rc_skip_timely = 1;
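    /*
     * With timely disabled, pin the goodput multipliers: 90% in
     * recovery, 100% in congestion avoidance, 250% in slow start.
     */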
    if (rack->rc_skip_timely) {
        rack->r_ctl.rack_per_of_gp_rec = 90;
        rack->r_ctl.rack_per_of_gp_ca = 100;
        rack->r_ctl.rack_per_of_gp_ss = 250;
    }
    rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
    rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
    rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
    setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
                            rack_probertt_filter_life);
    us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
    rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
    rack->r_ctl.rc_time_of_last_probertt = us_cts;
    rack->r_ctl.rc_went_idle_time = us_cts;
    rack->r_ctl.rc_time_probertt_starts = 0;
    rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff;
    if (rack_rnd_cnt_req & 0x10000)
        rack->r_ctl.gate_to_fs = 1;
    rack->r_ctl.gp_gain_req = rack_gp_gain_req;
    if (rack_dsack_std_based & 0x1)
        rack->rc_rack_tmr_std_based = 1;
    if (rack_dsack_std_based & 0x2)
        rack->rc_rack_use_dsack = 1;
    if (rack_req_measurements)
        rack->r_ctl.req_measurements = rack_req_measurements;
    else
        rack->r_ctl.req_measurements = 1;
    if (rack_enable_hw_pacing)
        rack->rack_hdw_pace_ena = 1;
    if (rack_hw_rate_caps)
        rack->r_rack_hw_rate_caps = 1;
    if (rack_non_rxt_use_cr)
        rack->rack_rec_nonrxt_use_cr = 1;
    if (rack_do_hystart) {
        tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
        if (rack_do_hystart > 1)
            tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
        if (rack_do_hystart > 2)
            tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
    }
    tp->t_flags &= ~TF_GPUTINPROG;
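    /*
     * If the connection is already open (a stack switch), adjust
     * cwnd and friends to account for what has already been sent.
     */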
    if ((tp->t_state != TCPS_CLOSED) &&
        (tp->t_state != TCPS_TIME_WAIT)) {
        /*
         * We are already open, we may
         * need to adjust a few things.
         */
        if (SEQ_GT(tp->snd_max, tp->iss))
            snt = tp->snd_max - tp->iss;
        else
            snt = 0;
        iwin = rc_init_window(rack);
        if (tp->snd_cwnd < iwin)
            tp->snd_cwnd = iwin;
        tp->snd_ssthresh = 0xffffffff;
    }
    if ((tp->t_state != TCPS_CLOSED) &&
        (tp->t_state != TCPS_TIME_WAIT) &&
        (tp->snd_una != tp->snd_max)) {
        err = rack_init_outstanding(tp, rack, us_cts, ptr);
        if (err) {
            return (err);
        }
    }
    if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    else
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
        tp->t_flags2 |= TF2_MBUF_ACKCMP;
    /*
     * Bring over the srtt/rttvar values if
     * they are non-zero. They are kept with a 5
     * bit shift by the core stack.
     */
    rack_log_hystart_event(rack, rack->r_ctl.roundends, 20);
    if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) {
        if (tp->t_fb->tfb_chg_query == NULL) {
            /*
             * The previous stack cannot be queried,
             * there is no old state to restore.
             */
        } else {
            qr.req = TCP_QUERY_RACK_TIMES;
            ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
            if (ret == 1) {
                rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts;
                rack->r_ctl.num_dsack = qr.rack_num_dsacks;
                rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time;
                rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt;
                rack->rc_rack_rtt = qr.rack_rtt;
                rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time;
                rack->r_ctl.rc_sacked = qr.rack_sacked;
                rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt;
                rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered;
                rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs;
                rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt;
                rack->r_ctl.rc_prr_out = qr.rack_prr_out;
                if (qr.rack_tlp_out) {
                    rack->rc_tlp_in_progress = 1;
                    rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out;
                } else {
                    rack->rc_tlp_in_progress = 0;
                    rack->r_ctl.rc_tlp_cnt_out = 0;
                }
                if (qr.rack_srtt_measured)
                    rack->rc_srtt_measure_made = 1;
                if (qr.rack_in_persist == 1) {
                    rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle;
#ifdef NETFLIX_SHARED_CWND
                    if (rack->r_ctl.rc_scw) {
                        tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
                        rack->rack_scwnd_is_idle = 1;
                    }
#endif
                    rack->r_ctl.persist_lost_ends = 0;
                    rack->probe_not_answered = 0;
                    rack->forced_ack = 0;
                    tp->t_rxtshift = 0;
                    rack->rc_in_persist = 1;
                    RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
                                       rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
                }
                if (qr.rack_wanted_output)
                    rack->r_wanted_output = 1;
            }
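            /*
             * Next, ask the old stack about any pacing or
             * retransmit timers that were still pending so the
             * equivalent timers can be restarted here.
             */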
            qr.req = TCP_QUERY_TIMERS_UP;
            ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
            if (ret) {
                /*
                 * A non-zero return means we have a timer('s)
                 * to start.
                 */
                rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags;
                if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) {
                    rack->r_ctl.rc_last_output_to = qr.timer_pacing_to;
                    if (TSTMP_GT(qr.timer_pacing_to, us_cts))
                        tov = qr.timer_pacing_to - us_cts;
                }
                if (qr.timer_hpts_flags & PACE_TMR_MASK) {
                    rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
                    if (TSTMP_GT(qr.timer_timer_exp, us_cts))
                        tov = qr.timer_timer_exp - us_cts;
                }
                rack_log_chg_info(tp, rack, 4,
                                  rack->r_ctl.rc_hpts_flags,
                                  rack->r_ctl.rc_last_output_to,
                                  rack->r_ctl.rc_timer_exp);
                if (tov) {
                    struct hpts_diag diag;

                    (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov),
                                               __LINE__, &diag);
                    rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time);
                }
            }
        }
        rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
                             __LINE__, RACK_RTTS_INIT);
    }
    return (0);

/* rack_handoff_ok() */
    if ((tp->t_state == TCPS_CLOSED) ||
        (tp->t_state == TCPS_LISTEN)) {
        /* Sure, no data outstanding yet */
        return (0);
    }
    if ((tp->t_state == TCPS_SYN_SENT) ||
        (tp->t_state == TCPS_SYN_RECEIVED)) {
        /*
         * We can't know yet if the peer supports SACK;
         * wait until the handshake completes.
         */
        return (EAGAIN);
    }
    if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
        /*
         * A FIN was sent with more than one byte still
         * unacknowledged; refuse the switch for now.
         */
        return (EAGAIN);
    }
    if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required) {
        return (0);
    }
    /*
     * If we reach here we don't do SACK on this connection and the
     * stack is not permitted.
     */
    return (EINVAL);

/* rack_fini() */
    if (tp->t_fb_ptr) {
        /* rack does not use force data but other stacks may clear it */
        tp->t_flags &= ~TF_FORCEDATA;
        rack = (struct tcp_rack *)tp->t_fb_ptr;
#ifdef NETFLIX_SHARED_CWND
        if (rack->r_ctl.rc_scw) {
            uint32_t limit;

            if (rack->r_limit_scw)
                limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
            else
                limit = 0;
            tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
                                      rack->r_ctl.rc_scw_index,
                                      limit);
            rack->r_ctl.rc_scw = NULL;
        }
#endif
        if (rack->r_ctl.fsb.tcp_ip_hdr) {
            free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
            rack->r_ctl.fsb.tcp_ip_hdr = NULL;
            rack->r_ctl.fsb.th = NULL;
        }
        if (rack->rc_always_pace == 1) {
            rack_remove_pacing(rack);
        }
        /* Clean up any options if they were not applied */
        while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
            struct deferred_opt_list *dol;

            dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
            TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
            free(dol, M_TCPDO);
        }
        if (rack->r_ctl.crte != NULL) {
            tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
            rack->rack_hdrw_pacing = 0;
            rack->r_ctl.crte = NULL;
        }
        /*
         * Free up the sendmap. We walk the hash and
         * get each one and free it like a cum-ack would and
         * then free the hash table itself.
         */
        rsm = tqhash_min(rack->r_ctl.tqh);
        while (rsm) {
            tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
            rack->r_ctl.rc_num_maps_alloced--;
            uma_zfree(rack_zone, rsm);
            rsm = tqhash_min(rack->r_ctl.tqh);
        }
        rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
        while (rsm) {
            TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
            rack->r_ctl.rc_num_maps_alloced--;
            rack->rc_free_cnt--;
            uma_zfree(rack_zone, rsm);
            rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
        }
        if (rack->r_ctl.pcm_s != NULL) {
            free(rack->r_ctl.pcm_s, M_TCPPCM);
            rack->r_ctl.pcm_s = NULL;
            rack->r_ctl.pcm_i.cnt_alloc = 0;
            rack->r_ctl.pcm_i.cnt = 0;
        }
        if ((rack->r_ctl.rc_num_maps_alloced > 0) &&
            (tcp_bblogging_on(tp))) {
            union tcp_log_stackspecific log;

            memset(&log, 0, sizeof(log));
            log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced;
            log.u_bbr.flex2 = rack->rc_free_cnt;
            log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
            rsm = tqhash_min(rack->r_ctl.tqh);
            log.u_bbr.delRate = (uintptr_t)rsm;
            rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
            log.u_bbr.cur_del_rate = (uintptr_t)rsm;
        }
        KASSERT((rack->r_ctl.rc_num_maps_alloced == 0),
                ("rack:%p num_aloc:%u after freeing all?",
                 rack,
                 rack->r_ctl.rc_num_maps_alloced));
        rack->rc_free_cnt = 0;
        free(rack->r_ctl.tqh, M_TCPFSB);
        rack->r_ctl.tqh = NULL;
        uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
        tp->t_fb_ptr = NULL;
    }
    /* Make sure snd_nxt is correctly set */
    tp->snd_nxt = tp->snd_max;
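
/*
 * rack_set_state() -- mirror the TCP state into rack->r_state and
 * select the matching per-state input handler (r_substate).
 */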
    if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
        rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
    }
    switch (tp->t_state) {
    case TCPS_SYN_SENT:
        rack->r_state = TCPS_SYN_SENT;
        rack->r_substate = rack_do_syn_sent;
        break;
    case TCPS_SYN_RECEIVED:
        rack->r_state = TCPS_SYN_RECEIVED;
        rack->r_substate = rack_do_syn_recv;
        break;
    case TCPS_ESTABLISHED:
        rack->r_state = TCPS_ESTABLISHED;
        rack->r_substate = rack_do_established;
        break;
    case TCPS_CLOSE_WAIT:
        rack->r_state = TCPS_CLOSE_WAIT;
        rack->r_substate = rack_do_close_wait;
        break;
    case TCPS_FIN_WAIT_1:
        rack->r_state = TCPS_FIN_WAIT_1;
        rack->r_substate = rack_do_fin_wait_1;
        break;
    case TCPS_CLOSING:
        rack->r_state = TCPS_CLOSING;
        rack->r_substate = rack_do_closing;
        break;
    case TCPS_LAST_ACK:
        rack->r_state = TCPS_LAST_ACK;
        rack->r_substate = rack_do_lastack;
        break;
    case TCPS_FIN_WAIT_2:
        rack->r_state = TCPS_FIN_WAIT_2;
        rack->r_substate = rack_do_fin_wait_2;
        break;
    default:
        break;
    }
    if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
        rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
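
/*
 * rack_timer_audit() -- verify that whatever timer rack has pending
 * matches what the connection currently needs; if not, cancel it and
 * start the correct one.
 */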
    tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
    if (tcp_in_hpts(rack->rc_tp) == 0) {
        /*
         * Ok we probably need some timer up, but no
         * matter what the mask we are not in hpts :(
         */
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
        rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
        return;
    }
    if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
        return;
    rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
    if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
        (tmr_up == PACE_TMR_RXT)) {
        /* Should be an RXT and it is */
        return;
    }
    if (tp->t_flags & TF_DELACK) {
        if (tmr_up == PACE_TMR_DELACK)
            /* We are supposed to have delayed ack up and we do */
            return;
    } else if (((V_tcp_always_keepalive ||
                 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
                (tp->t_state <= TCPS_CLOSING)) &&
               (tmr_up == PACE_TMR_KEEP) &&
               (tp->snd_max == tp->snd_una)) {
        /* We should have a keep-alive timer up and we do */
        return;
    }
    if (SEQ_GT(tp->snd_max, tp->snd_una) &&
        ((tmr_up == PACE_TMR_TLP) ||
         (tmr_up == PACE_TMR_RACK) ||
         (tmr_up == PACE_TMR_RXT))) {
        /* A TLP, RACK or RXT timer all fit when data is outstanding */
        return;
    }
    /* The timer is not what we want; stop the hpts and restart */
    if (tcp_in_hpts(rack->rc_tp)) {
        if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
            uint32_t us_cts;

            us_cts = tcp_get_usecs(NULL);
            if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
                rack->r_early = 1;
                rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
            }
            rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
        }
        tcp_hpts_remove(rack->rc_tp);
    }
    rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
    rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
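
/*
 * rack_do_win_updates() -- standard send-window update rules: accept
 * the advertised window only from a newer segment (the wl1/wl2
 * ordering check) or when it shrinks on the same ack; track
 * max_sndwnd and enter/exit persist as the window closes or opens.
 */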
    if ((SEQ_LT(tp->snd_wl1, seq) ||
        (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
        (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
        /* keep track of pure window updates */
        if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
            KMOD_TCPSTAT_INC(tcps_rcvwinupd);
        tp->snd_wnd = tiwin;
        tp->snd_wl1 = seq;
        tp->snd_wl2 = ack;
        if (tp->snd_wnd > tp->max_sndwnd)
            tp->max_sndwnd = tp->snd_wnd;
        rack->r_wanted_output = 1;
    } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
        tp->snd_wnd = tiwin;
        tp->snd_wl1 = seq;
        tp->snd_wl2 = ack;
    } else {
        /* Not a valid win update */
        return;
    }
    if (tp->snd_wnd > tp->max_sndwnd)
        tp->max_sndwnd = tp->snd_wnd;
    /* Do we exit persists? */
    if ((rack->rc_in_persist != 0) &&
        (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
                            rack->r_ctl.rc_pace_min_segs))) {
        rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
    }
    /* Do we enter persists? */
    if ((rack->rc_in_persist == 0) &&
        (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
        TCPS_HAVEESTABLISHED(tp->t_state) &&
        ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
        sbavail(&tptosocket(tp)->so_snd) &&
        (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
        /*
         * Here the rwnd is less than the pacing size, we are
         * established, nothing outstanding, and there is data
         * to send. Enter persists.
         */
        rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack);
    }
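
/*
 * rack_log_input_packet() -- when BB logging is on, rebuild a TCP
 * header from the compressed ack entry and log the inbound ack
 * (plus any request-tracking state) as a TCP_LOG_IN event.
 */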
    if (tcp_bblogging_on(rack->rc_tp)) {
        union tcp_log_stackspecific log;
        struct timeval ltv;
        struct timespec ts;
        char tcp_hdr_buf[60];
        struct tcphdr *th;
        uint32_t orig_snd_una;
        uint8_t xx = 0;
#ifdef TCP_REQUEST_TRK
        struct tcp_sendfile_track *tcp_req;

        if (SEQ_GT(ae->ack, tp->snd_una)) {
            tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1));
        } else {
            tcp_req = tcp_req_find_req_for_seq(tp, ae->ack);
        }
#endif
        memset(&log, 0, sizeof(log));
        log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
        if (rack->rack_no_prr == 0)
            log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
        else
            log.u_bbr.flex1 = 0;
        log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
        log.u_bbr.use_lt_bw <<= 1;
        log.u_bbr.use_lt_bw |= rack->r_might_revert;
        log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
        log.u_bbr.bbr_state = rack->rc_free_cnt;
        log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
        log.u_bbr.pkts_out = tp->t_maxseg;
        log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
        log.u_bbr.lost = ae->flags;
        if (ae->flags & TSTMP_HDWR) {
            /* Record the hardware timestamp if present */
            ts.tv_sec = ae->timestamp / 1000000000;
            ts.tv_nsec = ae->timestamp % 1000000000;
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
        } else if (ae->flags & TSTMP_LRO) {
            /* Record the LRO arrival timestamp */
            ts.tv_sec = ae->timestamp / 1000000000;
            ts.tv_nsec = ae->timestamp % 1000000000;
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
        }
        log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
        /* Log the rcv time */
        log.u_bbr.delRate = ae->timestamp;
#ifdef TCP_REQUEST_TRK
        log.u_bbr.applimited = tp->t_tcpreq_closed;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_open;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_req;
        if (tcp_req) {
            /* Copy out any client req info */
            /* seconds */
            log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
            /* useconds */
            log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
            log.u_bbr.rttProp = tcp_req->timestamp;
            log.u_bbr.cur_del_rate = tcp_req->start;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
                log.u_bbr.flex8 |= 1;
            } else {
                log.u_bbr.flex8 |= 2;
                log.u_bbr.bw_inuse = tcp_req->end;
            }
            log.u_bbr.flex6 = tcp_req->start_seq;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
                log.u_bbr.flex8 |= 4;
                log.u_bbr.epoch = tcp_req->end_seq;
            }
        }
#endif
        memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
        th = (struct tcphdr *)tcp_hdr_buf;
        th->th_seq = ae->seq;
        th->th_ack = ae->ack;
        th->th_win = ae->win;
        /* Now fill in the ports */
        th->th_sport = inp->inp_fport;
        th->th_dport = inp->inp_lport;
        tcp_set_flags(th, ae->flags);
        /* Now do we have a timestamp option? */
        if (ae->flags & HAS_TSTMP) {
            u_char *cp;
            uint32_t val;

            th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
            cp = (u_char *)(th + 1);
            *cp++ = TCPOPT_NOP;
            *cp++ = TCPOPT_NOP;
            *cp++ = TCPOPT_TIMESTAMP;
            *cp++ = TCPOLEN_TIMESTAMP;
            val = htonl(ae->ts_value);
            bcopy((char *)&val, (char *)cp, sizeof(uint32_t));
            cp += sizeof(uint32_t);
            val = htonl(ae->ts_echo);
            bcopy((char *)&val, (char *)cp, sizeof(uint32_t));
        } else
            th->th_off = (sizeof(struct tcphdr) >> 2);
        /*
         * For sane logging we need to play a little trick: if the
         * ack was behind, the logging code would otherwise think
         * snd_una was advanced and then un-advancing it so that the
         * right value appears; save and restore around the log call.
         */
        if (tp->snd_una != high_seq) {
            orig_snd_una = tp->snd_una;
            tp->snd_una = high_seq;
            xx = 1;
        } else
            xx = 0;
        TCP_LOG_EVENTP(tp, th,
                       &tptosocket(tp)->so_rcv,
                       &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
                       0, &log, true, &ltv);
        if (xx) {
            tp->snd_una = orig_snd_una;
        }
    }

/* rack_handle_probe_response() */
    /*
     * A persist or keep-alive was forced out, update our
     * min rtt time. Note we do not worry about lost responses.
     * When a subsequent keep-alive or persist times out
     * and forced_ack is still on, the last probe went
     * unanswered and a sysctl controls whether the rtt is
     * applied with reduced confidence or not at all. Having
     * forced_ack cleared on the response path
     * will clear the probe_not_answered flag i.e. cum-ack
     * moved forward or the window probe was answered.
     */
    rack->forced_ack = 0;
    rack->rc_tp->t_rxtshift = 0;
    if ((rack->rc_in_persist &&
         (tiwin == rack->rc_tp->snd_wnd)) ||
        (rack->rc_in_persist == 0)) {
        if (rack->rc_in_persist)
            counter_u64_add(rack_persists_acks, 1);
        us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
        if (us_rtt == 0)
            us_rtt = 1;
        if (rack->probe_not_answered == 0) {

/* rack_new_round_starts() */
    rack->r_ctl.roundends = tp->snd_max;
    rack->rc_new_rnd_needed = 0;
    rack_log_hystart_event(rack, tp->snd_max, 4);

/* rack_log_pcm() */
    if (tcp_bblogging_on(rack->rc_tp)) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        (void)tcp_get_usecs(&tv);
        memset(&log, 0, sizeof(log));
        log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds;
        log.u_bbr.bbr_substate = rack->pcm_needed;
        log.u_bbr.bbr_substate <<= 1;
        log.u_bbr.bbr_substate |= rack->pcm_in_progress;
        log.u_bbr.bbr_substate <<= 1;
        log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */
        (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK,
                            0, &log, false, NULL, NULL, 0, &tv);
    }
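
/*
 * rack_new_round_setup() -- a new round trip begins: advance the round
 * counter, decide whether a path-capacity measurement (PCM) is due,
 * tell the CC module about the new round, and gate the end of the
 * initial slow start for DGP.
 */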
    rack->r_ctl.current_round++;
    /* The new round (current_round) begins at the next send */
    rack->rc_new_rnd_needed = 1;
    if ((rack->pcm_enabled == 1) &&
        (rack->pcm_needed == 0) &&
        (rack->pcm_in_progress == 0)) {
        /*
         * PCM is on but neither needed nor in progress; check
         * whether enough rounds have passed to require one.
         */
        int rnds;

        rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round;
        if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) {
            rack->pcm_needed = 1;
            rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round);
        } else if (rack_verbose_logging) {
            rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round);
        }
    }
    if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
        /* We have hystart enabled, send the round info in */
        if (CC_ALGO(tp)->newround != NULL) {
            CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
        }
    }
    /*
     * For DGP, an initial startup check. We want to validate
     * that we are not just pushing on slow-start and just
     * not gaining, i.e. filling buffers without getting any
     * boost in b/w during the inital slow-start.
     */
    if (rack->dgp_on &&
        (rack->rc_initial_ss_comp == 0) &&
        (tp->snd_cwnd < tp->snd_ssthresh) &&
        (rack->r_ctl.num_measurements >= RACK_REQ_AVG) &&
        (rack->r_ctl.gp_rnd_thresh > 0) &&
        ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) {
        /*
         * We have hung out the required number of rounds in
         * initial slow start with no rise in b/w; declare the
         * initial slow start complete.
         */
        rack->rc_initial_ss_comp = 1;
        if (tcp_bblogging_on(rack->rc_tp)) {
            union tcp_log_stackspecific log;
            struct timeval tv;

            (void)tcp_get_usecs(&tv);
            memset(&log, 0, sizeof(log));
            log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
            log.u_bbr.flex1 = rack->r_ctl.current_round;
            log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
            log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh;
            log.u_bbr.flex4 = rack->r_ctl.gate_to_fs;
            log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs;
        }
        if ((rack->r_ctl.gate_to_fs == 1) &&
            (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) {
            tp->snd_cwnd = rack->r_ctl.ss_hi_fs;
        }
        tp->snd_ssthresh = tp->snd_cwnd - 1;
        /* Turn off any fast output running */
        rack->r_fast_output = 0;
    }

/* rack_do_compressed_ack_processing() */
    /*
     * Process a compressed mbuf of acks. Each entry either:
     * A) It moves the cum-ack forward
     * B) It is behind the cum-ack.
     * C) It is a window-update ack.
     * D) It is a dup-ack.
     *
     * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES
     * in the incoming mbuf, and we still must pay attention
     * to nxt_pkt since another packet may follow this one.
     */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->gp_ready &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
        under_pacing = 1;
    else
        under_pacing = 0;
    if (rack->r_state != tp->t_state)
        rack_set_state(tp, rack);
    if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_GPUTINPROG)) {
        /*
         * We have a goodput measurement in progress while the
         * connection tears down; verify the data is still there.
         */
        uint32_t bytes;

        bytes = tp->gput_ack - tp->gput_seq;
        if (SEQ_GT(tp->gput_seq, tp->snd_una))
            bytes += tp->gput_seq - tp->snd_una;
        if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
            /*
             * Not enough sent bytes remain in the socket buffer
             * to cover the measurement; cancel it.
             */
            rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
                                       rack->r_ctl.rc_gp_srtt /*flex1*/,
                                       tp->gput_seq,
                                       0, 0, 18, __LINE__, NULL, 0);
            tp->t_flags &= ~TF_GPUTINPROG;
        }
    }
    to->to_flags = 0;
    KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
            ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
    cnt = m->m_len / sizeof(struct tcp_ackent);
    high_seq = tp->snd_una;
    the_win = tp->snd_wnd;
    win_seq = tp->snd_wl1;
    win_upd_ack = tp->snd_wl2;
    cts = tcp_tv_to_usectick(tv);
    ms_cts = tcp_tv_to_mssectick(tv);
    segsiz = ctf_fixed_maxseg(tp);
    rack->r_ctl.rc_rcvtime = cts;
    if ((rack->rc_gp_dyn_mul) &&
        (rack->use_fixed_rate == 0) &&
        (rack->rc_always_pace)) {
        /* Check in on probertt */
        rack_check_probe_rtt(rack, cts);
    }
    for (i = 0; i < cnt; i++) {
        ae = ((mtod(m, struct tcp_ackent *)) + i);
        if (ae->flags & TH_FIN) {
            /* ... */
        }
        /* Setup the window */
        tiwin = ae->win << tp->snd_scale;
        if (tiwin > rack->r_ctl.rc_high_rwnd)
            rack->r_ctl.rc_high_rwnd = tiwin;
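        /*
         * Classify the entry against the current cum-ack:
         * behind it, advancing it, a pure dup-ack, or a
         * window update (cases A-D above).
         */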
        if (SEQ_LT(ae->ack, high_seq)) {
            /* Case B */
            ae->ack_val_set = ACK_BEHIND;
        } else if (SEQ_GT(ae->ack, high_seq)) {
            /* Case A */
            ae->ack_val_set = ACK_CUMACK;
        } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)) {
            /* Case D */
            ae->ack_val_set = ACK_DUPACK;
        } else {
            /* Case C */
            ae->ack_val_set = ACK_RWND;
        }
        rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
        /* Validate timestamp */
        if (ae->flags & HAS_TSTMP) {
            /* Setup for a timestamp */
            to->to_flags = TOF_TS;
            ae->ts_echo -= tp->ts_offset;
            to->to_tsecr = ae->ts_echo;
            to->to_tsval = ae->ts_value;
            /*
             * If echoed timestamp is later than the current time,
             * fall back to non RFC1323 RTT calculation.
             */
            if (TSTMP_GT(ae->ts_echo, ms_cts))
                to->to_tsecr = 0;
            if (tp->ts_recent &&
                TSTMP_LT(ae->ts_value, tp->ts_recent)) {
                if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
#ifdef TCP_ACCOUNTING
                    rdstc = get_cyclecount();
                    if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                        tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
                    }
#endif
                    continue;
                }
            }
            if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
                SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
                tp->ts_recent_age = tcp_ts_getticks();
                tp->ts_recent = ae->ts_value;
            }
        } else {
            /* Setup for a no options */
            to->to_flags = 0;
        }
        /* Update the rcv time and perform idle reduction possibly */
        if (tp->t_idle_reduce &&
            (tp->snd_max == tp->snd_una) &&
            (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
            counter_u64_add(rack_input_idle_reduces, 1);
            rack_cc_after_idle(rack, tp);
        }
        tp->t_rcvtime = ticks;
        /* Now what about ECN? */
        if (tcp_ecn_input_segment(tp, ae->flags, 0,
                                  tcp_packets_this_ack(tp, ae->ack),
                                  ae->codepoint))
            rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
#ifdef TCP_ACCOUNTING
        /* Count for the specific type of ack in */
        if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
            tp->tcp_cnt_counters[ae->ack_val_set]++;
        }
#endif
        /*
         * Note we set ack_val_set first and do the timestamp and
         * ECN processing before acting on the ack type.
         * The non-compressed path through the code has this
         * weakness: it does some processing before validating
         * the timestamp information; we avoid that here.
         */
        if (ae->ack_val_set == ACK_BEHIND) {
            /*
             * Case B flag reordering, if window is not closed
             * or it could be a keep-alive or persists
             */
            if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
                rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
                if (rack->r_ctl.rc_reorder_ts == 0)
                    rack->r_ctl.rc_reorder_ts = 1;
            }
        } else if (ae->ack_val_set == ACK_DUPACK) {
            /* Case D */
            rack_strike_dupack(rack, ae->ack);
        } else if (ae->ack_val_set == ACK_RWND) {
            /* Case C */
            if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
                ts.tv_sec = ae->timestamp / 1000000000;
                ts.tv_nsec = ae->timestamp % 1000000000;
                rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
                rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
            } else {
                rack->r_ctl.act_rcv_time = *tv;
            }
            if (rack->forced_ack) {
                rack_handle_probe_response(rack, tiwin,
                                           tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
            }
            win_upd_ack = ae->ack;
            win_seq = ae->seq;
            the_win = tiwin;
            rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
        } else {
            /* Case A */
            if (SEQ_GT(ae->ack, tp->snd_max)) {
                /*
                 * The ack is beyond the largest seq we sent;
                 * just respond with an ack if needed.
                 */
                if ((tp->t_flags & TF_ACKNOW) == 0) {
                    ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts,
                                       &rack->r_ctl.challenge_ack_cnt);
                    if (tp->t_flags & TF_ACKNOW)
                        rack->r_wanted_output = 1;
                }
            } else {
                nsegs++;
                /* If the window changed setup to update */
                if (tiwin != tp->snd_wnd) {
                    win_upd_ack = ae->ack;
                    win_seq = ae->seq;
                    the_win = tiwin;
                    rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
                }
#ifdef TCP_ACCOUNTING
                /* Account for the acks */
                if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                    tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
                }
#endif
                high_seq = ae->ack;
                /* Setup our act_rcv_time */
                if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
                    ts.tv_sec = ae->timestamp / 1000000000;
                    ts.tv_nsec = ae->timestamp % 1000000000;
                    rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
                    rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
                } else {
                    rack->r_ctl.act_rcv_time = *tv;
                }
                rack_process_to_cumack(tp, rack, ae->ack, cts, to,
                                       tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
                /* Do we have a new dsack round? */
                if (rack->rc_dsack_round_seen) {
                    /* Is the dsack round over? */
                    if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
                        /* Yes it is */
                        rack->rc_dsack_round_seen = 0;
                        rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
                    }
                }
            }
        }
#ifdef TCP_ACCOUNTING
        rdstc = get_cyclecount();
        if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
            tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
            if (ae->ack_val_set == ACK_CUMACK)
                tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
        }
#endif
    }
    if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
        /* The peer collapsed the window */
        rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__);
    } else if (rack->rc_has_collapsed)
        rack_un_collapse_window(rack, __LINE__);
    if ((rack->r_collapse_point_valid) &&
        (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
        rack->r_collapse_point_valid = 0;
    acked_amount = acked = (high_seq - tp->snd_una);
    if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) &&
        (rack->rc_new_rnd_needed == 0) &&
        (nxt_pkt == 0)) {
        /* We have crossed into a new round with this ack */
        rack_new_round_setup(tp, rack, high_seq);
    }
    /*
     * Clear the probe not answered flag
     * since cum-ack moved forward.
     */
    rack->probe_not_answered = 0;
    if (tp->t_flags & TF_NEEDSYN) {
        /*
         * T/TCP: Connection was half-synchronized, and our SYN has
         * been ACK'd (so connection is now fully synchronized).  Go
         * to non-starred state, increment snd_una for ACK of SYN,
         * and check if we can do window scaling.
         */
        tp->t_flags &= ~TF_NEEDSYN;
        tp->snd_una++;
        acked_amount = acked = (high_seq - tp->snd_una);
    }
    if (acked > sbavail(&so->so_snd))
        acked_amount = sbavail(&so->so_snd);
    if (IN_FASTRECOVERY(tp->t_flags) &&
        (rack->rack_no_prr == 0))
        rack_update_prr(tp, rack, acked_amount, high_seq);
    if (IN_RECOVERY(tp->t_flags)) {
        if (SEQ_LT(high_seq, tp->snd_recover) &&
            (SEQ_LT(high_seq, tp->snd_max))) {
            tcp_rack_partialack(tp);
        } else {
            rack_post_recovery(tp, high_seq);
            post_recovery = 1;
        }
    } else if ((rack->rto_from_rec == 1) &&
               SEQ_GEQ(high_seq, tp->snd_recover)) {
        /*
         * We were in recovery, hit an rto
         * and never re-entered recovery. The timeout(s)
         * made up all the lost data; clear the flag.
         */
        rack->rto_from_rec = 0;
    }
    /* Handle the rack-log-ack part (sendmap) */
    if ((sbused(&so->so_snd) == 0) &&
        (acked > acked_amount) &&
        (tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_SENTFIN)) {
        /*
         * We must be sure our fin was sent and acked; update
         * snd_una since most likely acked_amount = 0 here.
         */
        ourfinisacked = 1;
        tp->snd_una = high_seq;
    }
    /* If we made a UNA we are a CUM-ACK */
    if ((tp->t_flags & TF_PREVVALID) &&
        ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
        tp->t_flags &= ~TF_PREVVALID;
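        /*
         * Detect a spurious RTO: this was our first retransmit
         * and the ack arrived inside the bad-retransmit window.
         */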
        if (tp->t_rxtshift == 1 &&
            (int)(ticks - tp->t_badrxtwin) < 0)
            rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
    }
    /*
     * The max the CC should have added while in recovery is
     * two segments above the pacing minimum.
     */
    p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
    p_cwnd <<= 1;
    p_cwnd += tp->snd_cwnd;
    if (post_recovery && (tp->snd_cwnd > p_cwnd)) {
        /* Must be non-newreno (cubic) getting too ahead of itself */
        tp->snd_cwnd = p_cwnd;
    }
    SOCKBUF_LOCK(&so->so_snd);
    mfree = sbcut_locked(&so->so_snd, acked_amount);
    tp->snd_una = high_seq;
    /* Note we want to hold the sb lock through the sendmap adjust */
    rack_adjust_sendmap_head(rack, &so->so_snd);
    /* Wake up the socket if we have room to write more */
    rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
    sowwakeup_locked(so);
    m_freem(mfree);
    if (sbavail(&so->so_snd)) {
        tp->t_acktime = ticks;
        rack_log_progress_event(rack, tp, tp->t_acktime,
                                PROGRESS_UPDATE, __LINE__);
    }
    tp->t_rxtshift = 0;
    RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
                       rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
    rack->rc_tlp_in_progress = 0;
    rack->r_ctl.rc_tlp_cnt_out = 0;
    /* If recovery is over update snd_recover/snd_nxt */
    if (SEQ_GT(tp->snd_una, tp->snd_recover))
        tp->snd_recover = tp->snd_una;
    if (SEQ_LT(tp->snd_nxt, tp->snd_max))
        tp->snd_nxt = tp->snd_max;
    if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
    tp->snd_wl2 = high_seq;
    tp->t_dupacks = 0;
    if (under_pacing &&
        (rack->use_fixed_rate == 0) &&
        (rack->in_probe_rtt == 0) &&
        rack->rc_gp_dyn_mul &&
        rack->rc_always_pace) {
        /* Check if we are dragging bottom */
        rack_check_bottom_drag(tp, rack, so);
    }
    if (tp->snd_una == tp->snd_max) {
        tp->t_flags &= ~TF_PREVVALID;
        rack->r_ctl.retran_during_recovery = 0;
        rack->rc_suspicious = 0;
        rack->r_ctl.dsack_byte_cnt = 0;
        rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
        if (rack->r_ctl.rc_went_idle_time == 0)
            rack->r_ctl.rc_went_idle_time = 1;
        rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
        if (sbavail(&tptosocket(tp)->so_snd) == 0)
            tp->t_acktime = 0;
        /* Set so we might enter persists... */
        rack->r_wanted_output = 1;
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
        sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
        if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
            (sbavail(&so->so_snd) == 0) &&
            (tp->t_flags2 & TF2_DROP_AF_DATA)) {
            /*
             * The socket was gone and the
             * peer sent data, time to reset him.
             */
            rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
            /* tcp_close will kill the inp pre-log the Reset */
            tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
#ifdef TCP_ACCOUNTING
            rdstc = get_cyclecount();
            if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
                tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
            }
#endif
            tp = tcp_close(tp, NULL);
            if (tp == NULL) {
                m_freem(m);
                return (1);
            }
            /*
             * We would normally do drop-with-reset which would
             * send back a reset. We can't since we don't have
             * all the needed bits. Instead we just drop the
             * packet and let the next one through.
             */
        }
    }
    if ((sbused(&so->so_snd) == 0) &&
        (tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_SENTFIN)) {
        /*
         * If we can't receive any more data, then closing user can
         * proceed. Starting the timer is contrary to the
         * specification, but if we don't get a FIN we'll hang
         * forever.
         */
        if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
            soisdisconnected(so);
            tcp_timer_activate(tp, TT_2MSL,
                               (tcp_fast_finwait2_recycle ?
                                tcp_finwait2_timeout :
                                TP_MAXIDLE(tp)));
        }
        /*
         * We don't change to fin-wait-2 if we have our fin acked
         * already; the switch on t_state below completes the close.
         */
    }
    if (sbavail(&so->so_snd)) {
        rack->r_wanted_output = 1;
        if (ctf_progress_timeout_check(tp, true)) {
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                                    tp, tick, PROGRESS_DROP, __LINE__);
#ifdef TCP_ACCOUNTING
            rdstc = get_cyclecount();
            if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
                tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
            }
            sched_unpin();
#endif
            /* The connection is dropped on a progress timeout */
            (void)tcp_drop(tp, ETIMEDOUT);
            m_freem(m);
            return (1);
        }
    }
    if (ourfinisacked) {
        switch (tp->t_state) {
        case TCPS_CLOSING:
#ifdef TCP_ACCOUNTING
            rdstc = get_cyclecount();
            if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
                tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
            }
            sched_unpin();
#endif
            tcp_twstart(tp);
            m_freem(m);
            return (1);
        case TCPS_LAST_ACK:
#ifdef TCP_ACCOUNTING
            rdstc = get_cyclecount();
            if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
                tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
            }
            sched_unpin();
#endif
            tp = tcp_close(tp, NULL);
            ctf_do_drop(m, tp);
            return (1);
        case TCPS_FIN_WAIT_1:
#ifdef TCP_ACCOUNTING
            rdstc = get_cyclecount();
            if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
                tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
            }
#endif
            if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
                soisdisconnected(so);
                tcp_timer_activate(tp, TT_2MSL,
                                   (tcp_fast_finwait2_recycle ?
                                    tcp_finwait2_timeout :
                                    TP_MAXIDLE(tp)));
            }
            tcp_state_change(tp, TCPS_FIN_WAIT_2);
            break;
        default:
            break;
        }
    }
    if (rack->r_fast_output) {
        /*
         * We are doing fast output.. can we expand that?
         */
        rack_gain_for_fastoutput(rack, tp, so, acked_amount);
    }
#ifdef TCP_ACCOUNTING
    rdstc = get_cyclecount();
    if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
        tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
        tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
    }
    if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
        tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
    }
#endif
    /* If we reach here, we must output if anything is pending */
    if ((rack->r_wanted_output != 0) ||
        (rack->r_fast_output != 0) ||
        (tp->t_flags & TF_ACKNOW)) {
        (void)tcp_output(tp);
    }
    if (tp->t_flags2 & TF2_HPTS_CALLS)
        tp->t_flags2 &= ~TF2_HPTS_CALLS;
    rack_timer_audit(tp, rack, &so->so_snd);

/* rack_do_segment_nounlock() */
    /*
     * cts - is the current time from tv (caller gets ts) in microseconds.
     * ms_cts - is the current time from tv in milliseconds.
     * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
     */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->rack_deferred_inited == 0) {
        /*
         * If we have not done our deferred init yet,
         * do it now.
         */
        rack_deferred_init(tp, rack);
    }
    /*
     * Check to see if we need to skip any output plans. This
     * can happen in the non-LRO path where we are pacing and
     * must process the ack coming in while deferring any send.
     */
    if (m->m_flags & M_ACKCMP) {
        /*
         * All compressed acks are acks by definition, so
         * remove any ack-required flag and then process them.
         */
        rack->rc_ack_required = 0;
        return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
    }
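    /*
     * If we are pacing and this ack may not send data, and the
     * pacing timer still has time to run, the input can be
     * deferred (queued) until the pacer fires.
     */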
    thflags = tcp_get_flags(th);
    if ((rack->rc_always_pace == 1) &&
        (rack->rc_ack_can_sendout_data == 0) &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
        (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) {
        /*
         * Conditions are right to queue the packets coming in
         * until our pacing timer expires.
         */
        uint32_t slot_remaining, optlen;

        slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
        if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
            /* ... */
        }
        optlen = (th->th_off << 2) - sizeof(struct tcphdr);
        /* ... (queueing decision and logging of rack->r_ctl.gp_bw elided) ... */
    }
    if (m->m_flags & M_ACKCMP) {
        panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
    }
    nsegs = m->m_pkthdr.lro_nsegs;
    /*
     * If we have the LRO or hardware timestamp use it,
     * else fall back to the time of arrival.
     */
    if ((m->m_flags & M_TSTMP) ||
        (m->m_flags & M_TSTMP_LRO)) {
        mbuf_tstmp2timespec(m, &ts);
        rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
        rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
    } else {
        rack->r_ctl.act_rcv_time = *tv;
    }
    /*
     * Unscale the window into a 32-bit value. For the SYN_SENT state
     * the scale is zero.
     */
    tiwin = th->th_win << tp->snd_scale;
    /*
     * Parse options on any incoming segment.
     */
    tcp_dooptions(&to, (u_char *)(th + 1),
                  (th->th_off << 2) - sizeof(struct tcphdr),
                  (thflags & TH_SYN) ? TO_SYN : 0);
    KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
        __func__));
    KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
        __func__));
    if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
        /*
         * We don't look at sack's from the
         * peer because the MSS is too small which
         * can subject us to an attack.
         */
        to.to_flags &= ~TOF_SACK;
    }
    if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_GPUTINPROG)) {
        /*
         * We have a goodput measurement in progress while the
         * connection tears down; verify the data is still there.
         */
        uint32_t bytes;

        bytes = tp->gput_ack - tp->gput_seq;
        if (SEQ_GT(tp->gput_seq, tp->snd_una))
            bytes += tp->gput_seq - tp->snd_una;
        if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
            /*
             * Not enough sent bytes remain in the socket buffer
             * to cover the measurement; cancel it.
             */
            rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
                                       rack->r_ctl.rc_gp_srtt /*flex1*/,
                                       tp->gput_seq,
                                       0, 0, 18, __LINE__, NULL, 0);
            tp->t_flags &= ~TF_GPUTINPROG;
        }
    }
    if (tcp_bblogging_on(rack->rc_tp)) {
        union tcp_log_stackspecific log;
        struct timeval ltv;
#ifdef TCP_REQUEST_TRK
        struct tcp_sendfile_track *tcp_req;

        if (SEQ_GT(th->th_ack, tp->snd_una)) {
            tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
        } else {
            tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
        }
#endif
        memset(&log, 0, sizeof(log));
        log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
        if (rack->rack_no_prr == 0)
            log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
        else
            log.u_bbr.flex1 = 0;
        log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
        log.u_bbr.use_lt_bw <<= 1;
        log.u_bbr.use_lt_bw |= rack->r_might_revert;
        log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
        log.u_bbr.bbr_state = rack->rc_free_cnt;
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
        log.u_bbr.flex3 = m->m_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
        if (m->m_flags & M_TSTMP) {
            /* Record the hardware timestamp if present */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
        } else if (m->m_flags & M_TSTMP_LRO) {
            /* Record the LRO arrival timestamp */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
        }
        log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
        /* Log the rcv time */
        log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef TCP_REQUEST_TRK
        log.u_bbr.applimited = tp->t_tcpreq_closed;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_open;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_req;
        if (tcp_req) {
            /* Copy out any client req info */
            /* seconds */
            log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
            /* useconds */
            log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
            log.u_bbr.rttProp = tcp_req->timestamp;
            log.u_bbr.cur_del_rate = tcp_req->start;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
                log.u_bbr.flex8 |= 1;
            } else {
                log.u_bbr.flex8 |= 2;
                log.u_bbr.bw_inuse = tcp_req->end;
            }
            log.u_bbr.flex6 = tcp_req->start_seq;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
                log.u_bbr.flex8 |= 4;
                log.u_bbr.epoch = tcp_req->end_seq;
            }
        }
#endif
        TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
                       tlen, &log, true, &ltv);
    }
    /* Remove ack required flag if set, we have one */
    if (thflags & TH_ACK)
        rack->rc_ack_required = 0;
    /*
     * If a segment with the ACK-bit set arrives in the SYN-SENT state
     * check SEQ.ACK first.
     */
    if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
        (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
        tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
        ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
#ifdef TCP_ACCOUNTING
        sched_unpin();
#endif
        return (1);
    }
    /*
     * If timestamps were negotiated during SYN/ACK and a
     * segment without a timestamp is received, silently drop
     * the segment, unless it is a RST segment or missing timestamps
     * are tolerated. See section 3.2 of RFC 7323.
     */
    if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
        ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
        ctf_do_drop(m, NULL);
#ifdef TCP_ACCOUNTING
        sched_unpin();
#endif
        return (0);
    }
    /*
     * Segment received on connection. Reset idle time and keep-alive
     * timer. XXX: This should be done after segment validation to
     * ignore broken/spoofed segs.
     */
    if (tp->t_idle_reduce &&
        (tp->snd_max == tp->snd_una) &&
        (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
        counter_u64_add(rack_input_idle_reduces, 1);
        rack_cc_after_idle(rack, tp);
    }
    tp->t_rcvtime = ticks;
#ifdef STATS
    stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif
    if (tiwin > rack->r_ctl.rc_high_rwnd)
        rack->r_ctl.rc_high_rwnd = tiwin;
    /* Now what about ECN? */
    if (tcp_ecn_input_segment(tp, thflags, tlen,
                              tcp_packets_this_ack(tp, th->th_ack),
                              iptos))
        rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
    /*
     * If echoed timestamp is later than the current time, fall back to
     * non RFC1323 RTT calculation. Normalize timestamp if echoes
     * were used when SYN and SYN|ACK were exchanged.
     */
    if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
        to.to_tsecr -= tp->ts_offset;
        if (TSTMP_GT(to.to_tsecr, ms_cts))
            to.to_tsecr = 0;
    }
    if ((rack->r_rcvpath_rtt_up == 1) &&
        (to.to_flags & TOF_TS) &&
        (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) {
        uint32_t rtt = 0;

        /*
         * We are receiving only and thus not sending
         * data to do an RTT. We set a flag when we first
         * sent this TS to the peer; we now have it back
         * and have an RTT to share.
         */
        if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv))
            rtt = (cts - rack->r_ctl.last_time_of_arm_rcv);
        rack->r_rcvpath_rtt_up = 0;
    }
    /*
     * If its the first time in we need to take care of options and
     * verify we can do SACK for rack!
     */
    if (rack->r_state == 0) {
        /* Should be init'd by rack_init() */
        KASSERT(rack->rc_inp != NULL,
                ("%s: rack->rc_inp unexpectedly NULL", __func__));
        if (rack->rc_inp == NULL) {
            rack->rc_inp = inp;
        }
    }
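    /*
     * On the SYN/ACK in SYN_SENT, capture the negotiated options:
     * window scaling, initial window, timestamps, MSS, SACK
     * permitted and fast-open parameters.
     */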
16755 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { in rack_do_segment_nounlock()
16759 (tp->t_flags & TF_REQ_SCALE)) { in rack_do_segment_nounlock()
16760 tp->t_flags |= TF_RCVD_SCALE; in rack_do_segment_nounlock()
16761 tp->snd_scale = to.to_wscale; in rack_do_segment_nounlock()
16763 tp->t_flags &= ~TF_REQ_SCALE; in rack_do_segment_nounlock()
16768 tp->snd_wnd = th->th_win; in rack_do_segment_nounlock()
16771 (tp->t_flags & TF_REQ_TSTMP)) { in rack_do_segment_nounlock()
16772 tp->t_flags |= TF_RCVD_TSTMP; in rack_do_segment_nounlock()
16773 tp->ts_recent = to.to_tsval; in rack_do_segment_nounlock()
16774 tp->ts_recent_age = cts; in rack_do_segment_nounlock()
16776 tp->t_flags &= ~TF_REQ_TSTMP; in rack_do_segment_nounlock()
16780 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_do_segment_nounlock()
16782 tp->t_flags &= ~TF_SACK_PERMIT; in rack_do_segment_nounlock()
16783 if (tp->t_flags & TF_FASTOPEN) { in rack_do_segment_nounlock()
16790 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_do_segment_nounlock()
16803 * TF_SACK_PERMIT is set and the sack-not-required is clear. in rack_do_segment_nounlock()
16804 * The code now does do dup-ack counting so if you don't in rack_do_segment_nounlock()
16810 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_do_segment_nounlock()
16812 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, in rack_do_segment_nounlock()
16820 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); in rack_do_segment_nounlock()
16824 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_segment_nounlock()
16825 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16826 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16827 (rack->rc_always_pace)) { in rack_do_segment_nounlock()
16832 if ((rack->forced_ack) && in rack_do_segment_nounlock()
16838 * always. All other times (timers etc) we must have a rack-state in rack_do_segment_nounlock()
16841 rack->r_ctl.rc_rcvtime = cts; in rack_do_segment_nounlock()
16842 if (rack->r_state != tp->t_state) in rack_do_segment_nounlock()
16844 if (SEQ_GT(th->th_ack, tp->snd_una) && in rack_do_segment_nounlock()
16845 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) in rack_do_segment_nounlock()
16847 prev_state = rack->r_state; in rack_do_segment_nounlock()
16849 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && in rack_do_segment_nounlock()
16850 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || in rack_do_segment_nounlock()
16851 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { in rack_do_segment_nounlock()
16853 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); in rack_do_segment_nounlock()
16855 retval = (*rack->r_substate) (m, th, so, in rack_do_segment_nounlock()
16864 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16865 (rack->rc_always_pace) && in rack_do_segment_nounlock()
16866 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16867 rack->in_probe_rtt && in rack_do_segment_nounlock()
16868 (rack->r_ctl.rc_time_probertt_starts == 0)) { in rack_do_segment_nounlock()
16875 if (rack->set_pacing_done_a_iw == 0) { in rack_do_segment_nounlock()
16877 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { in rack_do_segment_nounlock()
16879 rack->set_pacing_done_a_iw = 1; in rack_do_segment_nounlock()
16890 * use of 0xf here since we only have 11 counters (0 - 0xa) and in rack_do_segment_nounlock()
16898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_segment_nounlock()
16899 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); in rack_do_segment_nounlock()
16904 if ((rack->r_wanted_output != 0) || in rack_do_segment_nounlock()
16905 (tp->t_flags & TF_ACKNOW) || in rack_do_segment_nounlock()
16906 (rack->r_fast_output != 0)) { in rack_do_segment_nounlock()
16919 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { in rack_do_segment_nounlock()
16923 (tcp_in_hpts(rack->rc_tp) == 0)) { in rack_do_segment_nounlock()
16933 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) in rack_do_segment_nounlock()
16934 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_segment_nounlock()
16945 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && in rack_do_segment_nounlock()
16946 (rack->rc_new_rnd_needed == 0) && in rack_do_segment_nounlock()
16952 rack_new_round_setup(tp, rack, tp->snd_una); in rack_do_segment_nounlock()
16955 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && in rack_do_segment_nounlock()
16956 (SEQ_GT(tp->snd_max, tp->snd_una) || in rack_do_segment_nounlock()
16957 (tp->t_flags & TF_DELACK) || in rack_do_segment_nounlock()
16958 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_do_segment_nounlock()
16959 (tp->t_state <= TCPS_CLOSING)))) { in rack_do_segment_nounlock()
16961 if ((tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16962 ((tp->t_flags & TF_DELACK) == 0) && in rack_do_segment_nounlock()
16963 (tcp_in_hpts(rack->rc_tp)) && in rack_do_segment_nounlock()
16964 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_segment_nounlock()
16970 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_do_segment_nounlock()
16972 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_do_segment_nounlock()
16973 rack->r_early = 1; in rack_do_segment_nounlock()
16974 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_do_segment_nounlock()
16977 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_do_segment_nounlock()
16994 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_segment_nounlock()
17000 rack->r_wanted_output = 0; in rack_do_segment_nounlock()
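/*
 * Annotation (editor): epilogue bookkeeping. If a pacing timer
 * (PACE_PKT_OUTPUT) was still armed when everything drained, the
 * unexpired remainder (rc_last_output_to - us_cts) is credited to
 * rc_agg_early and r_early is set, so the next pacing calculation can
 * compensate for being released early; rack_timer_audit() then
 * re-arms whichever timer the current state actually calls for.
 */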
17016 if (!STAILQ_EMPTY(&tp->t_inqueue)) { in rack_do_segment()
17022 if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment()
17041 /* Return the next guy to be re-transmitted */ in tcp_rack_output()
17042 if (tqhash_empty(rack->r_ctl.tqh)) { in tcp_rack_output()
17045 if (tp->t_flags & TF_SENTFIN) { in tcp_rack_output()
17050 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in tcp_rack_output()
17051 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { in tcp_rack_output()
17054 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { in tcp_rack_output()
17062 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && in tcp_rack_output()
17063 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17070 if (rsm->r_flags & RACK_ACKED) { in tcp_rack_output()
17073 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && in tcp_rack_output()
17074 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17079 idx = rsm->r_rtr_cnt - 1; in tcp_rack_output()
17080 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; in tcp_rack_output()
17087 if ((tsused - ts_low) < thresh) { in tcp_rack_output()
17091 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in tcp_rack_output()
17092 ((rsm->r_flags & RACK_SACK_PASSED))) { in tcp_rack_output()
17094 * We have passed the dup-ack threshold <or> in tcp_rack_output()
17097 * it is only the dup-ack threshold that in tcp_rack_output()
17101 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); in tcp_rack_output()
17102 rack->r_fast_output = 0; in tcp_rack_output()
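/*
 * Annotation (editor): retransmit-eligibility sketch for the block
 * above. With idx = r_rtr_cnt - 1, ts_low is the last (re)send time of
 * the oldest unacked rsm, and thresh is the RACK reorder window
 * (derived from srtt in code elided here). Roughly:
 *
 *   eligible = ((tsused - ts_low) >= thresh) &&
 *              ((r_dupack >= DUP_ACK_THRESHOLD) ||
 *               (r_flags & RACK_SACK_PASSED));
 *
 * i.e. enough time has passed AND either dup-ack evidence exists or a
 * later segment was SACKed past this one.
 */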
17113 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pacing_delay_calc()
17134 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; in rack_log_pacing_delay_calc()
17135 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; in rack_log_pacing_delay_calc()
17136 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_pacing_delay_calc()
17137 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_pacing_delay_calc()
17138 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; in rack_log_pacing_delay_calc()
17140 log.u_bbr.use_lt_bw |= rack->r_late; in rack_log_pacing_delay_calc()
17142 log.u_bbr.use_lt_bw |= rack->r_early; in rack_log_pacing_delay_calc()
17144 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_pacing_delay_calc()
17146 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_pacing_delay_calc()
17148 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_pacing_delay_calc()
17150 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_pacing_delay_calc()
17152 log.u_bbr.use_lt_bw |= rack->gp_ready; in rack_log_pacing_delay_calc()
17154 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; in rack_log_pacing_delay_calc()
17155 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; in rack_log_pacing_delay_calc()
17156 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; in rack_log_pacing_delay_calc()
17159 if (rack->r_ctl.gp_bw == 0) in rack_log_pacing_delay_calc()
17164 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; in rack_log_pacing_delay_calc()
17165 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; in rack_log_pacing_delay_calc()
17167 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { in rack_log_pacing_delay_calc()
17176 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pacing_delay_calc()
17177 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; in rack_log_pacing_delay_calc()
17179 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_pacing_delay_calc()
17181 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_pacing_delay_calc()
17183 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; in rack_log_pacing_delay_calc()
17185 log.u_bbr.cwnd_gain |= rack->rc_always_pace; in rack_log_pacing_delay_calc()
17187 log.u_bbr.cwnd_gain |= rack->gp_ready; in rack_log_pacing_delay_calc()
17189 log.u_bbr.bbr_state = rack->dgp_on; in rack_log_pacing_delay_calc()
17191 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; in rack_log_pacing_delay_calc()
17193 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_pacing_delay_calc()
17194 &rack->rc_inp->inp_socket->so_rcv, in rack_log_pacing_delay_calc()
17195 &rack->rc_inp->inp_socket->so_snd, in rack_log_pacing_delay_calc()
17206 user_max = rack->rc_user_set_max_segs * mss; in rack_get_pacing_len()
17207 if (rack->rc_force_max_seg) { in rack_get_pacing_len()
17210 if (rack->use_fixed_rate && in rack_get_pacing_len()
17211 ((rack->r_ctl.crte == NULL) || in rack_get_pacing_len()
17212 (bw != rack->r_ctl.crte->rate))) { in rack_get_pacing_len()
17217 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_len()
17222 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, in rack_get_pacing_len()
17223 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_len()
17226 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { in rack_get_pacing_len()
17227 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) in rack_get_pacing_len()
17228 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; in rack_get_pacing_len()
17230 if (rack->r_ctl.rc_user_set_min_segs && in rack_get_pacing_len()
17231 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) in rack_get_pacing_len()
17232 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; in rack_get_pacing_len()
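/*
 * Annotation (editor): pacing-length sketch. The burst handed to the
 * pacer starts from tcp_get_pacing_burst_size_w_divisor() (a
 * bandwidth-scaled TSO size) and is then raised, if needed, to a
 * hybrid client's suggested maxseg and to the user-set minimum
 * segment count; user_max (rc_user_set_max_segs * mss) is likely
 * returned directly in the forced/fixed-rate paths above, whose
 * bodies are elided from this excerpt.
 */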
17245 * nearly zero, maybe because of a time-out? in rack_arrive_at_discounted_rate()
17246 * Lets drop back to the lt-bw. in rack_arrive_at_discounted_rate()
17252 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { in rack_arrive_at_discounted_rate()
17257 if (rack->rack_hibeta == 0) { in rack_arrive_at_discounted_rate()
17261 reduced_win = window_input * rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17263 gain = rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
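/*
 * Annotation (editor): discounted-rate sketch. When recovery (or a
 * near-zero rate) is detected, the window feeding the rate
 * calculation is scaled by the saved high-beta, roughly
 * reduced_win = window_input * saved_hibeta / 100, with gain
 * recording the multiplier applied. The exact normalization sits in
 * elided lines, so treat the /100 as an assumption.
 */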
17295 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17296 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) in pace_to_fill_cwnd()
17298 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17300 if (rack->r_ctl.rc_last_us_rtt == 0) in pace_to_fill_cwnd()
17302 if (rack->rc_pace_fill_if_rttin_range && in pace_to_fill_cwnd()
17303 (rack->r_ctl.rc_last_us_rtt >= in pace_to_fill_cwnd()
17304 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { in pace_to_fill_cwnd()
17308 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17311 * first lets calculate the b/w based on the last us-rtt in pace_to_fill_cwnd()
17314 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in pace_to_fill_cwnd()
17315 if (rack->rc_fillcw_apply_discount) { in pace_to_fill_cwnd()
17324 if (fill_bw > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17325 fill_bw = rack->rc_tp->snd_wnd; in pace_to_fill_cwnd()
17328 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in pace_to_fill_cwnd()
17330 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17331 fill_bw = rack->r_ctl.fillcw_cap; in pace_to_fill_cwnd()
17336 * We want to limit fill-cw to some multiplier in pace_to_fill_cwnd()
17350 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in pace_to_fill_cwnd()
17363 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, in pace_to_fill_cwnd()
17376 rack->r_via_fill_cw = 1; in pace_to_fill_cwnd()
17377 if (rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17378 (rack->r_ctl.crte != NULL)) { in pace_to_fill_cwnd()
17381 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in pace_to_fill_cwnd()
17386 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17395 } else if ((rack->r_ctl.crte == NULL) && in pace_to_fill_cwnd()
17396 (rack->rack_hdrw_pacing == 0) && in pace_to_fill_cwnd()
17397 (rack->rack_hdw_pace_ena) && in pace_to_fill_cwnd()
17398 rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17399 (rack->rack_attempt_hdwr_pace == 0) && in pace_to_fill_cwnd()
17400 (rack->rc_inp->inp_route.ro_nh != NULL) && in pace_to_fill_cwnd()
17401 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in pace_to_fill_cwnd()
17408 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in pace_to_fill_cwnd()
17417 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { in pace_to_fill_cwnd()
17418 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in pace_to_fill_cwnd()
17420 fill_bw = rack->r_ctl.bw_rate_cap; in pace_to_fill_cwnd()
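/*
 * Annotation (editor): fill-cw arithmetic recap. fill_bw starts as
 * the window we want kept full, min(snd_cwnd, cwnd_to_use) bytes, is
 * clipped to snd_wnd, and is converted to bytes/second against
 * rc_last_us_rtt (the elided line presumably scales by the
 * usec-per-second constant first). Worked example: a 100 kB window
 * over a 10 ms RTT gives ~10 MB/s. That figure is then bounded by
 * fillcw_cap, by the highest hardware pacing rate when rate caps are
 * in force, and finally by bw_rate_cap.
 */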
17448 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_delay()
17452 if (rack->rc_always_pace == 0) { in rack_get_pacing_delay()
17468 if (rack->r_ctl.rc_rack_min_rtt) in rack_get_pacing_delay()
17469 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_get_pacing_delay()
17471 srtt = max(tp->t_srtt, 1); in rack_get_pacing_delay()
17472 if (rack->r_ctl.rc_rack_largest_cwnd) in rack_get_pacing_delay()
17473 cwnd = rack->r_ctl.rc_rack_largest_cwnd; in rack_get_pacing_delay()
17475 cwnd = rack->r_ctl.cwnd_to_use; in rack_get_pacing_delay()
17495 slot -= reduce; in rack_get_pacing_delay()
17501 if (rack->rc_pace_to_cwnd) { in rack_get_pacing_delay()
17505 rack->rc_ack_can_sendout_data = 1; in rack_get_pacing_delay()
17510 /* RRS: We insert non-paced call to stats here for len */ in rack_get_pacing_delay()
17518 if ((rack->r_rr_config == 1) && rsm) { in rack_get_pacing_delay()
17519 return (rack->r_ctl.rc_min_to); in rack_get_pacing_delay()
17521 if (rack->use_fixed_rate) { in rack_get_pacing_delay()
17523 } else if ((rack->r_ctl.init_rate == 0) && in rack_get_pacing_delay()
17524 (rack->r_ctl.gp_bw == 0)) { in rack_get_pacing_delay()
17527 } else if (rack->dgp_on) { in rack_get_pacing_delay()
17533 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in rack_get_pacing_delay()
17536 if (rate_wanted > rack->rc_tp->snd_wnd) in rack_get_pacing_delay()
17537 rate_wanted = rack->rc_tp->snd_wnd; in rack_get_pacing_delay()
17540 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in rack_get_pacing_delay()
17543 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, in rack_get_pacing_delay()
17544 rack->r_ctl.cwnd_to_use, in rack_get_pacing_delay()
17546 rack->r_ctl.rc_last_us_rtt, in rack_get_pacing_delay()
17549 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && in rack_get_pacing_delay()
17550 (rack->use_fixed_rate == 0)) { in rack_get_pacing_delay()
17559 segs = (len + segsiz - 1) / segsiz; in rack_get_pacing_delay()
17561 * We need the diff between 1514 bytes (e-mtu with e-hdr) in rack_get_pacing_delay()
17567 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); in rack_get_pacing_delay()
17568 if (rack->r_is_v6) { in rack_get_pacing_delay()
17586 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17591 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17592 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17593 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17594 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17597 if (rack->r_ctl.crte && in rack_get_pacing_delay()
17598 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17604 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17611 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17612 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17613 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17614 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17617 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { in rack_get_pacing_delay()
17622 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17623 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17624 /* Lets re-allow attempting to setup pacing */ in rack_get_pacing_delay()
17625 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17626 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17631 prev_fill = rack->r_via_fill_cw; in rack_get_pacing_delay()
17632 if ((rack->rc_pace_to_cwnd) && in rack_get_pacing_delay()
17634 (rack->dgp_on == 1) && in rack_get_pacing_delay()
17635 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17636 (rack->in_probe_rtt == 0) && in rack_get_pacing_delay()
17637 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_get_pacing_delay()
17643 /* Re-check to make sure we are not exceeding our max b/w */ in rack_get_pacing_delay()
17644 if ((rack->r_ctl.crte != NULL) && in rack_get_pacing_delay()
17645 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17651 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17658 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17659 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17660 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17661 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17662 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_get_pacing_delay()
17666 if ((rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_pacing_delay()
17667 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_pacing_delay()
17668 if ((rack->rack_hdw_pace_ena) && in rack_get_pacing_delay()
17670 (rack->rack_hdrw_pacing == 0) && in rack_get_pacing_delay()
17671 (rack->rack_attempt_hdwr_pace == 0)) { in rack_get_pacing_delay()
17676 rack->rack_attempt_hdwr_pace = 1; in rack_get_pacing_delay()
17677 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, in rack_get_pacing_delay()
17678 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17681 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17682 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17683 rack->rack_hdrw_pacing = 1; in rack_get_pacing_delay()
17684 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, in rack_get_pacing_delay()
17685 pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17686 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17688 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17690 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17694 } else if (rack->rack_hdrw_pacing && in rack_get_pacing_delay()
17695 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { in rack_get_pacing_delay()
17699 if (rack->r_up_only && in rack_get_pacing_delay()
17700 (rate_wanted < rack->r_ctl.crte->rate)) { in rack_get_pacing_delay()
17705 * previous | this-time in rack_get_pacing_delay()
17706 * A) 0 | 0 -- fill_cw not in the picture in rack_get_pacing_delay()
17707 * B) 1 | 0 -- we were doing a fill-cw but now are not in rack_get_pacing_delay()
17708 * C) 1 | 1 -- all rates from fill_cw in rack_get_pacing_delay()
17709 * D) 0 | 1 -- we were doing non-fill and now we are filling in rack_get_pacing_delay()
17716 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) in rack_get_pacing_delay()
17719 if ((rate_wanted > rack->r_ctl.crte->rate) || in rack_get_pacing_delay()
17720 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { in rack_get_pacing_delay()
17728 bw_est, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17730 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17731 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17732 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17733 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17734 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17737 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, in rack_get_pacing_delay()
17738 rack->rc_tp, in rack_get_pacing_delay()
17739 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17742 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17748 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17749 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17753 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17755 } else if (nrte != rack->r_ctl.crte) { in rack_get_pacing_delay()
17756 rack->r_ctl.crte = nrte; in rack_get_pacing_delay()
17757 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, in rack_get_pacing_delay()
17758 segsiz, pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17759 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17761 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17763 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17767 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17769 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17771 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17777 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17778 (rack->rack_hdrw_pacing == 0)) { in rack_get_pacing_delay()
17789 if (rack->rc_tp->t_srtt) in rack_get_pacing_delay()
17790 srtt = rack->rc_tp->t_srtt; in rack_get_pacing_delay()
17803 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { in rack_get_pacing_delay()
17807 * of gas or we are mis-estimating the time in rack_get_pacing_delay()
17813 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; in rack_get_pacing_delay()
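/*
 * Annotation (editor): ENOBUFS compensation. When the hardware rate
 * entry has recorded send failures (rs_num_enobufs > 0), the pacing
 * delay is padded by hw_boost_delay = crte->time_between *
 * rack_enobuf_hw_boost_mult, i.e. a multiple of the hardware rate's
 * inter-packet gap, giving the NIC queue time to drain. Any clamping
 * of this boost is in lines elided here.
 */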
17829 if (tp->t_state < TCPS_ESTABLISHED) { in rack_start_gp_measurement()
17836 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_start_gp_measurement()
17843 if (sbavail(&tptosocket(tp)->so_snd) < in rack_start_gp_measurement()
17850 tp->t_flags |= TF_GPUTINPROG; in rack_start_gp_measurement()
17851 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17852 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_start_gp_measurement()
17853 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_start_gp_measurement()
17854 tp->gput_seq = startseq; in rack_start_gp_measurement()
17855 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17856 if (rack->in_probe_rtt) in rack_start_gp_measurement()
17857 rack->measure_saw_probe_rtt = 1; in rack_start_gp_measurement()
17858 else if ((rack->measure_saw_probe_rtt) && in rack_start_gp_measurement()
17859 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_start_gp_measurement()
17860 rack->measure_saw_probe_rtt = 0; in rack_start_gp_measurement()
17861 if (rack->rc_gp_filled) in rack_start_gp_measurement()
17862 tp->gput_ts = rack->r_ctl.last_cumack_advance; in rack_start_gp_measurement()
17867 tp->gput_ts = tcp_get_usecs(&tv); in rack_start_gp_measurement()
17868 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17874 * initial-window's worth of data to in rack_start_gp_measurement()
17878 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { in rack_start_gp_measurement()
17879 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17880 tp->gput_ack = startseq + max(rc_init_window(rack), in rack_start_gp_measurement()
17883 tp->gput_seq, in rack_start_gp_measurement()
17884 tp->gput_ack, in rack_start_gp_measurement()
17886 tp->gput_ts, in rack_start_gp_measurement()
17887 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17891 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17900 if (rack->r_ctl.rc_app_limited_cnt == 0) { in rack_start_gp_measurement()
17903 * the tp->gput_ts is correctly set based on in rack_start_gp_measurement()
17907 my_rsm = tqhash_min(rack->r_ctl.tqh); in rack_start_gp_measurement()
17909 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17914 if (rack->r_ctl.rc_first_appl == NULL) { in rack_start_gp_measurement()
17929 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17933 * after that (after the app-limited). in rack_start_gp_measurement()
17935 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); in rack_start_gp_measurement()
17937 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) in rack_start_gp_measurement()
17939 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17942 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); in rack_start_gp_measurement()
17947 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17950 * the last is the app-limited one. in rack_start_gp_measurement()
17955 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
17957 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
17963 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
17964 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17969 tp->gput_seq = my_rsm->r_end; in rack_start_gp_measurement()
17974 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17985 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17986 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17987 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17988 if ((rack->r_ctl.cleared_app_ack == 1) && in rack_start_gp_measurement()
17989 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { in rack_start_gp_measurement()
17995 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17996 rack->r_ctl.cleared_app_ack = 0; in rack_start_gp_measurement()
17999 tp->gput_seq, in rack_start_gp_measurement()
18000 tp->gput_ack, in rack_start_gp_measurement()
18002 tp->gput_ts, in rack_start_gp_measurement()
18003 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18008 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
18015 * idle or if this is the first-send. Lets in rack_start_gp_measurement()
18020 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
18021 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
18022 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
18024 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); in rack_start_gp_measurement()
18026 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
18027 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
18032 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
18033 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
18035 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { in rack_start_gp_measurement()
18037 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
18041 * TSNH unless we have some send-map limit, in rack_start_gp_measurement()
18048 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
18052 tp->gput_seq, in rack_start_gp_measurement()
18053 tp->gput_ack, in rack_start_gp_measurement()
18055 tp->gput_ts, in rack_start_gp_measurement()
18056 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18058 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
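/*
 * Annotation (editor): goodput-measurement recap. A measurement spans
 * gput_seq..gput_ack, with gput_ts the start time; the eventual
 * estimate is roughly
 *
 *   bw = (bytes cum-acked past gput_seq) / (ack time - gput_ts)
 *
 * The setup paths above differ only in how safely the start point can
 * be picked: first-ever send (use startseq and wait an init-window's
 * worth), never app-limited (anchor on the oldest rsm), or restarting
 * after idle/app-limit (walk past the app-limited rsm, or set
 * app_limited_needs_set so the start is fixed up on the next ACK).
 */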
18068 if (tp->snd_wnd > cwnd_to_use) in rack_what_can_we_send()
18071 sendwin = tp->snd_wnd; in rack_what_can_we_send()
18072 if (ctf_outstanding(tp) >= tp->snd_wnd) { in rack_what_can_we_send()
18073 /* We never want to go over our peer's rcv-window */ in rack_what_can_we_send()
18078 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_what_can_we_send()
18083 * >= tp->snd_wnd). in rack_what_can_we_send()
18087 len = sendwin - flight; in rack_what_can_we_send()
18088 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { in rack_what_can_we_send()
18090 len = tp->snd_wnd - ctf_outstanding(tp); in rack_what_can_we_send()
18097 len = avail - sb_offset; in rack_what_can_we_send()
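/*
 * Annotation (editor): send-budget sketch. sendwin is
 * min(cwnd_to_use, snd_wnd); the new-data budget is sendwin minus the
 * current flight, re-clamped so budget + outstanding never exceeds
 * snd_wnd, and finally limited to the unsent bytes actually in the
 * socket buffer (avail - sb_offset).
 */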
18108 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_fsb()
18113 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_fsb()
18118 log.u_bbr.flex5 = tp->rcv_numsacks; in rack_log_fsb()
18119 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_log_fsb()
18121 log.u_bbr.flex8 = rack->r_fsb_inited; in rack_log_fsb()
18122 log.u_bbr.applimited = rack->r_fast_output; in rack_log_fsb()
18130 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_fsb()
18131 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, in rack_log_fsb()
18148 struct mbuf *m, *n, **np, *smb; in rack_fo_base_copym() local
18158 np = &top; in rack_fo_base_copym()
18161 if (hw_tls && (m->m_flags & M_EXTPG)) in rack_fo_base_copym()
18162 tls = m->m_epg_tls; in rack_fo_base_copym()
18176 if (m->m_flags & M_EXTPG) in rack_fo_base_copym()
18177 ntls = m->m_epg_tls; in rack_fo_base_copym()
18193 mlen = min(len, m->m_len - off); in rack_fo_base_copym()
18203 if (m->m_flags & M_EXTPG) { in rack_fo_base_copym()
18224 mlen = (seglimit - frags - 1) * fragsize; in rack_fo_base_copym()
18231 seglimit -= frags; in rack_fo_base_copym()
18235 n = m_get(M_NOWAIT, m->m_type); in rack_fo_base_copym()
18236 *np = n; in rack_fo_base_copym()
18239 n->m_len = mlen; in rack_fo_base_copym()
18241 len_cp += n->m_len; in rack_fo_base_copym()
18242 if (m->m_flags & (M_EXT | M_EXTPG)) { in rack_fo_base_copym()
18243 n->m_data = m->m_data + off; in rack_fo_base_copym()
18247 (u_int)n->m_len); in rack_fo_base_copym()
18249 len -= n->m_len; in rack_fo_base_copym()
18251 m = m->m_next; in rack_fo_base_copym()
18252 np = &n->m_next; in rack_fo_base_copym()
18253 if (len || (soff == smb->m_len)) { in rack_fo_base_copym()
18265 fsb->m = smb; in rack_fo_base_copym()
18266 fsb->off = soff; in rack_fo_base_copym()
18274 fsb->o_m_len = smb->m_len; in rack_fo_base_copym()
18275 fsb->o_t_len = M_TRAILINGROOM(smb); in rack_fo_base_copym()
18285 fsb->o_m_len = 0; in rack_fo_base_copym()
18286 fsb->o_t_len = 0; in rack_fo_base_copym()
18308 m = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18309 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { in rack_fo_m_copym()
18316 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), in rack_fo_m_copym()
18321 rack->r_ctl.fsb.o_t_len, in rack_fo_m_copym()
18322 rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18323 m->m_len)); in rack_fo_m_copym()
18324 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); in rack_fo_m_copym()
18325 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); in rack_fo_m_copym()
18327 if (m->m_len < rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18332 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), in rack_fo_m_copym()
18334 m, m->m_len, in rack_fo_m_copym()
18335 rack, rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18336 rack->r_ctl.fsb.off)); in rack_fo_m_copym()
18338 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)) in rack_fo_m_copym()
18339 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); in rack_fo_m_copym()
18341 rack->r_ctl.fsb.off = 0; in rack_fo_m_copym()
18342 rack->r_ctl.fsb.o_m_len = m->m_len; in rack_fo_m_copym()
18344 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18349 soff = rack->r_ctl.fsb.off; in rack_fo_m_copym()
18352 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", in rack_fo_m_copym()
18354 rack, *plen, m, m->m_len)); in rack_fo_m_copym()
18357 *s_mb = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18359 &rack->r_ctl.fsb, in rack_fo_m_copym()
18360 seglimit, segsize, rack->r_ctl.fsb.hw_tls); in rack_fo_m_copym()
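/*
 * Annotation (editor): fast-send-block resync. The fsb caches an mbuf
 * pointer (m), an offset (off), and what that mbuf's length and
 * trailing room were when cached (o_m_len, o_t_len). Before copying,
 * rack_fo_m_copym() reconciles three cases: sbcompress() grew the
 * cached mbuf in place (trailing room shrank, so o_m_len is advanced
 * by the delta), sbdrop() trimmed it (off is pulled back, or reset to
 * 0), or the mbuf grew without a trailing-room change, which the
 * elided branch at 18344 presumably treats as a can't-happen case.
 */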
18374 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_log_queue_level()
18375 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_log_queue_level()
18378 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_queue_level()
18381 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_log_queue_level()
18382 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_log_queue_level()
18383 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_log_queue_level()
18387 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_log_queue_level()
18389 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_queue_level()
18405 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_check_queue_level()
18411 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_check_queue_level()
18432 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); in rack_check_queue_level()
18437 /* TSNH -- KASSERT? */ in rack_check_queue_level()
18443 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_check_queue_level()
18446 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_check_queue_level()
18447 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_check_queue_level()
18448 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_check_queue_level()
18452 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_check_queue_level()
18455 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_check_queue_level()
18498 if (rack->r_is_v6) { in rack_fast_rsm_output()
18499 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18504 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18507 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_rsm_output()
18512 rsm->r_flags |= RACK_TLP; in rack_fast_rsm_output()
18515 rsm->r_flags &= ~RACK_TLP; in rack_fast_rsm_output()
18517 startseq = rsm->r_start; in rack_fast_rsm_output()
18518 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_rsm_output()
18519 inp = rack->rc_inp; in rack_fast_rsm_output()
18521 flags = tcp_outflags[tp->t_state]; in rack_fast_rsm_output()
18525 if (rsm->r_flags & RACK_HAS_FIN) { in rack_fast_rsm_output()
18533 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_rsm_output()
18534 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_rsm_output()
18535 to.to_tsecr = tp->ts_recent; in rack_fast_rsm_output()
18539 /* TCP-MD5 (RFC2385). */ in rack_fast_rsm_output()
18540 if (tp->t_flags & TF_SIGNATURE) in rack_fast_rsm_output()
18545 udp = rack->r_ctl.fsb.udp; in rack_fast_rsm_output()
18548 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_rsm_output()
18549 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18550 else if (rack->rc_user_set_max_segs) in rack_fast_rsm_output()
18551 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_rsm_output()
18554 if ((tp->t_flags & TF_TSO) && in rack_fast_rsm_output()
18557 (tp->t_port == 0)) in rack_fast_rsm_output()
18567 m->m_data += max_linkhdr; in rack_fast_rsm_output()
18568 m->m_len = hdrlen; in rack_fast_rsm_output()
18569 th = rack->r_ctl.fsb.th; in rack_fast_rsm_output()
18578 if_hw_tsomax = tp->t_tsomax; in rack_fast_rsm_output()
18579 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_rsm_output()
18580 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_rsm_output()
18587 max_len = (if_hw_tsomax - hdrlen - in rack_fast_rsm_output()
18609 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_rsm_output()
18612 th->th_seq = htonl(rsm->r_start); in rack_fast_rsm_output()
18613 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_rsm_output()
18621 if ((rsm->r_flags & RACK_HAD_PUSH) && in rack_fast_rsm_output()
18622 (len == (rsm->r_end - rsm->r_start))) in rack_fast_rsm_output()
18624 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_rsm_output()
18625 if (th->th_win == 0) { in rack_fast_rsm_output()
18626 tp->t_sndzerowin++; in rack_fast_rsm_output()
18627 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_rsm_output()
18629 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_rsm_output()
18630 if (rsm->r_flags & RACK_TLP) { in rack_fast_rsm_output()
18638 tp->t_sndrexmitpack++; in rack_fast_rsm_output()
18643 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_fast_rsm_output()
18646 if (rsm->m == NULL) in rack_fast_rsm_output()
18648 if (rsm->m && in rack_fast_rsm_output()
18649 ((rsm->orig_m_len != rsm->m->m_len) || in rack_fast_rsm_output()
18650 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_fast_rsm_output()
18654 …m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxse… in rack_fast_rsm_output()
18664 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_rsm_output()
18668 if (rack->r_is_v6) in rack_fast_rsm_output()
18669 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_rsm_output()
18671 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_rsm_output()
18672 udp->uh_ulen = htons(ulen); in rack_fast_rsm_output()
18674 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_rsm_output()
18675 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_rsm_output()
18676 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_rsm_output()
18678 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_rsm_output()
18679 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_rsm_output()
18680 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_rsm_output()
18682 if (rack->r_is_v6) { in rack_fast_rsm_output()
18683 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_rsm_output()
18684 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_rsm_output()
18689 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_rsm_output()
18690 ip->ip_tos |= ect; in rack_fast_rsm_output()
18693 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18701 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_fast_rsm_output()
18711 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_rsm_output()
18721 if (rack->r_is_v6) { in rack_fast_rsm_output()
18722 if (tp->t_port) { in rack_fast_rsm_output()
18723 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_rsm_output()
18724 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18725 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_rsm_output()
18726 th->th_sum = htons(0); in rack_fast_rsm_output()
18729 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_rsm_output()
18730 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18731 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_rsm_output()
18742 if (tp->t_port) { in rack_fast_rsm_output()
18743 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_rsm_output()
18744 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18745 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18746 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_rsm_output()
18747 th->th_sum = htons(0); in rack_fast_rsm_output()
18750 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_rsm_output()
18751 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18752 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18753 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_rsm_output()
18757 KASSERT(ip->ip_v == IPVERSION, in rack_fast_rsm_output()
18758 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_rsm_output()
18765 * via either fast-path). in rack_fast_rsm_output()
18769 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_rsm_output()
18770 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_rsm_output()
18773 if (rack->r_is_v6) { in rack_fast_rsm_output()
18774 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18775 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_rsm_output()
18776 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_rsm_output()
18777 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18779 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18787 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_rsm_output()
18788 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18789 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_rsm_output()
18790 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18791 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_rsm_output()
18792 ip->ip_off |= htons(IP_DF); in rack_fast_rsm_output()
18795 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18801 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18804 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_rsm_output()
18806 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18809 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18814 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18815 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18818 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_rsm_output()
18820 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_rsm_output()
18822 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18825 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_fast_rsm_output()
18826 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18828 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_fast_rsm_output()
18831 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18832 if (rack->rack_no_prr) in rack_fast_rsm_output()
18835 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18836 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18837 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18840 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18841 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18843 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18850 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_rsm_output()
18852 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18853 if (rsm->r_rtr_cnt > 0) { in rack_fast_rsm_output()
18858 log.u_bbr.flex5 = rsm->r_fas; in rack_fast_rsm_output()
18859 log.u_bbr.bbr_substate = rsm->r_bas; in rack_fast_rsm_output()
18866 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_rsm_output()
18868 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18871 log.u_bbr.delRate = rsm->r_flags; in rack_fast_rsm_output()
18873 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18881 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18886 if (rack->r_is_v6) { in rack_fast_rsm_output()
18887 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_rsm_output()
18888 &inp->inp_route6, in rack_fast_rsm_output()
18896 &inp->inp_route, in rack_fast_rsm_output()
18902 lgb->tlb_errno = error; in rack_fast_rsm_output()
18906 tp->snd_nxt = tp->snd_max; in rack_fast_rsm_output()
18909 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18910 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18911 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18912 rack->r_early = 0; in rack_fast_rsm_output()
18913 rack->r_late = 0; in rack_fast_rsm_output()
18914 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18916 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), in rack_fast_rsm_output()
18917 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); in rack_fast_rsm_output()
18919 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18920 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18924 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); in rack_fast_rsm_output()
18926 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18927 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18928 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18929 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18931 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18932 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18934 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
18936 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_rsm_output()
18937 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18938 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_fast_rsm_output()
18939 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18945 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_rsm_output()
18949 if (tp->t_rtttime == 0) { in rack_fast_rsm_output()
18950 tp->t_rtttime = ticks; in rack_fast_rsm_output()
18951 tp->t_rtseq = startseq; in rack_fast_rsm_output()
18956 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18957 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18958 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18961 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18962 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18963 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18964 rack->rc_enobuf++; in rack_fast_rsm_output()
18967 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18969 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
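/*
 * Annotation (editor): ENOBUFS backoff. Each consecutive ENOBUFS
 * stretches the next pacing slot to (1 + rc_enobuf) milliseconds
 * (HPTS_USEC_IN_MSEC units), with rc_enobuf saturating at 0x7f so the
 * delay cannot grow without bound; hardware-rate sends additionally
 * log the event against the rate entry.
 */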
18978 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
18979 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_rsm_output()
18980 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_rsm_output()
18981 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
18989 return (-1); in rack_fast_rsm_output()
19000 * delay (e.g. trans-continental/oceanic links). Setting the in rack_sndbuf_autoscale()
19022 tp = rack->rc_tp; in rack_sndbuf_autoscale()
19023 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
19024 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
19025 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { in rack_sndbuf_autoscale()
19026 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && in rack_sndbuf_autoscale()
19027 sbused(&so->so_snd) >= in rack_sndbuf_autoscale()
19028 (so->so_snd.sb_hiwat / 8 * 7) && in rack_sndbuf_autoscale()
19029 sbused(&so->so_snd) < V_tcp_autosndbuf_max && in rack_sndbuf_autoscale()
19030 sendwin >= (sbused(&so->so_snd) - in rack_sndbuf_autoscale()
19031 (tp->snd_max - tp->snd_una))) { in rack_sndbuf_autoscale()
19033 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; in rack_sndbuf_autoscale()
19038 scaleup += so->so_snd.sb_hiwat; in rack_sndbuf_autoscale()
19042 so->so_snd.sb_flags &= ~SB_AUTOSIZE; in rack_sndbuf_autoscale()
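/*
 * Annotation (editor): autoscale trigger recap. The send buffer grows
 * only when all four conditions hold: the peer's advertised window is
 * within 25% of sb_hiwat (snd_wnd * 5/4 >= sb_hiwat), the buffer is
 * at least 7/8 used, usage is under V_tcp_autosndbuf_max, and the
 * effective send window covers the queued-but-unsent backlog. The new
 * target is sb_hiwat plus rack_autosndbuf_inc percent of sb_hiwat
 * (e.g. +10% turns 64 kB into ~70 kB); the elided lines presumably
 * clamp the increment before applying it.
 */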
19057 * the max-burst). We have how much to send and all the info we in rack_fast_output()
19087 if (rack->r_is_v6) { in rack_fast_output()
19088 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19094 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19098 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_output()
19102 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
19103 startseq = tp->snd_max; in rack_fast_output()
19104 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
19105 inp = rack->rc_inp; in rack_fast_output()
19106 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19108 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
19109 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_output()
19110 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_output()
19111 to.to_tsecr = tp->ts_recent; in rack_fast_output()
19115 /* TCP-MD5 (RFC2385). */ in rack_fast_output()
19116 if (tp->t_flags & TF_SIGNATURE) in rack_fast_output()
19121 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
19124 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
19125 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19126 else if (rack->rc_user_set_max_segs) in rack_fast_output()
19127 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
19130 if ((tp->t_flags & TF_TSO) && in rack_fast_output()
19133 (tp->t_port == 0)) in rack_fast_output()
19144 m->m_data += max_linkhdr; in rack_fast_output()
19145 m->m_len = hdrlen; in rack_fast_output()
19146 th = rack->r_ctl.fsb.th; in rack_fast_output()
19155 if_hw_tsomax = tp->t_tsomax; in rack_fast_output()
19156 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_output()
19157 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_output()
19164 max_len = (if_hw_tsomax - hdrlen - in rack_fast_output()
19186 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_output()
19189 sb_offset = tp->snd_max - tp->snd_una; in rack_fast_output()
19190 th->th_seq = htonl(tp->snd_max); in rack_fast_output()
19191 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_output()
19192 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
19193 if (th->th_win == 0) { in rack_fast_output()
19194 tp->t_sndzerowin++; in rack_fast_output()
19195 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_output()
19197 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_output()
19198 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ in rack_fast_output()
19202 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_fast_output()
19205 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
19209 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19220 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19221 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19225 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_output()
19229 if (rack->r_is_v6) in rack_fast_output()
19230 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_output()
19232 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_output()
19233 udp->uh_ulen = htons(ulen); in rack_fast_output()
19235 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_output()
19236 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_output()
19237 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_output()
19239 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_output()
19240 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_output()
19241 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_output()
19243 if (rack->r_is_v6) { in rack_fast_output()
19244 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_output()
19245 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_output()
19251 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_output()
19252 ip->ip_tos |= ect; in rack_fast_output()
19257 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_fast_output()
19267 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_output()
19277 if (rack->r_is_v6) { in rack_fast_output()
19278 if (tp->t_port) { in rack_fast_output()
19279 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_output()
19280 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19281 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_output()
19282 th->th_sum = htons(0); in rack_fast_output()
19285 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_output()
19286 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19287 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_output()
19298 if (tp->t_port) { in rack_fast_output()
19299 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_output()
19300 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19301 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19302 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_output()
19303 th->th_sum = htons(0); in rack_fast_output()
19306 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_output()
19307 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19308 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19309 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_output()
19313 KASSERT(ip->ip_v == IPVERSION, in rack_fast_output()
19314 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_output()
19321 * via either fast-path). in rack_fast_output()
19325 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_output()
19326 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_output()
19329 if (rack->r_is_v6) { in rack_fast_output()
19330 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19331 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_output()
19332 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_output()
19333 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19335 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19343 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_output()
19344 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19345 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_output()
19346 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19347 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_output()
19348 ip->ip_off |= htons(IP_DF); in rack_fast_output()
19351 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19355 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_output()
19357 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19360 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19364 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19365 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19368 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_output()
19370 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_output()
19372 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19376 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19380 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19381 if (rack->rack_no_prr) in rack_fast_output()
19384 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19385 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19386 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19389 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19390 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19392 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19396 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_output()
19398 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19400 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19401 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19403 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19408 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_output()
19414 if (rack->r_is_v6) { in rack_fast_output()
19415 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_output()
19416 &inp->inp_route6, in rack_fast_output()
19426 &inp->inp_route, in rack_fast_output()
19431 lgb->tlb_errno = error; in rack_fast_output()
19438 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19439 rack->rc_hw_nobuf = 0; in rack_fast_output()
19440 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19441 rack->r_early = 0; in rack_fast_output()
19442 rack->r_late = 0; in rack_fast_output()
19443 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19445 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19447 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); in rack_fast_output()
19448 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19449 rack->lt_bw_up = 1; in rack_fast_output()
19451 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19459 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19460 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19462 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19463 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19464 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
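/*
 * Annotation (editor): long-term bandwidth bookkeeping. On first use,
 * lt_timemark and lt_seq snapshot "now" and snd_una; later, before
 * the 2^31 sequence-span guard above would trip, the bytes acked
 * since lt_seq and the time since lt_timemark are folded into
 * lt_bw_bytes/lt_bw_time and both marks advance.
 */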
19467 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), in rack_fast_output()
19468 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); in rack_fast_output()
19469 if (tp->snd_una == tp->snd_max) { in rack_fast_output()
19470 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_fast_output()
19472 tp->t_acktime = ticks; in rack_fast_output()
19475 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); in rack_fast_output()
19477 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_output()
19479 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_fast_output()
19480 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); in rack_fast_output()
19481 tp->snd_max += len; in rack_fast_output()
19482 tp->snd_nxt = tp->snd_max; in rack_fast_output()
19483 if (rack->rc_new_rnd_needed) { in rack_fast_output()
19484 rack_new_round_starts(tp, rack, tp->snd_max); in rack_fast_output()
19491 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_output()
19495 if (len <= rack->r_ctl.fsb.left_to_send) in rack_fast_output()
19496 rack->r_ctl.fsb.left_to_send -= len; in rack_fast_output()
19498 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19499 if (rack->r_ctl.fsb.left_to_send < segsiz) { in rack_fast_output()
19500 rack->r_fast_output = 0; in rack_fast_output()
19501 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19503 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19505 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19507 if (tp->t_rtttime == 0) { in rack_fast_output()
19508 tp->t_rtttime = ticks; in rack_fast_output()
19509 tp->t_rtseq = startseq; in rack_fast_output()
19512 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && in rack_fast_output()
19514 (*tot_len < rack->r_ctl.rc_pace_max_segs) && in rack_fast_output()
19516 max_val -= len; in rack_fast_output()
19518 th = rack->r_ctl.fsb.th; in rack_fast_output()
19524 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_output()
19530 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19531 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_output()
19532 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_output()
19533 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); in rack_fast_output()
19541 rack->r_fast_output = 0; in rack_fast_output()
19542 return (-1); in rack_fast_output()
19552 rack->r_fast_output = 1; in rack_setup_fast_output()
19553 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_setup_fast_output()
19554 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_setup_fast_output()
19555 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_setup_fast_output()
19556 rack->r_ctl.fsb.tcp_flags = flags; in rack_setup_fast_output()
19557 rack->r_ctl.fsb.left_to_send = orig_len - len; in rack_setup_fast_output()
19558 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { in rack_setup_fast_output()
19560 rack->r_fast_output = 0; in rack_setup_fast_output()
19564 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); in rack_setup_fast_output()
19567 rack->r_ctl.fsb.hw_tls = 1; in rack_setup_fast_output()
19569 rack->r_ctl.fsb.hw_tls = 0; in rack_setup_fast_output()
19570 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), in rack_setup_fast_output()
19572 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), in rack_setup_fast_output()
19573 (tp->snd_max - tp->snd_una))); in rack_setup_fast_output()
19574 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_setup_fast_output()
19575 rack->r_fast_output = 0; in rack_setup_fast_output()
19577 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) in rack_setup_fast_output()
19578 rack->r_ctl.fsb.rfo_apply_push = 1; in rack_setup_fast_output()
19580 rack->r_ctl.fsb.rfo_apply_push = 0; in rack_setup_fast_output()
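
rack_setup_fast_output() above primes the fast send block ("fsb") so subsequent transmits can bypass the full output path: it caches the mbuf cursor, rounds the residue down to whole pacing bursts, and arms a PUSH only when this block will drain everything unsent in the socket buffer. A simplified sketch of the arming rules (fsb_arm and its struct are illustrative stand-ins):

#include <stdint.h>

struct fsb_sketch {
	uint32_t left_to_send;
	int	 rfo_apply_push;
};

/*
 * Arm fast output: bail below one pacing burst, round down to
 * whole bursts otherwise, and PUSH only if the block empties the
 * send buffer.  Returns 1 when fast output stays enabled.
 */
static int
fsb_arm(struct fsb_sketch *f, uint32_t residue, uint32_t pace_max_seg,
    uint32_t segsiz, uint32_t sb_unsent)
{
	if (residue < pace_max_seg)
		return (0);		/* less than one full burst */
	f->left_to_send = residue - (residue % pace_max_seg);
	if (f->left_to_send < segsiz)
		return (0);		/* mirrors the excerpt's guard */
	f->rfo_apply_push = (f->left_to_send == sb_unsent);
	return (1);
}
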
19591 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); in rack_get_hpts_pacing_min_for_bw()
19603 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_check_collapsed()
19604 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_check_collapsed()
19606 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19610 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { in rack_check_collapsed()
19617 if (rsm->r_flags & RACK_ACKED) { in rack_check_collapsed()
19622 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_check_collapsed()
19624 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_check_collapsed()
19625 rack->r_ctl.high_collapse_point)) { in rack_check_collapsed()
19626 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19632 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); in rack_check_collapsed()
19633 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { in rack_check_collapsed()
19634 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19635 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19636 thresh, __LINE__, 6, rsm->r_flags, rsm); in rack_check_collapsed()
19640 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19641 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19642 thresh, __LINE__, 7, rsm->r_flags, rsm); in rack_check_collapsed()
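
rack_check_collapsed() walks forward from last_collapse_point, skipping entries the peer has already SACKed, and retransmits a collapsed-window entry only once it has aged past the RACK reorder threshold; the two rack_log_collapse() calls above record the "due" and "not yet" outcomes. The timing test reduces to (illustrative helper):

#include <stdint.h>

/*
 * A collapsed-window segment is eligible for retransmit only when
 * the time since its most recent transmission exceeds the RACK
 * reorder threshold, so we do not blast data the moment the peer
 * re-opens a window it had collapsed.
 */
static int
collapsed_rxt_due(uint32_t now, uint32_t last_sent, uint32_t thresh)
{
	return ((now - last_sent) > thresh);
}
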
19649 if ((rack->full_size_rxt == 0) && in rack_validate_sizes()
19650 (rack->shape_rxt_to_pacing_min == 0) && in rack_validate_sizes()
19653 } else if (rack->shape_rxt_to_pacing_min && in rack_validate_sizes()
19654 rack->gp_ready) { in rack_validate_sizes()
19743 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_output()
19748 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); in rack_output()
19749 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_output()
19751 if (tp->t_flags & TF_TOE) { in rack_output()
19758 if (rack->rack_deferred_inited == 0) { in rack_output()
19771 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19772 (tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
19773 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ in rack_output()
19774 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ in rack_output()
19781 if (rack->r_state) { in rack_output()
19783 isipv6 = rack->r_is_v6; in rack_output()
19785 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; in rack_output()
19791 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && in rack_output()
19792 tcp_in_hpts(rack->rc_tp)) { in rack_output()
19800 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19801 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { in rack_output()
19803 delayed = cts - rack->r_ctl.rc_last_output_to; in rack_output()
19808 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_output()
19825 if (rack->rc_in_persist) { in rack_output()
19826 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19835 if ((rack->rc_ack_required == 1) && in rack_output()
19836 (rack->r_timer_override == 0)){ in rack_output()
19838 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19847 if ((rack->r_timer_override) || in rack_output()
19848 (rack->rc_ack_can_sendout_data) || in rack_output()
19850 (tp->t_state < TCPS_ESTABLISHED)) { in rack_output()
19851 rack->rc_ack_can_sendout_data = 0; in rack_output()
19852 if (tcp_in_hpts(rack->rc_tp)) in rack_output()
19853 tcp_hpts_remove(rack->rc_tp); in rack_output()
19854 } else if (tcp_in_hpts(rack->rc_tp)) { in rack_output()
19861 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
19862 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); in rack_output()
19863 tp->tcp_cnt_counters[SND_BLOCKED]++; in rack_output()
19871 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19872 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_output()
19873 early = rack->r_ctl.rc_last_output_to - cts; in rack_output()
19876 if (delayed && (rack->rc_always_pace == 1)) { in rack_output()
19877 rack->r_ctl.rc_agg_delayed += delayed; in rack_output()
19878 rack->r_late = 1; in rack_output()
19879 } else if (early && (rack->rc_always_pace == 1)) { in rack_output()
19880 rack->r_ctl.rc_agg_early += early; in rack_output()
19881 rack->r_early = 1; in rack_output()
19882 } else if (rack->rc_always_pace == 0) { in rack_output()
19883 /* Non-paced we are not late */ in rack_output()
19884 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; in rack_output()
19885 rack->r_early = rack->r_late = 0; in rack_output()
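
When pacing is enabled, rack_output() compares the wall clock with the scheduled send time and accumulates how far the timer fired behind (rc_agg_delayed) or ahead (rc_agg_early) of schedule so later pacing decisions can compensate; a non-paced connection simply zeroes both aggregates. A condensed sketch of this bookkeeping (stand-in struct, illustrative):

#include <stdint.h>

struct pace_skew {
	uint32_t agg_early;	/* usec the timer has run early */
	uint32_t agg_delayed;	/* usec the timer has run late */
	int	 r_early, r_late;
};

static void
pace_track_skew(struct pace_skew *p, int always_pace,
    uint32_t now, uint32_t scheduled)
{
	if (!always_pace) {
		/* Not pacing: we are neither early nor late. */
		p->agg_early = p->agg_delayed = 0;
		p->r_early = p->r_late = 0;
	} else if (now >= scheduled) {
		p->agg_delayed += now - scheduled;
		p->r_late = 1;
	} else {
		p->agg_early += scheduled - now;
		p->r_early = 1;
	}
}
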
19888 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_output()
19889 rack->r_wanted_output = 0; in rack_output()
19890 rack->r_timer_override = 0; in rack_output()
19891 if ((tp->t_state != rack->r_state) && in rack_output()
19892 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
19895 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_output()
19897 if (rack->r_ctl.rc_pace_max_segs == 0) in rack_output()
19898 pace_max_seg = rack->rc_user_set_max_segs * segsiz; in rack_output()
19900 pace_max_seg = rack->r_ctl.rc_pace_max_segs; in rack_output()
19901 if ((rack->r_fast_output) && in rack_output()
19903 (tp->rcv_numsacks == 0)) { in rack_output()
19911 inp = rack->rc_inp; in rack_output()
19912 so = inp->inp_socket; in rack_output()
19913 sb = &so->so_snd; in rack_output()
19920 /* We need to re-pin since fast_output un-pinned */ in rack_output()
19927 inp = rack->rc_inp; in rack_output()
19933 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19934 ((tp->t_state == TCPS_SYN_RECEIVED) || in rack_output()
19935 (tp->t_state == TCPS_SYN_SENT)) && in rack_output()
19936 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ in rack_output()
19937 (tp->t_rxtshift == 0)) { /* not a retransmit */ in rack_output()
19950 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); in rack_output()
19951 if (tp->t_idle_reduce) { in rack_output()
19952 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) in rack_output()
19955 tp->t_flags &= ~TF_LASTIDLE; in rack_output()
19957 if (tp->t_flags & TF_MORETOCOME) { in rack_output()
19958 tp->t_flags |= TF_LASTIDLE; in rack_output()
19962 if ((tp->snd_una == tp->snd_max) && in rack_output()
19963 rack->r_ctl.rc_went_idle_time && in rack_output()
19964 (cts > rack->r_ctl.rc_went_idle_time)) { in rack_output()
19965 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); in rack_output()
19968 if (rack->in_probe_rtt == 0) { in rack_output()
19969 rack->r_ctl.rc_lower_rtt_us_cts = cts; in rack_output()
19970 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19971 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19972 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19980 (rack->r_ctl.fsb.tcp_ip_hdr) && in rack_output()
19981 (rack->r_fsb_inited == 0) && in rack_output()
19982 (rack->r_state != TCPS_CLOSED)) in rack_output()
19983 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); in rack_output()
19984 if (rack->rc_sendvars_notset == 1) { in rack_output()
19985 rack->rc_sendvars_notset = 0; in rack_output()
19987 * Make sure any TCP timers (keep-alive) are not running. in rack_output()
19991 if ((rack->rack_no_prr == 1) && in rack_output()
19992 (rack->rc_always_pace == 0)) { in rack_output()
19995 * no-pacing enabled and prr is turned off that in rack_output()
20003 rack->rack_no_prr = 0; in rack_output()
20005 if ((rack->pcm_enabled == 1) && in rack_output()
20006 (rack->pcm_needed == 0) && in rack_output()
20014 if (tp->t_srtt) in rack_output()
20015 rtts_idle = tot_idle / tp->t_srtt; in rack_output()
20018 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_output()
20019 rack->r_ctl.pcm_idle_rounds += rtts_idle; in rack_output()
20020 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_output()
20021 rack->pcm_needed = 1; in rack_output()
20022 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round); in rack_output()
20031 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20032 (rack->r_ctl.pcm_max_seg == 0)) { in rack_output()
20038 rack->r_ctl.pcm_max_seg = rc_init_window(rack); in rack_output()
20039 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { in rack_output()
20043 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; in rack_output()
20046 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { in rack_output()
20049 if (tp->snd_wnd > ctf_outstanding(tp)) in rack_output()
20050 rw_avail = tp->snd_wnd - ctf_outstanding(tp); in rack_output()
20053 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) in rack_output()
20054 cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
20057 if ((cwa >= rack->r_ctl.pcm_max_seg) && in rack_output()
20058 (rw_avail > rack->r_ctl.pcm_max_seg)) { in rack_output()
20060 pace_max_seg = rack->r_ctl.pcm_max_seg; in rack_output()
20062 rack->r_fast_output = 0; in rack_output()
20066 cwa, rack->r_ctl.pcm_max_seg, rw_avail); in rack_output()
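
A path-capacity measurement (PCM) round is launched only when both windows leave headroom for the whole measurement burst: the congestion window must have at least pcm_max_seg free and the receive window strictly more, otherwise the burst would be throttled and measure the sender rather than the path. The gate reduces to (illustrative helper):

#include <stdint.h>

/* cwnd_avail = snd_cwnd - flight; rwnd_avail = snd_wnd - outstanding */
static int
pcm_can_start(uint32_t cwnd_avail, uint32_t rwnd_avail,
    uint32_t pcm_max_seg)
{
	return ((cwnd_avail >= pcm_max_seg) && (rwnd_avail > pcm_max_seg));
}
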
20069 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20070 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_output()
20071 flags = tcp_outflags[tp->t_state]; in rack_output()
20072 while (rack->rc_free_cnt < rack_free_cache) { in rack_output()
20078 so = inp->inp_socket; in rack_output()
20079 sb = &so->so_snd; in rack_output()
20082 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_output()
20083 rack->rc_free_cnt++; in rack_output()
20090 SOCK_SENDBUF_LOCK(inp->inp_socket); in rack_output()
20091 so = inp->inp_socket; in rack_output()
20092 sb = &so->so_snd; in rack_output()
20095 if (rack->r_ctl.rc_resend) { in rack_output()
20097 rsm = rack->r_ctl.rc_resend; in rack_output()
20098 rack->r_ctl.rc_resend = NULL; in rack_output()
20099 len = rsm->r_end - rsm->r_start; in rack_output()
20102 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20105 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20106 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20108 } else if (rack->r_collapse_point_valid && in rack_output()
20115 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); in rack_output()
20116 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_output()
20118 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_output()
20119 rack->r_ctl.high_collapse_point)) in rack_output()
20120 rack->r_collapse_point_valid = 0; in rack_output()
20124 len = rsm->r_end - rsm->r_start; in rack_output()
20125 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20130 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_output()
20131 ((rsm->r_flags & RACK_MUST_RXT) == 0) && in rack_output()
20132 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { in rack_output()
20133 /* Enter recovery if not induced by a time-out */ in rack_output()
20134 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_output()
20137 if (SEQ_LT(rsm->r_start, tp->snd_una)) { in rack_output()
20139 tp, rack, rsm, rsm->r_start, tp->snd_una); in rack_output()
20142 len = rsm->r_end - rsm->r_start; in rack_output()
20143 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20146 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20147 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20156 } else if (rack->r_ctl.rc_tlpsend) { in rack_output()
20167 rsm = rack->r_ctl.rc_tlpsend; in rack_output()
20169 rsm->r_flags |= RACK_TLP; in rack_output()
20170 rack->r_ctl.rc_tlpsend = NULL; in rack_output()
20172 tlen = rsm->r_end - rsm->r_start; in rack_output()
20175 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20178 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20179 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20180 cwin = min(tp->snd_wnd, tlen); in rack_output()
20183 if (rack->r_must_retran && in rack_output()
20185 (SEQ_GT(tp->snd_max, tp->snd_una)) && in rack_output()
20190 * a) This is a non-sack connection, we had a time-out in rack_output()
20204 sendwin = min(tp->snd_wnd, tp->snd_cwnd); in rack_output()
20205 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); in rack_output()
20210 so = inp->inp_socket; in rack_output()
20211 sb = &so->so_snd; in rack_output()
20216 * outstanding/not-acked should be marked. in rack_output()
20219 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_output()
20222 rack->r_must_retran = 0; in rack_output()
20223 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20224 so = inp->inp_socket; in rack_output()
20225 sb = &so->so_snd; in rack_output()
20228 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { in rack_output()
20233 rack->r_must_retran = 0; in rack_output()
20234 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20239 len = rsm->r_end - rsm->r_start; in rack_output()
20240 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20242 if ((rack->full_size_rxt == 0) && in rack_output()
20243 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20246 else if (rack->shape_rxt_to_pacing_min && in rack_output()
20247 rack->gp_ready) { in rack_output()
20268 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_output()
20270 if (!rack->alloc_limit_reported) { in rack_output()
20271 rack->alloc_limit_reported = 1; in rack_output()
20274 so = inp->inp_socket; in rack_output()
20275 sb = &so->so_snd; in rack_output()
20278 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { in rack_output()
20280 len--; in rack_output()
20289 if (rsm && rack->r_fsb_inited && in rack_output()
20291 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { in rack_output()
20298 so = inp->inp_socket; in rack_output()
20299 sb = &so->so_snd; in rack_output()
20305 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && in rack_output()
20306 rack->rack_enable_scwnd) { in rack_output()
20308 if (rack->gp_ready && in rack_output()
20309 (rack->rack_attempted_scwnd == 0) && in rack_output()
20310 (rack->r_ctl.rc_scw == NULL) && in rack_output()
20311 tp->t_lib) { in rack_output()
20314 rack->rack_attempted_scwnd = 1; in rack_output()
20315 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, in rack_output()
20316 &rack->r_ctl.rc_scw_index, in rack_output()
20319 if (rack->r_ctl.rc_scw && in rack_output()
20320 (rack->rack_scwnd_is_idle == 1) && in rack_output()
20321 sbavail(&so->so_snd)) { in rack_output()
20323 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
20324 rack->rack_scwnd_is_idle = 0; in rack_output()
20326 if (rack->r_ctl.rc_scw) { in rack_output()
20328 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, in rack_output()
20329 rack->r_ctl.rc_scw_index, in rack_output()
20330 tp->snd_cwnd, tp->snd_wnd, segsiz); in rack_output()
20338 if (tp->t_flags & TF_NEEDFIN) in rack_output()
20340 if (tp->t_flags & TF_NEEDSYN) in rack_output()
20344 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_output()
20351 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_output()
20352 (tp->t_flags & TF_FASTOPEN))) { in rack_output()
20362 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) in rack_output()
20363 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20366 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { in rack_output()
20367 if (rack->r_ctl.rc_tlp_new_data) { in rack_output()
20369 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { in rack_output()
20370 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); in rack_output()
20372 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { in rack_output()
20373 if (tp->snd_wnd > sb_offset) in rack_output()
20374 len = tp->snd_wnd - sb_offset; in rack_output()
20378 len = rack->r_ctl.rc_tlp_new_data; in rack_output()
20380 rack->r_ctl.rc_tlp_new_data = 0; in rack_output()
20384 if ((rack->r_ctl.crte == NULL) && in rack_output()
20385 IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20386 (rack->full_size_rxt == 0) && in rack_output()
20387 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20397 } else if (rack->shape_rxt_to_pacing_min && in rack_output()
20398 rack->gp_ready) { in rack_output()
20416 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20417 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { in rack_output()
20418 if (tp->snd_wnd > outstanding) { in rack_output()
20419 len = tp->snd_wnd - outstanding; in rack_output()
20424 len = avail - sb_offset; in rack_output()
20432 len = avail - sb_offset; in rack_output()
20437 if (len > rack->r_ctl.rc_prr_sndcnt) { in rack_output()
20438 len = rack->r_ctl.rc_prr_sndcnt; in rack_output()
20450 * let us send a lot as well :-) in rack_output()
20452 if (rack->r_ctl.rc_prr_sendalot == 0) { in rack_output()
20464 leftinsb = sbavail(sb) - sb_offset; in rack_output()
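
In fast recovery with PRR, new data is clamped three ways: by the PRR credit (rc_prr_sndcnt), by what remains unsent in the socket buffer, and by the room left in the peer's window net of what is already outstanding; the excerpt interleaves these checks with TLP bookkeeping. A condensed sketch of the clamp (hypothetical helper):

#include <stdint.h>

static uint32_t
umin32(uint32_t a, uint32_t b)
{
	return (a < b ? a : b);
}

/* PRR length clamp for new data while in recovery. */
static uint32_t
prr_clamp_len(uint32_t prr_sndcnt, uint32_t sb_unsent,
    uint32_t snd_wnd, uint32_t outstanding)
{
	uint32_t wnd_room;

	wnd_room = (snd_wnd > outstanding) ? (snd_wnd - outstanding) : 0;
	return (umin32(prr_sndcnt, umin32(sb_unsent, wnd_room)));
}
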
20471 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
20478 !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20490 * SYN-SENT state and if segment contains data and if we don't know in rack_output()
20494 SEQ_GT(tp->snd_max, tp->snd_una) && in rack_output()
20496 (tp->t_rxtshift == 0))) { in rack_output()
20501 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20502 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_output()
20510 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { in rack_output()
20517 * - When retransmitting SYN|ACK on a passively-created socket in rack_output()
20519 * - When retransmitting SYN on an actively created socket in rack_output()
20521 * - When sending a zero-length cookie (cookie request) on an in rack_output()
20524 * - When the socket is in the CLOSED state (RST is being sent) in rack_output()
20526 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20527 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || in rack_output()
20528 ((tp->t_state == TCPS_SYN_SENT) && in rack_output()
20529 (tp->t_tfo_client_cookie_len == 0)) || in rack_output()
20534 /* Without fast-open there should never be data sent on a SYN */ in rack_output()
20535 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20549 if ((tp->snd_wnd == 0) && in rack_output()
20550 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20551 (tp->snd_una == tp->snd_max) && in rack_output()
20553 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20563 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20564 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20566 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20576 if (tp->snd_max == tp->snd_una) { in rack_output()
20581 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20584 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20585 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20598 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < in rack_output()
20599 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20600 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20601 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20602 (TCPS_HAVEESTABLISHED(tp->t_state))) { in rack_output()
20612 } else if ((rack->r_ctl.crte != NULL) && in rack_output()
20613 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && in rack_output()
20615 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && in rack_output()
20616 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20636 * defeats the point of hw-pacing (i.e. to help us get in rack_output()
20651 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP in rack_output()
20665 * Pre-calculate here as we save another lookup into the darknesses in rack_output()
20684 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && in rack_output()
20685 (tp->t_port == 0) && in rack_output()
20686 ((tp->t_flags & TF_SIGNATURE) == 0) && in rack_output()
20693 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20694 if (tp->t_flags & TF_SENTFIN) { in rack_output()
20699 outstanding--; in rack_output()
20702 if ((rsm->r_flags & RACK_HAS_FIN) == 0) in rack_output()
20706 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), in rack_output()
20707 (long)TCP_MAXWIN << tp->rcv_scale); in rack_output()
20711 * conditions when len is non-zero: in rack_output()
20713 * - We have a full segment (or more with TSO) in rack_output()
20714 * - This is the last buffer in a write()/send() and we are either idle or running NODELAY in rack_output()
20715 * - we've timed out (e.g. persist timer) in rack_output()
20716 * - we have more than 1/2 the maximum send window's worth of data (receiver may be limited the window size) in rack_output()
20717 * - we need to retransmit in rack_output()
20729 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ in rack_output()
20730 (idle || (tp->t_flags & TF_NODELAY)) && in rack_output()
20732 (tp->t_flags & TF_NOPUSH) == 0) { in rack_output()
20736 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ in rack_output()
20740 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { in rack_output()
20748 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && in rack_output()
20785 * pending (it will get piggy-backed on it) or the remote side in rack_output()
20786 * already has done a half-close and won't send more data. Skip in rack_output()
20787 * this if the connection is in T/TCP half-open state. in rack_output()
20789 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && in rack_output()
20790 !(tp->t_flags & TF_DELACK) && in rack_output()
20791 !TCPS_HAVERCVDFIN(tp->t_state)) { in rack_output()
20795 * tp->rcv_scale. in rack_output()
20801 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { in rack_output()
20802 oldwin = (tp->rcv_adv - tp->rcv_nxt); in rack_output()
20804 adv -= oldwin; in rack_output()
20817 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) in rack_output()
20821 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || in rack_output()
20822 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || in rack_output()
20823 so->so_rcv.sb_hiwat <= 8 * segsiz)) { in rack_output()
20827 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { in rack_output()
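
This pure window-update policy follows classic tcp_output(): an update is worth a segment when the window would grow by at least two segments and either a quarter of the receive buffer has opened up, the current window has shrunk to an eighth of the buffer, or the buffer is tiny; a doubling of the usable window always goes out. Restated as a predicate (simplified; the dribble cases handled above are omitted):

#include <stdint.h>

/*
 * adv    = additional window we could advertise (recwin - oldwin)
 * recwin = window we would advertise now
 */
static int
want_window_update(int32_t adv, int32_t recwin, int32_t segsiz,
    int32_t sb_hiwat)
{
	if (adv >= 2 * segsiz &&
	    (adv >= sb_hiwat / 4 || recwin <= sb_hiwat / 8 ||
	     sb_hiwat <= 8 * segsiz))
		return (1);
	if (2 * adv >= sb_hiwat)
		return (1);
	return (0);
}
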
20836 * is also a catch-all for the retransmit timer timeout case. in rack_output()
20838 if (tp->t_flags & TF_ACKNOW) { in rack_output()
20842 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { in rack_output()
20851 (tp->snd_max == tp->snd_una)) { in rack_output()
20864 if ((tp->t_flags & TF_FASTOPEN) == 0 && in rack_output()
20867 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
20868 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
20877 * the peer wait for the delayed-ack timer to run off in rack_output()
20883 rack->r_ctl.fsb.recwin = recwin; in rack_output()
20889 rack->r_fsb_inited && in rack_output()
20890 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20891 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
20893 (rack->r_must_retran == 0) && in rack_output()
20894 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
20897 ((orig_len - len) >= segsiz) && in rack_output()
20904 rack->r_fast_output = 0; in rack_output()
20909 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
20910 tp->snd_nxt = tp->snd_max; in rack_output()
20913 uint32_t seq = tp->gput_ack; in rack_output()
20915 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
20918 * Mark the last sent that we just-returned (hinting in rack_output()
20921 rsm->r_just_ret = 1; in rack_output()
20924 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
20925 rack->r_early = 0; in rack_output()
20926 rack->r_late = 0; in rack_output()
20927 rack->r_ctl.rc_agg_early = 0; in rack_output()
20929 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), in rack_output()
20930 minseg)) >= tp->snd_wnd) { in rack_output()
20933 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20934 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20936 /* We are limited by what's available -- app limited */ in rack_output()
20938 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20939 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20941 ((tp->t_flags & TF_NODELAY) == 0) && in rack_output()
20948 * don't send. Another app-limited case. in rack_output()
20951 } else if (tp->t_flags & TF_NOPUSH) { in rack_output()
20962 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20963 (rack->rack_no_prr == 0) && in rack_output()
20964 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { in rack_output()
21019 if ((tp->t_flags & TF_GPUTINPROG) && in rack_output()
21020 SEQ_GT(tp->gput_ack, tp->snd_max)) { in rack_output()
21021 tp->gput_ack = tp->snd_max; in rack_output()
21022 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_output()
21026 tp->t_flags &= ~TF_GPUTINPROG; in rack_output()
21027 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_output()
21028 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_output()
21029 tp->gput_seq, in rack_output()
21035 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
21036 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_output()
21037 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_output()
21038 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_output()
21045 if (rack->r_ctl.rc_end_appl) in rack_output()
21046 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_output()
21047 rack->r_ctl.rc_end_appl = rsm; in rack_output()
21049 rsm->r_flags |= RACK_APP_LIMITED; in rack_output()
21050 rack->r_ctl.rc_app_limited_cnt++; in rack_output()
21054 rack->r_ctl.rc_app_limited_cnt, seq, in rack_output()
21055 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); in rack_output()
21059 if ((tp->snd_max == tp->snd_una) && in rack_output()
21060 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21062 (sbavail(sb) > tp->snd_wnd) && in rack_output()
21063 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { in rack_output()
21064 /* Yes, let's make sure to move to persist before timer-start */ in rack_output()
21065 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_output()
21072 rack->r_ctl.rc_scw) { in rack_output()
21073 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
21074 rack->rack_scwnd_is_idle = 1; in rack_output()
21080 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21081 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
21082 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_output()
21083 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
21087 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21088 tp->tcp_cnt_counters[SND_LIMITED]++; in rack_output()
21089 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); in rack_output()
21097 if ((rack->r_ctl.crte != NULL) && in rack_output()
21099 ((rack->rc_hw_nobuf == 1) || in rack_output()
21109 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
21110 rack->r_ctl.rc_agg_early = 0; in rack_output()
21111 rack->r_early = 0; in rack_output()
21112 rack->r_late = 0; in rack_output()
21130 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21131 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21132 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21141 * the peer wait for the delayed-ack timer to run off in rack_output()
21154 (rack->pcm_in_progress == 0) && in rack_output()
21155 (rack->r_ctl.pcm_max_seg > 0) && in rack_output()
21156 (len >= rack->r_ctl.pcm_max_seg)) { in rack_output()
21159 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21161 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21167 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; in rack_output()
21169 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; in rack_output()
21191 * be snd_max-1 else it's snd_max. in rack_output()
21195 rack_seq = tp->iss; in rack_output()
21197 (tp->t_flags & TF_SENTFIN)) in rack_output()
21198 rack_seq = tp->snd_max - 1; in rack_output()
21200 rack_seq = tp->snd_max; in rack_output()
21202 rack_seq = rsm->r_start; in rack_output()
21206 * established connection segments. Options for SYN-ACK segments in rack_output()
21210 if ((tp->t_flags & TF_NOOPT) == 0) { in rack_output()
21213 to.to_mss = tcp_mssopt(&inp->inp_inc); in rack_output()
21214 if (tp->t_port) in rack_output()
21215 to.to_mss -= V_tcp_udp_tunneling_overhead; in rack_output()
21225 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
21226 (tp->t_rxtshift == 0)) { in rack_output()
21227 if (tp->t_state == TCPS_SYN_RECEIVED) { in rack_output()
21230 (u_int8_t *)&tp->t_tfo_cookie.server; in rack_output()
21233 } else if (tp->t_state == TCPS_SYN_SENT) { in rack_output()
21235 tp->t_tfo_client_cookie_len; in rack_output()
21237 tp->t_tfo_cookie.client; in rack_output()
21252 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { in rack_output()
21253 to.to_wscale = tp->request_r_scale; in rack_output()
21257 if ((tp->t_flags & TF_RCVD_TSTMP) || in rack_output()
21258 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { in rack_output()
21261 if ((rack->r_rcvpath_rtt_up == 1) && in rack_output()
21262 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { in rack_output()
21270 * our ack-probe. in rack_output()
21276 to.to_tsval = ts_to_use + tp->ts_offset; in rack_output()
21277 to.to_tsecr = tp->ts_recent; in rack_output()
21280 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
21281 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && in rack_output()
21282 (tp->snd_una == tp->snd_max) && in rack_output()
21285 (rack->r_ctl.current_round != 0) && in rack_output()
21287 (rack->r_rcvpath_rtt_up == 0)) { in rack_output()
21288 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; in rack_output()
21289 rack->r_ctl.last_time_of_arm_rcv = cts; in rack_output()
21290 rack->r_rcvpath_rtt_up = 1; in rack_output()
21292 rack_seq--; in rack_output()
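
These lines arm a receive-path RTT probe: when everything is acked and enough wall-clock time has passed since the last probe, the stack records the millisecond timestamp it is about to send and transmits with rack_seq decremented by one, so the peer sees an old sequence and acks immediately; the echoed timestamp on that ack then measures the receive path. A sketch of the arming rule (simplified; the RCV_PATH_RTT_MS spacing value is assumed, and the round/SACK preconditions from the excerpt are folded into 'eligible'):

#include <stdint.h>

#define RCV_PATH_RTT_MS	10000	/* assumed probe spacing, ms */

struct rcv_rtt_probe {
	uint32_t last_arm_ms;	/* last_rcv_tstmp_for_rtt */
	int	 armed;		/* r_rcvpath_rtt_up */
};

/* Arm at most one probe per spacing interval; force a dup segment. */
static int
rcv_rtt_probe_arm(struct rcv_rtt_probe *p, uint32_t ms_now, int eligible,
    uint32_t *seq)
{
	if (p->armed || !eligible ||
	    (ms_now - p->last_arm_ms) <= RCV_PATH_RTT_MS)
		return (0);
	p->last_arm_ms = ms_now;
	p->armed = 1;
	(*seq)--;	/* one below snd_max: peer acks right away */
	return (1);
}
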
21296 if (tp->rfbuf_ts == 0 && in rack_output()
21297 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { in rack_output()
21298 tp->rfbuf_ts = ms_cts; in rack_output()
21301 if (tp->t_flags & TF_SACK_PERMIT) { in rack_output()
21304 else if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21305 tp->rcv_numsacks > 0) { in rack_output()
21307 to.to_nsacks = tp->rcv_numsacks; in rack_output()
21308 to.to_sacks = (u_char *)tp->sackblks; in rack_output()
21312 /* TCP-MD5 (RFC2385). */ in rack_output()
21313 if (tp->t_flags & TF_SIGNATURE) in rack_output()
21323 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && in rack_output()
21327 if (tp->t_port) { in rack_output()
21333 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21334 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
21335 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
21348 if (inp->inp_options) in rack_output()
21349 ipoptlen = inp->inp_options->m_len - in rack_output()
21362 if (len + optlen + ipoptlen > tp->t_maxseg) { in rack_output()
21369 if_hw_tsomax = tp->t_tsomax; in rack_output()
21370 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_output()
21371 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_output()
21381 max_len = (if_hw_tsomax - hdrlen - in rack_output()
21396 max_len = (tp->t_maxseg - optlen); in rack_output()
21401 len -= moff; in rack_output()
21418 if (tp->t_flags & TF_NEEDFIN) { in rack_output()
21423 if (optlen + ipoptlen >= tp->t_maxseg) { in rack_output()
21437 len = tp->t_maxseg - optlen - ipoptlen; in rack_output()
21469 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21470 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21479 * the peer wait for the delayed-ack timer to run off in rack_output()
21491 hw_tls = tp->t_nic_ktls_xmit != 0; in rack_output()
21520 m->m_data += max_linkhdr; in rack_output()
21521 m->m_len = hdrlen; in rack_output()
21530 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { in rack_output()
21540 m->m_len += len; in rack_output()
21555 m->m_next = tcp_m_copym( in rack_output()
21563 if (len <= (tp->t_maxseg - optlen)) { in rack_output()
21572 if (m->m_next == NULL) { in rack_output()
21581 if (rsm && (rsm->r_flags & RACK_TLP)) { in rack_output()
21589 tp->t_sndrexmitpack++; in rack_output()
21594 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_output()
21601 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_output()
21619 if (tp->t_flags & TF_ACKNOW) in rack_output()
21638 m->m_data += max_linkhdr; in rack_output()
21639 m->m_len = hdrlen; in rack_output()
21642 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_output()
21646 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21649 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21653 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21655 th = rack->r_ctl.fsb.th; in rack_output()
21656 udp = rack->r_ctl.fsb.udp; in rack_output()
21660 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21663 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21664 udp->uh_ulen = htons(ulen); in rack_output()
21670 if (tp->t_port) { in rack_output()
21672 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21673 udp->uh_dport = tp->t_port; in rack_output()
21674 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21675 udp->uh_ulen = htons(ulen); in rack_output()
21679 tcpip_fillheaders(inp, tp->t_port, ip6, th); in rack_output()
21685 if (tp->t_port) { in rack_output()
21687 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21688 udp->uh_dport = tp->t_port; in rack_output()
21689 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21690 udp->uh_ulen = htons(ulen); in rack_output()
21694 tcpip_fillheaders(inp, tp->t_port, ip, th); in rack_output()
21703 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { in rack_output()
21707 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_output()
21708 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_output()
21710 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
21711 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_output()
21712 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_output()
21715 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_output()
21716 ip6->ip6_flow |= htonl(ect << 20); in rack_output()
21722 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_output()
21723 ip->ip_tos |= ect; in rack_output()
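
Once ECN is negotiated, every eligible segment carries the ECT codepoint in its IP header: for IPv6 it lives in the traffic-class bits of the flow word (shifted up 20 bits), for IPv4 in the low two bits of the TOS byte. Standalone versions of the two stampings (IPTOS_ECN_MASK is the standard 0x03):

#include <stdint.h>

#define IPTOS_ECN_MASK	0x03	/* low two bits of TOS carry ECN */

/* IPv4: replace the ECN field of the TOS byte with 'ect'. */
static uint8_t
ip_tos_set_ect(uint8_t tos, uint8_t ect)
{
	return ((uint8_t)(tos & ~IPTOS_ECN_MASK) | ect);
}

/* IPv6: ECN sits at bits 20-21 of the host-order flow word. */
static uint32_t
ip6_flow_set_ect(uint32_t flow_host, uint32_t ect)
{
	flow_host &= ~((uint32_t)IPTOS_ECN_MASK << 20);
	return (flow_host | (ect << 20));
}
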
21727 th->th_seq = htonl(rack_seq); in rack_output()
21728 th->th_ack = htonl(tp->rcv_nxt); in rack_output()
21738 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && in rack_output()
21742 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && in rack_output()
21743 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) in rack_output()
21744 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); in rack_output()
21753 th->th_win = htons((u_short) in rack_output()
21754 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); in rack_output()
21757 recwin = roundup2(recwin, 1 << tp->rcv_scale); in rack_output()
21758 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); in rack_output()
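
On an established connection the advertised window is first rounded up to the window-scale granule and then shifted down by rcv_scale to fit the 16-bit th_win field; the receiver multiplies back by the same factor. A tiny worked sketch:

#include <stdint.h>

/* Round recwin up to (1 << rcv_scale) and shift for th_win. */
static uint16_t
scale_window(uint32_t recwin, unsigned rcv_scale)
{
	uint32_t granule = 1u << rcv_scale;

	recwin = (recwin + granule - 1) & ~(granule - 1);  /* roundup2 */
	return ((uint16_t)(recwin >> rcv_scale));
}

For example, with rcv_scale = 7 a 131072-byte window goes on the wire as 1024.
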
21761 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 in rack_output()
21768 if (th->th_win == 0) { in rack_output()
21769 tp->t_sndzerowin++; in rack_output()
21770 tp->t_flags |= TF_RXWIN0SENT; in rack_output()
21772 tp->t_flags &= ~TF_RXWIN0SENT; in rack_output()
21773 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ in rack_output()
21775 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21779 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_output()
21799 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21802 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21806 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_output()
21812 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_output()
21822 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_output()
21837 if (tp->t_port) { in rack_output()
21838 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_output()
21839 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21840 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_output()
21841 th->th_sum = htons(0); in rack_output()
21844 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_output()
21845 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21846 th->th_sum = in6_cksum_pseudo(ip6, in rack_output()
21857 if (tp->t_port) { in rack_output()
21858 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_output()
21859 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21860 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21861 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_output()
21862 th->th_sum = htons(0); in rack_output()
21865 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_output()
21866 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21867 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21868 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_output()
21872 KASSERT(ip->ip_v == IPVERSION, in rack_output()
21873 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_output()
21886 KASSERT(len > tp->t_maxseg - optlen, in rack_output()
21888 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_output()
21889 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; in rack_output()
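
For offloaded checksums the stack leaves only the pseudo-header sum in the header and flags the mbuf so the NIC completes it over the payload; a UDP-tunneled session offloads the UDP sum instead, and TSO additionally records how large each hardware-cut segment may be (t_maxseg minus this header's options). The decision collapses to (illustrative stand-ins, not the kernel's mbuf flags):

#include <stdint.h>

enum csum_kind { OFF_TCP, OFF_UDP };

struct csum_plan {
	enum csum_kind kind;	/* which checksum the NIC finishes */
	int	 use_tso;	/* request hardware segmentation */
	uint32_t tso_segsz;	/* payload bytes per hw segment */
};

static struct csum_plan
plan_csum(int udp_tunneled, int tso, uint32_t t_maxseg, uint32_t optlen)
{
	struct csum_plan p;

	p.kind = udp_tunneled ? OFF_UDP : OFF_TCP;
	p.use_tso = tso;
	p.tso_segsz = tso ? (t_maxseg - optlen) : 0;
	return (p);
}
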
21899 if ((rack->r_ctl.crte != NULL) && in rack_output()
21900 (rack->rc_hw_nobuf == 0) && in rack_output()
21905 if (tcp_bblogging_on(rack->rc_tp)) { in rack_output()
21909 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_output()
21910 if (rack->rack_no_prr) in rack_output()
21913 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_output()
21914 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_output()
21915 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_output()
21918 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_output()
21919 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_output()
21921 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_output()
21924 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_output()
21925 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_output()
21927 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_output()
21941 log.u_bbr.pkts_out = tp->t_maxseg; in rack_output()
21943 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
21944 if (rsm && (rsm->r_rtr_cnt > 0)) { in rack_output()
21949 log.u_bbr.flex5 = rsm->r_fas; in rack_output()
21950 log.u_bbr.bbr_substate = rsm->r_bas; in rack_output()
21958 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_output()
21965 log.u_bbr.delRate = rsm->r_flags; in rack_output()
21967 log.u_bbr.delRate |= rack->r_must_retran; in rack_output()
21971 log.u_bbr.delRate = rack->r_must_retran; in rack_output()
21975 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, in rack_output()
21986 * m->m_pkthdr.len should have been set before cksum calculation, in rack_output()
21997 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); in rack_output()
22004 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_output()
22006 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_output()
22007 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22009 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22011 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22017 inp->in6p_outputopts, in rack_output()
22018 &inp->inp_route6, in rack_output()
22022 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) in rack_output()
22023 mtu = inp->inp_route6.ro_nh->nh_mtu; in rack_output()
22031 ip->ip_len = htons(m->m_pkthdr.len); in rack_output()
22033 if (inp->inp_vflag & INP_IPV6PROTO) in rack_output()
22034 ip->ip_ttl = in6_selecthlim(inp, NULL); in rack_output()
22036 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; in rack_output()
22047 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_output()
22048 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22049 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_output()
22050 ip->ip_off |= htons(IP_DF); in rack_output()
22053 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22056 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22063 inp->inp_options, in rack_output()
22067 &inp->inp_route, in rack_output()
22070 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) in rack_output()
22071 mtu = inp->inp_route.ro_nh->nh_mtu; in rack_output()
22075 lgb->tlb_errno = error; in rack_output()
22093 rack->pcm_in_progress = 1; in rack_output()
22094 rack->pcm_needed = 0; in rack_output()
22095 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
22098 if (rack->lt_bw_up == 0) { in rack_output()
22099 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); in rack_output()
22100 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22101 rack->lt_bw_up = 1; in rack_output()
22102 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { in rack_output()
22109 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_output()
22110 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22112 if (tmark > rack->r_ctl.lt_timemark) { in rack_output()
22113 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_output()
22114 rack->r_ctl.lt_timemark = tmark; in rack_output()
22118 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_output()
22122 rack->rc_last_sent_tlp_past_cumack = 0; in rack_output()
22123 rack->rc_last_sent_tlp_seq_valid = 1; in rack_output()
22124 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_output()
22125 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_output()
22127 if (rack->rc_hw_nobuf) { in rack_output()
22128 rack->rc_hw_nobuf = 0; in rack_output()
22129 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22130 rack->r_early = 0; in rack_output()
22131 rack->r_late = 0; in rack_output()
22132 rack->r_ctl.rc_agg_early = 0; in rack_output()
22136 rack->rc_gp_saw_rec = 1; in rack_output()
22138 if (cwnd_to_use > tp->snd_ssthresh) { in rack_output()
22140 rack->rc_gp_saw_ca = 1; in rack_output()
22143 rack->rc_gp_saw_ss = 1; in rack_output()
22146 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22147 (tp->t_flags & TF_SACK_PERMIT) && in rack_output()
22148 tp->rcv_numsacks > 0) in rack_output()
22158 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_output()
22163 if ((rack->rack_no_prr == 0) && in rack_output()
22166 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_output()
22167 rack->r_ctl.rc_prr_sndcnt -= len; in rack_output()
22169 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
22175 rsm->r_flags |= RACK_TLP; in rack_output()
22178 rsm->r_flags &= ~RACK_TLP; in rack_output()
22182 (tp->snd_una == tp->snd_max)) in rack_output()
22183 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_output()
22190 tcp_seq startseq = tp->snd_max; in rack_output()
22194 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; in rack_output()
22205 rack->rc_tlp_in_progress = 0; in rack_output()
22206 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_output()
22214 rack->rc_tlp_in_progress = 1; in rack_output()
22215 rack->r_ctl.rc_tlp_cnt_out++; in rack_output()
22223 if ((tp->snd_una == tp->snd_max) && (len > 0)) { in rack_output()
22229 tp->t_acktime = ticks; in rack_output()
22236 ((tp->t_flags & TF_SENTSYN) == 0)) { in rack_output()
22237 tp->snd_max++; in rack_output()
22238 tp->t_flags |= TF_SENTSYN; in rack_output()
22241 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_output()
22242 tp->snd_max++; in rack_output()
22243 tp->t_flags |= TF_SENTFIN; in rack_output()
22246 tp->snd_max += len; in rack_output()
22247 if (rack->rc_new_rnd_needed) { in rack_output()
22248 rack_new_round_starts(tp, rack, tp->snd_max); in rack_output()
22256 if (tp->t_rtttime == 0) { in rack_output()
22257 tp->t_rtttime = ticks; in rack_output()
22258 tp->t_rtseq = startseq; in rack_output()
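
This is the classic single-sample RTT timer: when no sample is in flight (t_rtttime == 0), the transmit tick and starting sequence are latched, and the sample completes once an ack covers t_rtseq. Restated (stand-in struct):

#include <stdint.h>

struct rtt_timer {
	uint32_t t_rtttime;	/* 0 = no sample in flight */
	uint32_t t_rtseq;	/* first sequence of timed send */
};

/* Start timing this transmission if no sample is outstanding. */
static void
rtt_sample_start(struct rtt_timer *rt, uint32_t now_ticks,
    uint32_t startseq)
{
	if (rt->t_rtttime == 0) {
		rt->t_rtttime = now_ticks;
		rt->t_rtseq = startseq;
	}
}
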
22262 ((tp->t_flags & TF_GPUTINPROG) == 0)) in rack_output()
22273 if (rack->r_fast_output && len) { in rack_output()
22274 if (rack->r_ctl.fsb.left_to_send > len) in rack_output()
22275 rack->r_ctl.fsb.left_to_send -= len; in rack_output()
22277 rack->r_ctl.fsb.left_to_send = 0; in rack_output()
22278 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_output()
22279 rack->r_fast_output = 0; in rack_output()
22280 if (rack->r_fast_output) { in rack_output()
22281 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_output()
22282 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_output()
22283 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_output()
22290 ((pace_max_seg - len) > segsiz)) { in rack_output()
22298 n_len = (orig_len - len); in rack_output()
22299 orig_len -= len; in rack_output()
22300 pace_max_seg -= len; in rack_output()
22302 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22303 /* Re-lock for the next spin */ in rack_output()
22310 ((orig_len - len) > segsiz)) { in rack_output()
22318 n_len = (orig_len - len); in rack_output()
22319 orig_len -= len; in rack_output()
22321 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22322 /* Re-lock for the next spin */ in rack_output()
22330 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22331 rack->r_early = 0; in rack_output()
22332 rack->r_late = 0; in rack_output()
22333 rack->r_ctl.rc_agg_early = 0; in rack_output()
22348 tp->t_softerror = error; in rack_output()
22351 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22352 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22353 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22363 if (rack->r_ctl.crte != NULL) { in rack_output()
22364 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_output()
22365 if (tcp_bblogging_on(rack->rc_tp)) in rack_output()
22368 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_output()
22369 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_output()
22370 if (rack->rc_enobuf < 0x7f) in rack_output()
22371 rack->rc_enobuf++; in rack_output()
22374 if (rack->r_ctl.crte != NULL) { in rack_output()
22376 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_output()
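
ENOBUFS gets a linear, not exponential, backoff: the retry slot is one millisecond times one more than the count of consecutive ENOBUFS events, with the counter saturating at 0x7f; hardware-rate-limited paths additionally log a trace point, as above. The backoff itself:

#include <stdint.h>

#define HPTS_USEC_IN_MSEC 1000

/* Linear ENOBUFS backoff: 1ms, 2ms, 3ms, ... saturating counter. */
static uint32_t
enobuf_backoff_usec(uint8_t *rc_enobuf)
{
	uint32_t slot = (uint32_t)(1 + *rc_enobuf) * HPTS_USEC_IN_MSEC;

	if (*rc_enobuf < 0x7f)
		(*rc_enobuf)++;
	return (slot);
}
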
22390 tp->t_flags &= ~TF_TSO; in rack_output()
22394 saved_mtu = tp->t_maxseg; in rack_output()
22395 tcp_mss_update(tp, -1, mtu, NULL, NULL); in rack_output()
22396 if (saved_mtu > tp->t_maxseg) { in rack_output()
22404 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22405 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22406 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22417 if (TCPS_HAVERCVDSYN(tp->t_state)) { in rack_output()
22418 tp->t_softerror = error; in rack_output()
22427 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22428 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22429 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22436 rack->rc_enobuf = 0; in rack_output()
22437 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_output()
22438 rack->r_ctl.retran_during_recovery += len; in rack_output()
22447 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) in rack_output()
22448 tp->rcv_adv = tp->rcv_nxt + recwin; in rack_output()
22450 tp->last_ack_sent = tp->rcv_nxt; in rack_output()
22451 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_output()
22481 rack->r_ent_rec_ns = 0; in rack_output()
22482 if (rack->r_must_retran) { in rack_output()
22484 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_output()
22485 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22489 rack->r_must_retran = 0; in rack_output()
22490 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22492 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22497 rack->r_must_retran = 0; in rack_output()
22498 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22501 rack->r_ctl.fsb.recwin = recwin; in rack_output()
22502 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && in rack_output()
22503 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22508 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); in rack_output()
22518 rack->r_fsb_inited && in rack_output()
22519 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22520 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22521 (rack->r_must_retran == 0) && in rack_output()
22522 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22525 ((orig_len - len) >= segsiz) && in rack_output()
22532 rack->r_fast_output = 0; in rack_output()
22546 (rack->r_must_retran == 0) && in rack_output()
22547 rack->r_fsb_inited && in rack_output()
22548 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22549 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22550 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22553 ((orig_len - len) >= segsiz) && in rack_output()
22559 if (rack->r_fast_output) { in rack_output()
22573 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
22574 tp->snd_nxt = tp->snd_max; in rack_output()
22577 crtsc = get_cyclecount() - ts_val; in rack_output()
22579 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22580 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
22581 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; in rack_output()
22582 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
22585 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22586 tp->tcp_cnt_counters[SND_OUT_ACK]++; in rack_output()
22587 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; in rack_output()
22602 orig_val = rack->r_ctl.rc_pace_max_segs; in rack_update_seg()
22603 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_update_seg()
22604 if (orig_val != rack->r_ctl.rc_pace_max_segs) in rack_update_seg()
22617 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_mtu_change()
22618 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { in rack_mtu_change()
22627 rack->r_fast_output = 0; in rack_mtu_change()
22628 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, in rack_mtu_change()
22629 rack->r_ctl.rc_sacked); in rack_mtu_change()
22630 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_mtu_change()
22631 rack->r_must_retran = 1; in rack_mtu_change()
22633 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mtu_change()
22634 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); in rack_mtu_change()
22637 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_mtu_change()
22639 tp->snd_nxt = tp->snd_max; in rack_mtu_change()
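
rack_mtu_change() reacts to a shrunken path MTU by forcing everything outstanding onto the wire again at the new segment size: flight at this instant is snapshotted into rc_out_at_rto, r_must_retran is set, every send-map entry is flagged RACK_MUST_RXT|RACK_PMTU_CHG, and the SACK filter is cleared because existing SACK state may straddle the new boundaries. The marking pass, reduced to a list walk (stand-in types; the flag values are illustrative):

#include <stdint.h>
#include <stddef.h>

#define RACK_MUST_RXT	0x0001	/* illustrative bit values */
#define RACK_PMTU_CHG	0x0002

struct sendmap {
	struct sendmap *next;
	uint16_t r_flags;
};

/* Flag every outstanding entry for retransmission at the new MTU. */
static void
mark_all_must_rxt(struct sendmap *head)
{
	for (struct sendmap *rsm = head; rsm != NULL; rsm = rsm->next)
		rsm->r_flags |= (RACK_MUST_RXT | RACK_PMTU_CHG);
}
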
22645 if (rack->dgp_on == 1) in rack_set_dgp()
22647 if ((rack->use_fixed_rate == 1) && in rack_set_dgp()
22648 (rack->rc_always_pace == 1)) { in rack_set_dgp()
22655 if (rack->rc_always_pace == 1) { in rack_set_dgp()
22660 rack->r_ctl.pacing_method |= RACK_DGP_PACING; in rack_set_dgp()
22661 rack->rc_fillcw_apply_discount = 0; in rack_set_dgp()
22662 rack->dgp_on = 1; in rack_set_dgp()
22663 rack->rc_always_pace = 1; in rack_set_dgp()
22664 rack->rc_pace_dnd = 1; in rack_set_dgp()
22665 rack->use_fixed_rate = 0; in rack_set_dgp()
22666 if (rack->gp_ready) in rack_set_dgp()
22668 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_dgp()
22669 rack->rack_attempt_hdwr_pace = 0; in rack_set_dgp()
22671 rack->full_size_rxt = 1; in rack_set_dgp()
22672 rack->shape_rxt_to_pacing_min = 0; in rack_set_dgp()
22674 rack->r_use_cmp_ack = 1; in rack_set_dgp()
22675 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && in rack_set_dgp()
22676 rack->r_use_cmp_ack) in rack_set_dgp()
22677 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_dgp()
22679 rack->rack_enable_scwnd = 1; in rack_set_dgp()
22681 rack->rc_gp_dyn_mul = 1; in rack_set_dgp()
22683 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_set_dgp()
22685 rack->r_rr_config = 3; in rack_set_dgp()
22687 rack->r_ctl.rc_no_push_at_mrtt = 2; in rack_set_dgp()
22689 rack->rc_pace_to_cwnd = 1; in rack_set_dgp()
22690 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_dgp()
22691 rack->rtt_limit_mul = 0; in rack_set_dgp()
22693 rack->rack_no_prr = 1; in rack_set_dgp()
22695 rack->r_limit_scw = 1; in rack_set_dgp()
22697 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_set_dgp()
22719 * fill-cw the same settings that profile5 does in rack_set_profile()
22720 * to replace DGP. It then gets max(dgp-rate, fillcw(discounted)). in rack_set_profile()
22722 rack->rc_fillcw_apply_discount = 1; in rack_set_profile()
22725 if (rack->rc_always_pace == 1) { in rack_set_profile()
22729 rack->dgp_on = 0; in rack_set_profile()
22730 rack->rc_hybrid_mode = 0; in rack_set_profile()
22731 rack->use_fixed_rate = 0; in rack_set_profile()
22735 rack->rc_pace_to_cwnd = 1; in rack_set_profile()
22737 rack->rc_pace_to_cwnd = 0; in rack_set_profile()
22740 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_set_profile()
22741 rack->rc_always_pace = 1; in rack_set_profile()
22742 if (rack->rack_hibeta) in rack_set_profile()
22745 rack->rc_always_pace = 0; in rack_set_profile()
22748 rack->rc_rack_tmr_std_based = 1; in rack_set_profile()
22752 rack->rc_rack_use_dsack = 1; in rack_set_profile()
22755 rack->r_use_cmp_ack = 1; in rack_set_profile()
22757 rack->r_use_cmp_ack = 0; in rack_set_profile()
22759 rack->rack_no_prr = 1; in rack_set_profile()
22761 rack->rack_no_prr = 0; in rack_set_profile()
22763 rack->rc_gp_no_rec_chg = 1; in rack_set_profile()
22765 rack->rc_gp_no_rec_chg = 0; in rack_set_profile()
22766 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { in rack_set_profile()
22767 rack->r_mbuf_queue = 1; in rack_set_profile()
22768 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) in rack_set_profile()
22769 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_profile()
22770 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22772 rack->r_mbuf_queue = 0; in rack_set_profile()
22773 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22776 rack->rack_enable_scwnd = 1; in rack_set_profile()
22778 rack->rack_enable_scwnd = 0; in rack_set_profile()
22781 rack->rc_gp_dyn_mul = 1; in rack_set_profile()
22783 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_set_profile()
22785 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_set_profile()
22786 rack->rc_gp_dyn_mul = 0; in rack_set_profile()
22788 rack->r_rr_config = 0; in rack_set_profile()
22789 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_set_profile()
22790 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_profile()
22791 rack->rtt_limit_mul = 0; in rack_set_profile()
22794 rack->rack_hdw_pace_ena = 1; in rack_set_profile()
22796 rack->rack_hdw_pace_ena = 0; in rack_set_profile()
22798 rack->rack_no_prr = 1; in rack_set_profile()
22800 rack->rack_no_prr = 0; in rack_set_profile()
22802 rack->r_limit_scw = 1; in rack_set_profile()
22804 rack->r_limit_scw = 0; in rack_set_profile()
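/*
 * Illustrative sketch (not part of the source): how a userland application
 * might select the rack stack and then request one of the canned pacing
 * profiles that rack_set_profile() above implements. TCP_FUNCTION_BLK and
 * TCP_RACK_PROFILE are the FreeBSD option names from <netinet/tcp.h>; the
 * profile number used here (1) is only an example value.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

static int
use_rack_profile(int fd, int profile)
{
	struct tcp_function_set tfs;

	memset(&tfs, 0, sizeof(tfs));
	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
	/* Switch this connection onto the rack stack... */
	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs,
	    sizeof(tfs)) == -1)
		return (-1);
	/* ...then apply a canned option profile. */
	return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE, &profile,
	    sizeof(profile)));
}
#endif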
/* In rack_add_deferred_option(): */
    /* No space yikes -- fail out.. */
    /* ... */
    dol->optname = sopt_name;
    dol->optval = loptval;
    TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
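/*
 * Note: rack_add_deferred_option() queues an option on r_ctl.opt_list
 * instead of applying it. Options are only deferred while defer_options
 * is set and gp_ready is still zero (see rack_set_sockopt() below);
 * rack_apply_deferred_options() later replays each queued entry through
 * rack_process_option().
 */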
/* In process_hybrid_pacing(): */
    rack->use_fixed_rate = 0;
    rack->r_ctl.rc_fixed_pacing_rate_rec = 0;
    rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
    rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
    /* ... */
    sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
    /* ... */
    rack->rc_tp->tcp_hybrid_error++;
    /* ... */
    seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc;
    /* ... */
    hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK;
    /* ... */
    seq = sft->start_seq;
    if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) {
        /* ... */
        if (rack->rc_hybrid_mode) {
            /* ... */
            rack->rc_tp->tcp_hybrid_stop++;
        }
        /* ... */
    }
    if (rack->dgp_on == 0) {
        /* ... */
        rack->rc_tp->tcp_hybrid_error++;
        /* ... */
    }
    if (rack->rc_hybrid_mode == 0) {
        /* ... */
        rack->r_ctl.pacing_method |= RACK_REG_PACING;
        rack->rc_hybrid_mode = 1;
    }
    /* ... */
    if (rack->r_ctl.pacing_method & RACK_DGP_PACING) {
        /* ... */
        rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
    }
    /* ... */
    sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET;
    if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR)
        sft->cspr = hybrid->cspr;
    else
        sft->cspr = 0;
    if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS)
        sft->hint_maxseg = hybrid->hint_maxseg;
    else
        sft->hint_maxseg = 0;
    /* ... */
    rack->rc_tp->tcp_hybrid_start++;
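/*
 * Illustrative sketch (not part of the source): how a userland sender
 * might drive process_hybrid_pacing() through the TCP_HYBRID_PACING
 * socket option. The flag and field names mirror those used above; the
 * exact layout of struct tcp_hybrid_req should be taken from
 * <netinet/tcp.h>, and a real caller would also fill hybrid.req with the
 * byte-range/timestamp description of the request being paced.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdint.h>
#include <string.h>

static int
enable_hybrid_pacing(int fd, uint64_t client_side_pacing_rate)
{
	struct tcp_hybrid_req hybrid;

	memset(&hybrid, 0, sizeof(hybrid));
	/* Ask for hybrid pacing and supply a client-suggested rate (cspr). */
	hybrid.hybrid_flags = TCP_HYBRID_PACING_ENABLE | TCP_HYBRID_PACING_CSPR;
	hybrid.cspr = client_side_pacing_rate;
	return (setsockopt(fd, IPPROTO_TCP, TCP_HYBRID_PACING,
	    &hybrid, sizeof(hybrid)));
}
#endif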
/* In rack_stack_information(): */
    si->bytes_transmitted = tp->t_sndbytes;
    si->bytes_retransmitted = tp->t_snd_rxt_bytes;
/* In rack_process_option(): */
    rack->rc_rack_tmr_std_based = 1;
    /* ... */
    rack->rc_rack_tmr_std_based = 0;
    /* ... */
    rack->rc_rack_use_dsack = 1;
    /* ... */
    rack->rc_rack_use_dsack = 0;
    /* ... */
    rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
    /* ... */
    rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR;
    /* ... */
    rack->r_ctl.pace_len_divisor = optval;
    /* ... */
    rack->rack_hibeta = 1;
    /* ... */
    /* User wants to set a custom beta. */
    /* ... */
    rack->r_ctl.saved_hibeta = optval;
    if (rack->rc_pacing_cc_set)
        /* ... */
    rack->r_ctl.rc_saved_beta = optval;
    /* ... */
    if (rack->rc_pacing_cc_set == 0)
        /* ... */
    rack->rack_hibeta = 0;
    if (rack->rc_pacing_cc_set)
        /* ... */
    rack->r_ctl.timer_slop = optval;
    if (rack->rc_tp->t_srtt) {
        /* ... */
        RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
            /* ... */
            rack->r_ctl.timer_slop);
    }
    /* ... */
    if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
        /* ... */
    }
    if (rack->rc_pacing_cc_set) {
        /* ... */
    }
    /* ... */
    if (CC_ALGO(tp)->ctl_output != NULL)
        error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
    /* ... */
    rack->r_ctl.rc_saved_beta_ecn = optval;
    /* ... */
    if (rack->gp_ready) {
        /* ... */
    }
    rack->defer_options = 1;
    /* ... */
    rack->defer_options = 0;
    /* ... */
    rack->r_ctl.req_measurements = optval;
    /* ... */
    rack->r_use_labc_for_rec = 1;
    /* ... */
    rack->r_use_labc_for_rec = 0;
    /* ... */
    rack->rc_labc = optval;
    /* ... */
    rack->r_up_only = 1;
    /* ... */
    rack->r_up_only = 0;
    /* ... */
    rack->r_ctl.fillcw_cap = loptval;
    /* ... */
    if ((rack->dgp_on == 1) &&
        (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
        /*
         * ... Now change up the flags and counts to be correct.
         */
        rack->r_ctl.pacing_method |= RACK_REG_PACING;
        /* ... */
        rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
    }
    rack->r_ctl.bw_rate_cap = loptval;
    /* ... */
    if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) {
        /* ... */
    }
    rack->r_ctl.side_chan_dis_mask = optval;
    /* ... */
    rack->r_ctl.side_chan_dis_mask = 0;
    /* ... */
    if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
        /* ... */
    } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
        rack->r_use_cmp_ack = 1;
        rack->r_mbuf_queue = 1;
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    }
    /* ... */
    if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
        tp->t_flags2 |= TF2_MBUF_ACKCMP;
    /* ... */
    rack->r_limit_scw = 1;
    /* ... */
    rack->r_limit_scw = 0;
    /* ... */
    rack->rc_pace_to_cwnd = 0;
    /* ... */
    rack->rc_pace_to_cwnd = 1;
    /* ... */
    rack->rc_pace_fill_if_rttin_range = 1;
    rack->rtt_limit_mul = optval;
    /* ... */
    rack->rc_pace_fill_if_rttin_range = 0;
    rack->rtt_limit_mul = 0;
    rack->r_ctl.rc_no_push_at_mrtt = 0;
    /* ... */
    rack->r_ctl.rc_no_push_at_mrtt = optval;
    /* ... */
    rack->rack_enable_scwnd = 0;
    /* ... */
    rack->rack_enable_scwnd = 1;
    /* ... */
    /* Now do we use the LRO mbuf-queue feature */
    if (optval || rack->r_use_cmp_ack)
        rack->r_mbuf_queue = 1;
    else
        rack->r_mbuf_queue = 0;
    if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    else
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    /* ... */
    rack->rack_rec_nonrxt_use_cr = 0;
    /* ... */
    rack->rack_rec_nonrxt_use_cr = 1;
    /* ... */
    rack->rack_no_prr = 0;
    /* ... */
    rack->rack_no_prr = 1;
    /* ... */
    rack->no_prr_addback = 1;
    /* ... */
    rack->cspr_is_fcc = 1;
    /* ... */
    rack->cspr_is_fcc = 0;
    /* ... */
    rack->rc_gp_dyn_mul = 0;
    /* ... */
    rack->rc_gp_dyn_mul = 1;
    /* ... */
    rack->r_ctl.rack_per_of_gp_ca = optval;
    /* ... */
    rack->rack_tlp_threshold_use = optval;
    /* ... */
    rack->r_ctl.rc_tlp_cwnd_reduce = optval;
    /* ... */
    if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
        /* ... */
    }
    if (rack->rc_always_pace) {
        /* ... */
    }
    rack->r_ctl.pacing_method |= RACK_REG_PACING;
    rack->rc_always_pace = 1;
    if (rack->rack_hibeta)
        /* ... */
    if (rack->rc_always_pace == 1) {
        /* ... */
    }
    if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    else
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    /* ... */
    rack->r_ctl.init_rate = val;
    if (rack->rc_always_pace)
        /* ... */
    rack->rc_force_max_seg = 1;
    /* ... */
    rack->rc_force_max_seg = 0;
    /* ... */
    rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval);
    /* ... */
    if ((rack->dgp_on == 1) &&
        (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
        /*
         * If we set a max-seg and are doing DGP then ...
         * Now change up the flags and counts to be correct.
         */
        rack->r_ctl.pacing_method |= RACK_REG_PACING;
        /* ... */
        rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
    }
    /* ... */
    rack->rc_user_set_max_segs = optval;
    /* ... */
    rack->rc_user_set_max_segs = MAX_USER_SET_SEG;
    /* ... */
    if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
        /* ... */
    }
    if (rack->dgp_on) {
        /* ... */
    }
    rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
        rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
        rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
    rack->use_fixed_rate = 1;
    if (rack->rack_hibeta)
        /* ... */
        rack->r_ctl.rc_fixed_pacing_rate_ss,
        rack->r_ctl.rc_fixed_pacing_rate_ca,
        rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
        /* ... */
    if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
        /* ... */
    }
    if (rack->dgp_on) {
        /* ... */
    }
    rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
        rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
        rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
    rack->use_fixed_rate = 1;
    if (rack->rack_hibeta)
        /* ... */
        rack->r_ctl.rc_fixed_pacing_rate_ss,
        rack->r_ctl.rc_fixed_pacing_rate_ca,
        rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
        /* ... */
    if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
        /* ... */
    }
    if (rack->dgp_on) {
        /* ... */
    }
    rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
        rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
    if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
        rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
    rack->use_fixed_rate = 1;
    if (rack->rack_hibeta)
        /* ... */
        rack->r_ctl.rc_fixed_pacing_rate_ss,
        rack->r_ctl.rc_fixed_pacing_rate_ca,
        rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
        /* ... */
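/*
 * Illustrative sketch (not part of the source): setting the fixed pacing
 * rates that the three cases above store into rc_fixed_pacing_rate_rec,
 * _ss and _ca. The option names TCP_RACK_PACE_RATE_REC/SS/CA come from
 * <netinet/tcp.h>; the unit of optval follows the kernel's interpretation
 * in the cases above, and note that fixed-rate pacing is refused while
 * DGP or the side-channel CCSP mask is active.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
set_fixed_pacing(int fd, int rate)
{
	/*
	 * Setting the CA rate also seeds the SS and REC rates when they
	 * are still zero, mirroring the kernel logic above.
	 */
	return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_RATE_CA,
	    &rate, sizeof(rate)));
}
#endif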
    rack->r_ctl.rack_per_of_gp_rec = optval;
    /* ... */
        rack->r_ctl.rack_per_of_gp_ss,
        rack->r_ctl.rack_per_of_gp_ca,
        rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
        /* ... */
    rack->r_ctl.rack_per_of_gp_ca = ca;
    /* ... */
        rack->r_ctl.rack_per_of_gp_ss,
        rack->r_ctl.rack_per_of_gp_ca,
        rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
        /* ... */
    rack->r_ctl.rack_per_of_gp_ss = ss;
    /* ... */
        rack->r_ctl.rack_per_of_gp_ss,
        rack->r_ctl.rack_per_of_gp_ca,
        rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
        /* ... */
    rack->r_rr_config = optval;
    /* ... */
    rack->r_rr_config = 0;
    /* ... */
    rack->rc_pace_dnd = 1;
    /* ... */
    rack->rc_pace_dnd = 0;
    /* ... */
    if (rack->r_rack_hw_rate_caps == 0)
        rack->r_rack_hw_rate_caps = 1;
    /* ... */
    rack->r_rack_hw_rate_caps = 0;
    /* ... */
    rack->r_ctl.rack_per_upper_bound_ca = val;
    /* ... */
    rack->r_ctl.rack_per_upper_bound_ss = val;
    /* ... */
    rack->r_ctl.gp_rnd_thresh = optval & 0x0ff;
    /* ... */
    rack->r_ctl.gate_to_fs = 1;
    /* ... */
    rack->r_ctl.gate_to_fs = 0;
    /* ... */
    rack->r_ctl.use_gp_not_last = 1;
    /* ... */
    rack->r_ctl.use_gp_not_last = 0;
    /* ... */
    rack->r_ctl.gp_gain_req = v;
    /* ... */
    rack->rc_initial_ss_comp = 1;
    rack->r_ctl.gp_rnd_thresh = 0;
    /* ... */
    rack->r_ctl.rc_split_limit = optval;
    /* ... */
    if (rack->rack_hdrw_pacing == 0) {
        rack->rack_hdw_pace_ena = 1;
        rack->rack_attempt_hdwr_pace = 0;
    }
    /* ... */
    rack->rack_hdw_pace_ena = 0;
    /* ... */
    if (rack->r_ctl.crte != NULL) {
        rack->rack_hdrw_pacing = 0;
        rack->rack_attempt_hdwr_pace = 0;
        tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
        rack->r_ctl.crte = NULL;
    }
    /* ... */
    rack->r_ctl.rc_prr_sendalot = optval;
    /* ... */
    /* Minimum time between rack t-o's in ms */
    /* ... */
    rack->r_ctl.rc_min_to = optval;
    /* ... */
    rack->r_ctl.rc_early_recovery_segs = optval;
    /* ... */
    tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
    /* ... */
    tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
    /* ... */
    tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
    /* ... */
    tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
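/*
 * Note: the hystart option case above maps its value onto the
 * CCF_HYSTART_* bits in t_ccv.flags: allowing hystart at all, letting it
 * shrink the cwnd (CCF_HYSTART_CAN_SH_CWND), and using a conservative
 * ssthresh setting (CCF_HYSTART_CONS_SSTH); a zero value clears all
 * three bits at once.
 */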
    rack->r_ctl.rc_reorder_shift = optval;
    /* ... */
    rack->r_ctl.rc_reorder_fade = optval;
    /* ... */
    rack->r_ctl.rc_tlp_threshold = optval;
    /* ... */
    rack->use_rack_rr = 1;
    /* ... */
    rack->use_rack_rr = 0;
    /* ... */
    /* RACK added ms i.e. rack-rtt + reord + N */
    /* ... */
    rack->r_ctl.rc_pkt_delay = optval;
    /* ... */
    tp->t_delayed_ack = 0;
    /* ... */
    tp->t_delayed_ack = 1;
    if (tp->t_flags & TF_DELACK) {
        tp->t_flags &= ~TF_DELACK;
        tp->t_flags |= TF_ACKNOW;
        /* ... */
    }
    /* ... */
    rack->r_ctl.rc_rate_sample_method = optval;
    /* ... */
    rack->r_use_hpts_min = 1;
    /*
     * Must be between 2 - 80% to be a reduction else ...
     */
    /* ... */
    rack->r_ctl.max_reduction = optval;
    /* ... */
    rack->r_use_hpts_min = 0;
    /* ... */
    rack->rc_gp_no_rec_chg = 1;
    /* ... */
    rack->rc_gp_no_rec_chg = 0;
    /* ... */
    rack->rc_skip_timely = 1;
    rack->r_ctl.rack_per_of_gp_rec = 90;
    rack->r_ctl.rack_per_of_gp_ca = 100;
    rack->r_ctl.rack_per_of_gp_ss = 250;
    /* ... */
    rack->rc_skip_timely = 0;
    /* ... */
    rack->use_lesser_lt_bw = 0;
    rack->dis_lt_bw = 1;
    /* ... */
    rack->use_lesser_lt_bw = 1;
    rack->dis_lt_bw = 0;
    /* ... */
    rack->use_lesser_lt_bw = 0;
    rack->dis_lt_bw = 0;
    /* ... */
    rack->rc_allow_data_af_clo = 1;
    /* ... */
    rack->rc_allow_data_af_clo = 0;
/* In rack_inherit(): */
    /*
     * ... apply a read-lock to the parent (we are already ...
     */
    /* ... */
    if (par->t_fb != tp->t_fb) {
        /* ... */
    }
    /* ... */
    dest = (struct tcp_rack *)tp->t_fb_ptr;
    src = (struct tcp_rack *)par->t_fb_ptr;
    /* ... */
    /* Now copy out anything we wish to inherit i.e. things in socket-options */
    /* ... */
    if ((src->dgp_on) && (dest->dgp_on == 0)) {
        /* ... */
    }
    /* ... */
    if (dest->full_size_rxt != src->full_size_rxt) {
        dest->full_size_rxt = src->full_size_rxt;
        /* ... */
    }
    if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) {
        dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min;
        /* ... */
    }
    if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) {
        dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based;
        /* ... */
    }
    if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) {
        dest->rc_rack_use_dsack = src->rc_rack_use_dsack;
        /* ... */
    }
    if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) {
        dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor;
        /* ... */
    }
    if (src->rack_hibeta != dest->rack_hibeta) {
        /* ... */
        if (src->rack_hibeta) {
            dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta;
            dest->rack_hibeta = 1;
        } else {
            dest->rack_hibeta = 0;
        }
        /* ... */
    }
    if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) {
        dest->r_ctl.timer_slop = src->r_ctl.timer_slop;
        /* ... */
    }
    if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) {
        dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn;
        /* ... */
    }
    if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) {
        dest->r_ctl.req_measurements = src->r_ctl.req_measurements;
        /* ... */
    }
    if (dest->r_up_only != src->r_up_only) {
        dest->r_up_only = src->r_up_only;
        /* ... */
    }
    if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) {
        dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap;
        /* ... */
    }
    if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) {
        dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap;
        /* ... */
    }
    if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) {
        dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask;
        /* ... */
    }
    if (dest->r_limit_scw != src->r_limit_scw) {
        dest->r_limit_scw = src->r_limit_scw;
        /* ... */
    }
    if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) {
        dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd;
        /* ... */
    }
    if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) {
        dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range;
        /* ... */
    }
    if (dest->rtt_limit_mul != src->rtt_limit_mul) {
        dest->rtt_limit_mul = src->rtt_limit_mul;
        /* ... */
    }
    if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) {
        dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt;
        /* ... */
    }
    if (dest->rack_enable_scwnd != src->rack_enable_scwnd) {
        dest->rack_enable_scwnd = src->rack_enable_scwnd;
        /* ... */
    }
    if (dest->r_use_cmp_ack != src->r_use_cmp_ack) {
        dest->r_use_cmp_ack = src->r_use_cmp_ack;
        /* ... */
    }
    if (dest->r_mbuf_queue != src->r_mbuf_queue) {
        dest->r_mbuf_queue = src->r_mbuf_queue;
        /* ... */
    }
    if (dest->r_mbuf_queue != src->r_mbuf_queue) {
        dest->r_mbuf_queue = src->r_mbuf_queue;
        /* ... */
    }
    if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) {
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    } else {
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    }
    if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) {
        tp->t_flags2 |= TF2_MBUF_ACKCMP;
    }
    if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) {
        dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr;
        /* ... */
    }
    if (dest->rack_no_prr != src->rack_no_prr) {
        dest->rack_no_prr = src->rack_no_prr;
        /* ... */
    }
    if (dest->no_prr_addback != src->no_prr_addback) {
        dest->no_prr_addback = src->no_prr_addback;
        /* ... */
    }
    if (dest->cspr_is_fcc != src->cspr_is_fcc) {
        dest->cspr_is_fcc = src->cspr_is_fcc;
        /* ... */
    }
    if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) {
        dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul;
        /* ... */
    }
    if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
        dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
        /* ... */
    }
    if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) {
        dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use;
        /* ... */
    }
    if (dest->r_ctl.init_rate != src->r_ctl.init_rate) {
        dest->r_ctl.init_rate = src->r_ctl.init_rate;
        /* ... */
    }
    if (dest->rc_force_max_seg != src->rc_force_max_seg) {
        dest->rc_force_max_seg = src->rc_force_max_seg;
        /* ... */
    }
    if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) {
        dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs;
        /* ... */
    }
    if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) {
        dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca;
        /* ... */
    }
    if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) {
        dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss;
        /* ... */
    }
    if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) {
        dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec;
        /* ... */
    }
    if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) {
        dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec;
        /* ... */
    }
    if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
        dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
        /* ... */
    }
    if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) {
        dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss;
        /* ... */
    }
    if (dest->r_rr_config != src->r_rr_config) {
        dest->r_rr_config = src->r_rr_config;
        /* ... */
    }
    if (dest->rc_pace_dnd != src->rc_pace_dnd) {
        dest->rc_pace_dnd = src->rc_pace_dnd;
        /* ... */
    }
    if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) {
        dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps;
        /* ... */
    }
    if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) {
        dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca;
        /* ... */
    }
    if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) {
        dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss;
        /* ... */
    }
    if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) {
        dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh;
        /* ... */
    }
    if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) {
        dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs;
        /* ... */
    }
    if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) {
        dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last;
        /* ... */
    }
    if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) {
        dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req;
        /* ... */
    }
    if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) {
        dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena;
        /* ... */
    }
    if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) {
        dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace;
        /* ... */
    }
    if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) {
        dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot;
        /* ... */
    }
    if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) {
        dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to;
        /* ... */
    }
    if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) {
        dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs;
        /* ... */
    }
    if (par->t_ccv.flags != tp->t_ccv.flags) {
        /* ... */
        if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) {
            tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
            /* ... */
            tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
            /* ... */
            tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
        } else {
            tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
        }
    }
    if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) {
        dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift;
        /* ... */
    }
    if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) {
        dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade;
        /* ... */
    }
    if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) {
        dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold;
        /* ... */
    }
    if (dest->use_rack_rr != src->use_rack_rr) {
        dest->use_rack_rr = src->use_rack_rr;
        /* ... */
    }
    if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) {
        dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay;
        /* ... */
    }
    if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) {
        dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method;
        /* ... */
    }
    if (dest->r_use_hpts_min != src->r_use_hpts_min) {
        dest->r_use_hpts_min = src->r_use_hpts_min;
        /* ... */
    }
    if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) {
        dest->r_ctl.max_reduction = src->r_ctl.max_reduction;
        /* ... */
    }
    if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) {
        dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg;
        /* ... */
    }
    if (dest->rc_skip_timely != src->rc_skip_timely) {
        dest->rc_skip_timely = src->rc_skip_timely;
        /* ... */
    }
    if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) {
        dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo;
        /* ... */
    }
    if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) {
        dest->use_lesser_lt_bw = src->use_lesser_lt_bw;
        /* ... */
    }
    if (dest->dis_lt_bw != src->dis_lt_bw) {
        dest->dis_lt_bw = src->dis_lt_bw;
        /* ... */
    }
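/*
 * Illustrative sketch (not part of the source): rack_inherit() copies the
 * socket-option derived settings from a listener to each accepted
 * connection, so tuning can be done once on the listen socket. A minimal
 * userland sketch, assuming the rack stack is already the default or was
 * selected on the listener:
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
tune_listener(int lfd)
{
	int one = 1;

	/*
	 * Set options before accept(); connections created from this
	 * listener pick them up via rack_inherit().
	 */
	return (setsockopt(lfd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
	    &one, sizeof(one)));
}
#endif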
/* In rack_apply_deferred_options(): */
    TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
        TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
        /* ... */
        s_optval = (uint32_t)dol->optval;
        (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
        /* ... */
    }
/* In rack_hw_tls_change(): */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    rack->r_ctl.fsb.hw_tls = 1;
    /* ... */
    rack->r_ctl.fsb.hw_tls = 0;
/* In rack_wake_check(): */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->r_ctl.rc_hpts_flags) {
        /* ... */
        if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) {
            /* ... */
            if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
                /* ... */
        } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
            /* ... */
            if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
                /* ... */
        }
    }
/* In rack_set_sockopt(): */
    /*
     * ... socket option arguments. When it re-acquires the lock after
     * the copy, it ...
     */
    /* ... */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
    /* ... */
    switch (sopt->sopt_level) {
    /* ... */
        MPASS(inp->inp_vflag & INP_IPV6PROTO);
        switch (sopt->sopt_name) {
        /* ... */
        }
        /* ... */
        switch (sopt->sopt_name) {
        /* ... */
            ip->ip_tos = rack->rc_inp->inp_ip_tos;
            /* ... */
            ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
            /* ... */
        }
        /* ... */
        switch (sopt->sopt_name) {
        case SO_PEERPRIO:		/* SC-URL:bs */
            /* ... */
            if (inp->inp_socket) {
                rack->client_bufferlvl = inp->inp_socket->so_peerprio;
            }
            /* ... */
        }
        /* ... */
    }
    switch (sopt->sopt_name) {
    /* ... */
    case TCP_PACING_RATE_CAP:		/* URL:cap -- used by side-channel */
    case TCP_HDWR_UP_ONLY:		/* URL:uponly -- hardware pacing boolean */
    /* ... */
    }
    /* ... */
    if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
        (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
        /*
         * ... We truncate it down to 32 bits for the socket-option
         * trace this ...
         */
        /* ... */
    } else if (sopt->sopt_name == TCP_HYBRID_PACING) {
        /* ... */
    }
    /* ... */
    if (tp->t_fb != &__tcp_rack) {
        /* ... */
    }
    /* ... */
    if (rack->defer_options && (rack->gp_ready == 0) &&
        (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
        (sopt->sopt_name != TCP_HYBRID_PACING) &&
        (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
        (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
        (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
        /* ... */
        if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
            /* ... */
        }
        /* ... */
    }
    error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
/* In rack_fill_info(): */
    ti->tcpi_state = tp->t_state;
    if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
        ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
    if (tp->t_flags & TF_SACK_PERMIT)
        ti->tcpi_options |= TCPI_OPT_SACK;
    if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
        ti->tcpi_options |= TCPI_OPT_WSCALE;
        ti->tcpi_snd_wscale = tp->snd_scale;
        ti->tcpi_rcv_wscale = tp->rcv_scale;
    }
    if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
        ti->tcpi_options |= TCPI_OPT_ECN;
    if (tp->t_flags & TF_FASTOPEN)
        ti->tcpi_options |= TCPI_OPT_TFO;
    /* ... */
    ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
    /* ... */
    ti->tcpi_rtt = tp->t_srtt;
    ti->tcpi_rttvar = tp->t_rttvar;
    ti->tcpi_rto = tp->t_rxtcur;
    ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
    ti->tcpi_snd_cwnd = tp->snd_cwnd;
    /*
     * FreeBSD-specific extension fields for tcp_info.
     */
    ti->tcpi_rcv_space = tp->rcv_wnd;
    ti->tcpi_rcv_nxt = tp->rcv_nxt;
    ti->tcpi_snd_wnd = tp->snd_wnd;
    ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
    ti->tcpi_snd_nxt = tp->snd_nxt;
    ti->tcpi_snd_mss = tp->t_maxseg;
    ti->tcpi_rcv_mss = tp->t_maxseg;
    ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
    ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
    ti->tcpi_snd_zerowin = tp->t_sndzerowin;
    ti->tcpi_total_tlp = tp->t_sndtlppack;
    ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
    ti->tcpi_rttmin = tp->t_rttlow;
    /* ... */
    memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
    /* ... */
    if (tp->t_flags & TF_TOE) {
        ti->tcpi_options |= TCPI_OPT_TOE;
        /* ... */
    }
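/*
 * Illustrative sketch (not part of the source): reading the tcp_info
 * fields that rack_fill_info() populates above, via the standard
 * TCP_INFO socket option.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void
print_conn_stats(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("srtt=%u rto=%u cwnd=%u snd_wnd=%u\n",
		    ti.tcpi_rtt, ti.tcpi_rto, ti.tcpi_snd_cwnd,
		    ti.tcpi_snd_wnd);
}
#endif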
/* In rack_get_sockopt(): */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    switch (sopt->sopt_name) {
    /* ... */
        /*
         * Beta is the congestion control value for NewReno that
         * influences how ...
         */
        /* ... */
        if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
            /* ... */
        else if (rack->rc_pacing_cc_set == 0)
            optval = rack->r_ctl.rc_saved_beta;
        /* ... */
        if (tp->t_ccv.cc_data)
            optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
        /* ... */
        /*
         * ... you exit recovery. Note that classic ECN has a beta of
         * 50, it is only ...
         */
        /* ... */
        if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
            /* ... */
        else if (rack->rc_pacing_cc_set == 0)
            optval = rack->r_ctl.rc_saved_beta_ecn;
        /* ... */
        if (tp->t_ccv.cc_data)
            optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
        /* ... */
        if (rack->rc_rack_tmr_std_based) {
            /* ... */
        }
        if (rack->rc_rack_use_dsack) {
            /* ... */
        }
        /* ... */
        if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
            /* ... */
            if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
                /* ... */
            if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
                /* ... */
        }
        /* ... */
        optval = rack->rack_hibeta;
        /* ... */
        optval = rack->defer_options;
        /* ... */
        optval = rack->r_ctl.req_measurements;
        /* ... */
        optval = rack->r_use_labc_for_rec;
        /* ... */
        optval = rack->rc_labc;
        /* ... */
        optval = rack->r_up_only;
        /* ... */
        loptval = rack->r_ctl.fillcw_cap;
        /* ... */
        loptval = rack->r_ctl.bw_rate_cap;
        /* ... */
        optval = rack->r_ctl.side_chan_dis_mask;
        /* ... */
        optval = rack->r_use_cmp_ack;
        /* ... */
        optval = rack->rc_pace_to_cwnd;
        /* ... */
        optval = rack->r_ctl.rc_no_push_at_mrtt;
        /* ... */
        optval = rack->rack_enable_scwnd;
        /* ... */
        optval = rack->rack_rec_nonrxt_use_cr;
        /* ... */
        if (rack->rack_no_prr == 1)
            /* ... */
        else if (rack->no_prr_addback == 1)
            /* ... */
        if (rack->dis_lt_bw) {
            /* ... */
        } else if (rack->use_lesser_lt_bw) {
            /* ... */
        }
        /* ... */
        /* Now do we use the LRO mbuf-queue feature */
        optval = rack->r_mbuf_queue;
        /* ... */
        optval = rack->cspr_is_fcc;
        /* ... */
        optval = rack->rc_gp_dyn_mul;
        /* ... */
        optval = rack->r_ctl.rc_tlp_cwnd_reduce;
        /* ... */
        val = rack->r_ctl.init_rate;
        /* ... */
        optval = rack->rc_force_max_seg;
        /* ... */
        optval = rack->r_ctl.rc_user_set_min_segs;
        /* ... */
        optval = rack->rc_user_set_max_segs;
        /* ... */
        optval = rack->rc_always_pace;
        /* ... */
        optval = rack->r_ctl.rc_prr_sendalot;
        /* ... */
        /* Minimum time between rack t-o's in ms */
        optval = rack->r_ctl.rc_min_to;
        /* ... */
        optval = rack->r_ctl.rc_split_limit;
        /* ... */
        optval = rack->r_ctl.rc_early_recovery_segs;
        /* ... */
        optval = rack->r_ctl.rc_reorder_shift;
        /* ... */
        if (rack->r_ctl.gp_rnd_thresh) {
            /* ... */
            v = rack->r_ctl.gp_gain_req;
            /* ... */
            optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
            if (rack->r_ctl.gate_to_fs == 1)
                /* ... */
        }
        /* ... */
        optval = rack->r_ctl.rc_reorder_fade;
        /* ... */
        optval = rack->use_rack_rr;
        /* ... */
        optval = rack->r_rr_config;
        /* ... */
        optval = rack->r_rack_hw_rate_caps;
        /* ... */
        optval = rack->rack_hdw_pace_ena;
        /* ... */
        optval = rack->r_ctl.rc_tlp_threshold;
        /* ... */
        /* RACK added ms i.e. rack-rtt + reord + N */
        optval = rack->r_ctl.rc_pkt_delay;
        /* ... */
        optval = rack->rack_tlp_threshold_use;
        /* ... */
        optval = rack->rc_pace_dnd;
        /* ... */
        optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
        /* ... */
        optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
        /* ... */
        optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
        /* ... */
        optval = rack->r_ctl.rack_per_upper_bound_ss;
        /* ... */
        optval |= rack->r_ctl.rack_per_upper_bound_ca;
        /* ... */
        optval = rack->r_ctl.rack_per_of_gp_ca;
        /* ... */
        optval = rack->r_ctl.rack_per_of_gp_ss;
        /* ... */
        optval = rack->r_ctl.pace_len_divisor;
        /* ... */
        optval = rack->r_ctl.rc_rate_sample_method;
        /* ... */
        optval = tp->t_delayed_ack;
        /* ... */
        optval = rack->rc_allow_data_af_clo;
        /* ... */
        optval = rack->r_limit_scw;
        /* ... */
        if (rack->r_use_hpts_min)
            optval = rack->r_ctl.max_reduction;
        /* ... */
        optval = rack->rc_gp_no_rec_chg;
        /* ... */
        optval = rack->rc_skip_timely;
        /* ... */
        optval = rack->r_ctl.timer_slop;
        /* ... */
    }
    /* ... */
    if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
        (sopt->sopt_name == TCP_FILLCW_RATE_CAP))
        /* ... */
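/*
 * Illustrative sketch (not part of the source): querying one of the
 * values rack_get_sockopt() returns above. Per the NewReno check in the
 * first case, TCP_RACK_PACING_BETA only succeeds when the connection is
 * using the NewReno congestion control module; other modules fail the
 * request.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
get_rack_beta(int fd, int *beta)
{
	socklen_t len = sizeof(*beta);

	return (getsockopt(fd, IPPROTO_TCP, TCP_RACK_PACING_BETA,
	    beta, &len));
}
#endif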
/* In rack_ctloutput(): */
    if (sopt->sopt_dir == SOPT_SET) {
        /* ... */
    } else if (sopt->sopt_dir == SOPT_GET) {
        /* ... */
    } else {
        panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
    }
/* In tcp_addrack(): */
    printf("Failed to register rack module -- err:%d\n", err);