Lines Matching +full:tcb +full:- +full:capture

1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
162 * - Matt Mathis's Rate Halving, which slowly drops
165 * - Yuchung Cheng's RACK TCP (for which it's named) that
168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
186 * TCP output is also overwritten with a new version since it
191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs without new data */
194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
195 * - 60 seconds */
199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto ->…
216 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
252 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/c…
257 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
264 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
294 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
295 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
310 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
312 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top …
313 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bott…
332 * the way fill-cw interacts with timely and caps how much
333 * timely can boost the fill-cw b/w.
339 * probeRTT as well as fixed-rate-pacing.
431 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
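
The macro above is the classic srtt + 4 * rttvar retransmit timeout, floored at rack_rto_min. A minimal standalone sketch of the same computation, assuming microsecond units as rack.c uses; tcpcb_lite, rack_rexmtval, and the 30 ms floor value are illustrative stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the tunable and tcpcb fields the macro touches. */
    static uint32_t rack_rto_min = 30000;   /* assumed 30 ms floor, usecs */

    struct tcpcb_lite {
            uint32_t t_srtt;        /* smoothed RTT, usecs */
            uint32_t t_rttvar;      /* RTT variance, usecs */
    };

    static uint32_t
    rack_rexmtval(const struct tcpcb_lite *tp)
    {
            uint32_t rto = tp->t_srtt + (tp->t_rttvar << 2); /* srtt + 4 * rttvar */

            return (rto > rack_rto_min ? rto : rack_rto_min);
    }

    int
    main(void)
    {
            struct tcpcb_lite tp = { .t_srtt = 40000, .t_rttvar = 5000 };

            /* 40000 + 4 * 5000 = 60000 usecs, which beats the 30 ms floor. */
            printf("RTO = %u usecs\n", rack_rexmtval(&tp));
            return (0);
    }
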
602 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
603 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
604 if (rack->lt_bw_up) { in rack_get_lt_bw()
607 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
608 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
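
rack_get_lt_bw() above folds the still-open accounting interval into the long-term byte and time totals before forming a rate. A sketch of the idea, assuming microsecond timestamps and a bytes/sec result; the final division is not visible in the matched lines and is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    struct lt_bw_state {
            uint64_t lt_bw_bytes;   /* bytes over closed intervals */
            uint64_t lt_bw_time;    /* usecs over closed intervals */
            uint64_t lt_timemark;   /* usec stamp when the open interval began */
            uint32_t lt_seq;        /* snd_una when the open interval began */
            int      lt_bw_up;      /* is an interval currently open? */
    };

    /* Long-term bandwidth in bytes/sec, 0 if there is no usable sample yet. */
    static uint64_t
    lt_bw_estimate(const struct lt_bw_state *s, uint32_t snd_una, uint64_t now_usec)
    {
            uint64_t bytes = s->lt_bw_bytes;
            uint64_t tim = s->lt_bw_time;

            if (s->lt_bw_up) {
                    /* Credit the interval that is still running, as above. */
                    bytes += (uint32_t)(snd_una - s->lt_seq);
                    tim += now_usec - s->lt_timemark;
            }
            if (tim == 0 || bytes == 0)
                    return (0);
            return ((bytes * 1000000) / tim);       /* assumed conversion */
    }

    int
    main(void)
    {
            struct lt_bw_state s = { 1000000, 500000, 0, 0, 0 };

            /* 1 MB over 0.5 s of closed intervals -> 2000000 bytes/sec. */
            printf("%llu bytes/sec\n",
                (unsigned long long)lt_bw_estimate(&s, 0, 0));
            return (0);
    }
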
626 tp = rack->rc_tp; in rack_swap_beta_values()
627 if (tp->t_cc == NULL) { in rack_swap_beta_values()
628 /* Tcb is leaving */ in rack_swap_beta_values()
631 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
632 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_swap_beta_values()
633 /* Not new-reno, we can't play games with beta! */ in rack_swap_beta_values()
638 if (CC_ALGO(tp)->ctl_output == NULL) { in rack_swap_beta_values()
639 /* Huh, not using new-reno so no swaps? */ in rack_swap_beta_values()
647 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
654 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
664 opt.val = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
665 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
671 opt.val = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
672 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
678 rack->r_ctl.rc_saved_beta = old_beta; in rack_swap_beta_values()
679 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; in rack_swap_beta_values()
681 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
686 ptr = ((struct newreno *)tp->t_ccv.cc_data); in rack_swap_beta_values()
689 log.u_bbr.flex1 = ptr->beta; in rack_swap_beta_values()
690 log.u_bbr.flex2 = ptr->beta_ecn; in rack_swap_beta_values()
691 log.u_bbr.flex3 = ptr->newreno_flags; in rack_swap_beta_values()
692 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
693 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
695 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
697 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
699 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
700 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
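
The opt/sopt lines above drive a swap: the live New Reno beta and beta_ecn are read out, the copies saved in r_ctl are pushed in through CC_ALGO(tp)->ctl_output(), and the previous live values land in the saved slots so a second call undoes the first. A reduced sketch of just the swap, with plain globals standing in for the ctl_output() get/set round trips:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the CC module's live parameters. */
    static uint32_t live_beta = 50, live_beta_ecn = 80;

    struct saved_cc {
            uint32_t rc_saved_beta;
            uint32_t rc_saved_beta_ecn;
    };

    static void
    swap_beta_values(struct saved_cc *sc)
    {
            uint32_t old_beta = live_beta, old_beta_ecn = live_beta_ecn;

            /* Install the stashed values... */
            live_beta = sc->rc_saved_beta;
            live_beta_ecn = sc->rc_saved_beta_ecn;
            /* ...and stash the old live ones so the next call swaps back. */
            sc->rc_saved_beta = old_beta;
            sc->rc_saved_beta_ecn = old_beta_ecn;
    }

    int
    main(void)
    {
            struct saved_cc sc = { .rc_saved_beta = 90, .rc_saved_beta_ecn = 90 };

            swap_beta_values(&sc);  /* pacing values go live */
            printf("live %u/%u saved %u/%u\n", live_beta, live_beta_ecn,
                sc.rc_saved_beta, sc.rc_saved_beta_ecn);
            swap_beta_values(&sc);  /* originals restored */
            return (0);
    }
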
710 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
716 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
723 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
729 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
736 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
738 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
740 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
742 rack->rc_always_pace = 0; in rack_remove_pacing()
743 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
744 rack->dgp_on = 0; in rack_remove_pacing()
745 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
746 rack->use_fixed_rate = 0; in rack_remove_pacing()
753 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
759 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
761 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
763 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
766 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
767 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
769 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
770 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
771 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
772 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
774 log.u_bbr.applimited = rsm->r_start; in rack_log_gpset()
775 log.u_bbr.delivered = rsm->r_end; in rack_log_gpset()
776 log.u_bbr.epoch = rsm->r_flags; in rack_log_gpset()
779 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
780 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
781 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
794 if (error || req->newptr == NULL) in sysctl_rack_clear()
917 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); in rack_init_sysctls()
922 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); in rack_init_sysctls()
967 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); in rack_init_sysctls()
972 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); in rack_init_sysctls()
997 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); in rack_init_sysctls()
1007 "Do we clear I/S counts on exiting probe-rtt"); in rack_init_sysctls()
1017 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); in rack_init_sysctls()
1191 "If we fall below this rate, dis-engage hw pacing?"); in rack_init_sysctls()
1332 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); in rack_init_sysctls()
1352 "Should we always send the oldest TLP and RACK-TLP"); in rack_init_sysctls()
1390 "When doing recovery -> rto -> recovery do we reset SSthresh?"); in rack_init_sysctls()
1425 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); in rack_init_sysctls()
1430 "Maximum RTO in microseconds -- should be at least as large as min_rto"); in rack_init_sysctls()
1452 "Does a cwnd just-return end the measurement window (app limited)"); in rack_init_sysctls()
1457 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); in rack_init_sysctls()
1514 "Should RACK use mbuf queuing for non-paced connections"); in rack_init_sysctls()
1558 … "When a persist or keep-alive probe is not answered, do we calculate rtt on subsequent answers?"); in rack_init_sysctls()
1642 "Highest move to non-move ratio seen"); in rack_init_sysctls()
1783 "Total number of times a sends returned enobuf for non-hdwr paced connections"); in rack_init_sysctls()
1980 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
1987 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
1988 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
1989 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
1990 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
1992 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
2014 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2022 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2024 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
2049 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
2051 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
2052 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
2054 /* Use the last known rtt, i.e. the rack-rtt */ in rack_log_hybrid_bw()
2055 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
2060 log.u_bbr.cur_del_rate = cur->deadline; in rack_log_hybrid_bw()
2063 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2064 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2065 log.u_bbr.flex6 = cur->start_seq; in rack_log_hybrid_bw()
2066 log.u_bbr.pkts_out = cur->end_seq; in rack_log_hybrid_bw()
2069 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2070 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2072 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_bw()
2073 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2076 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); in rack_log_hybrid_bw()
2077 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2079 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_bw()
2080 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2082 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
2086 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
2087 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
2088 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; in rack_log_hybrid_bw()
2100 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
2102 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
2104 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
2106 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
2108 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
2109 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
2110 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
2122 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
2131 log.u_bbr.delRate = cur->sent_at_fs; in rack_log_hybrid_sends()
2133 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { in rack_log_hybrid_sends()
2139 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2140 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2146 log.u_bbr.cur_del_rate = cur->sent_at_ls; in rack_log_hybrid_sends()
2147 log.u_bbr.rttProp = cur->rxt_at_ls; in rack_log_hybrid_sends()
2149 log.u_bbr.bw_inuse = cur->rxt_at_fs; in rack_log_hybrid_sends()
2151 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2154 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_sends()
2155 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2157 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_sends()
2158 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2161 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_sends()
2162 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2164 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); in rack_log_hybrid_sends()
2165 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2167 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid_sends()
2168 log.u_bbr.lost = cur->playout_ms; in rack_log_hybrid_sends()
2169 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid_sends()
2172 * where a false retransmit occurred, so first_send <-> lastsend may in rack_log_hybrid_sends()
2175 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2176 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2184 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2186 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2188 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2190 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2193 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2194 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2195 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2208 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2209 if (rack->r_is_v6){ in rack_compensate_for_linerate()
2220 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
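
rack_compensate_for_linerate() above starts from t_maxseg plus the TCP header and grows it by the v4/v6 and link-layer framing, so a payload pacing rate can be scaled to what actually hits the wire. A sketch of that scaling with assumed overhead constants (14-byte Ethernet, 20/40-byte IP, 20-byte TCP; the exact constants rack.c uses may differ):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch: payload bandwidth (bytes/sec) -> wire bandwidth once
     * per-segment header overhead is added.  Overheads are assumptions.
     */
    static uint64_t
    compensate_for_linerate(uint64_t bw, uint64_t segsiz, int is_v6)
    {
            uint64_t oh = 14;               /* Ethernet header */

            oh += is_v6 ? 40 : 20;          /* IPv6 or IPv4 header */
            oh += 20;                       /* TCP header */
            return ((bw * (segsiz + oh)) / segsiz);
    }

    int
    main(void)
    {
            /* 1448-byte payloads at 10 MB/s need ~10.37 MB/s on the wire (v4). */
            printf("%llu\n", (unsigned long long)
                compensate_for_linerate(10000000, 1448, 0));
            return (0);
    }
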
2235 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2238 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2239 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2247 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2250 if (timenow >= ent->deadline) { in rack_rate_cap_bw()
2252 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2254 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2258 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2261 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2263 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2272 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_rate_cap_bw()
2273 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2274 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2277 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2279 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2288 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2289 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2292 if (lengone < (ent->end - ent->start)) in rack_rate_cap_bw()
2293 lenleft = (ent->end - ent->start) - lengone; in rack_rate_cap_bw()
2296 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2298 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2304 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2306 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2316 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2317 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2319 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2320 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2323 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2324 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2325 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2326 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2328 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2331 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2332 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); in rack_rate_cap_bw()
2340 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2342 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2343 rack->rc_catch_up && in rack_rate_cap_bw()
2344 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2345 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2347 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2348 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2351 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2352 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2353 …rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2354 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2358 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2359 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
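
The catch-up logic above boils down to: bytes still owed on the request divided by time left until its deadline, with the cap dropped once either hits zero. A sketch of that required-rate computation, assuming microsecond deadlines and a bytes/sec cap:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch: rate needed to push 'lenleft' bytes out before a deadline
     * 'timeleft_usec' away.  Zero means the cap should be dropped,
     * matching the timenow >= deadline and lenleft == 0 exits above.
     */
    static uint64_t
    catchup_rate(uint64_t lenleft, uint64_t timeleft_usec)
    {
            if (lenleft == 0 || timeleft_usec == 0)
                    return (0);
            return ((lenleft * 1000000) / timeleft_usec);
    }

    int
    main(void)
    {
            /* 1 MiB owed with 500 ms left -> 2097152 bytes/sec required. */
            printf("%llu\n", (unsigned long long)catchup_rate(1 << 20, 500000));
            return (0);
    }
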
2370 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2384 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2390 * No goodput bw but a long-term b/w does exist in rack_get_gp_est()
2396 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2397 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2400 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2408 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2409 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2416 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2418 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2421 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
2423 if (rack->dis_lt_bw) { in rack_get_gp_est()
2424 /* We are not using lt-bw */ in rack_get_gp_est()
2431 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2433 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
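
The read side above implies gp_bw is a raw sum for the first few goodput measurements (hence the divide by num_measurements) and a settled average from RACK_REQ_AVG onward. A sketch of both sides under that assumption; the write side and its smoothing constant are illustrative, since only the read path appears in the matched lines:

    #include <stdint.h>
    #include <stdio.h>

    #define RACK_REQ_AVG 3          /* assumed value of the threshold */

    struct gp_est {
            uint64_t gp_bw;         /* sum early on, average afterward */
            uint32_t num_measurements;
    };

    /* Read side, mirroring the matched lines. */
    static uint64_t
    gp_bw_read(const struct gp_est *g)
    {
            if (g->num_measurements >= RACK_REQ_AVG)
                    return (g->gp_bw);
            return (g->gp_bw / (g->num_measurements ? g->num_measurements : 1));
    }

    /* Hypothetical write side: sum until we trust an average, then smooth. */
    static void
    gp_bw_add(struct gp_est *g, uint64_t sample)
    {
            g->num_measurements++;
            if (g->num_measurements < RACK_REQ_AVG)
                    g->gp_bw += sample;
            else if (g->num_measurements == RACK_REQ_AVG)
                    g->gp_bw = (g->gp_bw + sample) / RACK_REQ_AVG;
            else            /* simple stand-in for rack's real filtering */
                    g->gp_bw = (g->gp_bw * 7 + sample) / 8;
    }

    int
    main(void)
    {
            struct gp_est g = { 0, 0 };

            gp_bw_add(&g, 100);
            gp_bw_add(&g, 200);
            gp_bw_add(&g, 300);
            printf("%llu\n", (unsigned long long)gp_bw_read(&g));   /* 200 */
            return (0);
    }
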
2465 if (rack->use_fixed_rate) { in rack_get_bw()
2476 if (rack->use_fixed_rate) { in rack_get_output_gain()
2478 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2479 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2480 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2481 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2484 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2485 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2488 } else if (rack->rack_no_prr && in rack_get_output_gain()
2489 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2494 * Here we may have a non-retransmit but we in rack_get_output_gain()
2498 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2503 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2504 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2506 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
2514 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. in rack_log_dsack_event()
2521 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2526 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2528 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2530 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2531 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2532 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2536 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2539 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2540 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2541 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2542 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2543 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2554 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2563 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2564 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2565 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2566 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2567 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2580 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2581 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2583 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2584 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2585 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2586 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2587 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2590 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2591 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2592 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2593 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2594 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2595 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2616 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2618 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2620 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2625 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2635 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2636 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2637 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2638 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2639 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2647 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
2665 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2672 * 1 - We are retransmitting and this tells the reason. in rack_log_retran_reason()
2673 * 2 - We are clearing a dup-ack count. in rack_log_retran_reason()
2674 * 3 - We are incrementing a dup-ack count. in rack_log_retran_reason()
2684 log.u_bbr.flex3 = rsm->r_flags; in rack_log_retran_reason()
2685 log.u_bbr.flex4 = rsm->r_dupack; in rack_log_retran_reason()
2686 log.u_bbr.flex5 = rsm->r_start; in rack_log_retran_reason()
2687 log.u_bbr.flex6 = rsm->r_end; in rack_log_retran_reason()
2689 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2691 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2692 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2693 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2694 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2695 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2696 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2697 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2698 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2699 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2708 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2713 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2715 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2717 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2718 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2719 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2721 if (rack->rack_no_prr) in rack_log_to_start()
2724 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2725 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2727 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2728 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2729 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2730 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2731 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2732 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2733 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2735 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2736 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2738 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2739 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2740 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2741 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2742 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2751 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2756 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2758 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2759 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2763 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; in rack_log_to_event()
2764 if (rack->rack_no_prr) in rack_log_to_event()
2767 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2769 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2770 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2771 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2772 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2773 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2775 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2776 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2777 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2778 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2791 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2797 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2803 log.u_bbr.flex1 = prev->r_start; in rack_log_map_chg()
2804 log.u_bbr.flex2 = prev->r_end; in rack_log_map_chg()
2808 log.u_bbr.flex3 = rsm->r_start; in rack_log_map_chg()
2809 log.u_bbr.flex4 = rsm->r_end; in rack_log_map_chg()
2813 log.u_bbr.flex5 = next->r_start; in rack_log_map_chg()
2814 log.u_bbr.flex6 = next->r_end; in rack_log_map_chg()
2820 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2821 if (rack->rack_no_prr) in rack_log_map_chg()
2824 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2825 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2827 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2828 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2829 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2830 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2844 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2847 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2848 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2849 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2850 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2852 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2853 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2855 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2856 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2859 log.u_bbr.pkt_epoch = rsm->r_start; in rack_log_rtt_upd()
2860 log.u_bbr.lost = rsm->r_end; in rack_log_rtt_upd()
2861 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; in rack_log_rtt_upd()
2863 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; in rack_log_rtt_upd()
2866 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2872 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2874 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2876 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2878 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2880 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2882 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2884 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2886 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2887 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2888 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2889 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2890 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2891 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2892 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2895 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); in rack_log_rtt_upd()
2897 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2898 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2914 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2921 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2924 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2925 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2926 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2927 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2929 * We capture in delRate the upper 32 bits as in rack_log_rtt_sample()
2934 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2936 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2937 /* Let's capture all the things that make up t_rxtcur */ in rack_log_rtt_sample()
2940 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2943 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2944 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2946 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2947 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2948 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2949 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2958 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2970 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2972 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2973 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2974 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2975 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
2985 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
2997 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
2999 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
3000 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
3001 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
3002 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
3012 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
3017 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
3020 log.u_bbr.flex3 = tp->t_maxunacktime; in rack_log_progress_event()
3021 log.u_bbr.flex4 = tp->t_acktime; in rack_log_progress_event()
3024 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
3025 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
3026 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
3027 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
3028 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
3030 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
3032 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
3033 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
3042 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
3046 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
3048 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
3051 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
3052 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
3054 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
3055 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
3057 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
3058 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
3059 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
3060 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
3061 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
3062 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
3063 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
3072 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
3080 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
3081 if (rack->rack_no_prr) in rack_log_doseg_done()
3084 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
3086 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
3087 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
3089 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
3091 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
3092 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
3093 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
3095 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
3096 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
3098 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
3099 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
3100 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
3101 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
3102 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
3104 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
3105 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
3106 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
3107 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
3108 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
3109 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
3110 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
3111 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
3120 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
3125 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
3126 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
3129 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
3133 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
3134 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3135 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3136 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3137 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3138 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, in rack_log_type_pacing_sizes()
3139 &tptosocket(tp)->so_snd, in rack_log_type_pacing_sizes()
3148 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3153 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3155 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3157 if (rack->rack_no_prr) in rack_log_type_just_return()
3160 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3162 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3165 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3166 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3167 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3168 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3169 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3170 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3172 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3173 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3174 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3175 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3185 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3189 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3191 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3194 if (rack->rack_no_prr) in rack_log_to_cancel()
3197 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3198 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3201 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3203 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3204 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3205 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3206 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3207 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3209 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3210 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3211 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3212 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3225 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3243 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3244 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3245 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3254 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3261 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3262 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3264 if (rack->rack_no_prr) in rack_log_to_processing()
3267 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3268 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3269 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3270 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3272 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3273 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3274 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3275 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3284 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3289 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3290 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3291 if (rack->rack_no_prr) in rack_log_to_prr()
3294 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3295 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3296 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3297 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3302 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3303 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3305 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3306 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3307 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3308 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3386 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3387 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3388 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3390 rack->rc_free_cnt--; in rack_alloc()
3400 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3408 if (rack->rc_free_cnt) { in rack_alloc()
3410 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3411 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3412 rack->rc_free_cnt--; in rack_alloc()
3422 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3424 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3425 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3441 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3442 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3444 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3445 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3455 rsm->r_limit_type = limit_type; in rack_alloc_limit()
3456 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3470 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3471 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3472 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3473 rack->rc_free_cnt--; in rack_free_trim()
3474 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
3482 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_free()
3483 if (rack->r_ctl.rc_app_limited_cnt > 0) { in rack_free()
3484 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3487 if (rsm->r_limit_type) { in rack_free()
3489 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3491 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3492 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); in rack_free()
3493 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3494 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3495 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3497 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3499 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3500 rack->r_ctl.rc_resend = NULL; in rack_free()
3501 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3502 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3503 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3504 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3505 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3506 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3509 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3512 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3513 rack->rc_free_cnt++; in rack_free()
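
rack_alloc(), rack_free(), and rack_free_trim() above form a per-connection cache of sendmap entries: frees push onto the head of the rc_free TAILQ, allocations reuse cached entries, and the trim pass hands the cold tail back to the allocator once rc_free_cnt exceeds rack_free_cache. A reduced sketch with malloc/free standing in for the uma zone; the real code also keeps a small reserve and dips into it only when the zone allocation fails, per the rc_free_cnt checks above:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct rsm_lite {
            TAILQ_ENTRY(rsm_lite) r_tnext;
    };
    TAILQ_HEAD(rsm_head, rsm_lite);

    static struct rsm_head freeq = TAILQ_HEAD_INITIALIZER(freeq);
    static int free_cnt;
    static const int free_cache = 16;       /* stand-in for rack_free_cache */

    static struct rsm_lite *
    rsm_alloc(void)
    {
            struct rsm_lite *rsm = TAILQ_FIRST(&freeq);

            if (rsm != NULL) {              /* reuse a cached entry */
                    TAILQ_REMOVE(&freeq, rsm, r_tnext);
                    free_cnt--;
                    return (rsm);
            }
            return (malloc(sizeof(*rsm)));  /* uma_zalloc() in the real code */
    }

    static void
    rsm_free(struct rsm_lite *rsm)
    {
            TAILQ_INSERT_HEAD(&freeq, rsm, r_tnext);    /* hot end first */
            free_cnt++;
    }

    static void
    rsm_free_trim(void)
    {
            struct rsm_lite *rsm;

            /* Hand the coldest (tail) entries back once over the cache size. */
            while (free_cnt > free_cache) {
                    rsm = TAILQ_LAST(&freeq, rsm_head);
                    TAILQ_REMOVE(&freeq, rsm, r_tnext);
                    free_cnt--;
                    free(rsm);              /* uma_zfree() in the real code */
            }
    }

    int
    main(void)
    {
            struct rsm_lite *r = rsm_alloc();

            rsm_free(r);
            rsm_free_trim();
            return (0);
    }
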
3522 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3524 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3561 srtt = (uint64_t)tp->t_srtt; in rack_get_measure_window()
3603 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_enough_for_measurement()
3607 if ((tp->snd_max == tp->snd_una) || in rack_enough_for_measurement()
3608 (th_ack == tp->snd_max)){ in rack_enough_for_measurement()
3622 if (SEQ_GEQ(th_ack, tp->gput_ack)) { in rack_enough_for_measurement()
3632 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3633 if (SEQ_LT(th_ack, tp->gput_ack) && in rack_enough_for_measurement()
3634 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3638 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3639 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3648 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3649 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3650 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
3669 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3675 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3677 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3679 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3681 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3682 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3683 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3684 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3685 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3686 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3693 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3695 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3696 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3697 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3698 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3704 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3705 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3706 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3707 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3708 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3795 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3797 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3799 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3800 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3802 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3803 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3810 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3811 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3813 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3814 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3825 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3832 * to a new-reno flow. in rack_increase_bw_mul()
3837 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3838 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3845 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3850 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3852 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3853 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3855 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3857 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3861 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3862 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3863 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3864 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3865 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3867 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3868 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3870 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3872 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3876 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3877 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3878 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3879 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3880 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3882 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3884 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3886 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3889 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3890 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3891 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3892 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3893 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3897 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3899 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3900 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3902 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3904 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3905 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
3914 /*- in rack_get_decrease()
3916 * new_per = curper * (1 - B * norm_grad) in rack_get_decrease()
3919 * rtt_dif = input var current rtt-diff in rack_get_decrease()
3932 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_get_decrease()
3935 * reduce_by = (1000000 - inverse); in rack_get_decrease()
3941 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_get_decrease()
3944 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3949 perf = curper - 1; in rack_get_decrease()
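
rack_get_decrease() above evaluates new_per = curper * (1 - B * norm_grad) in parts-per-million fixed point, with norm_grad = rtt_diff / min_rtt, falling back to curper - 1 when the reduction would be total. A sketch under those assumptions, with rack_gp_decrease_per as the B percentage:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of new_per = curper * (1 - B * norm_grad) in ppm fixed point,
     * with norm_grad = rtt_diff / min_rtt and B a percentage
     * (rack_gp_decrease_per in the real code).  min_rtt must be nonzero.
     */
    static uint16_t
    timely_decrease(uint16_t curper, uint64_t rtt_diff, uint64_t min_rtt,
        uint64_t decrease_per)
    {
            uint64_t grad_ppm, reduce_ppm, perf;

            grad_ppm = (rtt_diff * 1000000) / min_rtt;    /* norm_grad, scaled */
            reduce_ppm = (decrease_per * grad_ppm) / 100; /* B * norm_grad */
            if (reduce_ppm >= 1000000)                    /* total reduction: */
                    return (curper > 1 ? curper - 1 : 1); /* just step down */
            perf = ((uint64_t)curper * (1000000 - reduce_ppm)) / 1000000;
            return ((uint16_t)perf);
    }

    int
    main(void)
    {
            /* A 10 ms gradient over a 100 ms min_rtt with B = 20% is a 2% cut. */
            printf("%u\n", timely_decrease(200, 10000, 100000, 20));  /* 196 */
            return (0);
    }
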
3959 * result = curper * (1 - (B * (1 - highrttthresh / rtt))) in rack_decrease_highrtt()
3968 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3970 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_decrease_highrtt()
3971 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - in rack_decrease_highrtt()
3974 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
3997 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
3999 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
4001 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
4002 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
4008 rtt_diff *= -1; in rack_decrease_bw_mul()
4011 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
4014 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
4015 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4021 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4022 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
4023 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
4024 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
4026 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4029 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
4030 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4033 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
4036 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
4037 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4043 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4044 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
4045 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
4046 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
4049 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4058 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4063 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
4064 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4066 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
4069 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
4070 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4076 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4077 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
4078 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
4079 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
4081 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4091 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4096 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
4097 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4100 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
4101 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
4103 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
4104 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
4117 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_shrinks()
4123 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_shrinks()
4124 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4125 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_rtt_shrinks()
4127 log.u_bbr.flex6 = rack->rc_highly_buffered; in rack_log_rtt_shrinks()
4129 log.u_bbr.flex6 |= rack->forced_ack; in rack_log_rtt_shrinks()
4131 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; in rack_log_rtt_shrinks()
4133 log.u_bbr.flex6 |= rack->in_probe_rtt; in rack_log_rtt_shrinks()
4135 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; in rack_log_rtt_shrinks()
4136 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; in rack_log_rtt_shrinks()
4137 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; in rack_log_rtt_shrinks()
4138 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; in rack_log_rtt_shrinks()
4142 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; in rack_log_rtt_shrinks()
4144 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; in rack_log_rtt_shrinks()
4145 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_shrinks()
4146 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_rtt_shrinks()
4147 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_shrinks()
4148 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_shrinks()
4149 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_rtt_shrinks()
4150 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4151 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_shrinks()
4152 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_log_rtt_shrinks()
4155 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; in rack_log_rtt_shrinks()
4156 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_shrinks()
4157 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_shrinks()
4158 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_shrinks()
4160 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_rtt_shrinks()
4172 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); in rack_set_prtt_target()
4173 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { in rack_set_prtt_target()
4179 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); in rack_set_prtt_target()
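A sketch of the roundup-and-floor shape visible in rack_set_prtt_target() above: a bandwidth-delay product rounded up to a whole segment and floored at a minimum number of segments. The exact scaling rack applies to the BDP is not fully shown in the listing, so only the visible shape is kept; the function and parameter names are assumptions.

#include <stdint.h>

#define USECS_PER_SEC 1000000ULL

/* bw in bytes/sec, rtt in microseconds; returns a byte target */
static uint32_t
prtt_target_flight(uint64_t bw, uint32_t rtt_us, uint32_t segsiz, uint32_t min_segs)
{
	uint64_t bwdp;
	uint32_t target;

	bwdp = (bw * (uint64_t)rtt_us) / USECS_PER_SEC;	/* bandwidth-delay product */
	target = (uint32_t)(((bwdp + segsiz - 1) / segsiz) * segsiz); /* roundup(bwdp, segsiz) */
	if (target < (segsiz * min_segs))
		target = segsiz * min_segs;	/* the rack_timely_min_segs floor kept above */
	return (target);
}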
4200 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_enter_probertt()
4201 if (rack->rc_gp_dyn_mul == 0) in rack_enter_probertt()
4204 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { in rack_enter_probertt()
4208 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_enter_probertt()
4209 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_enter_probertt()
4217 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_enter_probertt()
4218 rack->rc_tp->snd_una, __LINE__, in rack_enter_probertt()
4221 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_enter_probertt()
4222 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_enter_probertt()
4223 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_enter_probertt()
4224 rack->r_ctl.rc_pace_min_segs); in rack_enter_probertt()
4225 rack->in_probe_rtt = 1; in rack_enter_probertt()
4226 rack->measure_saw_probe_rtt = 1; in rack_enter_probertt()
4227 rack->r_ctl.rc_time_probertt_starts = 0; in rack_enter_probertt()
4228 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; in rack_enter_probertt()
4230 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_enter_probertt()
4232 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); in rack_enter_probertt()
4233 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_enter_probertt()
4243 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_exit_probertt()
4244 rack->r_ctl.rc_pace_min_segs); in rack_exit_probertt()
4245 rack->in_probe_rtt = 0; in rack_exit_probertt()
4246 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_exit_probertt()
4247 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_exit_probertt()
4255 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_exit_probertt()
4256 rack->rc_tp->snd_una, __LINE__, in rack_exit_probertt()
4258 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in rack_exit_probertt()
4262 * probe-rtt. We probably are not interested in in rack_exit_probertt()
4265 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; in rack_exit_probertt()
4271 * We need to mark these as app-limited so we in rack_exit_probertt()
4274 rsm = tqhash_max(rack->r_ctl.tqh); in rack_exit_probertt()
4275 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_exit_probertt()
4276 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_exit_probertt()
4277 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_exit_probertt()
4284 if (rack->r_ctl.rc_end_appl) in rack_exit_probertt()
4285 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_exit_probertt()
4286 rack->r_ctl.rc_end_appl = rsm; in rack_exit_probertt()
4288 rsm->r_flags |= RACK_APP_LIMITED; in rack_exit_probertt()
4289 rack->r_ctl.rc_app_limited_cnt++; in rack_exit_probertt()
4301 rack->rc_gp_incr = 0; in rack_exit_probertt()
4302 rack->rc_gp_bwred = 0; in rack_exit_probertt()
4303 rack->rc_gp_timely_inc_cnt = 0; in rack_exit_probertt()
4304 rack->rc_gp_timely_dec_cnt = 0; in rack_exit_probertt()
4307 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { in rack_exit_probertt()
4308 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; in rack_exit_probertt()
4309 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; in rack_exit_probertt()
4311 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { in rack_exit_probertt()
4312 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; in rack_exit_probertt()
4313 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; in rack_exit_probertt()
4319 rack->r_ctl.rc_rtt_diff = 0; in rack_exit_probertt()
4322 rack->rc_tp->t_bytes_acked = 0; in rack_exit_probertt()
4323 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_exit_probertt()
4336 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_exit_probertt()
4340 rack->r_ctl.rc_gp_srtt); in rack_exit_probertt()
4344 rack->r_ctl.rc_entry_gp_rtt); in rack_exit_probertt()
4349 sum = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4351 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); in rack_exit_probertt()
4359 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4366 setval = rack->r_ctl.rc_gp_srtt; in rack_exit_probertt()
4367 if (setval > rack->r_ctl.rc_entry_gp_rtt) in rack_exit_probertt()
4368 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4375 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_exit_probertt()
4382 ebdp = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4385 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; in rack_exit_probertt()
4387 setto = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4388 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); in rack_exit_probertt()
4389 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { in rack_exit_probertt()
4391 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; in rack_exit_probertt()
4394 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); in rack_exit_probertt()
4397 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_exit_probertt()
4400 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; in rack_exit_probertt()
4401 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_exit_probertt()
4402 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_probertt()
4403 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_exit_probertt()
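A sketch of the cwnd reset at probe-rtt exit seen above: start from the target flight, optionally inflate it by an extra allowance (ebdp in the listing), round to whole segments, floor at the minimum, and pin ssthresh just below cwnd so the connection leaves in congestion avoidance. Names here are illustrative.

#include <stdint.h>

static uint32_t
prtt_exit_cwnd(uint32_t target_flight, uint32_t extra, uint32_t segsiz,
    uint32_t min_segs, uint32_t *ssthresh)
{
	uint32_t cwnd;

	/* roundup(target + extra, segsiz), as with setto above */
	cwnd = ((target_flight + extra + segsiz - 1) / segsiz) * segsiz;
	if (cwnd < segsiz * min_segs)
		cwnd = segsiz * min_segs;	/* rack_timely_min_segs floor */
	*ssthresh = cwnd - 1;			/* snd_ssthresh = snd_cwnd - 1: stay in CA */
	return (cwnd);
}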
4409 /* Check in on probe-rtt */ in rack_check_probe_rtt()
4411 if (rack->rc_gp_filled == 0) { in rack_check_probe_rtt()
4412 /* We do not do p-rtt unless we have gp measurements */ in rack_check_probe_rtt()
4415 if (rack->in_probe_rtt) { in rack_check_probe_rtt()
4419 if (rack->r_ctl.rc_went_idle_time && in rack_check_probe_rtt()
4420 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { in rack_check_probe_rtt()
4426 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && in rack_check_probe_rtt()
4427 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { in rack_check_probe_rtt()
4432 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4437 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); in rack_check_probe_rtt()
4438 if (rack->rc_highly_buffered) in rack_check_probe_rtt()
4439 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); in rack_check_probe_rtt()
4441 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); in rack_check_probe_rtt()
4442 …if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) … in rack_check_probe_rtt()
4447 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) in rack_check_probe_rtt()
4448 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; in rack_check_probe_rtt()
4451 calc /= max(rack->r_ctl.rc_gp_srtt, 1); in rack_check_probe_rtt()
4456 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4458 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; in rack_check_probe_rtt()
4460 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) in rack_check_probe_rtt()
4461 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4466 if (rack->r_ctl.rc_time_probertt_starts == 0) { in rack_check_probe_rtt()
4468 rack->rc_highly_buffered) || in rack_check_probe_rtt()
4469 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > in rack_check_probe_rtt()
4470 rack->r_ctl.rc_target_probertt_flight)) { in rack_check_probe_rtt()
4475 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4477 rack->r_ctl.rc_time_probertt_starts = us_cts; in rack_check_probe_rtt()
4478 if (rack->r_ctl.rc_time_probertt_starts == 0) in rack_check_probe_rtt()
4479 rack->r_ctl.rc_time_probertt_starts = 1; in rack_check_probe_rtt()
4481 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_check_probe_rtt()
4486 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * in rack_check_probe_rtt()
4493 endtime += rack->r_ctl.rc_time_probertt_starts; in rack_check_probe_rtt()
4499 } else if ((rack->rc_skip_timely == 0) && in rack_check_probe_rtt()
4500 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && in rack_check_probe_rtt()
4501 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { in rack_check_probe_rtt()
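A sketch of the pacing ramp rack_check_probe_rtt() computes above: the probe-rtt pacing percentage starts at the configured gain and is reduced by the number of elapsed srtt periods, never dropping below rack_per_of_gp_lowthresh. The decrement granularity in the kernel may differ; this keeps only the calc-per-srtt shape, with assumed names.

#include <stdint.h>

/* timestamps in microseconds; assumes start_per >= low_thresh */
static uint32_t
prtt_pacing_per(uint32_t now, uint32_t entered, uint32_t srtt,
    uint32_t start_per, uint32_t low_thresh)
{
	uint32_t elapsed_srtts;

	if (now <= entered)
		return (start_per);
	elapsed_srtts = (now - entered) / (srtt ? srtt : 1);
	if (elapsed_srtts >= (start_per - low_thresh))
		return (low_thresh);		/* fully ramped down, hold the floor */
	return (start_per - elapsed_srtts);	/* one point per elapsed srtt */
}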
4514 if ((rack->rc_gp_dyn_mul == 0) || in rack_update_multiplier()
4515 (rack->use_fixed_rate) || in rack_update_multiplier()
4516 (rack->in_probe_rtt) || in rack_update_multiplier()
4517 (rack->rc_always_pace == 0)) { in rack_update_multiplier()
4521 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; in rack_update_multiplier()
4524 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; in rack_update_multiplier()
4526 up_bnd += rack->r_ctl.last_gp_comp_bw; in rack_update_multiplier()
4528 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; in rack_update_multiplier()
4530 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; in rack_update_multiplier()
4531 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { in rack_update_multiplier()
4544 if (rack->r_ctl.rc_no_push_at_mrtt > 1) in rack_update_multiplier()
4563 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4564 if (rack->rc_gp_bwred == 0) { in rack_update_multiplier()
4566 rack->rc_gp_bwred = 1; in rack_update_multiplier()
4567 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4569 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { in rack_update_multiplier()
4575 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || in rack_update_multiplier()
4576 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || in rack_update_multiplier()
4590 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4591 /* We are not incrementing really no-count */ in rack_update_multiplier()
4592 rack->rc_gp_incr = 0; in rack_update_multiplier()
4593 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4613 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4614 if (rack->rc_gp_saw_ss && in rack_update_multiplier()
4615 rack->r_ctl.rack_per_upper_bound_ss && in rack_update_multiplier()
4616 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { in rack_update_multiplier()
4623 if (rack->rc_gp_saw_ca && in rack_update_multiplier()
4624 rack->r_ctl.rack_per_upper_bound_ca && in rack_update_multiplier()
4625 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { in rack_update_multiplier()
4632 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4633 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4635 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { in rack_update_multiplier()
4652 rack->rc_gp_incr = 0; in rack_update_multiplier()
4653 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4654 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && in rack_update_multiplier()
4659 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4660 /* We are not incrementing really no-count */ in rack_update_multiplier()
4661 rack->rc_gp_incr = 0; in rack_update_multiplier()
4662 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4666 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4667 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
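A sketch of the hysteresis band rack_update_multiplier() builds above around the last goodput sample: a new measurement only counts as a rise or a drop once it escapes the band. The percent parameters stand in for rack_gp_per_bw_mul_up/_down; struct and function names are assumptions.

#include <stdint.h>

struct bw_band {
	uint64_t low;	/* below this: treat as a real drop */
	uint64_t high;	/* above this: treat as a real rise */
};

static struct bw_band
gp_bw_band(uint64_t last_bw, uint64_t up_per, uint64_t down_per)
{
	struct bw_band b;

	b.high = last_bw + (last_bw * up_per) / 100;	/* up_bnd above */
	b.low = last_bw - (last_bw * down_per) / 100;	/* low_bnd above */
	return (b);
}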
4682 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * in rack_make_timely_judgement()
4686 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_make_timely_judgement()
4690 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4692 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4693 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4696 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4697 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4703 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4726 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); in rack_make_timely_judgement()
4731 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); in rack_make_timely_judgement()
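A sketch of the two RTT thresholds the timely judgement above checks: an upper bound of min_rtt * rack_gp_rtt_maxmul forces a decrease, a lower bound of min_rtt plus a small fraction permits an increase, and anything between holds the multipliers steady. The enum values are illustrative, not the kernel's return codes.

#include <stdint.h>

enum timely { TIMELY_HOLD = 0, TIMELY_INCR = 1, TIMELY_DECR = 2 };

static enum timely
timely_judge(uint32_t rtt, uint32_t min_rtt, uint32_t maxmul,
    uint32_t minmul_num, uint32_t minmul_den)
{
	if (rtt >= (min_rtt * maxmul))
		return (TIMELY_DECR);	/* rtt hit the ceiling, back off */
	if (rtt <= (min_rtt + (min_rtt * minmul_num) / minmul_den))
		return (TIMELY_INCR);	/* rtt near the floor, room to push */
	return (TIMELY_HOLD);		/* in between, leave multipliers alone */
}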
4740 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4741 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4746 * |----------------| in rack_in_gp_window()
4747 * |-----| <or> in rack_in_gp_window()
4748 * |----| in rack_in_gp_window()
4749 * <or> |---| in rack_in_gp_window()
4752 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4753 SEQ_GT(rsm->r_end, tp->gput_seq)){ in rack_in_gp_window()
4756 * |--------------| in rack_in_gp_window()
4757 * |-------->| in rack_in_gp_window()
4760 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4761 SEQ_LT(rsm->r_start, tp->gput_ack) && in rack_in_gp_window()
4762 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4766 * |--------------| in rack_in_gp_window()
4767 * |-------->| in rack_in_gp_window()
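The three diagrams above all reduce to a wraparound-safe overlap test; a compact restatement, using the same signed-difference trick as the kernel's SEQ_LT/SEQ_GT macros so a 32-bit sequence wrap does not invert the ordering.

#include <stdint.h>

/* the kernel's wraparound-safe comparisons, restated locally */
#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)

/* half-open ranges: [r_start, r_end) against [win_start, win_end) */
static int
range_overlaps_window(uint32_t r_start, uint32_t r_end,
    uint32_t win_start, uint32_t win_end)
{
	return (SEQ_LT(r_start, win_end) && SEQ_GT(r_end, win_start));
}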
4778 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_mark_in_gp_win()
4786 rsm->r_flags |= RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4788 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4797 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_clear_gp_marks()
4799 rsm = tqhash_min(rack->r_ctl.tqh); in rack_clear_gp_marks()
4802 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ in rack_clear_gp_marks()
4803 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_clear_gp_marks()
4804 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_clear_gp_marks()
4814 if (tp->snd_una == tp->snd_max) { in rack_tend_gp_marks()
4818 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { in rack_tend_gp_marks()
4825 rsm = tqhash_min(rack->r_ctl.tqh); in rack_tend_gp_marks()
4828 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) in rack_tend_gp_marks()
4830 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4838 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_tend_gp_marks()
4846 * *before* we started our measurement. The rsm, if non-null in rack_tend_gp_marks()
4851 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4854 if (SEQ_GT(rsm->r_end, tp->gput_ack)) in rack_tend_gp_marks()
4856 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4863 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_gp_calc()
4875 log.u_bbr.delRate = rack->r_ctl.gp_bw; in rack_log_gp_calc()
4878 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gp_calc()
4879 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gp_calc()
4880 &rack->rc_inp->inp_socket->so_snd, in rack_log_gp_calc()
4882 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_gp_calc()
4896 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
4897 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_goodput_measurement()
4898 if (TSTMP_GEQ(us_cts, tp->gput_ts)) in rack_do_goodput_measurement()
4899 tim = us_cts - tp->gput_ts; in rack_do_goodput_measurement()
4902 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) in rack_do_goodput_measurement()
4903 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; in rack_do_goodput_measurement()
4918 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); in rack_do_goodput_measurement()
4930 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { in rack_do_goodput_measurement()
4964 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; in rack_do_goodput_measurement()
4965 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; in rack_do_goodput_measurement()
4966 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; in rack_do_goodput_measurement()
4967 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_do_goodput_measurement()
4975 bytes = (th_ack - tp->gput_seq); in rack_do_goodput_measurement()
4986 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4995 * IW - 2MSS. in rack_do_goodput_measurement()
4997 reqbytes -= (2 * segsiz); in rack_do_goodput_measurement()
4999 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5001 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { in rack_do_goodput_measurement()
5003 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5011 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; in rack_do_goodput_measurement()
5012 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5014 rack->r_ctl.rc_rtt_diff = new_rtt_diff; in rack_do_goodput_measurement()
5016 if (rack->measure_saw_probe_rtt == 0) { in rack_do_goodput_measurement()
5023 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); in rack_do_goodput_measurement()
5024 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); in rack_do_goodput_measurement()
5028 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5029 rack->r_ctl.rc_rtt_diff, in rack_do_goodput_measurement()
5030 rack->r_ctl.rc_prev_gp_srtt in rack_do_goodput_measurement()
5034 if (bytes_ps > rack->r_ctl.last_max_bw) { in rack_do_goodput_measurement()
5045 bytes_ps, rack->r_ctl.last_max_bw, 0, in rack_do_goodput_measurement()
5047 bytes_ps = rack->r_ctl.last_max_bw; in rack_do_goodput_measurement()
5050 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5053 rack->r_ctl.gp_bw = bytes_ps; in rack_do_goodput_measurement()
5054 rack->rc_gp_filled = 1; in rack_do_goodput_measurement()
5055 rack->r_ctl.num_measurements = 1; in rack_do_goodput_measurement()
5056 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5059 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5062 if (tcp_in_hpts(rack->rc_tp) && in rack_do_goodput_measurement()
5063 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_goodput_measurement()
5066 * where we transition from un-paced to paced. in rack_do_goodput_measurement()
5072 tcp_hpts_remove(rack->rc_tp); in rack_do_goodput_measurement()
5073 rack->r_ctl.rc_hpts_flags = 0; in rack_do_goodput_measurement()
5074 rack->r_ctl.rc_last_output_to = 0; in rack_do_goodput_measurement()
5077 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { in rack_do_goodput_measurement()
5079 rack->r_ctl.gp_bw += bytes_ps; in rack_do_goodput_measurement()
5080 addpart = rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5081 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5082 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_do_goodput_measurement()
5084 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5103 if (rack->r_ctl.num_measurements < 0xff) { in rack_do_goodput_measurement()
5104 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5106 srtt = (uint64_t)tp->t_srtt; in rack_do_goodput_measurement()
5111 if (rack->r_ctl.rc_rack_min_rtt) in rack_do_goodput_measurement()
5112 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_do_goodput_measurement()
5127 * and non-dynamic... but considering lots of folks in rack_do_goodput_measurement()
5132 if (rack->rc_gp_dyn_mul == 0) { in rack_do_goodput_measurement()
5133 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5135 if (subpart < (rack->r_ctl.gp_bw / 2)) { in rack_do_goodput_measurement()
5154 subpart = rack->r_ctl.gp_bw / 2; in rack_do_goodput_measurement()
5159 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5160 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5172 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5183 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; in rack_do_goodput_measurement()
5187 if ((rack->measure_saw_probe_rtt == 0) || in rack_do_goodput_measurement()
5188 (bytes_ps > rack->r_ctl.gp_bw)) { in rack_do_goodput_measurement()
5190 * For probe-rtt we only add it in in rack_do_goodput_measurement()
5196 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5197 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5204 * or first-slowstart that ensues. If we ever needed to watch in rack_do_goodput_measurement()
5208 if ((rack->rc_initial_ss_comp == 0) && in rack_do_goodput_measurement()
5209 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { in rack_do_goodput_measurement()
5213 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5219 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5220 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_do_goodput_measurement()
5222 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5227 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || in rack_do_goodput_measurement()
5228 (rack->r_ctl.last_gpest == 0)) { in rack_do_goodput_measurement()
5235 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5236 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5237 } else if (gp_est >= rack->r_ctl.last_gpest) { in rack_do_goodput_measurement()
5244 gp_est /= rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5245 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { in rack_do_goodput_measurement()
5249 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5255 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5257 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; in rack_do_goodput_measurement()
5259 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5264 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5265 if (rack->r_ctl.use_gp_not_last == 1) in rack_do_goodput_measurement()
5266 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5268 rack->r_ctl.last_gpest = bytes_ps; in rack_do_goodput_measurement()
5272 if ((rack->gp_ready == 0) && in rack_do_goodput_measurement()
5273 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_do_goodput_measurement()
5275 rack->gp_ready = 1; in rack_do_goodput_measurement()
5276 if (rack->dgp_on || in rack_do_goodput_measurement()
5277 rack->rack_hibeta) in rack_do_goodput_measurement()
5279 if (rack->defer_options) in rack_do_goodput_measurement()
5284 /* We do not update any multipliers if we are in or have seen a probe-rtt */ in rack_do_goodput_measurement()
5286 if ((rack->measure_saw_probe_rtt == 0) && in rack_do_goodput_measurement()
5287 rack->rc_gp_rtt_set) { in rack_do_goodput_measurement()
5288 if (rack->rc_skip_timely == 0) { in rack_do_goodput_measurement()
5290 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5291 rack->r_ctl.rc_rtt_diff); in rack_do_goodput_measurement()
5300 rack->r_ctl.gp_bw, /* delRate */ in rack_do_goodput_measurement()
5304 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5306 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; in rack_do_goodput_measurement()
5312 rack->rc_gp_rtt_set = 0; in rack_do_goodput_measurement()
5313 rack->rc_gp_saw_rec = 0; in rack_do_goodput_measurement()
5314 rack->rc_gp_saw_ca = 0; in rack_do_goodput_measurement()
5315 rack->rc_gp_saw_ss = 0; in rack_do_goodput_measurement()
5316 rack->rc_dragged_bottom = 0; in rack_do_goodput_measurement()
5324 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, in rack_do_goodput_measurement()
5331 if (tp->t_stats_gput_prev > 0) in rack_do_goodput_measurement()
5332 stats_voi_update_abs_s32(tp->t_stats, in rack_do_goodput_measurement()
5334 ((gput - tp->t_stats_gput_prev) * 100) / in rack_do_goodput_measurement()
5335 tp->t_stats_gput_prev); in rack_do_goodput_measurement()
5337 tp->t_stats_gput_prev = gput; in rack_do_goodput_measurement()
5339 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5344 * We don't do the other case i.e. non-applimited here since in rack_do_goodput_measurement()
5347 if (rack->r_ctl.rc_first_appl && in rack_do_goodput_measurement()
5348 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_goodput_measurement()
5349 rack->r_ctl.rc_app_limited_cnt && in rack_do_goodput_measurement()
5350 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && in rack_do_goodput_measurement()
5351 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > in rack_do_goodput_measurement()
5358 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_do_goodput_measurement()
5359 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_do_goodput_measurement()
5360 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
5361 rack->app_limited_needs_set = 0; in rack_do_goodput_measurement()
5362 tp->gput_seq = th_ack; in rack_do_goodput_measurement()
5363 if (rack->in_probe_rtt) in rack_do_goodput_measurement()
5364 rack->measure_saw_probe_rtt = 1; in rack_do_goodput_measurement()
5365 else if ((rack->measure_saw_probe_rtt) && in rack_do_goodput_measurement()
5366 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_do_goodput_measurement()
5367 rack->measure_saw_probe_rtt = 0; in rack_do_goodput_measurement()
5368 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { in rack_do_goodput_measurement()
5370 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_do_goodput_measurement()
5373 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); in rack_do_goodput_measurement()
5374 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_do_goodput_measurement()
5378 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5379 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_do_goodput_measurement()
5384 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_do_goodput_measurement()
5390 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { in rack_do_goodput_measurement()
5395 tp->t_flags |= TF_GPUTINPROG; in rack_do_goodput_measurement()
5397 * Now we need to find the timestamp of the send at tp->gput_seq in rack_do_goodput_measurement()
5400 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_do_goodput_measurement()
5401 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_do_goodput_measurement()
5403 /* Ok send-based limit is set */ in rack_do_goodput_measurement()
5404 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { in rack_do_goodput_measurement()
5411 tp->gput_seq = rsm->r_start; in rack_do_goodput_measurement()
5413 if (rsm->r_flags & RACK_ACKED) { in rack_do_goodput_measurement()
5416 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; in rack_do_goodput_measurement()
5417 tp->gput_seq = rsm->r_end; in rack_do_goodput_measurement()
5418 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_do_goodput_measurement()
5422 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5425 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5427 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_do_goodput_measurement()
5431 * send-limit set the current time, which in rack_do_goodput_measurement()
5432 * basically disables the send-limit. in rack_do_goodput_measurement()
5437 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_do_goodput_measurement()
5441 tp->gput_seq, in rack_do_goodput_measurement()
5442 tp->gput_ack, in rack_do_goodput_measurement()
5444 tp->gput_ts, in rack_do_goodput_measurement()
5445 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_do_goodput_measurement()
5448 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_do_goodput_measurement()
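A sketch of the weighted-average update rack_do_goodput_measurement() performs on gp_bw above: subtract a duration-weighted share of the old estimate and add the same share of the new sample, with the half-estimate cap the subpart/addpart code enforces. frac_num/frac_den stand in for the utim/srtt weighting; the exact divisor in the kernel is assumed here.

#include <stdint.h>

static uint64_t
gp_bw_wma(uint64_t gp_bw, uint64_t sample, uint64_t frac_num, uint64_t frac_den)
{
	uint64_t subpart, addpart;

	subpart = (gp_bw * frac_num) / frac_den;	/* share of the old estimate */
	if (subpart > (gp_bw / 2)) {
		/* a single sample may not wipe out more than half */
		subpart = gp_bw / 2;
		addpart = sample / 2;
	} else
		addpart = (sample * frac_num) / frac_den;
	return (gp_bw - subpart + addpart);		/* resid_bw + addpart above */
}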
5470 tp->t_ccv.nsegs = nsegs; in rack_ack_received()
5471 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); in rack_ack_received()
5472 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { in rack_ack_received()
5475 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); in rack_ack_received()
5476 if (tp->t_ccv.bytes_this_ack > max) { in rack_ack_received()
5477 tp->t_ccv.bytes_this_ack = max; in rack_ack_received()
5481 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, in rack_ack_received()
5482 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); in rack_ack_received()
5484 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { in rack_ack_received()
5493 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); in rack_ack_received()
5494 rack->r_ctl.lt_seq = tp->snd_max; in rack_ack_received()
5495 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_ack_received()
5496 if (tmark >= rack->r_ctl.lt_timemark) { in rack_ack_received()
5497 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_ack_received()
5499 rack->r_ctl.lt_timemark = tmark; in rack_ack_received()
5500 rack->lt_bw_up = 0; in rack_ack_received()
5503 if ((tp->t_flags & TF_GPUTINPROG) && in rack_ack_received()
5509 if (tp->snd_cwnd <= tp->snd_wnd) in rack_ack_received()
5510 tp->t_ccv.flags |= CCF_CWND_LIMITED; in rack_ack_received()
5512 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; in rack_ack_received()
5513 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_ack_received()
5514 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, in rack_ack_received()
5517 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5518 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; in rack_ack_received()
5519 tp->t_ccv.flags |= CCF_ABC_SENTAWND; in rack_ack_received()
5522 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_ack_received()
5523 tp->t_bytes_acked = 0; in rack_ack_received()
5525 prior_cwnd = tp->snd_cwnd; in rack_ack_received()
5526 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || in rack_ack_received()
5527 (rack_client_low_buf && rack->client_bufferlvl && in rack_ack_received()
5528 (rack->client_bufferlvl < rack_client_low_buf))) in rack_ack_received()
5529 labc_to_use = rack->rc_labc; in rack_ack_received()
5532 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_ack_received()
5539 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_ack_received()
5540 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_ack_received()
5541 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_ack_received()
5549 if (CC_ALGO(tp)->ack_received != NULL) { in rack_ack_received()
5551 tp->t_ccv.curack = th_ack; in rack_ack_received()
5552 tp->t_ccv.labc = labc_to_use; in rack_ack_received()
5553 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; in rack_ack_received()
5554 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); in rack_ack_received()
5557 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; in rack_ack_received()
5559 if (rack->r_must_retran) { in rack_ack_received()
5560 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { in rack_ack_received()
5565 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5566 rack->r_must_retran = 0; in rack_ack_received()
5567 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { in rack_ack_received()
5574 if (acked <= rack->r_ctl.rc_out_at_rto){ in rack_ack_received()
5575 rack->r_ctl.rc_out_at_rto -= acked; in rack_ack_received()
5577 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5582 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); in rack_ack_received()
5584 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5585 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; in rack_ack_received()
5587 if ((rack->rc_initial_ss_comp == 0) && in rack_ack_received()
5588 (tp->snd_cwnd >= tp->snd_ssthresh)) { in rack_ack_received()
5593 rack->rc_initial_ss_comp = 1; in rack_ack_received()
5602 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_partialack()
5611 if ((rack->r_ctl.rc_prr_sndcnt > 0) || in tcp_rack_partialack()
5612 rack->rack_no_prr) in tcp_rack_partialack()
5613 rack->r_wanted_output = 1; in tcp_rack_partialack()
5622 EXIT_RECOVERY(tp->t_flags); in rack_exit_recovery()
5631 orig_cwnd = tp->snd_cwnd; in rack_post_recovery()
5633 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_post_recovery()
5635 if (CC_ALGO(tp)->post_recovery != NULL) { in rack_post_recovery()
5636 tp->t_ccv.curack = th_ack; in rack_post_recovery()
5637 CC_ALGO(tp)->post_recovery(&tp->t_ccv); in rack_post_recovery()
5638 if (tp->snd_cwnd < tp->snd_ssthresh) { in rack_post_recovery()
5642 * snd_ssthresh per RFC-6582 (option 2). in rack_post_recovery()
5644 tp->snd_cwnd = tp->snd_ssthresh; in rack_post_recovery()
5647 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_post_recovery()
5654 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_post_recovery()
5655 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_post_recovery()
5656 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_post_recovery()
5660 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_post_recovery()
5665 if ((rack->rack_no_prr == 0) && in rack_post_recovery()
5666 (rack->no_prr_addback == 0) && in rack_post_recovery()
5667 (rack->r_ctl.rc_prr_sndcnt > 0)) { in rack_post_recovery()
5672 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { in rack_post_recovery()
5682 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), in rack_post_recovery()
5683 rack->r_ctl.rc_prr_sndcnt); in rack_post_recovery()
5685 rack->r_ctl.rc_prr_sndcnt = 0; in rack_post_recovery()
5689 tp->snd_recover = tp->snd_una; in rack_post_recovery()
5690 if (rack->r_ctl.dsack_persist) { in rack_post_recovery()
5691 rack->r_ctl.dsack_persist--; in rack_post_recovery()
5692 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_post_recovery()
5693 rack->r_ctl.num_dsack = 0; in rack_post_recovery()
5697 if (rack->rto_from_rec == 1) { in rack_post_recovery()
5698 rack->rto_from_rec = 0; in rack_post_recovery()
5699 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_post_recovery()
5700 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_post_recovery()
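A sketch of the PRR add-back at recovery exit above: any PRR send credit still unsent is returned to cwnd, capped at a configured number of segments so leaving recovery does not launch a burst. The parameters are the analogs of rack_prr_addbackmax and rc_prr_sndcnt; the names are assumptions.

#include <stdint.h>

static uint32_t
prr_addback(uint32_t cwnd, uint32_t prr_sndcnt, uint32_t maxseg,
    uint32_t addbackmax_segs)
{
	uint32_t add;

	add = maxseg * addbackmax_segs;		/* burst cap in bytes */
	if (prr_sndcnt < add)
		add = prr_sndcnt;		/* only the credit still unsent */
	return (cwnd + add);
}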
5713 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); in rack_cong_signal()
5715 if (IN_RECOVERY(tp->t_flags) == 0) { in rack_cong_signal()
5717 ssthresh_enter = tp->snd_ssthresh; in rack_cong_signal()
5718 cwnd_enter = tp->snd_cwnd; in rack_cong_signal()
5721 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cong_signal()
5724 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5725 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5726 if (!IN_FASTRECOVERY(tp->t_flags)) { in rack_cong_signal()
5727 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ in rack_cong_signal()
5728 if (rack->rc_initial_ss_comp == 0) { in rack_cong_signal()
5730 rack->rc_initial_ss_comp = 1; in rack_cong_signal()
5732 rack->r_ctl.rc_prr_delivered = 0; in rack_cong_signal()
5733 rack->r_ctl.rc_prr_out = 0; in rack_cong_signal()
5734 rack->r_fast_output = 0; in rack_cong_signal()
5735 if (rack->rack_no_prr == 0) { in rack_cong_signal()
5736 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_cong_signal()
5739 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; in rack_cong_signal()
5740 tp->snd_recover = tp->snd_max; in rack_cong_signal()
5741 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5742 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5746 if (!IN_CONGRECOVERY(tp->t_flags) || in rack_cong_signal()
5751 SEQ_GEQ(ack, tp->snd_recover)) { in rack_cong_signal()
5752 EXIT_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5754 rack->r_fast_output = 0; in rack_cong_signal()
5755 tp->snd_recover = tp->snd_max + 1; in rack_cong_signal()
5756 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5757 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5761 tp->t_dupacks = 0; in rack_cong_signal()
5762 tp->t_bytes_acked = 0; in rack_cong_signal()
5763 rack->r_fast_output = 0; in rack_cong_signal()
5764 if (IN_RECOVERY(tp->t_flags)) in rack_cong_signal()
5766 orig_cwnd = tp->snd_cwnd; in rack_cong_signal()
5768 if (CC_ALGO(tp)->cong_signal == NULL) { in rack_cong_signal()
5770 tp->snd_ssthresh = max(2, in rack_cong_signal()
5771 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / in rack_cong_signal()
5773 tp->snd_cwnd = ctf_fixed_maxseg(tp); in rack_cong_signal()
5775 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5776 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5781 tp->snd_cwnd = tp->snd_cwnd_prev; in rack_cong_signal()
5782 tp->snd_ssthresh = tp->snd_ssthresh_prev; in rack_cong_signal()
5783 tp->snd_recover = tp->snd_recover_prev; in rack_cong_signal()
5784 if (tp->t_flags & TF_WASFRECOVERY) { in rack_cong_signal()
5785 ENTER_FASTRECOVERY(tp->t_flags); in rack_cong_signal()
5786 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5788 if (tp->t_flags & TF_WASCRECOVERY) { in rack_cong_signal()
5789 ENTER_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5790 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5792 tp->snd_nxt = tp->snd_max; in rack_cong_signal()
5793 tp->t_badrxtwin = 0; in rack_cong_signal()
5796 if ((CC_ALGO(tp)->cong_signal != NULL) && in rack_cong_signal()
5798 tp->t_ccv.curack = ack; in rack_cong_signal()
5799 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); in rack_cong_signal()
5801 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { in rack_cong_signal()
5803 rack->r_ctl.dsack_byte_cnt = 0; in rack_cong_signal()
5804 rack->r_ctl.retran_during_recovery = 0; in rack_cong_signal()
5805 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; in rack_cong_signal()
5806 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; in rack_cong_signal()
5807 rack->r_ent_rec_ns = 1; in rack_cong_signal()
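A sketch of the fallback ssthresh computation rack_cong_signal() uses above when the CC module provides no cong_signal hook: halve the smaller of the send window and the cwnd in use, keep at least two segments, and collapse cwnd to a single segment for the retransmit. Function and parameter names are illustrative.

#include <stdint.h>

static void
rto_collapse(uint32_t snd_wnd, uint32_t cwnd_in_use, uint32_t maxseg,
    uint32_t *ssthresh, uint32_t *cwnd)
{
	uint32_t half_segs;

	half_segs = ((snd_wnd < cwnd_in_use) ? snd_wnd : cwnd_in_use) / 2 / maxseg;
	if (half_segs < 2)
		half_segs = 2;			/* the max(2, ...) in the listing */
	*ssthresh = half_segs * maxseg;
	*cwnd = maxseg;				/* one segment for the retransmit */
}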
5818 if (CC_ALGO(tp)->after_idle != NULL) in rack_cc_after_idle()
5819 CC_ALGO(tp)->after_idle(&tp->t_ccv); in rack_cc_after_idle()
5821 if (tp->snd_cwnd == 1) in rack_cc_after_idle()
5822 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ in rack_cc_after_idle()
5831 if (tp->snd_cwnd < i_cwnd) { in rack_cc_after_idle()
5832 tp->snd_cwnd = i_cwnd; in rack_cc_after_idle()
5839 * - There is no delayed ack timer in progress.
5840 * - Our last ack wasn't a 0-sized window. We never want to delay
5841 * the ack that opens up a 0-sized window.
5842 * - LRO wasn't used for this segment. We make sure by checking that the
5844 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5848 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5849 ((tp->t_flags & TF_DELACK) == 0) && \
5850 (tlen <= tp->t_maxseg) && \
5851 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
5859 * Walk the time-ordered transmitted list looking for an rsm that is in rack_find_lowest_rsm()
5863 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_find_lowest_rsm()
5864 if (rsm->r_flags & RACK_ACKED) { in rack_find_lowest_rsm()
5885 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { in rack_find_high_nonack()
5886 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { in rack_find_high_nonack()
5905 * If reorder-fade is configured, then we track the last time we saw in rack_calc_thresh_rack()
5906 * re-ordering occur. If we reach the point where enough time has in rack_calc_thresh_rack()
5909 * Or if reorder-fade is 0, then once we see reordering we consider in rack_calc_thresh_rack()
5913 * In the end if lro is non-zero we add the extra time for in rack_calc_thresh_rack()
5918 if (rack->r_ctl.rc_reorder_ts) { in rack_calc_thresh_rack()
5919 if (rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5920 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { in rack_calc_thresh_rack()
5921 lro = cts - rack->r_ctl.rc_reorder_ts; in rack_calc_thresh_rack()
5933 if (lro > rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5935 rack->r_ctl.rc_reorder_ts = 0; in rack_calc_thresh_rack()
5945 if (rack->rc_rack_tmr_std_based == 0) { in rack_calc_thresh_rack()
5946 thresh = srtt + rack->r_ctl.rc_pkt_delay; in rack_calc_thresh_rack()
5948 /* Standards based pkt-delay is 1/4 srtt */ in rack_calc_thresh_rack()
5951 if (lro && (rack->rc_rack_tmr_std_based == 0)) { in rack_calc_thresh_rack()
5953 if (rack->r_ctl.rc_reorder_shift) in rack_calc_thresh_rack()
5954 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); in rack_calc_thresh_rack()
5958 if (rack->rc_rack_use_dsack && in rack_calc_thresh_rack()
5960 (rack->r_ctl.num_dsack > 0)) { in rack_calc_thresh_rack()
5965 thresh += rack->r_ctl.num_dsack * (srtt >> 2); in rack_calc_thresh_rack()
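A simplified sketch of the reorder threshold assembled in rack_calc_thresh_rack() above: base srtt plus the per-packet delay, widened by an srtt fraction while reordering is live, plus a quarter srtt per recent DSACK. The standards-based mode uses srtt/4 instead of rc_pkt_delay, which this sketch does not model.

#include <stdint.h>

static uint32_t
rack_reorder_thresh(uint32_t srtt, uint32_t pkt_delay, int reordering_seen,
    uint32_t reorder_shift, uint32_t num_dsack)
{
	uint32_t thresh;

	thresh = srtt + pkt_delay;			/* base: srtt plus per-packet slop */
	if (reordering_seen)
		thresh += (srtt >> reorder_shift);	/* widen while the fade window is live */
	thresh += num_dsack * (srtt >> 2);		/* quarter-srtt per recent DSACK */
	return (thresh);
}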
5992 if (rack->r_ctl.rc_tlp_threshold) in rack_calc_thresh_tlp()
5993 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); in rack_calc_thresh_tlp()
5998 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_calc_thresh_tlp()
5999 len = rsm->r_end - rsm->r_start; in rack_calc_thresh_tlp()
6000 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { in rack_calc_thresh_tlp()
6002 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { in rack_calc_thresh_tlp()
6005 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6011 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { in rack_calc_thresh_tlp()
6017 * possible inter-packet delay (if any). in rack_calc_thresh_tlp()
6022 idx = rsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6023 nidx = prsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6024 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { in rack_calc_thresh_tlp()
6026 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; in rack_calc_thresh_tlp()
6031 * Possibly compensate for delayed-ack. in rack_calc_thresh_tlp()
6039 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { in rack_calc_thresh_tlp()
6044 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6052 if (thresh > tp->t_rxtcur) { in rack_calc_thresh_tlp()
6053 thresh = tp->t_rxtcur; in rack_calc_thresh_tlp()
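A sketch of the TLP threshold shape computed above: roughly two srtts, stretched by delayed-ack or inter-packet-gap slack depending on the TLP mode, but never beyond the current RTO, at which point the full retransmit timeout is the better tool. The slack parameter collapses the mode-specific terms and is an assumption.

#include <stdint.h>

static uint32_t
tlp_thresh(uint32_t srtt, uint32_t slack, uint32_t t_rxtcur)
{
	uint32_t thresh;

	thresh = (srtt * 2) + slack;	/* roughly two srtts plus d-ack/gap slack */
	if (thresh > t_rxtcur)
		thresh = t_rxtcur;	/* never probe later than the RTO would fire */
	return (thresh);
}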
6077 if (rack->rc_rack_rtt) in rack_grab_rtt()
6078 return (rack->rc_rack_rtt); in rack_grab_rtt()
6079 else if (tp->t_srtt == 0) in rack_grab_rtt()
6081 return (tp->t_srtt); in rack_grab_rtt()
6097 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_recovery_mode()
6098 if (tqhash_empty(rack->r_ctl.tqh)) { in rack_check_recovery_mode()
6101 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_check_recovery_mode()
6106 if (rsm->r_flags & RACK_ACKED) { in rack_check_recovery_mode()
6111 idx = rsm->r_rtr_cnt - 1; in rack_check_recovery_mode()
6114 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { in rack_check_recovery_mode()
6117 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { in rack_check_recovery_mode()
6120 /* Ok if we reach here we are over-due and this guy can be sent */ in rack_check_recovery_mode()
6121 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_check_recovery_mode()
6132 t = (tp->t_srtt + (tp->t_rttvar << 2)); in rack_get_persists_timer_val()
6133 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], in rack_get_persists_timer_val()
6134 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); in rack_get_persists_timer_val()
6135 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; in rack_get_persists_timer_val()
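A sketch of the persist timer value above: the base RTO estimate (srtt + 4*rttvar) scaled by the standard backoff table for the current retransmit shift, then clamped into [persist_min, persist_max] as RACK_TCPT_RANGESET does. The table here is the conventional TCP doubling sequence; the exact kernel table length is assumed.

#include <stdint.h>

/* the conventional TCP doubling sequence, capped at 64 */
static const uint32_t backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64, 64 };

static uint32_t
persist_timer_val(uint32_t srtt, uint32_t rttvar, unsigned rxtshift,
    uint32_t pmin, uint32_t pmax)
{
	uint64_t t;

	if (rxtshift >= sizeof(backoff) / sizeof(backoff[0]))
		rxtshift = sizeof(backoff) / sizeof(backoff[0]) - 1;
	t = (uint64_t)(srtt + (rttvar << 2)) * backoff[rxtshift];
	if (t < pmin)
		t = pmin;	/* RANGESET lower clamp */
	if (t > pmax)
		t = pmax;	/* RANGESET upper clamp */
	return ((uint32_t)t);
}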
6155 if (rack->t_timers_stopped) { in rack_timer_start()
6159 if (rack->rc_in_persist) { in rack_timer_start()
6163 rack->rc_on_min_to = 0; in rack_timer_start()
6164 if ((tp->t_state < TCPS_ESTABLISHED) || in rack_timer_start()
6165 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_timer_start()
6168 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6173 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6180 * recently that's the discount we want to use (now - timer time). in rack_timer_start()
6182 * we want to use that (now - oldest-packet-last_transmit_time). in rack_timer_start()
6185 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6186 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) in rack_timer_start()
6187 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6189 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6191 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6193 if (SEQ_LT(tp->snd_una, tp->snd_max) || in rack_timer_start()
6194 sbavail(&tptosocket(tp)->so_snd)) { in rack_timer_start()
6195 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; in rack_timer_start()
6196 to = tp->t_rxtcur; in rack_timer_start()
6198 to -= time_since_sent; in rack_timer_start()
6200 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6204 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timer_start()
6209 * of the keep-init timeout. in rack_timer_start()
6214 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { in rack_timer_start()
6215 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); in rack_timer_start()
6217 max_time -= red; in rack_timer_start()
6229 if (rsm->r_flags & RACK_ACKED) { in rack_timer_start()
6237 if ((rsm->r_flags & RACK_SACK_PASSED) || in rack_timer_start()
6238 (rsm->r_flags & RACK_RWND_COLLAPSED) || in rack_timer_start()
6239 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in rack_timer_start()
6240 if ((tp->t_flags & TF_SENTFIN) && in rack_timer_start()
6241 ((tp->snd_max - tp->snd_una) == 1) && in rack_timer_start()
6242 (rsm->r_flags & RACK_HAS_FIN)) { in rack_timer_start()
6249 if ((rack->use_rack_rr == 0) && in rack_timer_start()
6250 (IN_FASTRECOVERY(tp->t_flags)) && in rack_timer_start()
6251 (rack->rack_no_prr == 0) && in rack_timer_start()
6252 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { in rack_timer_start()
6259 * get to use the rack-cheat. in rack_timer_start()
6265 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6266 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; in rack_timer_start()
6268 to = exp - cts; in rack_timer_start()
6269 if (to < rack->r_ctl.rc_min_to) { in rack_timer_start()
6270 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6271 if (rack->r_rr_config == 3) in rack_timer_start()
6272 rack->rc_on_min_to = 1; in rack_timer_start()
6275 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6276 if (rack->r_rr_config == 3) in rack_timer_start()
6277 rack->rc_on_min_to = 1; in rack_timer_start()
6282 if ((rack->rc_tlp_in_progress != 0) && in rack_timer_start()
6283 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { in rack_timer_start()
6290 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_timer_start()
6295 if (rsm->r_flags & RACK_HAS_FIN) { in rack_timer_start()
6300 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6302 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) in rack_timer_start()
6303 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6305 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6307 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6309 if (tp->t_srtt) { in rack_timer_start()
6310 if ((rack->rc_srtt_measure_made == 0) && in rack_timer_start()
6311 (tp->t_srtt == 1)) { in rack_timer_start()
6318 srtt_cur = tp->t_srtt; in rack_timer_start()
6329 tp->t_srtt && in rack_timer_start()
6335 to = thresh - time_since_sent; in rack_timer_start()
6337 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6342 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ in rack_timer_start()
6343 (uint32_t)rsm->r_tim_lastsent[idx], in rack_timer_start()
6359 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; in rack_timer_start()
6361 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; in rack_timer_start()
6371 if (rack->rc_in_persist == 0) { in rack_enter_persist()
6372 if (tp->t_flags & TF_GPUTINPROG) { in rack_enter_persist()
6377 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, in rack_enter_persist()
6381 if (rack->r_ctl.rc_scw) { in rack_enter_persist()
6382 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_enter_persist()
6383 rack->rack_scwnd_is_idle = 1; in rack_enter_persist()
6386 rack->r_ctl.rc_went_idle_time = cts; in rack_enter_persist()
6387 if (rack->r_ctl.rc_went_idle_time == 0) in rack_enter_persist()
6388 rack->r_ctl.rc_went_idle_time = 1; in rack_enter_persist()
6389 if (rack->lt_bw_up) { in rack_enter_persist()
6393 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); in rack_enter_persist()
6394 rack->r_ctl.lt_seq = snd_una; in rack_enter_persist()
6395 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_enter_persist()
6396 if (tmark >= rack->r_ctl.lt_timemark) { in rack_enter_persist()
6397 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_enter_persist()
6399 rack->r_ctl.lt_timemark = tmark; in rack_enter_persist()
6400 rack->lt_bw_up = 0; in rack_enter_persist()
6401 rack->r_persist_lt_bw_off = 1; in rack_enter_persist()
6404 rack->r_ctl.persist_lost_ends = 0; in rack_enter_persist()
6405 rack->probe_not_answered = 0; in rack_enter_persist()
6406 rack->forced_ack = 0; in rack_enter_persist()
6407 tp->t_rxtshift = 0; in rack_enter_persist()
6408 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_enter_persist()
6409 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_enter_persist()
6410 rack->rc_in_persist = 1; in rack_enter_persist()
6417 if (tcp_in_hpts(rack->rc_tp)) { in rack_exit_persist()
6418 tcp_hpts_remove(rack->rc_tp); in rack_exit_persist()
6419 rack->r_ctl.rc_hpts_flags = 0; in rack_exit_persist()
6422 if (rack->r_ctl.rc_scw) { in rack_exit_persist()
6423 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_exit_persist()
6424 rack->rack_scwnd_is_idle = 0; in rack_exit_persist()
6427 if (rack->rc_gp_dyn_mul && in rack_exit_persist()
6428 (rack->use_fixed_rate == 0) && in rack_exit_persist()
6429 (rack->rc_always_pace)) { in rack_exit_persist()
6431 * Do we count this as if a probe-rtt just in rack_exit_persist()
6436 time_idle = cts - rack->r_ctl.rc_went_idle_time; in rack_exit_persist()
6440 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * in rack_exit_persist()
6446 /* Yes, we count it as a probe-rtt. */ in rack_exit_persist()
6450 if (rack->in_probe_rtt == 0) { in rack_exit_persist()
6451 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_persist()
6452 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6453 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6454 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6460 if (rack->r_persist_lt_bw_off) { in rack_exit_persist()
6462 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); in rack_exit_persist()
6463 rack->lt_bw_up = 1; in rack_exit_persist()
6464 rack->r_persist_lt_bw_off = 0; in rack_exit_persist()
6466 rack->rc_in_persist = 0; in rack_exit_persist()
6467 rack->r_ctl.rc_went_idle_time = 0; in rack_exit_persist()
6468 tp->t_rxtshift = 0; in rack_exit_persist()
6469 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_exit_persist()
6470 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_exit_persist()
6471 rack->r_ctl.rc_agg_delayed = 0; in rack_exit_persist()
6472 rack->r_early = 0; in rack_exit_persist()
6473 rack->r_late = 0; in rack_exit_persist()
6474 rack->r_ctl.rc_agg_early = 0; in rack_exit_persist()
6481 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_hpts_diag()
6485 log.u_bbr.flex1 = diag->p_nxt_slot; in rack_log_hpts_diag()
6486 log.u_bbr.flex2 = diag->p_cur_slot; in rack_log_hpts_diag()
6487 log.u_bbr.flex3 = diag->slot_req; in rack_log_hpts_diag()
6488 log.u_bbr.flex4 = diag->inp_hptsslot; in rack_log_hpts_diag()
6489 log.u_bbr.flex5 = diag->slot_remaining; in rack_log_hpts_diag()
6490 log.u_bbr.flex6 = diag->need_new_to; in rack_log_hpts_diag()
6491 log.u_bbr.flex7 = diag->p_hpts_active; in rack_log_hpts_diag()
6492 log.u_bbr.flex8 = diag->p_on_min_sleep; in rack_log_hpts_diag()
6494 log.u_bbr.epoch = diag->have_slept; in rack_log_hpts_diag()
6495 log.u_bbr.lt_epoch = diag->yet_to_sleep; in rack_log_hpts_diag()
6496 log.u_bbr.pkts_out = diag->co_ret; in rack_log_hpts_diag()
6497 log.u_bbr.applimited = diag->hpts_sleep_time; in rack_log_hpts_diag()
6498 log.u_bbr.delivered = diag->p_prev_slot; in rack_log_hpts_diag()
6499 log.u_bbr.inflight = diag->p_runningslot; in rack_log_hpts_diag()
6500 log.u_bbr.bw_inuse = diag->wheel_slot; in rack_log_hpts_diag()
6501 log.u_bbr.rttProp = diag->wheel_cts; in rack_log_hpts_diag()
6503 log.u_bbr.delRate = diag->maxslots; in rack_log_hpts_diag()
6504 log.u_bbr.cur_del_rate = diag->p_curtick; in rack_log_hpts_diag()
6506 log.u_bbr.cur_del_rate |= diag->p_lasttick; in rack_log_hpts_diag()
6507 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hpts_diag()
6508 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hpts_diag()
6509 &rack->rc_inp->inp_socket->so_snd, in rack_log_hpts_diag()
6519 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_wakeup()
6524 log.u_bbr.flex1 = sb->sb_flags; in rack_log_wakeup()
6526 log.u_bbr.flex3 = sb->sb_state; in rack_log_wakeup()
6529 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_wakeup()
6530 &rack->rc_inp->inp_socket->so_rcv, in rack_log_wakeup()
6531 &rack->rc_inp->inp_socket->so_snd, in rack_log_wakeup()
6551 if ((tp->t_state == TCPS_CLOSED) || in rack_start_hpts_timer()
6552 (tp->t_state == TCPS_LISTEN)) { in rack_start_hpts_timer()
6559 stopped = rack->rc_tmr_stopped; in rack_start_hpts_timer()
6560 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_start_hpts_timer()
6561 left = rack->r_ctl.rc_timer_exp - cts; in rack_start_hpts_timer()
6563 rack->r_ctl.rc_timer_exp = 0; in rack_start_hpts_timer()
6564 rack->r_ctl.rc_hpts_flags = 0; in rack_start_hpts_timer()
6568 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { in rack_start_hpts_timer()
6576 * by an ack aka the rc_agg_early (non-paced mode). in rack_start_hpts_timer()
6578 slot += rack->r_ctl.rc_agg_early; in rack_start_hpts_timer()
6579 rack->r_early = 0; in rack_start_hpts_timer()
6580 rack->r_ctl.rc_agg_early = 0; in rack_start_hpts_timer()
6582 if ((rack->r_late) && in rack_start_hpts_timer()
6583 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { in rack_start_hpts_timer()
6590 if (rack->r_ctl.rc_agg_delayed >= slot) { in rack_start_hpts_timer()
6599 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); in rack_start_hpts_timer()
6603 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); in rack_start_hpts_timer()
6607 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6608 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6611 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; in rack_start_hpts_timer()
6614 if (rack->r_ctl.rc_agg_delayed == 0) in rack_start_hpts_timer()
6615 rack->r_late = 0; in rack_start_hpts_timer()
6617 } else if (rack->r_late) { in rack_start_hpts_timer()
6621 max_red = (slot * rack->r_ctl.max_reduction) / 100; in rack_start_hpts_timer()
6622 if (max_red >= rack->r_ctl.rc_agg_delayed) { in rack_start_hpts_timer()
6623 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6624 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6626 slot -= max_red; in rack_start_hpts_timer()
6627 rack->r_ctl.rc_agg_delayed -= max_red; in rack_start_hpts_timer()
6630 if ((rack->r_use_hpts_min == 1) && in rack_start_hpts_timer()
6632 (rack->dgp_on == 1)) { in rack_start_hpts_timer()
6645 if (tp->t_flags & TF_DELACK) { in rack_start_hpts_timer()
6647 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; in rack_start_hpts_timer()
6653 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_start_hpts_timer()
6656 * wheel, we resort to a keep-alive timer if it's configured. in rack_start_hpts_timer()
6660 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_start_hpts_timer()
6661 (tp->t_state <= TCPS_CLOSING)) { in rack_start_hpts_timer()
6664 * del-ack), we don't have segments being paced. So in rack_start_hpts_timer()
6667 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_start_hpts_timer()
6668 /* Get the established keep-alive time */ in rack_start_hpts_timer()
6672 * Get the initial setup keep-alive time, in rack_start_hpts_timer()
6680 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; in rack_start_hpts_timer()
6681 if (rack->in_probe_rtt) { in rack_start_hpts_timer()
6685 * exit probe-rtt and initiate a keep-alive ack. in rack_start_hpts_timer()
6686 * This will get us out of probe-rtt and update in rack_start_hpts_timer()
6687 * our min-rtt. in rack_start_hpts_timer()
6694 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { in rack_start_hpts_timer()
6700 * keep-alive, delayed_ack we keep track of what was left in rack_start_hpts_timer()
6708 * Hack alert: for now we can't time-out over 2,147,483 in rack_start_hpts_timer()
6714 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; in rack_start_hpts_timer()
6717 if ((rack->gp_ready == 0) && in rack_start_hpts_timer()
6718 (rack->use_fixed_rate == 0) && in rack_start_hpts_timer()
6720 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { in rack_start_hpts_timer()
6740 * TF2_MBUF_QUEUE_READY - This flag says that I am busy in rack_start_hpts_timer()
6745 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction in rack_start_hpts_timer()
6760 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); in rack_start_hpts_timer()
6762 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6763 rack->r_ctl.rc_last_output_to = us_cts + slot; in rack_start_hpts_timer()
6772 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; in rack_start_hpts_timer()
6778 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || in rack_start_hpts_timer()
6779 (IN_RECOVERY(tp->t_flags))) { in rack_start_hpts_timer()
6780 if (rack->r_rr_config != 3) in rack_start_hpts_timer()
6781 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6782 else if (rack->rc_pace_dnd) { in rack_start_hpts_timer()
6791 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6794 if (rack->rc_ack_can_sendout_data) { in rack_start_hpts_timer()
6798 * backout the changes (used for non-paced in rack_start_hpts_timer()
6801 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | in rack_start_hpts_timer()
6804 if ((rack->use_rack_rr) && in rack_start_hpts_timer()
6805 (rack->r_rr_config < 2) && in rack_start_hpts_timer()
6809 * t-o if the t-o does not cause a send. in rack_start_hpts_timer()
6830 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6838 if (SEQ_GT(tp->snd_max, tp->snd_una)) { in rack_start_hpts_timer()
6839 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", in rack_start_hpts_timer()
6844 rack->rc_tmr_stopped = 0; in rack_start_hpts_timer()
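
/*
 * Illustrative sketch, not part of rack.c: a simplified version of the
 * early/late bookkeeping above. A wakeup that came early is credited
 * back into the next pacing slot (rc_agg_early); a deficit from late
 * wakeups (rc_agg_delayed) is burned down without letting the slot
 * drop below one wheel tick. Names and the constant are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_TICKS_PER_SLOT 10	/* stand-in for HPTS_TICKS_PER_SLOT */

static uint32_t
toy_adjust_slot(uint32_t slot, uint32_t *agg_early, uint32_t *agg_delayed)
{
	if (*agg_early) {
		/* We ran early before: stretch this slot by the credit. */
		slot += *agg_early;
		*agg_early = 0;
	} else if (*agg_delayed) {
		if (slot > *agg_delayed + TOY_TICKS_PER_SLOT) {
			/* The whole deficit fits; absorb it all. */
			slot -= *agg_delayed;
			*agg_delayed = 0;
		} else if (slot > TOY_TICKS_PER_SLOT) {
			/* Shrink to the floor, carry the rest forward. */
			*agg_delayed -= (slot - TOY_TICKS_PER_SLOT);
			slot = TOY_TICKS_PER_SLOT;
		}
	}
	return (slot);
}

int
main(void)
{
	uint32_t early = 50, delayed = 0;

	printf("%u\n", (unsigned)toy_adjust_slot(100, &early, &delayed));	/* 150 */
	delayed = 120;
	printf("%u\n", (unsigned)toy_adjust_slot(100, &early, &delayed));	/* 10 */
	return (0);
}
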
6858 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mark_lost()
6859 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { in rack_mark_lost()
6860 /* Got up to all that were marked sack-passed */ in rack_mark_lost()
6863 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_mark_lost()
6864 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_mark_lost()
6867 nrsm->r_flags |= RACK_WAS_LOST; in rack_mark_lost()
6868 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_mark_lost()
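
/*
 * Illustrative sketch, not part of rack.c: the lost test applied per
 * segment in rack_mark_lost() above. A sack-passed segment counts as
 * lost once its last transmit time plus the reorder threshold lies in
 * the past; the subtract-and-sign compare assumed here tolerates
 * 32-bit timestamp wrap, like the kernel's TSTMP macros.
 */
#include <stdint.h>
#include <stdio.h>

static int
toy_seg_is_lost(uint32_t tim_lastsent, uint32_t thresh, uint32_t cts)
{
	uint32_t exp = tim_lastsent + thresh;

	return ((int32_t)(cts - exp) >= 0);
}

int
main(void)
{
	printf("%d\n", toy_seg_is_lost(1000, 200, 1300));	/* 1: lost */
	printf("%d\n", toy_seg_is_lost(1000, 200, 1100));	/* 0: not yet */
	return (0);
}
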
6890 * retransmissions; if so, we will enter fast-recovery. The output in rack_timeout_rack()
6897 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_rack()
6899 rack->rc_on_min_to = 0; in rack_timeout_rack()
6905 rack->r_ctl.rc_resend = rsm; in rack_timeout_rack()
6906 rack->r_timer_override = 1; in rack_timeout_rack()
6907 if (rack->use_rack_rr) { in rack_timeout_rack()
6911 * over-ride pacing i.e. rrr takes precedence in rack_timeout_rack()
6916 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timeout_rack()
6919 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; in rack_timeout_rack()
6935 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { in rack_adjust_orig_mlen()
6942 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), in rack_adjust_orig_mlen()
6944 rsm->m, in rack_adjust_orig_mlen()
6946 (intmax_t)M_TRAILINGROOM(rsm->m), in rack_adjust_orig_mlen()
6947 rsm->orig_t_space, in rack_adjust_orig_mlen()
6948 rsm->orig_m_len, in rack_adjust_orig_mlen()
6949 rsm->m->m_len)); in rack_adjust_orig_mlen()
6950 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); in rack_adjust_orig_mlen()
6951 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_orig_mlen()
6953 if (rsm->m->m_len < rsm->orig_m_len) { in rack_adjust_orig_mlen()
6958 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), in rack_adjust_orig_mlen()
6960 rsm->m, rsm->m->m_len, in rack_adjust_orig_mlen()
6961 rsm, rsm->orig_m_len, in rack_adjust_orig_mlen()
6962 rsm->soff)); in rack_adjust_orig_mlen()
6963 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) in rack_adjust_orig_mlen()
6964 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); in rack_adjust_orig_mlen()
6966 rsm->soff = 0; in rack_adjust_orig_mlen()
6967 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_orig_mlen()
6969 } else if (rsm->m->m_len > rsm->orig_m_len) { in rack_adjust_orig_mlen()
6971 rsm, rsm->m); in rack_adjust_orig_mlen()
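
/*
 * Illustrative sketch, not part of rack.c: the bookkeeping done in
 * rack_adjust_orig_mlen() above, on a toy "mbuf". If the trailing room
 * shrank, sbcompress() appended new bytes into the same buffer, so the
 * recorded original length grows by that amount. If m_len shrank,
 * bytes were trimmed from the front, so the saved offset moves back by
 * the difference. Struct and field names are made up for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_mbuf {
	uint32_t m_len;		/* valid bytes in the buffer */
	uint32_t trailingroom;	/* free bytes after the data */
};

struct toy_rsm {
	struct toy_mbuf *m;
	uint32_t soff;		/* offset of this rsm's data in m */
	uint32_t orig_m_len;	/* m_len when we last looked */
	uint32_t orig_t_space;	/* trailingroom when we last looked */
};

static void
toy_adjust_orig_mlen(struct toy_rsm *rsm)
{
	if (rsm->m->trailingroom != rsm->orig_t_space) {
		/* Data was appended in place behind our data. */
		rsm->orig_m_len += rsm->orig_t_space - rsm->m->trailingroom;
		rsm->orig_t_space = rsm->m->trailingroom;
	}
	if (rsm->m->m_len < rsm->orig_m_len) {
		/* Front of the buffer was trimmed (data acked). */
		if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
			rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
		else
			rsm->soff = 0;
		rsm->orig_m_len = rsm->m->m_len;
	}
}

int
main(void)
{
	struct toy_mbuf m = { .m_len = 120, .trailingroom = 8 };
	struct toy_rsm rsm = { .m = &m, .soff = 40,
	    .orig_m_len = 100, .orig_t_space = 28 };

	/* 20 bytes were appended (28 -> 8 trailing), none trimmed. */
	toy_adjust_orig_mlen(&rsm);
	printf("orig_m_len=%u soff=%u\n",
	    (unsigned)rsm.orig_m_len, (unsigned)rsm.soff);
	return (0);
}
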
6982 if (src_rsm->m && in rack_setup_offset_for_rsm()
6983 ((src_rsm->orig_m_len != src_rsm->m->m_len) || in rack_setup_offset_for_rsm()
6984 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { in rack_setup_offset_for_rsm()
6988 m = src_rsm->m; in rack_setup_offset_for_rsm()
6989 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); in rack_setup_offset_for_rsm()
6990 while (soff >= m->m_len) { in rack_setup_offset_for_rsm()
6992 soff -= m->m_len; in rack_setup_offset_for_rsm()
6993 m = m->m_next; in rack_setup_offset_for_rsm()
6999 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7000 (src_rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7001 &src_rsm->soff); in rack_setup_offset_for_rsm()
7002 src_rsm->orig_m_len = src_rsm->m->m_len; in rack_setup_offset_for_rsm()
7003 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); in rack_setup_offset_for_rsm()
7004 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7005 (rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7006 &rsm->soff); in rack_setup_offset_for_rsm()
7007 rsm->orig_m_len = rsm->m->m_len; in rack_setup_offset_for_rsm()
7008 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
7012 rsm->m = m; in rack_setup_offset_for_rsm()
7013 rsm->soff = soff; in rack_setup_offset_for_rsm()
7014 rsm->orig_m_len = m->m_len; in rack_setup_offset_for_rsm()
7015 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
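
/*
 * Illustrative sketch, not part of rack.c: the chain walk used above
 * to turn "soff bytes past the source rsm's data" into an
 * (mbuf, offset) pair, on a toy singly linked buffer chain.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_buf {
	struct toy_buf *next;
	uint32_t len;
};

static struct toy_buf *
toy_walk_to_offset(struct toy_buf *m, uint32_t *soff)
{
	/* Skip whole buffers until the offset lands inside one. */
	while (m != NULL && *soff >= m->len) {
		*soff -= m->len;
		m = m->next;
	}
	return (m);	/* NULL means the offset ran past the chain */
}

int
main(void)
{
	struct toy_buf c = { NULL, 100 }, b = { &c, 50 }, a = { &b, 30 };
	uint32_t soff = 70;
	struct toy_buf *m = toy_walk_to_offset(&a, &soff);

	/* 70 skips a (30 bytes), then lands 40 bytes into b. */
	printf("len=%u soff=%u\n", (unsigned)(m ? m->len : 0), (unsigned)soff);
	return (0);
}
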
7024 nrsm->r_start = start; in rack_clone_rsm()
7025 nrsm->r_end = rsm->r_end; in rack_clone_rsm()
7026 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; in rack_clone_rsm()
7027 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; in rack_clone_rsm()
7028 nrsm->r_flags = rsm->r_flags; in rack_clone_rsm()
7029 nrsm->r_dupack = rsm->r_dupack; in rack_clone_rsm()
7030 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; in rack_clone_rsm()
7031 nrsm->r_rtr_bytes = 0; in rack_clone_rsm()
7032 nrsm->r_fas = rsm->r_fas; in rack_clone_rsm()
7033 nrsm->r_bas = rsm->r_bas; in rack_clone_rsm()
7034 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); in rack_clone_rsm()
7035 nrsm->r_just_ret = rsm->r_just_ret; in rack_clone_rsm()
7036 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { in rack_clone_rsm()
7037 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; in rack_clone_rsm()
7040 if (nrsm->r_flags & RACK_HAS_SYN) in rack_clone_rsm()
7041 nrsm->r_flags &= ~RACK_HAS_SYN; in rack_clone_rsm()
7043 if (rsm->r_flags & RACK_HAS_FIN) in rack_clone_rsm()
7044 rsm->r_flags &= ~RACK_HAS_FIN; in rack_clone_rsm()
7046 if (rsm->r_flags & RACK_HAD_PUSH) in rack_clone_rsm()
7047 rsm->r_flags &= ~RACK_HAD_PUSH; in rack_clone_rsm()
7049 nrsm->r_hw_tls = rsm->r_hw_tls; in rack_clone_rsm()
7057 KASSERT(((rsm->m != NULL) || in rack_clone_rsm()
7058 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), in rack_clone_rsm()
7059 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); in rack_clone_rsm()
7060 if (rsm->m) in rack_clone_rsm()
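
/*
 * Illustrative sketch, not part of rack.c: the split performed by
 * rack_clone_rsm() above, reduced to its sequence-range effect. The
 * clone takes over [start, end) with a copy of the send history, and
 * the original is clipped to end at start (tqhash_update_end() in the
 * real code). Types here are toy stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_NRTX 3

struct toy_rsm {
	uint32_t r_start, r_end;
	uint8_t r_rtr_cnt;
	uint64_t r_tim_lastsent[TOY_NRTX];
};

static void
toy_clone_rsm(struct toy_rsm *rsm, struct toy_rsm *nrsm, uint32_t start)
{
	int i;

	nrsm->r_start = start;
	nrsm->r_end = rsm->r_end;
	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
	for (i = 0; i < nrsm->r_rtr_cnt; i++)
		nrsm->r_tim_lastsent[i] = rsm->r_tim_lastsent[i];
	rsm->r_end = start;	/* clip the original */
}

int
main(void)
{
	struct toy_rsm rsm = { 1000, 3000, 1, { 42 } }, nrsm = { 0 };

	toy_clone_rsm(&rsm, &nrsm, 2000);
	printf("[%u,%u) and [%u,%u)\n",
	    (unsigned)rsm.r_start, (unsigned)rsm.r_end,
	    (unsigned)nrsm.r_start, (unsigned)nrsm.r_end);
	return (0);
}
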
7079 rack_log_map_chg(rack->rc_tp, rack, NULL, in rack_merge_rsm()
7080 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); in rack_merge_rsm()
7081 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); in rack_merge_rsm()
7082 if (l_rsm->r_dupack < r_rsm->r_dupack) in rack_merge_rsm()
7083 l_rsm->r_dupack = r_rsm->r_dupack; in rack_merge_rsm()
7084 if (r_rsm->r_rtr_bytes) in rack_merge_rsm()
7085 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; in rack_merge_rsm()
7086 if (r_rsm->r_in_tmap) { in rack_merge_rsm()
7088 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); in rack_merge_rsm()
7089 r_rsm->r_in_tmap = 0; in rack_merge_rsm()
7093 if (r_rsm->r_flags & RACK_HAS_FIN) in rack_merge_rsm()
7094 l_rsm->r_flags |= RACK_HAS_FIN; in rack_merge_rsm()
7095 if (r_rsm->r_flags & RACK_TLP) in rack_merge_rsm()
7096 l_rsm->r_flags |= RACK_TLP; in rack_merge_rsm()
7097 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) in rack_merge_rsm()
7098 l_rsm->r_flags |= RACK_RWND_COLLAPSED; in rack_merge_rsm()
7099 if ((r_rsm->r_flags & RACK_APP_LIMITED) && in rack_merge_rsm()
7100 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_merge_rsm()
7102 * If both are app-limited then let the in rack_merge_rsm()
7106 l_rsm->r_flags |= RACK_APP_LIMITED; in rack_merge_rsm()
7107 r_rsm->r_flags &= ~RACK_APP_LIMITED; in rack_merge_rsm()
7108 if (r_rsm == rack->r_ctl.rc_first_appl) in rack_merge_rsm()
7109 rack->r_ctl.rc_first_appl = l_rsm; in rack_merge_rsm()
7111 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); in rack_merge_rsm()
7126 if (l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < in rack_merge_rsm()
7127 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { in rack_merge_rsm()
7128 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; in rack_merge_rsm()
7135 if (l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) in rack_merge_rsm()
7136 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; in rack_merge_rsm()
7138 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { in rack_merge_rsm()
7140 r_rsm->r_limit_type = l_rsm->r_limit_type; in rack_merge_rsm()
7141 l_rsm->r_limit_type = 0; in rack_merge_rsm()
7144 l_rsm->r_flags |= RACK_MERGED; in rack_merge_rsm()
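
/*
 * Illustrative sketch, not part of rack.c: the timing rule in
 * rack_merge_rsm() above. When two adjacent tracking blocks collapse
 * into one, the merged block keeps the *latest* last-send time and the
 * latest ack arrival, so the RACK reorder logic never sees time move
 * backwards across a merge. Fields are toy stand-ins.
 */
#include <stdint.h>

struct toy_rsm {
	uint32_t r_start, r_end;
	uint64_t last_sent;
	uint32_t r_ack_arrival;
};

static void
toy_merge_rsm(struct toy_rsm *l, const struct toy_rsm *r)
{
	l->r_end = r->r_end;	/* ranges are adjacent: grow the left one */
	if (l->last_sent < r->last_sent)
		l->last_sent = r->last_sent;
	if (l->r_ack_arrival < r->r_ack_arrival)
		l->r_ack_arrival = r->r_ack_arrival;
}

int
main(void)
{
	struct toy_rsm l = { 0, 100, 5, 9 }, r = { 100, 200, 7, 8 };

	toy_merge_rsm(&l, &r);
	/* Merged block keeps last_sent 7 (from r) and ack_arrival 9 (from l). */
	return (l.r_end == 200 && l.last_sent == 7 && l.r_ack_arrival == 9 ? 0 : 1);
}
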
7169 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_timeout_tlp()
7175 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_tlp()
7182 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_tlp()
7183 rack->r_might_revert = 0; in rack_timeout_tlp()
7184 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_tlp()
7186 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_tlp()
7188 avail = sbavail(&so->so_snd); in rack_timeout_tlp()
7189 out = tp->snd_max - tp->snd_una; in rack_timeout_tlp()
7190 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { in rack_timeout_tlp()
7195 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { in rack_timeout_tlp()
7196 rack->r_ctl.dsack_persist--; in rack_timeout_tlp()
7197 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_tlp()
7198 rack->r_ctl.num_dsack = 0; in rack_timeout_tlp()
7202 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_tlp()
7203 (rack->r_ctl.rc_tlp_cnt_out == 1)) { in rack_timeout_tlp()
7212 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_tlp()
7213 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_tlp()
7214 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_tlp()
7215 tp->gput_seq, in rack_timeout_tlp()
7222 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) in rack_timeout_tlp()
7227 amm = avail - out; in rack_timeout_tlp()
7230 if ((amm + out) > tp->snd_wnd) { in rack_timeout_tlp()
7238 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_timeout_tlp()
7240 if (rack->rack_no_prr == 0) { in rack_timeout_tlp()
7241 if (out + amm <= tp->snd_wnd) { in rack_timeout_tlp()
7242 rack->r_ctl.rc_prr_sndcnt = amm; in rack_timeout_tlp()
7243 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7249 /* Set the send-new override */ in rack_timeout_tlp()
7250 if (out + amm <= tp->snd_wnd) in rack_timeout_tlp()
7251 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7255 rack->r_ctl.rc_tlpsend = NULL; in rack_timeout_tlp()
7261 * Ok we need to arrange the last un-acked segment to be re-sent, or in rack_timeout_tlp()
7262 * optionally the first un-acked segment. in rack_timeout_tlp()
7266 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timeout_tlp()
7268 rsm = tqhash_max(rack->r_ctl.tqh); in rack_timeout_tlp()
7269 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { in rack_timeout_tlp()
7284 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) in rack_timeout_tlp()
7285 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); in rack_timeout_tlp()
7287 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_tlp()
7294 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { in rack_timeout_tlp()
7309 (rsm->r_end - ctf_fixed_maxseg(tp))); in rack_timeout_tlp()
7312 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_timeout_tlp()
7314 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_timeout_tlp()
7319 if (rsm->r_in_tmap) { in rack_timeout_tlp()
7320 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_timeout_tlp()
7321 nrsm->r_in_tmap = 1; in rack_timeout_tlp()
7325 rack->r_ctl.rc_tlpsend = rsm; in rack_timeout_tlp()
7329 rack->r_timer_override = 1; in rack_timeout_tlp()
7330 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7333 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
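
/*
 * Illustrative sketch, not part of rack.c: the core decision in
 * rack_timeout_tlp() above. If there is unsent data and the peer's
 * window has room, the probe carries new data; otherwise the last
 * (or, with rack_always_send_oldest, the oldest) outstanding segment
 * is re-sent. Names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

enum toy_tlp { TLP_NEW_DATA, TLP_RESEND_LAST, TLP_RESEND_OLDEST };

static enum toy_tlp
toy_tlp_choose(uint32_t avail, uint32_t out, uint32_t snd_wnd, int send_oldest)
{
	if (avail > out && snd_wnd > out)
		return (TLP_NEW_DATA);	/* new bytes: min(avail, snd_wnd) - out */
	return (send_oldest ? TLP_RESEND_OLDEST : TLP_RESEND_LAST);
}

int
main(void)
{
	printf("%d\n", toy_tlp_choose(5000, 3000, 6000, 0));	/* 0: new data */
	printf("%d\n", toy_tlp_choose(3000, 3000, 6000, 0));	/* 1: resend last */
	return (0);
}
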
7350 tp->t_flags &= ~TF_DELACK; in rack_timeout_delack()
7351 tp->t_flags |= TF_ACKNOW; in rack_timeout_delack()
7353 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_timeout_delack()
7362 t_template = tcpip_maketemplate(rack->rc_inp); in rack_send_ack_challange()
7364 if (rack->forced_ack == 0) { in rack_send_ack_challange()
7365 rack->forced_ack = 1; in rack_send_ack_challange()
7366 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); in rack_send_ack_challange()
7368 rack->probe_not_answered = 1; in rack_send_ack_challange()
7370 tcp_respond(rack->rc_tp, t_template->tt_ipgen, in rack_send_ack_challange()
7371 &t_template->tt_t, (struct mbuf *)NULL, in rack_send_ack_challange()
7372 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); in rack_send_ack_challange()
7374 /* This does send an ack so kill any D-ack timer */ in rack_send_ack_challange()
7375 if (rack->rc_tp->t_flags & TF_DELACK) in rack_send_ack_challange()
7376 rack->rc_tp->t_flags &= ~TF_DELACK; in rack_send_ack_challange()
7396 if (rack->rc_in_persist == 0) in rack_timeout_persist()
7401 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7402 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_persist()
7415 if (tp->t_rxtshift >= V_tcp_retries && in rack_timeout_persist()
7416 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || in rack_timeout_persist()
7417 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { in rack_timeout_persist()
7420 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7421 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7424 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && in rack_timeout_persist()
7425 tp->snd_una == tp->snd_max) in rack_timeout_persist()
7427 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; in rack_timeout_persist()
7432 if (tp->t_state > TCPS_CLOSE_WAIT && in rack_timeout_persist()
7433 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { in rack_timeout_persist()
7436 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7437 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7442 if (rack->probe_not_answered) { in rack_timeout_persist()
7444 rack->r_ctl.persist_lost_ends++; in rack_timeout_persist()
7449 if (tp->t_rxtshift < V_tcp_retries) in rack_timeout_persist()
7450 tp->t_rxtshift++; in rack_timeout_persist()
7469 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; in rack_timeout_keepalive()
7472 * Keep-alive timer went off; send something or drop connection if in rack_timeout_keepalive()
7476 if (tp->t_state < TCPS_ESTABLISHED) in rack_timeout_keepalive()
7478 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timeout_keepalive()
7479 tp->t_state <= TCPS_CLOSING) { in rack_timeout_keepalive()
7480 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) in rack_timeout_keepalive()
7487 * number tp->snd_una-1 causes the transmitted zero-length in rack_timeout_keepalive()
7500 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_keepalive()
7512 * un-acked. in rack_remxt_tmr()
7517 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_remxt_tmr()
7520 rack->r_timer_override = 1; in rack_remxt_tmr()
7521 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_remxt_tmr()
7522 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; in rack_remxt_tmr()
7523 rack->r_late = 0; in rack_remxt_tmr()
7524 rack->r_early = 0; in rack_remxt_tmr()
7525 rack->r_ctl.rc_agg_delayed = 0; in rack_remxt_tmr()
7526 rack->r_ctl.rc_agg_early = 0; in rack_remxt_tmr()
7527 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_remxt_tmr()
7529 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { in rack_remxt_tmr()
7532 * more than rack_rxt_scoreboard_clear_thresh time-outs. in rack_remxt_tmr()
7534 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7535 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7536 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7542 * mark SACK-PASS on anything not acked here. in rack_remxt_tmr()
7551 * sacks that come floating in will "re-ack" the data. in rack_remxt_tmr()
7556 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7558 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_remxt_tmr()
7559 rsm->r_dupack = 0; in rack_remxt_tmr()
7562 /* We must re-add it back to the tlist */ in rack_remxt_tmr()
7564 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_remxt_tmr()
7566 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); in rack_remxt_tmr()
7568 rsm->r_in_tmap = 1; in rack_remxt_tmr()
7570 if (rsm->r_flags & RACK_ACKED) in rack_remxt_tmr()
7571 rsm->r_flags |= RACK_WAS_ACKED; in rack_remxt_tmr()
7572 …rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_W… in rack_remxt_tmr()
7573 rsm->r_flags |= RACK_MUST_RXT; in rack_remxt_tmr()
7576 rack->r_ctl.rc_considered_lost = 0; in rack_remxt_tmr()
7577 /* Clear the count (we just un-acked them) */ in rack_remxt_tmr()
7578 rack->r_ctl.rc_sacked = 0; in rack_remxt_tmr()
7579 rack->r_ctl.rc_sacklast = NULL; in rack_remxt_tmr()
7581 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7582 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7583 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7584 rack->r_ctl.rc_prr_sndcnt = 0; in rack_remxt_tmr()
7586 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7587 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7588 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7589 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && in rack_remxt_tmr()
7590 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_remxt_tmr()
7592 * For non-sack customers new data in rack_remxt_tmr()
7596 rack->r_must_retran = 1; in rack_remxt_tmr()
7597 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, in rack_remxt_tmr()
7598 rack->r_ctl.rc_sacked); in rack_remxt_tmr()
7606 tp->t_rxtcur = RACK_REXMTVAL(tp); in rack_convert_rtts()
7607 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_convert_rtts()
7608 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); in rack_convert_rtts()
7610 if (tp->t_rxtcur > rack_rto_max) { in rack_convert_rtts()
7611 tp->t_rxtcur = rack_rto_max; in rack_convert_rtts()
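
/*
 * Illustrative sketch, not part of rack.c: the RTO recomputation in
 * rack_convert_rtts() above, using the RACK_REXMTVAL() definition
 * (srtt + 4 * rttvar, floored at the minimum RTO and capped at the
 * maximum). All values are in microseconds.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
toy_rexmtval(uint32_t srtt, uint32_t rttvar, uint32_t rto_min, uint32_t rto_max)
{
	uint64_t rxt = srtt + ((uint64_t)rttvar << 2);

	if (rxt < rto_min)
		rxt = rto_min;
	if (rxt > rto_max)
		rxt = rto_max;
	return ((uint32_t)rxt);
}

int
main(void)
{
	/* srtt 40ms, rttvar 5ms -> 40 + 4*5 = 60ms */
	printf("%u\n", (unsigned)toy_rexmtval(40000, 5000, 30000, 4000000));
	return (0);
}
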
7621 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cc_conn_init()
7622 srtt = tp->t_srtt; in rack_cc_conn_init()
7628 if ((srtt == 0) && (tp->t_srtt != 0)) in rack_cc_conn_init()
7636 if (tp->snd_ssthresh < tp->snd_wnd) { in rack_cc_conn_init()
7637 tp->snd_ssthresh = tp->snd_wnd; in rack_cc_conn_init()
7643 if (rc_init_window(rack) < tp->snd_cwnd) in rack_cc_conn_init()
7644 tp->snd_cwnd = rc_init_window(rack); in rack_cc_conn_init()
7648 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7659 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_rxt()
7660 (tp->t_rxtshift)) { in rack_timeout_rxt()
7667 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_rxt()
7668 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_rxt()
7669 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_rxt()
7670 tp->gput_seq, in rack_timeout_rxt()
7676 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_rxt()
7678 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; in rack_timeout_rxt()
7679 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_rxt()
7680 rack->rc_ack_required = 1; in rack_timeout_rxt()
7681 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_rxt()
7682 if (IN_RECOVERY(tp->t_flags) && in rack_timeout_rxt()
7683 (rack->rto_from_rec == 0)) { in rack_timeout_rxt()
7690 rack->rto_from_rec = 1; in rack_timeout_rxt()
7691 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; in rack_timeout_rxt()
7693 if (IN_FASTRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7694 tp->t_flags |= TF_WASFRECOVERY; in rack_timeout_rxt()
7696 tp->t_flags &= ~TF_WASFRECOVERY; in rack_timeout_rxt()
7697 if (IN_CONGRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7698 tp->t_flags |= TF_WASCRECOVERY; in rack_timeout_rxt()
7700 tp->t_flags &= ~TF_WASCRECOVERY; in rack_timeout_rxt()
7701 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_timeout_rxt()
7702 (tp->snd_una == tp->snd_max)) { in rack_timeout_rxt()
7706 if (rack->r_ctl.dsack_persist) { in rack_timeout_rxt()
7707 rack->r_ctl.dsack_persist--; in rack_timeout_rxt()
7708 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_rxt()
7709 rack->r_ctl.num_dsack = 0; in rack_timeout_rxt()
7721 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timeout_rxt()
7725 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_rxt()
7728 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && in rack_timeout_rxt()
7729 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { in rack_timeout_rxt()
7741 if ((rack->r_ctl.rc_resend == NULL) || in rack_timeout_rxt()
7742 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_timeout_rxt()
7749 tp->t_rxtshift++; in rack_timeout_rxt()
7752 if (tp->t_rxtshift > V_tcp_retries) { in rack_timeout_rxt()
7755 tp->t_rxtshift = V_tcp_retries; in rack_timeout_rxt()
7758 MPASS(tp->t_softerror >= 0); in rack_timeout_rxt()
7759 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; in rack_timeout_rxt()
7762 if (tp->t_state == TCPS_SYN_SENT) { in rack_timeout_rxt()
7767 tp->snd_cwnd = 1; in rack_timeout_rxt()
7768 } else if (tp->t_rxtshift == 1) { in rack_timeout_rxt()
7775 * End-to-End Network Path Properties" by Allman and Paxson in rack_timeout_rxt()
7778 tp->snd_cwnd_prev = tp->snd_cwnd; in rack_timeout_rxt()
7779 tp->snd_ssthresh_prev = tp->snd_ssthresh; in rack_timeout_rxt()
7780 tp->snd_recover_prev = tp->snd_recover; in rack_timeout_rxt()
7781 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); in rack_timeout_rxt()
7782 tp->t_flags |= TF_PREVVALID; in rack_timeout_rxt()
7783 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) in rack_timeout_rxt()
7784 tp->t_flags &= ~TF_PREVVALID; in rack_timeout_rxt()
7786 if ((tp->t_state == TCPS_SYN_SENT) || in rack_timeout_rxt()
7787 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_timeout_rxt()
7788 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7790 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7792 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, in rack_timeout_rxt()
7793 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); in rack_timeout_rxt()
7802 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; in rack_timeout_rxt()
7809 ((tp->t_state == TCPS_ESTABLISHED) || in rack_timeout_rxt()
7810 (tp->t_state == TCPS_FIN_WAIT_1))) { in rack_timeout_rxt()
7813 * 1448 -> 1188 -> 524) should be given 2 chances to recover in rack_timeout_rxt()
7814 * before further clamping down. 'tp->t_rxtshift % 2 == 0' in rack_timeout_rxt()
7817 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == in rack_timeout_rxt()
7819 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && in rack_timeout_rxt()
7820 tp->t_rxtshift % 2 == 0)) { in rack_timeout_rxt()
7822 * Enter Path MTU Black-hole Detection mechanism: - in rack_timeout_rxt()
7823 * Disable Path MTU Discovery (IP "DF" bit). - in rack_timeout_rxt()
7827 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { in rack_timeout_rxt()
7829 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7831 tp->t_pmtud_saved_maxseg = tp->t_maxseg; in rack_timeout_rxt()
7840 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { in rack_timeout_rxt()
7842 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; in rack_timeout_rxt()
7846 tp->t_maxseg = V_tcp_v6mssdflt; in rack_timeout_rxt()
7851 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7859 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { in rack_timeout_rxt()
7861 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; in rack_timeout_rxt()
7865 tp->t_maxseg = V_tcp_mssdflt; in rack_timeout_rxt()
7870 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7883 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && in rack_timeout_rxt()
7884 (tp->t_rxtshift >= 6)) { in rack_timeout_rxt()
7885 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7886 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7887 tp->t_maxseg = tp->t_pmtud_saved_maxseg; in rack_timeout_rxt()
7888 if (tp->t_maxseg < V_tcp_mssdflt) { in rack_timeout_rxt()
7894 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7896 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7904 * our third SYN to work-around some broken terminal servers in rack_timeout_rxt()
7907 * unknown-to-them TCP options. in rack_timeout_rxt()
7909 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && in rack_timeout_rxt()
7910 (tp->t_rxtshift == 3)) in rack_timeout_rxt()
7911 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); in rack_timeout_rxt()
7918 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { in rack_timeout_rxt()
7920 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_timeout_rxt()
7925 tp->t_rttvar += tp->t_srtt; in rack_timeout_rxt()
7926 tp->t_srtt = 0; in rack_timeout_rxt()
7928 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_timeout_rxt()
7929 tp->snd_recover = tp->snd_max; in rack_timeout_rxt()
7930 tp->t_flags |= TF_ACKNOW; in rack_timeout_rxt()
7931 tp->t_rtttime = 0; in rack_timeout_rxt()
7932 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); in rack_timeout_rxt()
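
/*
 * Illustrative sketch, not part of rack.c: the exponential backoff
 * applied above on each retransmission timeout. The real stack indexes
 * the tcp_backoff[] table by t_rxtshift; a doubling table with a cap
 * is assumed here for the sketch, with the result clamped to rto_max.
 */
#include <stdint.h>
#include <stdio.h>

static const int toy_backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64, 64 };

static uint32_t
toy_backed_off_rto(uint32_t base_usec, int rxtshift, uint32_t rto_max)
{
	uint64_t rexmt = (uint64_t)base_usec * toy_backoff[rxtshift];

	return (rexmt > rto_max ? rto_max : (uint32_t)rexmt);
}

int
main(void)
{
	for (int i = 0; i < 6; i++)
		printf("shift %d -> %u us\n", i,
		    (unsigned)toy_backed_off_rto(60000, i, 4000000));
	return (0);
}
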
7941 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); in rack_process_timers()
7943 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_timers()
7944 (tp->t_flags & TF_GPUTINPROG)) { in rack_process_timers()
7953 bytes = tp->gput_ack - tp->gput_seq; in rack_process_timers()
7954 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_process_timers()
7955 bytes += tp->gput_seq - tp->snd_una; in rack_process_timers()
7956 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_process_timers()
7962 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_process_timers()
7963 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_process_timers()
7964 tp->gput_seq, in rack_process_timers()
7966 tp->t_flags &= ~TF_GPUTINPROG; in rack_process_timers()
7972 if (tp->t_state == TCPS_LISTEN) { in rack_process_timers()
7974 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) in rack_process_timers()
7979 rack->rc_on_min_to) { in rack_process_timers()
7982 * are on a min-timeout (which means rrr_conf = 3) in rack_process_timers()
7987 * If it's on a normal rack timer (non-min) then in rack_process_timers()
7992 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_process_timers()
7995 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_process_timers()
7996 ret = -1; in rack_process_timers()
8007 ret = -2; in rack_process_timers()
8014 * no-sack wakeup on since we no longer have a PKT_OUTPUT in rack_process_timers()
8017 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; in rack_process_timers()
8018 ret = -3; in rack_process_timers()
8019 left = rack->r_ctl.rc_timer_exp - cts; in rack_process_timers()
8025 rack->rc_tmr_stopped = 0; in rack_process_timers()
8026 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; in rack_process_timers()
8030 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8031 rack->r_fast_output = 0; in rack_process_timers()
8034 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8037 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8038 rack->r_fast_output = 0; in rack_process_timers()
8056 flags_on_entry = rack->r_ctl.rc_hpts_flags; in rack_timer_cancel()
8058 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_timer_cancel()
8059 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || in rack_timer_cancel()
8060 ((tp->snd_max - tp->snd_una) == 0))) { in rack_timer_cancel()
8061 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8064 if ((tp->snd_max - tp->snd_una) == 0) in rack_timer_cancel()
8065 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_cancel()
8068 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_timer_cancel()
8069 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_cancel()
8070 if (tcp_in_hpts(rack->rc_tp) && in rack_timer_cancel()
8071 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { in rack_timer_cancel()
8077 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8080 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); in rack_timer_cancel()
8091 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_stopall()
8092 rack->t_timers_stopped = 1; in rack_stopall()
8107 rack->rc_in_persist = 1; in rack_stop_all_timers()
8109 if (tcp_in_hpts(rack->rc_tp)) { in rack_stop_all_timers()
8110 tcp_hpts_remove(rack->rc_tp); in rack_stop_all_timers()
8120 rsm->r_rtr_cnt++; in rack_update_rsm()
8121 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { in rack_update_rsm()
8122 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; in rack_update_rsm()
8123 rsm->r_flags |= RACK_OVERMAX; in rack_update_rsm()
8125 rsm->r_act_rxt_cnt++; in rack_update_rsm()
8128 rsm->r_dupack = 0; in rack_update_rsm()
8129 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { in rack_update_rsm()
8130 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8131 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8133 if (rsm->r_flags & RACK_WAS_LOST) { in rack_update_rsm()
8139 rsm->r_flags &= ~RACK_WAS_LOST; in rack_update_rsm()
8140 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_update_rsm()
8142 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_update_rsm()
8143 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_update_rsm()
8145 rack->r_ctl.rc_considered_lost = 0; in rack_update_rsm()
8147 idx = rsm->r_rtr_cnt - 1; in rack_update_rsm()
8148 rsm->r_tim_lastsent[idx] = ts; in rack_update_rsm()
8151 * in snduna <->snd_max. in rack_update_rsm()
8153 rsm->r_fas = ctf_flight_size(rack->rc_tp, in rack_update_rsm()
8154 rack->r_ctl.rc_sacked); in rack_update_rsm()
8155 if (rsm->r_flags & RACK_ACKED) { in rack_update_rsm()
8157 rsm->r_flags &= ~RACK_ACKED; in rack_update_rsm()
8158 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8160 if (rsm->r_in_tmap) { in rack_update_rsm()
8161 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8162 rsm->r_in_tmap = 0; in rack_update_rsm()
8166 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8167 rsm->r_in_tmap = 1; in rack_update_rsm()
8168 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); in rack_update_rsm()
8170 if (rsm->r_flags & RACK_MUST_RXT) { in rack_update_rsm()
8171 if (rack->r_must_retran) in rack_update_rsm()
8172 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8173 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_update_rsm()
8178 rack->r_must_retran = 0; in rack_update_rsm()
8179 rack->r_ctl.rc_out_at_rto = 0; in rack_update_rsm()
8181 rsm->r_flags &= ~RACK_MUST_RXT; in rack_update_rsm()
8184 rsm->r_flags &= ~RACK_RWND_COLLAPSED; in rack_update_rsm()
8185 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_update_rsm()
8187 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_update_rsm()
8188 rsm->r_flags |= RACK_WAS_SACKPASS; in rack_update_rsm()
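
/*
 * Illustrative sketch, not part of rack.c: the retransmit-count and
 * last-sent bookkeeping at the top of rack_update_rsm(). Once the
 * per-segment history is full, the count pins at the maximum, an
 * overmax marker is set (RACK_OVERMAX in the real code), and the
 * newest send time overwrites the last slot.
 */
#include <stdint.h>

#define TOY_NUM_OF_RETRANS 3

struct toy_rsm {
	uint8_t r_rtr_cnt;
	int overmax;
	uint64_t r_tim_lastsent[TOY_NUM_OF_RETRANS];
};

static void
toy_record_rexmit(struct toy_rsm *rsm, uint64_t now)
{
	rsm->r_rtr_cnt++;
	if (rsm->r_rtr_cnt > TOY_NUM_OF_RETRANS) {
		rsm->r_rtr_cnt = TOY_NUM_OF_RETRANS;
		rsm->overmax = 1;
	}
	rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] = now;
}

int
main(void)
{
	struct toy_rsm rsm = { 1, 0, { 100 } };

	toy_record_rexmit(&rsm, 200);	/* 2nd send */
	toy_record_rexmit(&rsm, 300);	/* 3rd send */
	toy_record_rexmit(&rsm, 400);	/* history full: pins, sets overmax */
	return (rsm.r_rtr_cnt == 3 && rsm.overmax &&
	    rsm.r_tim_lastsent[2] == 400 ? 0 : 1);
}
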
8197 * We (re-)transmitted starting at rsm->r_start for some length in rack_update_entry()
8206 c_end = rsm->r_start + len; in rack_update_entry()
8207 if (SEQ_GEQ(c_end, rsm->r_end)) { in rack_update_entry()
8213 if (c_end == rsm->r_end) { in rack_update_entry()
8220 act_len = rsm->r_end - rsm->r_start; in rack_update_entry()
8221 *lenp = (len - act_len); in rack_update_entry()
8222 return (rsm->r_end); in rack_update_entry()
8246 nrsm->r_dupack = 0; in rack_update_entry()
8249 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_update_entry()
8251 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_update_entry()
8256 if (rsm->r_in_tmap) { in rack_update_entry()
8257 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_update_entry()
8258 nrsm->r_in_tmap = 1; in rack_update_entry()
8260 rsm->r_flags &= (~RACK_HAS_FIN); in rack_update_entry()
8298 * -- i.e. return if err != 0 or should we pretend we sent it? -- in rack_log_output()
8304 * We don't log errors -- we could but snd_max does not in rack_log_output()
8316 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_output()
8317 snd_una = tp->snd_una; in rack_log_output()
8318 snd_max = tp->snd_max; in rack_log_output()
8326 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) in rack_log_output()
8332 /* Are we sending an old segment to induce an ack (keep-alive)? */ in rack_log_output()
8342 len = end - seq_out; in rack_log_output()
8350 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_log_output()
8351 rack->r_ctl.rc_prr_out += len; in rack_log_output()
8361 * Hmm out of memory and the tcb got destroyed while in rack_log_output()
8367 rsm->r_flags = RACK_HAS_FIN|add_flag; in rack_log_output()
8369 rsm->r_flags = add_flag; in rack_log_output()
8372 rsm->r_hw_tls = 1; in rack_log_output()
8373 rsm->r_tim_lastsent[0] = cts; in rack_log_output()
8374 rsm->r_rtr_cnt = 1; in rack_log_output()
8375 rsm->r_act_rxt_cnt = 0; in rack_log_output()
8376 rsm->r_rtr_bytes = 0; in rack_log_output()
8379 rsm->r_flags |= RACK_HAS_SYN; in rack_log_output()
8381 rsm->r_start = seq_out; in rack_log_output()
8382 rsm->r_end = rsm->r_start + len; in rack_log_output()
8384 rsm->r_dupack = 0; in rack_log_output()
8390 rsm->m = s_mb; in rack_log_output()
8391 rsm->soff = s_moff; in rack_log_output()
8394 * reflected in snduna <->snd_max in rack_log_output()
8396 rsm->r_fas = (ctf_flight_size(rack->rc_tp, in rack_log_output()
8397 rack->r_ctl.rc_sacked) + in rack_log_output()
8398 (rsm->r_end - rsm->r_start)); in rack_log_output()
8399 if ((rack->rc_initial_ss_comp == 0) && in rack_log_output()
8400 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { in rack_log_output()
8401 rack->r_ctl.ss_hi_fs = rsm->r_fas; in rack_log_output()
8403 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ in rack_log_output()
8404 if (rsm->m) { in rack_log_output()
8405 if (rsm->m->m_len <= rsm->soff) { in rack_log_output()
8411 * within rsm->m. But if the sbsndptr was in rack_log_output()
8417 lm = rsm->m; in rack_log_output()
8418 while (lm->m_len <= rsm->soff) { in rack_log_output()
8419 rsm->soff -= lm->m_len; in rack_log_output()
8420 lm = lm->m_next; in rack_log_output()
8421 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", in rack_log_output()
8422 __func__, rack, s_moff, s_mb, rsm->soff)); in rack_log_output()
8424 rsm->m = lm; in rack_log_output()
8426 rsm->orig_m_len = rsm->m->m_len; in rack_log_output()
8427 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_log_output()
8429 rsm->orig_m_len = 0; in rack_log_output()
8430 rsm->orig_t_space = 0; in rack_log_output()
8432 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); in rack_log_output()
8437 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_log_output()
8439 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_log_output()
8444 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_log_output()
8445 rsm->r_in_tmap = 1; in rack_log_output()
8446 if (rsm->r_flags & RACK_IS_PCM) { in rack_log_output()
8447 rack->r_ctl.pcm_i.send_time = cts; in rack_log_output()
8448 rack->r_ctl.pcm_i.eseq = rsm->r_end; in rack_log_output()
8450 if (rack->pcm_in_progress == 0) in rack_log_output()
8451 rack->r_ctl.pcm_i.sseq = rsm->r_start; in rack_log_output()
8459 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_log_output()
8460 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { in rack_log_output()
8463 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_log_output()
8465 prsm->r_one_out_nr = 1; in rack_log_output()
8473 if (hintrsm && (hintrsm->r_start == seq_out)) { in rack_log_output()
8480 if ((rsm) && (rsm->r_start == seq_out)) { in rack_log_output()
8490 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); in rack_log_output()
8492 if (rsm->r_start == seq_out) { in rack_log_output()
8500 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { in rack_log_output()
8518 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_log_output()
8520 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_log_output()
8525 if (rsm->r_in_tmap) { in rack_log_output()
8526 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_log_output()
8527 nrsm->r_in_tmap = 1; in rack_log_output()
8529 rsm->r_flags &= (~RACK_HAS_FIN); in rack_log_output()
8541 if (seq_out == tp->snd_max) { in rack_log_output()
8543 } else if (SEQ_LT(seq_out, tp->snd_max)) { in rack_log_output()
8545 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", in rack_log_output()
8546 seq_out, len, tp->snd_una, tp->snd_max); in rack_log_output()
8548 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_log_output()
8550 rsm, rsm->r_start, rsm->r_end); in rack_log_output()
8559 * Hmm beyond sndmax? (only if we are using the new rtt-pack in rack_log_output()
8563 seq_out, len, tp->snd_max, tp); in rack_log_output()
8577 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8578 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { in tcp_rack_xmit_timer()
8579 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; in tcp_rack_xmit_timer()
8581 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8582 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { in tcp_rack_xmit_timer()
8583 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; in tcp_rack_xmit_timer()
8585 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in tcp_rack_xmit_timer()
8586 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) in tcp_rack_xmit_timer()
8587 rack->r_ctl.rc_gp_lowrtt = us_rtt; in tcp_rack_xmit_timer()
8588 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) in tcp_rack_xmit_timer()
8589 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in tcp_rack_xmit_timer()
8593 (rsm->r_just_ret) || in tcp_rack_xmit_timer()
8594 (rsm->r_one_out_nr && in tcp_rack_xmit_timer()
8595 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { in tcp_rack_xmit_timer()
8602 * the r_one_out_nr. If it was a CUM-ACK and in tcp_rack_xmit_timer()
8609 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8610 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { in tcp_rack_xmit_timer()
8611 if (rack->r_ctl.rack_rs.confidence == 0) { in tcp_rack_xmit_timer()
8616 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8617 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8618 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8627 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8628 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8629 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8632 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); in tcp_rack_xmit_timer()
8633 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; in tcp_rack_xmit_timer()
8634 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; in tcp_rack_xmit_timer()
8635 rack->r_ctl.rack_rs.rs_rtt_cnt++; in tcp_rack_xmit_timer()
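
/*
 * Illustrative sketch, not part of rack.c: the per-ack RTT sample
 * accumulator filled in above, which later lets the commit step pick
 * the lowest, highest, or average value according to
 * rc_rate_sample_method. Field names are stand-ins.
 */
#include <stdint.h>

struct toy_rtt_sample {
	int empty;
	uint32_t lowest, highest;
	uint64_t total;
	uint32_t cnt;
};

static void
toy_rtt_add(struct toy_rtt_sample *rs, uint32_t rtt)
{
	if (rs->empty || rtt < rs->lowest)
		rs->lowest = rtt;
	if (rs->empty || rtt > rs->highest)
		rs->highest = rtt;
	rs->empty = 0;
	rs->total += rtt;
	rs->cnt++;	/* average = total / cnt at commit time */
}

int
main(void)
{
	struct toy_rtt_sample rs = { .empty = 1 };

	toy_rtt_add(&rs, 40);
	toy_rtt_add(&rs, 30);
	toy_rtt_add(&rs, 50);
	/* lowest=30 highest=50 avg=40: commit picks per sample method. */
	return (rs.lowest == 30 && rs.highest == 50 &&
	    rs.total / rs.cnt == 40 ? 0 : 1);
}
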
8639 * Collect new round-trip time estimate
8648 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) in tcp_rack_xmit_timer_commit()
8651 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { in tcp_rack_xmit_timer_commit()
8653 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; in tcp_rack_xmit_timer_commit()
8654 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { in tcp_rack_xmit_timer_commit()
8656 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; in tcp_rack_xmit_timer_commit()
8657 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { in tcp_rack_xmit_timer_commit()
8659 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / in tcp_rack_xmit_timer_commit()
8660 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); in tcp_rack_xmit_timer_commit()
8663 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); in tcp_rack_xmit_timer_commit()
8669 if (rack->rc_gp_rtt_set == 0) { in tcp_rack_xmit_timer_commit()
8674 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8675 rack->rc_gp_rtt_set = 1; in tcp_rack_xmit_timer_commit()
8676 } else if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8678 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); in tcp_rack_xmit_timer_commit()
8679 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; in tcp_rack_xmit_timer_commit()
8681 if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8686 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8687 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8689 if (rack->rc_highly_buffered == 0) { in tcp_rack_xmit_timer_commit()
8695 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { in tcp_rack_xmit_timer_commit()
8696 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, in tcp_rack_xmit_timer_commit()
8697 rack->r_ctl.rc_highest_us_rtt, in tcp_rack_xmit_timer_commit()
8698 rack->r_ctl.rc_lowest_us_rtt, in tcp_rack_xmit_timer_commit()
8700 rack->rc_highly_buffered = 1; in tcp_rack_xmit_timer_commit()
8704 if ((rack->r_ctl.rack_rs.confidence) || in tcp_rack_xmit_timer_commit()
8705 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { in tcp_rack_xmit_timer_commit()
8710 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8712 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8713 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8714 if (rack->r_ctl.rc_lowest_us_rtt == 0) in tcp_rack_xmit_timer_commit()
8715 rack->r_ctl.rc_lowest_us_rtt = 1; in tcp_rack_xmit_timer_commit()
8718 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_xmit_timer_commit()
8719 if (tp->t_srtt != 0) { in tcp_rack_xmit_timer_commit()
8728 delta = tp->t_srtt - rtt; in tcp_rack_xmit_timer_commit()
8730 tp->t_srtt -= (tp->t_srtt >> 3); in tcp_rack_xmit_timer_commit()
8732 tp->t_srtt += (rtt >> 3); in tcp_rack_xmit_timer_commit()
8733 if (tp->t_srtt <= 0) in tcp_rack_xmit_timer_commit()
8734 tp->t_srtt = 1; in tcp_rack_xmit_timer_commit()
8737 delta = -delta; in tcp_rack_xmit_timer_commit()
8739 tp->t_rttvar -= (tp->t_rttvar >> 3); in tcp_rack_xmit_timer_commit()
8741 tp->t_rttvar += (delta >> 3); in tcp_rack_xmit_timer_commit()
8742 if (tp->t_rttvar <= 0) in tcp_rack_xmit_timer_commit()
8743 tp->t_rttvar = 1; in tcp_rack_xmit_timer_commit()
8746 * No rtt measurement yet - use the unsmoothed rtt. Set the in tcp_rack_xmit_timer_commit()
8750 tp->t_srtt = rtt; in tcp_rack_xmit_timer_commit()
8751 tp->t_rttvar = rtt >> 1; in tcp_rack_xmit_timer_commit()
8753 rack->rc_srtt_measure_made = 1; in tcp_rack_xmit_timer_commit()
8755 if (tp->t_rttupdated < UCHAR_MAX) in tcp_rack_xmit_timer_commit()
8756 tp->t_rttupdated++; in tcp_rack_xmit_timer_commit()
8760 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); in tcp_rack_xmit_timer_commit()
8766 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8767 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8773 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8774 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8777 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8779 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8781 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in tcp_rack_xmit_timer_commit()
8786 * tick of rounding and 1 extra tick because of +-1/2 tick in tcp_rack_xmit_timer_commit()
8792 tp->t_rxtshift = 0; in tcp_rack_xmit_timer_commit()
8793 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in tcp_rack_xmit_timer_commit()
8794 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); in tcp_rack_xmit_timer_commit()
8796 tp->t_softerror = 0; in tcp_rack_xmit_timer_commit()
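
/*
 * Illustrative sketch, not part of rack.c: the smoothed-RTT update
 * committed above. Both srtt and rttvar use a gain of 1/8 here
 * (srtt += (rtt - srtt)/8, rttvar += (|srtt - rtt| - rttvar)/8), with
 * a floor of 1 so one sample can never zero them out. Units are usec.
 */
#include <stdint.h>
#include <stdio.h>

static void
toy_rtt_commit(int32_t *srtt, int32_t *rttvar, int32_t rtt)
{
	if (*srtt != 0) {
		int32_t delta = *srtt - rtt;

		*srtt -= (*srtt >> 3);
		*srtt += (rtt >> 3);
		if (*srtt <= 0)
			*srtt = 1;
		if (delta < 0)
			delta = -delta;
		*rttvar -= (*rttvar >> 3);
		*rttvar += (delta >> 3);
		if (*rttvar <= 0)
			*rttvar = 1;
	} else {
		/* First measurement: seed directly from the sample. */
		*srtt = rtt;
		*rttvar = rtt >> 1;
	}
}

int
main(void)
{
	int32_t srtt = 0, rttvar = 0;

	toy_rtt_commit(&srtt, &rttvar, 40000);
	toy_rtt_commit(&srtt, &rttvar, 48000);
	printf("srtt=%d rttvar=%d\n", (int)srtt, (int)rttvar);	/* 41000 18500 */
	return (0);
}
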
8804 * Apply the inbound us-rtt at us_cts to the filter. in rack_apply_updated_usrtt()
8808 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_apply_updated_usrtt()
8809 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, in rack_apply_updated_usrtt()
8819 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { in rack_apply_updated_usrtt()
8821 rack->rc_gp_dyn_mul && in rack_apply_updated_usrtt()
8822 (rack->use_fixed_rate == 0) && in rack_apply_updated_usrtt()
8823 (rack->rc_always_pace)) { in rack_apply_updated_usrtt()
8826 * to the time that we would have entered probe-rtt. in rack_apply_updated_usrtt()
8828 * has entered probe-rtt. Let's go in now too. in rack_apply_updated_usrtt()
8834 if ((rack->in_probe_rtt == 0) && in rack_apply_updated_usrtt()
8835 (rack->rc_skip_timely == 0) && in rack_apply_updated_usrtt()
8836 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { in rack_apply_updated_usrtt()
8840 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_apply_updated_usrtt()
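
/*
 * Illustrative sketch, not part of rack.c: the idea behind the
 * time-windowed min filter fed above. The filter keeps the smallest
 * us-rtt seen within a window; a toy single-slot variant is assumed
 * here (the real code uses the kernel's small-filter helpers).
 */
#include <stdint.h>

struct toy_minfilter {
	uint32_t val;
	uint32_t stamp;		/* when val was recorded */
	uint32_t win;		/* window length */
};

static void
toy_apply_min(struct toy_minfilter *f, uint32_t rtt, uint32_t now)
{
	if (rtt <= f->val || (now - f->stamp) > f->win) {
		/* New minimum, or the old one aged out of the window. */
		f->val = rtt;
		f->stamp = now;
	}
}

int
main(void)
{
	struct toy_minfilter f = { .val = 500, .stamp = 0, .win = 1000 };

	toy_apply_min(&f, 400, 100);	/* new minimum: 400 */
	toy_apply_min(&f, 900, 1500);	/* 400 aged out: takes 900 */
	return (f.val == 900 ? 0 : 1);
}
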
8853 if ((rsm->r_flags & RACK_ACKED) || in rack_update_rtt()
8854 (rsm->r_flags & RACK_WAS_ACKED)) in rack_update_rtt()
8857 if (rsm->r_no_rtt_allowed) { in rack_update_rtt()
8862 if (SEQ_GT(th_ack, rsm->r_end)) { in rack_update_rtt()
8863 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8866 len_acked = th_ack - rsm->r_start; in rack_update_rtt()
8870 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8873 if (rsm->r_rtr_cnt == 1) { in rack_update_rtt()
8875 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8878 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
8879 tp->t_rttlow = t; in rack_update_rtt()
8880 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8881 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8882 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8883 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8884 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8887 …if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)… in rack_update_rtt()
8888 …us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr… in rack_update_rtt()
8890 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; in rack_update_rtt()
8893 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8895 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8897 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_update_rtt()
8899 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); in rack_update_rtt()
8900 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8913 * When we are not app-limited, we see if in rack_update_rtt()
8930 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_update_rtt()
8935 } else if (rack->app_limited_needs_set == 0) { in rack_update_rtt()
8940 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); in rack_update_rtt()
8942 calc_conf, rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8944 if ((rsm->r_flags & RACK_TLP) && in rack_update_rtt()
8945 (!IN_FASTRECOVERY(tp->t_flags))) { in rack_update_rtt()
8947 if (rack->r_ctl.rc_tlp_cwnd_reduce) { in rack_update_rtt()
8951 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8952 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8953 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
8955 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8956 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8957 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8958 rack->rc_rack_rtt = t; in rack_update_rtt()
8967 tp->t_rxtshift = 0; in rack_update_rtt()
8968 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_update_rtt()
8969 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_update_rtt()
8970 tp->t_softerror = 0; in rack_update_rtt()
8971 if (to && (to->to_flags & TOF_TS) && in rack_update_rtt()
8973 (to->to_tsecr) && in rack_update_rtt()
8974 ((rsm->r_flags & RACK_OVERMAX) == 0)) { in rack_update_rtt()
8979 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
8980 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { in rack_update_rtt()
8981 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8984 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8992 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) in rack_update_rtt()
8993 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8995 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8996 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8998 if ((i + 1) < rsm->r_rtr_cnt) { in rack_update_rtt()
9010 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
9011 tp->t_rttlow = t; in rack_update_rtt()
9012 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9013 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9014 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9015 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9018 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9019 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9020 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
9022 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
9023 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9024 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9025 rack->rc_rack_rtt = t; in rack_update_rtt()
9027 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); in rack_update_rtt()
9029 rsm->r_rtr_cnt); in rack_update_rtt()
9034 if (tcp_bblogging_on(rack->rc_tp)) { in rack_update_rtt()
9035 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
9036 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); in rack_update_rtt()
9044 * time-stamp since it's not there, or the time the peer last in rack_update_rtt()
9045 * received a segment that moved forward its cum-ack point. in rack_update_rtt()
9048 i = rsm->r_rtr_cnt - 1; in rack_update_rtt()
9049 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9052 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9057 * 6.2 Step 2 point 2 in the rack-draft so we in rack_update_rtt()
9063 } else if (rack->r_ctl.rc_rack_min_rtt) { in rack_update_rtt()
9068 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
9069 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9070 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9071 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9072 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9075 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9076 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9077 (uint32_t)rsm->r_tim_lastsent[i]))) { in rack_update_rtt()
9079 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9080 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9081 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9082 rack->rc_rack_rtt = t; in rack_update_rtt()
9104 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, in rack_log_sack_passed()
9110 if (nrsm->r_flags & RACK_ACKED) { in rack_log_sack_passed()
9118 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { in rack_log_sack_passed()
9126 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_log_sack_passed()
9129 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_log_sack_passed()
9132 nrsm->r_flags |= RACK_WAS_LOST; in rack_log_sack_passed()
9133 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_log_sack_passed()
9136 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_log_sack_passed()
9144 nrsm->r_flags |= RACK_SACK_PASSED; in rack_log_sack_passed()
9145 nrsm->r_flags &= ~RACK_WAS_SACKPASS; in rack_log_sack_passed()
9159 if ((tp->t_flags & TF_GPUTINPROG) && in rack_need_set_test()
9160 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9170 if (rsm->r_rtr_cnt > 1) { in rack_need_set_test()
9183 seq = tp->gput_seq; in rack_need_set_test()
9184 ts = tp->gput_ts; in rack_need_set_test()
9185 rack->app_limited_needs_set = 0; in rack_need_set_test()
9186 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_need_set_test()
9189 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { in rack_need_set_test()
9197 tp->gput_seq = rsm->r_start; in rack_need_set_test()
9200 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9212 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9218 * way up to where this ack cum-ack moves in rack_need_set_test()
9221 if (SEQ_GT(th_ack, rsm->r_end)) in rack_need_set_test()
9222 tp->gput_seq = th_ack; in rack_need_set_test()
9224 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9226 if (SEQ_LT(tp->gput_seq, tp->snd_max)) in rack_need_set_test()
9227 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_need_set_test()
9241 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; in rack_need_set_test()
9243 /* If we hit here we have to have *not* sent tp->gput_seq */ in rack_need_set_test()
9244 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_need_set_test()
9246 rack->app_limited_needs_set = 1; in rack_need_set_test()
9248 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { in rack_need_set_test()
9250 * We moved beyond this guy's range, re-calculate in rack_need_set_test()
9253 if (rack->rc_gp_filled == 0) { in rack_need_set_test()
9254 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); in rack_need_set_test()
9256 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_need_set_test()
9263 if ((rack->in_probe_rtt == 0) && in rack_need_set_test()
9264 (rack->measure_saw_probe_rtt) && in rack_need_set_test()
9265 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_need_set_test()
9266 rack->measure_saw_probe_rtt = 0; in rack_need_set_test()
9267 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, in rack_need_set_test()
9268 seq, tp->gput_seq, in rack_need_set_test()
9269 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9270 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9272 if (rack->rc_gp_filled && in rack_need_set_test()
9273 ((tp->gput_ack - tp->gput_seq) < in rack_need_set_test()
9279 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { in rack_need_set_test()
9286 tp->t_flags &= ~TF_GPUTINPROG; in rack_need_set_test()
9287 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_need_set_test()
9289 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9290 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9296 tp->gput_ack = tp->gput_seq + ideal_amount; in rack_need_set_test()
9300 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); in rack_need_set_test()
9307 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { in is_rsm_inside_declared_tlp_block()
9311 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { in is_rsm_inside_declared_tlp_block()
9315 /* It has to be a sub-part of the original TLP recorded */ in is_rsm_inside_declared_tlp_block()
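is_rsm_inside_declared_tlp_block() is a pure range check in wrap-safe sequence space: the entry counts as inside unless it ends before the recorded TLP block starts or begins after that block ends. An equivalent stand-alone sketch:

    #include <stdint.h>
    #include <stdbool.h>

    /* Wrap-safe sequence compares, as in <netinet/tcp_seq.h>. */
    #define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)
    #define SEQ_GT(a, b)    ((int32_t)((a) - (b)) > 0)

    /* Is [start, end) wholly within the declared block [blk_s, blk_e)? */
    static bool
    inside_block(uint32_t start, uint32_t end,
        uint32_t blk_s, uint32_t blk_e)
    {
            if (SEQ_LT(end, blk_s))
                    return (false); /* entirely below the block */
            if (SEQ_GT(start, blk_e))
                    return (false); /* entirely above the block */
            return (true);          /* a sub-part of the block */
    }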
9331 start = sack->start; in rack_proc_sack_blk()
9332 end = sack->end; in rack_proc_sack_blk()
9337 (SEQ_LT(end, rsm->r_start)) || in rack_proc_sack_blk()
9338 (SEQ_GEQ(start, rsm->r_end)) || in rack_proc_sack_blk()
9339 (SEQ_LT(start, rsm->r_start))) { in rack_proc_sack_blk()
9345 rsm = tqhash_find(rack->r_ctl.tqh, start); in rack_proc_sack_blk()
9352 if (rsm->r_start != start) { in rack_proc_sack_blk()
9353 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9358 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9359 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9364 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9372 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9376 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9377 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9378 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9379 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9381 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9382 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9383 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9384 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9387 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9388 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9389 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9390 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9391 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9398 * rsm |--------------| in rack_proc_sack_blk()
9399 * sackblk |-------> in rack_proc_sack_blk()
9401 * rsm |---| in rack_proc_sack_blk()
9403 * nrsm |----------| in rack_proc_sack_blk()
9415 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9417 (rsm->bindex == next->bindex) && in rack_proc_sack_blk()
9418 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9419 ((next->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9420 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9421 ((next->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9422 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9423 (next->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9428 (next->r_flags & RACK_ACKED) && in rack_proc_sack_blk()
9429 SEQ_GEQ(end, next->r_start)) { in rack_proc_sack_blk()
9436 * rsm |------------| (not-acked) in rack_proc_sack_blk()
9437 * next |-----------| (acked) in rack_proc_sack_blk()
9438 * sackblk |--------> in rack_proc_sack_blk()
9440 * rsm |------| (not-acked) in rack_proc_sack_blk()
9441 * next |-----------------| (acked) in rack_proc_sack_blk()
9442 * nrsm |-----| in rack_proc_sack_blk()
9450 tqhash_update_end(rack->r_ctl.tqh, rsm, start); in rack_proc_sack_blk()
9451 next->r_start = start; in rack_proc_sack_blk()
9452 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9453 next->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9454 /* Now we must adjust back where next->m is */ in rack_proc_sack_blk()
9474 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9475 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) in rack_proc_sack_blk()
9476 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9480 if (next->r_ack_arrival < in rack_proc_sack_blk()
9481 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9482 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9487 rsm->r_dupack = 0; in rack_proc_sack_blk()
9488 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9491 nrsm->r_start = start; in rack_proc_sack_blk()
9494 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9495 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9496 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9497 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9498 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9501 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9502 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9504 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9505 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9507 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9509 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9510 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9511 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9512 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9516 * one left un-acked) to the next one in rack_proc_sack_blk()
9519 * sack-passed on rsm (The one passed in in rack_proc_sack_blk()
9524 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9530 if (nrsm && nrsm->r_in_tmap) in rack_proc_sack_blk()
9534 if (SEQ_LT(end, next->r_end) || in rack_proc_sack_blk()
9535 (end == next->r_end)) { in rack_proc_sack_blk()
9542 start = next->r_end; in rack_proc_sack_blk()
9543 rsm = tqhash_next(rack->r_ctl.tqh, next); in rack_proc_sack_blk()
9551 * rsm |--------| in rack_proc_sack_blk()
9552 * sackblk |-----> in rack_proc_sack_blk()
9557 * rsm |----| in rack_proc_sack_blk()
9558 * sackblk |-----> in rack_proc_sack_blk()
9559 * nrsm |---| in rack_proc_sack_blk()
9574 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9576 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9578 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9583 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9584 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9585 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9588 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9595 if (end == rsm->r_end) { in rack_proc_sack_blk()
9597 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9599 } else if (SEQ_LT(end, rsm->r_end)) { in rack_proc_sack_blk()
9601 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9609 start = rsm->r_end; in rack_proc_sack_blk()
9610 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9616 if (SEQ_GEQ(end, rsm->r_end)) { in rack_proc_sack_blk()
9620 * rsm --- |-----| in rack_proc_sack_blk()
9621 * end |-----| in rack_proc_sack_blk()
9623 * end |---------| in rack_proc_sack_blk()
9625 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9629 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9630 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9635 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9642 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9646 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9647 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9648 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9649 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9651 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9652 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9653 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9654 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9657 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9658 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9659 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9660 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9661 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9665 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9667 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9670 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9671 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9672 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9674 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9675 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9677 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9679 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9680 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9683 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9684 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9685 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9686 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9687 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9689 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9690 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9691 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9692 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9693 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9694 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9695 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9696 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
9702 if (end == rsm->r_end) { in rack_proc_sack_blk()
9703 /* This block only - done, set up for next */ in rack_proc_sack_blk()
9710 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9711 start = rsm->r_end; in rack_proc_sack_blk()
9720 * rsm --- |-----| in rack_proc_sack_blk()
9721 * end |--| in rack_proc_sack_blk()
9723 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9727 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9728 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9733 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9740 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9744 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9745 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9746 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9747 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9749 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9750 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9751 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9752 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9755 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9756 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9757 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9758 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9759 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9767 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9769 (rsm->bindex == prev->bindex) && in rack_proc_sack_blk()
9770 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9771 ((prev->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9772 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9773 ((prev->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9774 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9775 (prev->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9780 (prev->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
9783 * in place and span from (rsm->r_start = end) to rsm->r_end. in rack_proc_sack_blk()
9785 * to prev->r_end <- end. in rack_proc_sack_blk()
9787 * prev |--------| (acked) in rack_proc_sack_blk()
9788 * rsm |-------| (non-acked) in rack_proc_sack_blk()
9789 * sackblk |-| in rack_proc_sack_blk()
9791 * prev |----------| (acked) in rack_proc_sack_blk()
9792 * rsm |-----| (non-acked) in rack_proc_sack_blk()
9793 * nrsm |-| (temporary) in rack_proc_sack_blk()
9800 tqhash_update_end(rack->r_ctl.tqh, prev, end); in rack_proc_sack_blk()
9801 rsm->r_start = end; in rack_proc_sack_blk()
9802 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9803 prev->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9808 nrsm->r_end = end; in rack_proc_sack_blk()
9809 rsm->r_dupack = 0; in rack_proc_sack_blk()
9828 if (prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9829 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { in rack_proc_sack_blk()
9830 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9836 if (prev->r_ack_arrival < in rack_proc_sack_blk()
9837 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9838 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9853 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9854 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9855 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9856 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9857 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9860 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9861 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9863 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9864 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9866 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9868 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9869 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9870 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9871 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9887 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9888 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9893 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9900 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9904 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9905 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9906 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9907 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9909 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9910 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9911 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9912 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9915 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9916 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9917 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9918 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9919 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9924 * nrsm->r_start = end; in rack_proc_sack_blk()
9925 * nrsm->r_end = rsm->r_end; in rack_proc_sack_blk()
9926 * which is un-acked. in rack_proc_sack_blk()
9928 * rsm->r_end = nrsm->r_start; in rack_proc_sack_blk()
9929 * i.e. the remaining un-acked in rack_proc_sack_blk()
9934 * rsm |----------| (not acked) in rack_proc_sack_blk()
9935 * sackblk |---| in rack_proc_sack_blk()
9937 * rsm |---| (acked) in rack_proc_sack_blk()
9938 * nrsm |------| (not acked) in rack_proc_sack_blk()
9942 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9943 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9945 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9947 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9952 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9953 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9954 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9956 nrsm->r_dupack = 0; in rack_proc_sack_blk()
9959 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9960 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9963 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9964 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9965 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9967 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9968 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9970 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9972 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9974 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9977 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9978 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9979 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9980 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9981 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9983 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9984 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9985 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9986 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9987 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9989 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9990 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9991 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
10002 ((rsm->r_flags & RACK_TLP) == 0) && in rack_proc_sack_blk()
10003 (rsm->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
10009 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10011 if (next->r_flags & RACK_TLP) in rack_proc_sack_blk()
10014 if ((next->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10015 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10018 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10019 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10022 if (rsm->bindex != next->bindex) in rack_proc_sack_blk()
10024 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10026 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10028 if (next->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10030 if (next->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10032 if (next->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10035 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10040 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10042 if (prev->r_flags & RACK_TLP) in rack_proc_sack_blk()
10045 if ((prev->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10046 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10049 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10050 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10053 if (rsm->bindex != prev->bindex) in rack_proc_sack_blk()
10055 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10057 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10059 if (prev->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10061 if (prev->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10063 if (prev->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10066 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10077 nrsm = tqhash_find(rack->r_ctl.tqh, end); in rack_proc_sack_blk()
10078 *prsm = rack->r_ctl.rc_sacklast = nrsm; in rack_proc_sack_blk()
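All of the split diagrams in rack_proc_sack_blk() reduce to one primitive: clip a send-map entry at a sequence boundary so that acked and un-acked bytes never share an entry, then either insert the new piece or donate the bytes to an already-acked neighbor (the RACK_SHUFFLED cases, which avoid an allocation by moving the shared edge instead). A compact sketch of the plain split, with a hypothetical helper standing in for rack_clone_rsm() plus tqhash_insert():

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative sendmap entry; the kernel's carries far more state. */
    struct map_ent {
            uint32_t start, end;    /* [start, end) in sequence space */
            int acked;
    };

    /*
     * Split *e at seq: *e keeps [start, seq), the returned entry gets
     * [seq, end). The caller links the new entry in after *e.
     */
    static struct map_ent *
    split_ent(struct map_ent *e, uint32_t seq)
    {
            struct map_ent *n = malloc(sizeof(*n));

            if (n == NULL)
                    return (NULL);
            *n = *e;        /* clone bookkeeping, like rack_clone_rsm() */
            n->start = seq;
            e->end = seq;
            return (n);
    }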
10088 while (rsm && (rsm->r_flags & RACK_ACKED)) { in rack_peer_reneges()
10090 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_peer_reneges()
10092 if (rsm->r_in_tmap) { in rack_peer_reneges()
10094 rack, rsm, rsm->r_flags); in rack_peer_reneges()
10097 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); in rack_peer_reneges()
10100 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_peer_reneges()
10103 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); in rack_peer_reneges()
10106 tmap->r_in_tmap = 1; in rack_peer_reneges()
10107 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_peer_reneges()
10113 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); in rack_peer_reneges()
10158 * The cum-ack is being advanced upon the sendmap. in rack_rsm_sender_update()
10164 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_rsm_sender_update()
10171 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { in rack_rsm_sender_update()
10172 tp->gput_ack = rsm->r_end; in rack_rsm_sender_update()
10181 if (rack->app_limited_needs_set) in rack_rsm_sender_update()
10199 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= in rack_rsm_sender_update()
10200 rack->r_ctl.rc_gp_cumack_ts) in rack_rsm_sender_update()
10203 rack->r_ctl.rc_gp_cumack_ts = ts; in rack_rsm_sender_update()
10204 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, in rack_rsm_sender_update()
10219 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { in rack_process_to_cumack()
10224 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); in rack_process_to_cumack()
10226 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_process_to_cumack()
10228 rack->r_ctl.cleared_app_ack = 0; in rack_process_to_cumack()
10230 rack->r_wanted_output = 1; in rack_process_to_cumack()
10231 if (SEQ_GT(th_ack, tp->snd_una)) in rack_process_to_cumack()
10232 rack->r_ctl.last_cumack_advance = acktime; in rack_process_to_cumack()
10235 if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10236 (rack->rc_last_tlp_past_cumack == 1) && in rack_process_to_cumack()
10237 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { in rack_process_to_cumack()
10240 * tlp retransmit sequence is ahead of the cum-ack. in rack_process_to_cumack()
10241 * This can only happen when the cum-ack moves all in rack_process_to_cumack()
10248 * the cum-ack is by the TLP before checking which is in rack_process_to_cumack()
10252 rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10253 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10254 rack->rc_last_tlp_acked_set = 0; in rack_process_to_cumack()
10255 rack->rc_last_tlp_past_cumack = 0; in rack_process_to_cumack()
10256 } else if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10257 (rack->rc_last_tlp_past_cumack == 0) && in rack_process_to_cumack()
10258 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { in rack_process_to_cumack()
10262 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10265 if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10266 (rack->rc_last_sent_tlp_past_cumack == 1) && in rack_process_to_cumack()
10267 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { in rack_process_to_cumack()
10269 rack->r_ctl.last_sent_tlp_seq, in rack_process_to_cumack()
10270 (rack->r_ctl.last_sent_tlp_seq + in rack_process_to_cumack()
10271 rack->r_ctl.last_sent_tlp_len)); in rack_process_to_cumack()
10272 rack->rc_last_sent_tlp_seq_valid = 0; in rack_process_to_cumack()
10273 rack->rc_last_sent_tlp_past_cumack = 0; in rack_process_to_cumack()
10274 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10275 (rack->rc_last_sent_tlp_past_cumack == 0) && in rack_process_to_cumack()
10276 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { in rack_process_to_cumack()
10280 rack->rc_last_sent_tlp_past_cumack = 1; in rack_process_to_cumack()
10283 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10285 if ((th_ack - 1) == tp->iss) { in rack_process_to_cumack()
10294 if (tp->t_flags & TF_SENTFIN) { in rack_process_to_cumack()
10301 tp->t_state, th_ack, rack, in rack_process_to_cumack()
10302 tp->snd_una, tp->snd_max); in rack_process_to_cumack()
10306 if (SEQ_LT(th_ack, rsm->r_start)) { in rack_process_to_cumack()
10310 rsm->r_start, in rack_process_to_cumack()
10311 th_ack, tp->t_state, rack->r_state); in rack_process_to_cumack()
10318 if ((rsm->r_flags & RACK_TLP) && in rack_process_to_cumack()
10319 (rsm->r_rtr_cnt > 1)) { in rack_process_to_cumack()
10329 if (rack->rc_last_tlp_acked_set && in rack_process_to_cumack()
10336 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10340 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_process_to_cumack()
10341 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10342 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10343 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10345 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_process_to_cumack()
10346 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10347 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10348 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10351 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10352 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10353 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10354 rack->rc_last_tlp_acked_set = 1; in rack_process_to_cumack()
10355 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10359 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_process_to_cumack()
10360 if (SEQ_GEQ(th_ack, rsm->r_end)) { in rack_process_to_cumack()
10365 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10371 rsm->r_flags &= ~RACK_WAS_LOST; in rack_process_to_cumack()
10372 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_process_to_cumack()
10374 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_process_to_cumack()
10375 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_process_to_cumack()
10377 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10379 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); in rack_process_to_cumack()
10380 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; in rack_process_to_cumack()
10381 rsm->r_rtr_bytes = 0; in rack_process_to_cumack()
10387 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_process_to_cumack()
10388 if (rsm->r_in_tmap) { in rack_process_to_cumack()
10389 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_process_to_cumack()
10390 rsm->r_in_tmap = 0; in rack_process_to_cumack()
10393 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10395 * It was acked on the scoreboard -- remove in rack_process_to_cumack()
10398 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_process_to_cumack()
10400 } else if (rsm->r_flags & RACK_SACK_PASSED) { in rack_process_to_cumack()
10406 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_process_to_cumack()
10407 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_process_to_cumack()
10408 rsm->r_flags |= RACK_ACKED; in rack_process_to_cumack()
10409 rack->r_ctl.rc_reorder_ts = cts; in rack_process_to_cumack()
10410 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_to_cumack()
10411 rack->r_ctl.rc_reorder_ts = 1; in rack_process_to_cumack()
10412 if (rack->r_ent_rec_ns) { in rack_process_to_cumack()
10417 rack->r_might_revert = 1; in rack_process_to_cumack()
10419 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10421 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10423 if ((rsm->r_flags & RACK_TO_REXT) && in rack_process_to_cumack()
10424 (tp->t_flags & TF_RCVD_TSTMP) && in rack_process_to_cumack()
10425 (to->to_flags & TOF_TS) && in rack_process_to_cumack()
10426 (to->to_tsecr != 0) && in rack_process_to_cumack()
10427 (tp->t_flags & TF_PREVVALID)) { in rack_process_to_cumack()
10433 tp->t_flags &= ~TF_PREVVALID; in rack_process_to_cumack()
10434 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { in rack_process_to_cumack()
10439 left = th_ack - rsm->r_end; in rack_process_to_cumack()
10440 if (rack->app_limited_needs_set && newly_acked) in rack_process_to_cumack()
10448 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10449 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { in rack_process_to_cumack()
10457 * given us snd_una up to (rsm->r_end). in rack_process_to_cumack()
10461 * our rsm->r_start in case we get an old ack in rack_process_to_cumack()
10468 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10470 * It was acked on the scoreboard -- remove it from in rack_process_to_cumack()
10471 * total for the part being cum-acked. in rack_process_to_cumack()
10473 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); in rack_process_to_cumack()
10475 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); in rack_process_to_cumack()
10478 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10485 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), in rack_process_to_cumack()
10487 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) in rack_process_to_cumack()
10488 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; in rack_process_to_cumack()
10490 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10496 rsm->r_dupack = 0; in rack_process_to_cumack()
10498 if (rsm->r_rtr_bytes) { in rack_process_to_cumack()
10505 ack_am = (th_ack - rsm->r_start); in rack_process_to_cumack()
10506 if (ack_am >= rsm->r_rtr_bytes) { in rack_process_to_cumack()
10507 rack->r_ctl.rc_holes_rxt -= ack_am; in rack_process_to_cumack()
10508 rsm->r_rtr_bytes -= ack_am; in rack_process_to_cumack()
10518 if (rsm->m && in rack_process_to_cumack()
10519 ((rsm->orig_m_len != rsm->m->m_len) || in rack_process_to_cumack()
10520 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_process_to_cumack()
10524 rsm->soff += (th_ack - rsm->r_start); in rack_process_to_cumack()
10527 tqhash_trim(rack->r_ctl.tqh, th_ack); in rack_process_to_cumack()
10533 m = rsm->m; in rack_process_to_cumack()
10534 soff = rsm->soff; in rack_process_to_cumack()
10536 while (soff >= m->m_len) { in rack_process_to_cumack()
10537 soff -= m->m_len; in rack_process_to_cumack()
10538 KASSERT((m->m_next != NULL), in rack_process_to_cumack()
10540 rsm, rsm->soff, soff, m)); in rack_process_to_cumack()
10541 m = m->m_next; in rack_process_to_cumack()
10544 * This is a fall-back that prevents a panic. In reality in rack_process_to_cumack()
10547 * but tqhash_trim did update rsm->r_start so the offset calculation in rack_process_to_cumack()
10552 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_process_to_cumack()
10553 (rsm->r_start - tp->snd_una), in rack_process_to_cumack()
10561 rsm->m = m; in rack_process_to_cumack()
10562 rsm->soff = soff; in rack_process_to_cumack()
10563 rsm->orig_m_len = rsm->m->m_len; in rack_process_to_cumack()
10564 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_process_to_cumack()
10567 if (rack->app_limited_needs_set && in rack_process_to_cumack()
10568 SEQ_GEQ(th_ack, tp->gput_seq)) in rack_process_to_cumack()
10569 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); in rack_process_to_cumack()
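The soff walk above is the core invariant of the sendmap/socket-buffer coupling: once snd_una advances and the head of the send buffer is trimmed, each entry's (mbuf, offset) pair must be stepped forward until the offset lands inside the current mbuf. A user-space analogue over a simple chain, with illustrative types:

    #include <stddef.h>
    #include <assert.h>

    /* Illustrative buffer chain, standing in for struct mbuf. */
    struct buf {
            struct buf *next;
            size_t len;
    };

    /* Advance (b, *off) so *off indexes into the returned buffer. */
    static struct buf *
    chain_advance(struct buf *b, size_t *off)
    {
            while (*off >= b->len) {
                    *off -= b->len;
                    assert(b->next != NULL); /* offset must stay in chain */
                    b = b->next;
            }
            return (b);
    }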
10578 if (rack->r_might_revert) { in rack_handle_might_revert()
10589 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_handle_might_revert()
10590 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_handle_might_revert()
10602 rack->r_ent_rec_ns = 0; in rack_handle_might_revert()
10603 orig_cwnd = tp->snd_cwnd; in rack_handle_might_revert()
10604 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; in rack_handle_might_revert()
10605 tp->snd_recover = tp->snd_una; in rack_handle_might_revert()
10607 if (IN_RECOVERY(tp->t_flags)) { in rack_handle_might_revert()
10609 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0)) { in rack_handle_might_revert()
10612 * and then re-entered recovery (more SACKs arrived) in rack_handle_might_revert()
10614 * the first recovery. We want to be able to slow-start in rack_handle_might_revert()
10618 * so we get no slow-start after our RTO. in rack_handle_might_revert()
10620 rack->rto_from_rec = 0; in rack_handle_might_revert()
10621 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_handle_might_revert()
10622 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_handle_might_revert()
10626 rack->r_might_revert = 0; in rack_handle_might_revert()
10639 am = end - start; in rack_note_dsack()
10642 if ((rack->rc_last_tlp_acked_set) && in rack_note_dsack()
10643 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && in rack_note_dsack()
10644 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { in rack_note_dsack()
10655 if (rack->rc_last_sent_tlp_seq_valid) { in rack_note_dsack()
10656 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; in rack_note_dsack()
10657 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && in rack_note_dsack()
10668 if (rack->rc_dsack_round_seen == 0) { in rack_note_dsack()
10669 rack->rc_dsack_round_seen = 1; in rack_note_dsack()
10670 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; in rack_note_dsack()
10671 rack->r_ctl.num_dsack++; in rack_note_dsack()
10672 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ in rack_note_dsack()
10680 rack->r_ctl.dsack_byte_cnt += am; in rack_note_dsack()
10681 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_note_dsack()
10682 rack->r_ctl.retran_during_recovery && in rack_note_dsack()
10683 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { in rack_note_dsack()
10688 rack->r_might_revert = 1; in rack_note_dsack()
10689 rack_handle_might_revert(rack->rc_tp, rack); in rack_note_dsack()
10690 rack->r_might_revert = 0; in rack_note_dsack()
10691 rack->r_ctl.retran_during_recovery = 0; in rack_note_dsack()
10692 rack->r_ctl.dsack_byte_cnt = 0; in rack_note_dsack()
10700 return (((tp->snd_max - snd_una) - in do_rack_compute_pipe()
10701 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); in do_rack_compute_pipe()
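do_rack_compute_pipe() is the RFC 6675-style pipe estimate: pipe = (snd_max - snd_una) - (rc_sacked + rc_considered_lost) + rc_holes_rxt. A worked example with assumed numbers: 100000 bytes outstanding, 30000 SACKed, 10000 considered lost and 5000 retransmitted into holes gives pipe = (100000 - 40000) + 5000 = 65000 bytes in flight.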
10708 (struct tcp_rack *)tp->t_fb_ptr, in rack_compute_pipe()
10709 tp->snd_una)); in rack_compute_pipe()
10718 rack->r_ctl.rc_prr_delivered += changed; in rack_update_prr()
10720 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { in rack_update_prr()
10724 * Note we use tp->snd_una here and not th_ack because in rack_update_prr()
10727 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10731 if (SEQ_GT(tp->snd_una, th_ack)) { in rack_update_prr()
10732 snd_una = tp->snd_una; in rack_update_prr()
10737 if (pipe > tp->snd_ssthresh) { in rack_update_prr()
10740 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; in rack_update_prr()
10741 if (rack->r_ctl.rc_prr_recovery_fs > 0) in rack_update_prr()
10742 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; in rack_update_prr()
10744 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10749 if (sndcnt > (long)rack->r_ctl.rc_prr_out) in rack_update_prr()
10750 sndcnt -= rack->r_ctl.rc_prr_out; in rack_update_prr()
10753 rack->r_ctl.rc_prr_sndcnt = sndcnt; in rack_update_prr()
10758 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) in rack_update_prr()
10759 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); in rack_update_prr()
10765 if (tp->snd_ssthresh > pipe) { in rack_update_prr()
10766 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); in rack_update_prr()
10769 rack->r_ctl.rc_prr_sndcnt = min(0, limit); in rack_update_prr()
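rack_update_prr() follows RFC 6937 proportional rate reduction: while pipe is above ssthresh, the send allowance is delivered * ssthresh / recoverfs minus what recovery already sent; at or below ssthresh it switches to a slow-start-like limit, clamped so pipe cannot overshoot ssthresh. A minimal sketch under those assumptions, with user-space types rather than the kernel's:

    #include <stdint.h>

    struct prr {
            uint64_t delivered;     /* bytes delivered in recovery */
            uint64_t out;           /* bytes sent in recovery */
            uint64_t recoverfs;     /* flight size at recovery start */
    };

    static uint64_t
    prr_sndcnt(const struct prr *p, uint64_t pipe, uint64_t ssthresh,
        uint64_t maxseg)
    {
            uint64_t cnt, limit;

            if (pipe > ssthresh) {
                    /* Proportional part: scale by ssthresh/recoverfs. */
                    if (p->recoverfs == 0)
                            return (0);
                    cnt = (p->delivered * ssthresh) / p->recoverfs;
                    return ((cnt > p->out) ? cnt - p->out : 0);
            }
            /* Slow-start part: newly delivered data plus one MSS... */
            limit = (p->delivered > p->out) ?
                (p->delivered - p->out) + maxseg : maxseg;
            /* ...clamped so pipe never rises past ssthresh. */
            if (ssthresh <= pipe)
                    return (0);
            return ((ssthresh - pipe < limit) ? ssthresh - pipe : limit);
    }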
10796 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_ack()
10798 rsm = tqhash_min(rack->r_ctl.tqh); in rack_log_ack()
10800 th_ack = th->th_ack; in rack_log_ack()
10801 segsiz = ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10806 * credit for larger cum-ack moves). in rack_log_ack()
10810 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10813 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_log_ack()
10815 tp->t_acktime = ticks; in rack_log_ack()
10817 if (rsm && SEQ_GT(th_ack, rsm->r_start)) in rack_log_ack()
10818 changed = th_ack - rsm->r_start; in rack_log_ack()
10821 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_log_ack()
10823 if ((to->to_flags & TOF_SACK) == 0) { in rack_log_ack()
10827 * For cases where we struck a dup-ack in rack_log_ack()
10832 changed += ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10837 if (SEQ_GT(th_ack, tp->snd_una)) in rack_log_ack()
10840 ack_point = tp->snd_una; in rack_log_ack()
10841 for (i = 0; i < to->to_nsacks; i++) { in rack_log_ack()
10842 bcopy((to->to_sacks + i * TCPOLEN_SACK), in rack_log_ack()
10848 SEQ_LT(sack.start, tp->snd_max) && in rack_log_ack()
10850 SEQ_LEQ(sack.end, tp->snd_max)) { in rack_log_ack()
10861 * Its a D-SACK block. in rack_log_ack()
10866 if (rack->rc_dsack_round_seen) { in rack_log_ack()
10868 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { in rack_log_ack()
10870 rack->rc_dsack_round_seen = 0; in rack_log_ack()
10878 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, in rack_log_ack()
10879 num_sack_blks, th->th_ack); in rack_log_ack()
10880 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); in rack_log_ack()
10927 * Now collapse out the dup-sack and in rack_log_ack()
10935 num_sack_blks--; in rack_log_ack()
10947 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_log_ack()
10949 SEQ_GT(sack_blocks[0].end, rsm->r_start) && in rack_log_ack()
10950 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { in rack_log_ack()
10957 rack->r_wanted_output = 1; in rack_log_ack()
10965 * i.e. the sack-filter pushes down in rack_log_ack()
10971 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); in rack_log_ack()
10983 rsm = rack->r_ctl.rc_sacklast; in rack_log_ack()
10987 rack->r_wanted_output = 1; in rack_log_ack()
10995 * you have more than one sack-blk, this in rack_log_ack()
10997 * and the sack-filter is still working, or in rack_log_ack()
11006 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_log_ack()
11010 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_log_ack()
11012 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { in rack_log_ack()
11020 if (rack->rack_no_prr == 0) { in rack_log_ack()
11021 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_log_ack()
11024 rack->r_timer_override = 1; in rack_log_ack()
11025 rack->r_early = 0; in rack_log_ack()
11026 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11027 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11029 (rack->r_rr_config == 3)) { in rack_log_ack()
11034 rack->r_timer_override = 1; in rack_log_ack()
11035 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_log_ack()
11036 rack->r_ctl.rc_resend = rsm; in rack_log_ack()
11038 if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11039 (rack->rack_no_prr == 0) && in rack_log_ack()
11042 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && in rack_log_ack()
11043 ((tcp_in_hpts(rack->rc_tp) == 0) && in rack_log_ack()
11044 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { in rack_log_ack()
11049 rack->r_early = 0; in rack_log_ack()
11050 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11051 rack->r_timer_override = 1; in rack_log_ack()
11061 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_strike_dupack()
11067 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in rack_strike_dupack()
11068 (rsm->r_flags & RACK_MUST_RXT)) { in rack_strike_dupack()
11074 if (rsm && (rsm->r_dupack < 0xff)) { in rack_strike_dupack()
11075 rsm->r_dupack++; in rack_strike_dupack()
11076 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { in rack_strike_dupack()
11082 * we will get a return of the rsm. For a non-sack in rack_strike_dupack()
11087 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); in rack_strike_dupack()
11088 if (rack->r_ctl.rc_resend != NULL) { in rack_strike_dupack()
11089 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { in rack_strike_dupack()
11090 rack_cong_signal(rack->rc_tp, CC_NDUPACK, in rack_strike_dupack()
11093 rack->r_wanted_output = 1; in rack_strike_dupack()
11094 rack->r_timer_override = 1; in rack_strike_dupack()
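rack_strike_dupack() saturates a per-entry dup-ack counter and only arms a retransmission once the classic three-dupack threshold is reached (or the entry already carries RACK_MUST_RXT). The counting rule itself is tiny; a sketch:

    #include <stdint.h>
    #include <stdbool.h>

    #define DUP_ACK_THRESHOLD 3     /* classic three-dupack trigger */

    /* Bump a saturating counter; true once the threshold is crossed. */
    static bool
    strike_dupack(uint8_t *r_dupack)
    {
            if (*r_dupack < 0xff)
                    (*r_dupack)++;
            return (*r_dupack >= DUP_ACK_THRESHOLD);
    }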
11118 * gauge the inter-ack times). If that occurs we have a real problem in rack_check_bottom_drag()
11131 if (tp->snd_max == tp->snd_una) { in rack_check_bottom_drag()
11143 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); in rack_check_bottom_drag()
11145 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11147 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && in rack_check_bottom_drag()
11148 (rack->dis_lt_bw == 0) && in rack_check_bottom_drag()
11149 (rack->use_lesser_lt_bw == 0) && in rack_check_bottom_drag()
11152 * Lets use the long-term b/w we have in rack_check_bottom_drag()
11155 if (rack->rc_gp_filled == 0) { in rack_check_bottom_drag()
11167 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11168 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11169 rack->rc_gp_filled = 1; in rack_check_bottom_drag()
11170 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11171 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11172 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11173 } else if (lt_bw > rack->r_ctl.gp_bw) { in rack_check_bottom_drag()
11174 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11175 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11176 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11177 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11178 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11180 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11181 if ((rack->gp_ready == 0) && in rack_check_bottom_drag()
11182 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_check_bottom_drag()
11184 rack->gp_ready = 1; in rack_check_bottom_drag()
11185 if (rack->dgp_on || in rack_check_bottom_drag()
11186 rack->rack_hibeta) in rack_check_bottom_drag()
11188 if (rack->defer_options) in rack_check_bottom_drag()
11195 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11197 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_check_bottom_drag()
11198 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), in rack_check_bottom_drag()
11200 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11201 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11202 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= in rack_check_bottom_drag()
11213 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11214 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
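The seeding rule in this path: when no goodput measurement exists yet, the long-term bandwidth (delivered bytes over accumulated active time, per rack_get_lt_bw()) becomes the initial gp_bw; afterwards lt_bw only replaces gp_bw when it is larger. For example, 12 MB delivered across 1.0 s of accumulated lt_bw_time yields a 12 MB/s seed, assuming a valid rate sample and lt_bw use not being disabled.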
11225 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid()
11227 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) in rack_log_hybrid()
11248 log.u_bbr.flex2 = cur->start_seq; in rack_log_hybrid()
11249 log.u_bbr.flex3 = cur->end_seq; in rack_log_hybrid()
11250 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11251 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid()
11252 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid()
11253 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid()
11254 log.u_bbr.rttProp = cur->timestamp; in rack_log_hybrid()
11255 log.u_bbr.cur_del_rate = cur->cspr; in rack_log_hybrid()
11256 log.u_bbr.bw_inuse = cur->start; in rack_log_hybrid()
11257 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid()
11258 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11259 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); in rack_log_hybrid()
11260 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11263 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid()
11272 log.u_bbr.flex7 = rack->rc_catch_up; in rack_log_hybrid()
11274 log.u_bbr.flex7 |= rack->rc_hybrid_mode; in rack_log_hybrid()
11276 log.u_bbr.flex7 |= rack->dgp_on; in rack_log_hybrid()
11284 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid()
11286 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid()
11288 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid()
11290 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid()
11292 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; in rack_log_hybrid()
11293 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; in rack_log_hybrid()
11294 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_hybrid()
11295 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; in rack_log_hybrid()
11296 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; in rack_log_hybrid()
11297 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; in rack_log_hybrid()
11298 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid()
11299 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid()
11300 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid()
11315 orig_ent = rack->r_ctl.rc_last_sft; in rack_set_dgp_hybrid_mode()
11316 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); in rack_set_dgp_hybrid_mode()
11319 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11321 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); in rack_set_dgp_hybrid_mode()
11328 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11329 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11330 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11331 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11332 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11334 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11336 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11337 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11339 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11340 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11344 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { in rack_set_dgp_hybrid_mode()
11346 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11347 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11348 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11349 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11351 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11352 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11354 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_set_dgp_hybrid_mode()
11355 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_set_dgp_hybrid_mode()
11356 rc_cur->first_send = cts; in rack_set_dgp_hybrid_mode()
11357 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11358 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11369 tp = rack->rc_tp; in rack_set_dgp_hybrid_mode()
11370 if ((rack->r_ctl.rc_last_sft != NULL) && in rack_set_dgp_hybrid_mode()
11371 (rack->r_ctl.rc_last_sft == rc_cur)) { in rack_set_dgp_hybrid_mode()
11373 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11377 if (rack->rc_hybrid_mode == 0) { in rack_set_dgp_hybrid_mode()
11378 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11380 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11381 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11382 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11387 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) { in rack_set_dgp_hybrid_mode()
11389 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11390 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11392 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11394 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11395 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11396 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11398 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11401 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) in rack_set_dgp_hybrid_mode()
11402 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; in rack_set_dgp_hybrid_mode()
11404 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11405 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { in rack_set_dgp_hybrid_mode()
11409 * sendtime not arrival time for catch-up mode. in rack_set_dgp_hybrid_mode()
11411 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; in rack_set_dgp_hybrid_mode()
11413 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && in rack_set_dgp_hybrid_mode()
11414 (rc_cur->cspr > 0)) { in rack_set_dgp_hybrid_mode()
11417 rack->rc_catch_up = 1; in rack_set_dgp_hybrid_mode()
11422 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { in rack_set_dgp_hybrid_mode()
11428 rc_cur->deadline = cts; in rack_set_dgp_hybrid_mode()
11434 rc_cur->deadline = rc_cur->localtime; in rack_set_dgp_hybrid_mode()
11440 len = rc_cur->end - rc_cur->start; in rack_set_dgp_hybrid_mode()
11441 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { in rack_set_dgp_hybrid_mode()
11446 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); in rack_set_dgp_hybrid_mode()
11456 len /= rc_cur->cspr; in rack_set_dgp_hybrid_mode()
11457 rc_cur->deadline += len; in rack_set_dgp_hybrid_mode()
11459 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11460 rc_cur->deadline = 0; in rack_set_dgp_hybrid_mode()
11462 if (rack->r_ctl.client_suggested_maxseg != 0) { in rack_set_dgp_hybrid_mode()
11470 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11471 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11472 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11476 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11477 rack->r_ctl.last_tm_mark = rc_cur->timestamp; in rack_set_dgp_hybrid_mode()
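The catch-up deadline arithmetic above is, in outline: deadline = chosen start time (the request's send time or arrival time) + range length / cspr, with the range optionally inflated by the estimated TLS record overhead. Roughly, then, a 4 MB range at a client-supplied cspr of 2 MB/s puts the deadline about 2 seconds past the start time (assumed numbers, ignoring TLS overhead and unit conversion).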
11487 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11489 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || in rack_chk_req_and_hybrid_on_out()
11490 (SEQ_GEQ(seq, ent->end_seq))) { in rack_chk_req_and_hybrid_on_out()
11493 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11499 if (SEQ_LT(ent->end_seq, (seq + len))) { in rack_chk_req_and_hybrid_on_out()
11510 ent->end_seq = (seq + len); in rack_chk_req_and_hybrid_on_out()
11511 if (rack->rc_hybrid_mode) in rack_chk_req_and_hybrid_on_out()
11515 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_chk_req_and_hybrid_on_out()
11516 ent->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_chk_req_and_hybrid_on_out()
11517 ent->first_send = cts; in rack_chk_req_and_hybrid_on_out()
11518 ent->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_chk_req_and_hybrid_on_out()
11519 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_chk_req_and_hybrid_on_out()
11548 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; in rack_gain_for_fastoutput()
11549 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), in rack_gain_for_fastoutput()
11550 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11554 rack->r_ctl.fsb.left_to_send = new_total; in rack_gain_for_fastoutput()
11555 …KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_ma… in rack_gain_for_fastoutput()
11557 rack, rack->r_ctl.fsb.left_to_send, in rack_gain_for_fastoutput()
11558 sbavail(&rack->rc_inp->inp_socket->so_snd), in rack_gain_for_fastoutput()
11559 (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11598 snd_una = rack->rc_tp->snd_una; in rack_adjust_sendmap_head()
11600 m = sb->sb_mb; in rack_adjust_sendmap_head()
11601 rsm = tqhash_min(rack->r_ctl.tqh); in rack_adjust_sendmap_head()
11607 KASSERT((rsm->m == m), in rack_adjust_sendmap_head()
11608 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", in rack_adjust_sendmap_head()
11610 while (rsm->m && (rsm->m == m)) { in rack_adjust_sendmap_head()
11616 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); in rack_adjust_sendmap_head()
11617 if ((rsm->orig_m_len != m->m_len) || in rack_adjust_sendmap_head()
11618 (rsm->orig_t_space != M_TRAILINGROOM(m))){ in rack_adjust_sendmap_head()
11622 KASSERT((rsm->soff == 0), in rack_adjust_sendmap_head()
11623 ("Rack:%p rsm:%p -- rsm at head but soff not zero", in rack_adjust_sendmap_head()
11627 if ((rsm->soff != soff) || (rsm->m != tm)) { in rack_adjust_sendmap_head()
11636 rsm->m = tm; in rack_adjust_sendmap_head()
11637 rsm->soff = soff; in rack_adjust_sendmap_head()
11639 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11640 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11642 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11643 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11646 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); in rack_adjust_sendmap_head()
11647 if (rsm->m) { in rack_adjust_sendmap_head()
11648 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11649 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11651 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11652 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11655 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_adjust_sendmap_head()
11668 if ((rack->rc_hybrid_mode == 0) && in rack_req_check_for_comp()
11669 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { in rack_req_check_for_comp()
11674 tcp_req_check_for_comp(rack->rc_tp, th_ack); in rack_req_check_for_comp()
11684 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11698 data = ent->end - ent->start; in rack_req_check_for_comp()
11699 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_req_check_for_comp()
11700 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { in rack_req_check_for_comp()
11701 if (ent->first_send > ent->localtime) in rack_req_check_for_comp()
11702 ftim = ent->first_send; in rack_req_check_for_comp()
11704 ftim = ent->localtime; in rack_req_check_for_comp()
11707 ftim = ent->localtime; in rack_req_check_for_comp()
11709 if (laa > ent->localtime) in rack_req_check_for_comp()
11710 tim = laa - ftim; in rack_req_check_for_comp()
11724 if (ent == rack->r_ctl.rc_last_sft) { in rack_req_check_for_comp()
11725 rack->r_ctl.rc_last_sft = NULL; in rack_req_check_for_comp()
11726 if (rack->rc_hybrid_mode) { in rack_req_check_for_comp()
11727 rack->rc_catch_up = 0; in rack_req_check_for_comp()
11728 if (rack->cspr_is_fcc == 0) in rack_req_check_for_comp()
11729 rack->r_ctl.bw_rate_cap = 0; in rack_req_check_for_comp()
11731 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_req_check_for_comp()
11732 rack->r_ctl.client_suggested_maxseg = 0; in rack_req_check_for_comp()
11736 tcp_req_log_req_info(rack->rc_tp, ent, in rack_req_check_for_comp()
11739 tcp_req_free_a_slot(rack->rc_tp, ent); in rack_req_check_for_comp()
11740 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11749 * For ret_val: if it's 0 the TCP is locked; if it's non-zero
11750 * it's unlocked and probably unsafe to touch the TCB.
11769 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_ack()
11770 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { in rack_process_ack()
11772 tp->t_flags2 |= TF2_NO_ISS_CHECK; in rack_process_ack()
11778 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { in rack_process_ack()
11780 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11783 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { in rack_process_ack()
11785 seq_min = tp->iss + 1; in rack_process_ack()
11792 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11796 if (SEQ_LT(th->th_ack, seq_min)) { in rack_process_ack()
11803 rack->r_wanted_output = 1; in rack_process_ack()
11807 if (SEQ_GT(th->th_ack, tp->snd_max)) { in rack_process_ack()
11809 rack->r_wanted_output = 1; in rack_process_ack()
11812 if (rack->gp_ready && in rack_process_ack()
11813 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_process_ack()
11816 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { in rack_process_ack()
11820 in_rec = IN_FASTRECOVERY(tp->t_flags); in rack_process_ack()
11821 if (rack->rc_in_persist) { in rack_process_ack()
11822 tp->t_rxtshift = 0; in rack_process_ack()
11823 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11824 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11827 if ((th->th_ack == tp->snd_una) && in rack_process_ack()
11828 (tiwin == tp->snd_wnd) && in rack_process_ack()
11830 ((to->to_flags & TOF_SACK) == 0)) { in rack_process_ack()
11831 rack_strike_dupack(rack, th->th_ack); in rack_process_ack()
11834 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), in rack_process_ack()
11838 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_process_ack()
11844 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { in rack_process_ack()
11845 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_process_ack()
11846 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_ack()
11847 rack->r_ctl.rc_reorder_ts = 1; in rack_process_ack()
11855 if (tp->t_flags & TF_NEEDSYN) { in rack_process_ack()
11857 * T/TCP: Connection was half-synchronized, and our SYN has in rack_process_ack()
11859 * to non-starred state, increment snd_una for ACK of SYN, in rack_process_ack()
11862 tp->t_flags &= ~TF_NEEDSYN; in rack_process_ack()
11863 tp->snd_una++; in rack_process_ack()
11865 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_process_ack()
11867 tp->rcv_scale = tp->request_r_scale; in rack_process_ack()
11871 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_ack()
11876 * Any time we move the cum-ack forward clear in rack_process_ack()
11877 * keep-alive tied probe-not-answered. The in rack_process_ack()
11880 rack->probe_not_answered = 0; in rack_process_ack()
11890 if ((tp->t_flags & TF_PREVVALID) && in rack_process_ack()
11891 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_process_ack()
11892 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
11893 if (tp->t_rxtshift == 1 && in rack_process_ack()
11894 (int)(ticks - tp->t_badrxtwin) < 0) in rack_process_ack()
11895 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_process_ack()
11899 tp->t_rxtshift = 0; in rack_process_ack()
11900 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11901 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11902 rack->rc_tlp_in_progress = 0; in rack_process_ack()
11903 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_process_ack()
11908 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_process_ack()
11909 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11911 rack_req_check_for_comp(rack, th->th_ack); in rack_process_ack()
11930 * (possibly backed-off) value. in rack_process_ack()
11937 if (IN_RECOVERY(tp->t_flags)) { in rack_process_ack()
11938 if (SEQ_LT(th->th_ack, tp->snd_recover) && in rack_process_ack()
11939 (SEQ_LT(th->th_ack, tp->snd_max))) { in rack_process_ack()
11942 rack_post_recovery(tp, th->th_ack); in rack_process_ack()
11949 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_process_ack()
11951 p_cwnd += tp->snd_cwnd; in rack_process_ack()
11953 } else if ((rack->rto_from_rec == 1) && in rack_process_ack()
11954 SEQ_GEQ(th->th_ack, tp->snd_recover)) { in rack_process_ack()
11957 * and never re-entered recovery. The timeout(s) in rack_process_ack()
11961 rack->rto_from_rec = 0; in rack_process_ack()
11968 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); in rack_process_ack()
11970 (tp->snd_cwnd > p_cwnd)) { in rack_process_ack()
11971 /* Must be non-newreno (cubic) getting too far ahead of itself */ in rack_process_ack()
11972 tp->snd_cwnd = p_cwnd; in rack_process_ack()
11975 acked_amount = min(acked, (int)sbavail(&so->so_snd)); in rack_process_ack()
11976 tp->snd_wnd -= acked_amount; in rack_process_ack()
11977 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_process_ack()
11978 if ((sbused(&so->so_snd) == 0) && in rack_process_ack()
11980 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
11981 (tp->t_flags & TF_SENTFIN)) { in rack_process_ack()
11990 tp->snd_una = th->th_ack; in rack_process_ack()
11992 if (acked_amount && sbavail(&so->so_snd)) in rack_process_ack()
11993 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_process_ack()
11994 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_process_ack()
11998 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_process_ack()
11999 tp->snd_recover = tp->snd_una; in rack_process_ack()
12001 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { in rack_process_ack()
12002 tp->snd_nxt = tp->snd_max; in rack_process_ack()
12005 (rack->use_fixed_rate == 0) && in rack_process_ack()
12006 (rack->in_probe_rtt == 0) && in rack_process_ack()
12007 rack->rc_gp_dyn_mul && in rack_process_ack()
12008 rack->rc_always_pace) { in rack_process_ack()
12012 if (tp->snd_una == tp->snd_max) { in rack_process_ack()
12014 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
12015 if (rack->r_ctl.rc_went_idle_time == 0) in rack_process_ack()
12016 rack->r_ctl.rc_went_idle_time = 1; in rack_process_ack()
12017 rack->r_ctl.retran_during_recovery = 0; in rack_process_ack()
12018 rack->r_ctl.dsack_byte_cnt = 0; in rack_process_ack()
12020 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_process_ack()
12021 tp->t_acktime = 0; in rack_process_ack()
12022 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
12023 rack->rc_suspicious = 0; in rack_process_ack()
12025 rack->r_wanted_output = 1; in rack_process_ack()
12026 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_process_ack()
12027 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
12028 (sbavail(&so->so_snd) == 0) && in rack_process_ack()
12029 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_process_ack()
12036 /* tcp_close will kill the inp pre-log the Reset */ in rack_process_ack()
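Near the top of rack_process_ack() the stack computes the lowest acceptable
ACK: one full maximum window behind snd_una, except that until the connection
has progressed 65535 << snd_scale bytes past the ISS the floor may not reach
back to the ISS itself. A condensed sketch, assuming the hypothetical helper
ack_floor() and FreeBSD's modular sequence-space macros:

#include <stdint.h>
#include <stdio.h>

#define SEQ_LT(a, b) ((int32_t)((a) - (b)) < 0)
#define SEQ_GT(a, b) ((int32_t)((a) - (b)) > 0)

static uint32_t
ack_floor(uint32_t snd_una, uint32_t iss, uint32_t max_sndwnd, int no_iss_check)
{
	uint32_t seq_min;

	if (no_iss_check || !SEQ_GT(iss + 1, snd_una - max_sndwnd))
		seq_min = snd_una - max_sndwnd;	/* window bound suffices */
	else
		seq_min = iss + 1;		/* ISS bound still in play */
	return (seq_min);
}

int main(void)
{
	uint32_t th_ack = 90, floor_ = ack_floor(100, 95, 65535, 0);

	printf("ack %u is %s\n", th_ack,
	    SEQ_LT(th_ack, floor_) ? "too old" : "acceptable");
	return (0);
}

Per the lines above, an ACK below the floor is treated as ancient: it is not
processed, and r_wanted_output is set so the connection still sends.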
12053 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_collapse()
12062 log.u_bbr.flex5 = rack->r_must_retran; in rack_log_collapse()
12064 log.u_bbr.flex7 = rack->rc_has_collapsed; in rack_log_collapse()
12074 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_collapse()
12075 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_collapse()
12076 &rack->rc_inp->inp_socket->so_rcv, in rack_log_collapse()
12077 &rack->rc_inp->inp_socket->so_snd, in rack_log_collapse()
12092 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_collapsed_window()
12093 if ((rack->rc_has_collapsed == 0) || in rack_collapsed_window()
12094 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) in rack_collapsed_window()
12096 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; in rack_collapsed_window()
12097 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; in rack_collapsed_window()
12098 rack->rc_has_collapsed = 1; in rack_collapsed_window()
12099 rack->r_collapse_point_valid = 1; in rack_collapsed_window()
12100 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); in rack_collapsed_window()
12111 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_un_collapse_window()
12112 rack->rc_has_collapsed = 0; in rack_un_collapse_window()
12113 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12116 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
12120 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { in rack_un_collapse_window()
12121 rack_log_collapse(rack, rsm->r_start, rsm->r_end, in rack_un_collapse_window()
12122 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); in rack_un_collapse_window()
12131 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12133 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_un_collapse_window()
12135 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_un_collapse_window()
12140 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, in rack_un_collapse_window()
12141 rack->r_ctl.last_collapse_point, __LINE__); in rack_un_collapse_window()
12142 if (rsm->r_in_tmap) { in rack_un_collapse_window()
12143 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_un_collapse_window()
12144 nrsm->r_in_tmap = 1; in rack_un_collapse_window()
12154 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { in rack_un_collapse_window()
12156 nrsm->r_flags |= RACK_RWND_COLLAPSED; in rack_un_collapse_window()
12157 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); in rack_un_collapse_window()
12163 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
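rack_un_collapse_window() splits the send-map entry that straddles
last_collapse_point and flags every entry from there on RACK_RWND_COLLAPSED,
so those bytes can be handled specially once the peer reopens its window. A
toy array-based sketch of the mark-and-split step; struct ent and
mark_collapsed() are illustrative, whereas the real code clones entries via
rack_clone_rsm() into the tqhash and the tmap:

#include <stdint.h>
#include <stdio.h>

#define SEQ_GT(a, b)  ((int32_t)((a) - (b)) > 0)
#define SEQ_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)

struct ent { uint32_t start, end; int collapsed; };

static int
mark_collapsed(struct ent *map, int n, int max, uint32_t point)
{
	for (int i = 0; i < n; i++) {
		if (SEQ_GEQ(map[i].start, point)) {
			map[i].collapsed = 1;	/* wholly past the point */
		} else if (SEQ_GT(map[i].end, point) && n < max) {
			/* split [start,end) into [start,point) [point,end) */
			map[n] = map[i];
			map[n].start = point;
			map[n].collapsed = 1;
			map[i].end = point;
			n++;
		}
	}
	return (n);
}

int main(void)
{
	struct ent map[4] = { {0, 1000, 0}, {1000, 2000, 0} };
	int n = mark_collapsed(map, 2, 4, 1500);

	for (int i = 0; i < n; i++)
		printf("[%u,%u) collapsed=%d\n",
		    map[i].start, map[i].end, map[i].collapsed);
	return (0);
}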
12172 rack->r_ctl.rc_rcvtime, __LINE__); in rack_handle_delayed_ack()
12173 tp->t_flags |= TF_DELACK; in rack_handle_delayed_ack()
12175 rack->r_wanted_output = 1; in rack_handle_delayed_ack()
12176 tp->t_flags |= TF_ACKNOW; in rack_handle_delayed_ack()
12188 if (rack->r_fast_output) { in rack_validate_fo_sendwin_up()
12196 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12198 if (out >= tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12200 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
12203 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; in rack_validate_fo_sendwin_up()
12204 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { in rack_validate_fo_sendwin_up()
12206 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
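rack_validate_fo_sendwin_up() re-checks the fast-output credit when the send
window may have moved. A compact sketch of the decision, with illustrative
names; returning 0 models clearing r_fast_output and falling back to the full
output path:

#include <stdint.h>
#include <stdio.h>

static uint32_t
revalidate_fast_output(uint32_t out /* snd_max - snd_una */,
    uint32_t left_to_send, uint32_t snd_wnd, uint32_t maxseg)
{
	if (out + left_to_send > snd_wnd) {
		if (out >= snd_wnd)
			return (0);		/* window already full */
		left_to_send = snd_wnd - out;	/* shrink the credit */
		if (left_to_send < maxseg)
			return (0);		/* not worth a fast send */
	}
	return (left_to_send);
}

int main(void)
{
	/* 10k in flight, 20k credit, 16k window, 1448-byte segments */
	printf("credit=%u\n", revalidate_fast_output(10000, 20000, 16384, 1448));
	return (0);
}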
12214 * Return value of 1, the TCB is unlocked and most
12233 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_data()
12234 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_data()
12236 (SEQ_LT(tp->snd_wl1, th->th_seq) || in rack_process_data()
12237 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || in rack_process_data()
12238 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { in rack_process_data()
12241 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) in rack_process_data()
12243 tp->snd_wnd = tiwin; in rack_process_data()
12245 tp->snd_wl1 = th->th_seq; in rack_process_data()
12246 tp->snd_wl2 = th->th_ack; in rack_process_data()
12247 if (tp->snd_wnd > tp->max_sndwnd) in rack_process_data()
12248 tp->max_sndwnd = tp->snd_wnd; in rack_process_data()
12249 rack->r_wanted_output = 1; in rack_process_data()
12251 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { in rack_process_data()
12252 tp->snd_wnd = tiwin; in rack_process_data()
12254 tp->snd_wl1 = th->th_seq; in rack_process_data()
12255 tp->snd_wl2 = th->th_ack; in rack_process_data()
12258 if (tp->snd_wnd < ctf_outstanding(tp)) in rack_process_data()
12260 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_process_data()
12261 else if (rack->rc_has_collapsed) in rack_process_data()
12263 if ((rack->r_collapse_point_valid) && in rack_process_data()
12264 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) in rack_process_data()
12265 rack->r_collapse_point_valid = 0; in rack_process_data()
12267 if ((rack->rc_in_persist != 0) && in rack_process_data()
12268 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_process_data()
12269 rack->r_ctl.rc_pace_min_segs))) { in rack_process_data()
12270 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); in rack_process_data()
12271 tp->snd_nxt = tp->snd_max; in rack_process_data()
12273 rack->r_wanted_output = 1; in rack_process_data()
12276 if ((rack->rc_in_persist == 0) && in rack_process_data()
12277 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_process_data()
12278 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_process_data()
12279 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_process_data()
12280 sbavail(&tptosocket(tp)->so_snd) && in rack_process_data()
12281 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_process_data()
12288 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_process_data()
12290 if (tp->t_flags2 & TF2_DROP_AF_DATA) { in rack_process_data()
12298 tp->rcv_up = tp->rcv_nxt; in rack_process_data()
12303 * This process logically involves adjusting tp->rcv_wnd as data is in rack_process_data()
12308 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_process_data()
12309 (tp->t_flags & TF_FASTOPEN)); in rack_process_data()
12311 TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12312 tcp_seq save_start = th->th_seq; in rack_process_data()
12313 tcp_seq save_rnxt = tp->rcv_nxt; in rack_process_data()
12328 if (th->th_seq == tp->rcv_nxt && in rack_process_data()
12330 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_process_data()
12335 if (so->so_rcv.sb_shlim) { in rack_process_data()
12338 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_process_data()
12347 tp->rcv_nxt += tlen; in rack_process_data()
12349 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_process_data()
12350 (tp->t_fbyte_in == 0)) { in rack_process_data()
12351 tp->t_fbyte_in = ticks; in rack_process_data()
12352 if (tp->t_fbyte_in == 0) in rack_process_data()
12353 tp->t_fbyte_in = 1; in rack_process_data()
12354 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_process_data()
12355 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_process_data()
12361 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_process_data()
12370 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_process_data()
12375 sbappendstream_locked(&so->so_rcv, m, 0); in rack_process_data()
12377 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_process_data()
12381 if (so->so_rcv.sb_shlim && appended != mcnt) in rack_process_data()
12382 counter_fo_release(so->so_rcv.sb_shlim, in rack_process_data()
12383 mcnt - appended); in rack_process_data()
12395 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12396 if (tp->t_flags & TF_WAKESOR) { in rack_process_data()
12397 tp->t_flags &= ~TF_WAKESOR; in rack_process_data()
12402 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_process_data()
12404 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_process_data()
12412 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { in rack_process_data()
12413 if ((tp->rcv_numsacks >= 1) && in rack_process_data()
12414 (tp->sackblks[0].end == save_start)) { in rack_process_data()
12420 tp->sackblks[0].start, in rack_process_data()
12421 tp->sackblks[0].end); in rack_process_data()
12445 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12449 * If connection is half-synchronized (ie NEEDSYN in rack_process_data()
12455 if (tp->t_flags & TF_NEEDSYN) { in rack_process_data()
12457 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12458 tp->t_flags |= TF_DELACK; in rack_process_data()
12460 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12462 tp->rcv_nxt++; in rack_process_data()
12464 switch (tp->t_state) { in rack_process_data()
12470 tp->t_starttime = ticks; in rack_process_data()
12474 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12484 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12490 * starting the time-wait timer, turning off the in rack_process_data()
12495 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12503 if ((tp->t_flags & TF_ACKNOW) || in rack_process_data()
12504 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { in rack_process_data()
12505 rack->r_wanted_output = 1; in rack_process_data()
12512 * have broken out the fast-data path also just like
12513 * the fast-ack.
12532 if (__predict_false(th->th_seq != tp->rcv_nxt)) { in rack_do_fastnewdata()
12535 if (tiwin && tiwin != tp->snd_wnd) { in rack_do_fastnewdata()
12538 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { in rack_do_fastnewdata()
12541 if (__predict_false((to->to_flags & TOF_TS) && in rack_do_fastnewdata()
12542 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { in rack_do_fastnewdata()
12545 if (__predict_false((th->th_ack != tp->snd_una))) { in rack_do_fastnewdata()
12548 if (__predict_false(tlen > sbspace(&so->so_rcv))) { in rack_do_fastnewdata()
12551 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fastnewdata()
12552 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_do_fastnewdata()
12553 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fastnewdata()
12554 tp->ts_recent = to->to_tsval; in rack_do_fastnewdata()
12556 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_fastnewdata()
12558 * This is a pure, in-sequence data packet with nothing on the in rack_do_fastnewdata()
12561 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_do_fastnewdata()
12564 if (so->so_rcv.sb_shlim) { in rack_do_fastnewdata()
12567 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_do_fastnewdata()
12576 if (tp->rcv_numsacks) in rack_do_fastnewdata()
12579 tp->rcv_nxt += tlen; in rack_do_fastnewdata()
12581 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_do_fastnewdata()
12582 (tp->t_fbyte_in == 0)) { in rack_do_fastnewdata()
12583 tp->t_fbyte_in = ticks; in rack_do_fastnewdata()
12584 if (tp->t_fbyte_in == 0) in rack_do_fastnewdata()
12585 tp->t_fbyte_in = 1; in rack_do_fastnewdata()
12586 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_do_fastnewdata()
12587 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_do_fastnewdata()
12592 tp->snd_wl1 = th->th_seq; in rack_do_fastnewdata()
12596 tp->rcv_up = tp->rcv_nxt; in rack_do_fastnewdata()
12603 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fastnewdata()
12612 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_do_fastnewdata()
12617 sbappendstream_locked(&so->so_rcv, m, 0); in rack_do_fastnewdata()
12620 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_do_fastnewdata()
12624 if (so->so_rcv.sb_shlim && mcnt != appended) in rack_do_fastnewdata()
12625 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); in rack_do_fastnewdata()
12628 if (tp->snd_una == tp->snd_max) in rack_do_fastnewdata()
12629 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_fastnewdata()
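rack_do_fastnewdata() accepts a segment onto the receive-side fast path only
if it changes no connection state: in order, unchanged window, no pending
SYN/FIN transition, fresh timestamp, a pure ACK of snd_una, and room in the
receive buffer. Those __predict_false() guards condense to one predicate; a
sketch with hypothetical struct seg/struct con field names:

#include <stdint.h>
#include <stdio.h>

#define TSTMP_LT(a, b) ((int32_t)((a) - (b)) < 0)

struct seg { uint32_t seq, ack, tsval, tlen, tiwin; };
struct con { uint32_t rcv_nxt, snd_una, snd_wnd, ts_recent;
	     int needsyn_fin; long rcv_space; };

static int
fastpath_data_ok(const struct con *c, const struct seg *s)
{
	if (s->seq != c->rcv_nxt)		return (0); /* out of order */
	if (s->tiwin && s->tiwin != c->snd_wnd)	return (0); /* window moved */
	if (c->needsyn_fin)			return (0); /* state pending */
	if (TSTMP_LT(s->tsval, c->ts_recent))	return (0); /* stale tstamp */
	if (s->ack != c->snd_una)		return (0); /* not pure data */
	if ((long)s->tlen > c->rcv_space)	return (0); /* no room */
	return (1);
}

int main(void)
{
	struct con c = { 1000, 500, 8192, 77, 0, 65536 };
	struct seg s = { 1000, 500, 78, 1448, 8192 };

	printf("fastpath=%d\n", fastpath_data_ok(&c, &s));
	return (0);
}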
12636 * in sequence to remain in the fast-path. We also add
12640 * slow-path.
12652 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_fastack()
12656 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { in rack_fastack()
12664 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { in rack_fastack()
12668 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { in rack_fastack()
12672 if (__predict_false(IN_RECOVERY(tp->t_flags))) { in rack_fastack()
12676 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fastack()
12677 if (rack->r_ctl.rc_sacked) { in rack_fastack()
12681 /* Ok if we reach here, we can process a fast-ack */ in rack_fastack()
12682 if (rack->gp_ready && in rack_fastack()
12683 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_fastack()
12686 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_fastack()
12689 if (tiwin != tp->snd_wnd) { in rack_fastack()
12690 tp->snd_wnd = tiwin; in rack_fastack()
12692 tp->snd_wl1 = th->th_seq; in rack_fastack()
12693 if (tp->snd_wnd > tp->max_sndwnd) in rack_fastack()
12694 tp->max_sndwnd = tp->snd_wnd; in rack_fastack()
12697 if ((rack->rc_in_persist != 0) && in rack_fastack()
12698 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_fastack()
12699 rack->r_ctl.rc_pace_min_segs))) { in rack_fastack()
12703 if ((rack->rc_in_persist == 0) && in rack_fastack()
12704 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_fastack()
12705 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_fastack()
12706 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_fastack()
12707 sbavail(&tptosocket(tp)->so_snd) && in rack_fastack()
12708 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_fastack()
12715 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); in rack_fastack()
12722 if ((to->to_flags & TOF_TS) != 0 && in rack_fastack()
12723 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_fastack()
12724 tp->ts_recent_age = tcp_ts_getticks(); in rack_fastack()
12725 tp->ts_recent = to->to_tsval; in rack_fastack()
12735 if ((tp->t_flags & TF_PREVVALID) && in rack_fastack()
12736 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_fastack()
12737 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12738 if (tp->t_rxtshift == 1 && in rack_fastack()
12739 (int)(ticks - tp->t_badrxtwin) < 0) in rack_fastack()
12740 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_fastack()
12760 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); in rack_fastack()
12762 mfree = sbcut_locked(&so->so_snd, acked); in rack_fastack()
12763 tp->snd_una = th->th_ack; in rack_fastack()
12765 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_fastack()
12767 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_fastack()
12770 tp->t_rxtshift = 0; in rack_fastack()
12771 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_fastack()
12772 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_fastack()
12773 rack->rc_tlp_in_progress = 0; in rack_fastack()
12774 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_fastack()
12779 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_fastack()
12780 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12783 rack_req_check_for_comp(rack, th->th_ack); in rack_fastack()
12791 if (tp->snd_wnd < ctf_outstanding(tp)) { in rack_fastack()
12793 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_fastack()
12794 } else if (rack->rc_has_collapsed) in rack_fastack()
12796 if ((rack->r_collapse_point_valid) && in rack_fastack()
12797 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) in rack_fastack()
12798 rack->r_collapse_point_valid = 0; in rack_fastack()
12802 tp->snd_wl2 = th->th_ack; in rack_fastack()
12803 tp->t_dupacks = 0; in rack_fastack()
12809 * otherwise restart timer using current (possibly backed-off) in rack_fastack()
12815 (rack->use_fixed_rate == 0) && in rack_fastack()
12816 (rack->in_probe_rtt == 0) && in rack_fastack()
12817 rack->rc_gp_dyn_mul && in rack_fastack()
12818 rack->rc_always_pace) { in rack_fastack()
12822 if (tp->snd_una == tp->snd_max) { in rack_fastack()
12823 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12824 rack->r_ctl.retran_during_recovery = 0; in rack_fastack()
12825 rack->rc_suspicious = 0; in rack_fastack()
12826 rack->r_ctl.dsack_byte_cnt = 0; in rack_fastack()
12827 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_fastack()
12828 if (rack->r_ctl.rc_went_idle_time == 0) in rack_fastack()
12829 rack->r_ctl.rc_went_idle_time = 1; in rack_fastack()
12831 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_fastack()
12832 tp->t_acktime = 0; in rack_fastack()
12833 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12835 if (acked && rack->r_fast_output) in rack_fastack()
12837 if (sbavail(&so->so_snd)) { in rack_fastack()
12838 rack->r_wanted_output = 1; in rack_fastack()
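rack_fastack() is the sender-side analogue: the compressed-ACK path is taken
only for a forward, in-range cumulative ACK on a connection with no pending
state change, no stale timestamp, no recovery in progress, and nothing
currently SACKed. A sketch of that qualification, with illustrative parameter
names; any failed check routes the segment to the slow-path ACK processing:

#include <stdint.h>
#include <stdio.h>

#define SEQ_LEQ(a, b) ((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)  ((int32_t)((a) - (b)) > 0)

static int
fastack_ok(uint32_t th_ack, uint32_t snd_una, uint32_t snd_max,
    int needsyn_fin, int ts_stale, int in_recovery, uint32_t sacked)
{
	if (SEQ_LEQ(th_ack, snd_una))	return (0); /* old or dup ACK */
	if (SEQ_GT(th_ack, snd_max))	return (0); /* acks unsent data */
	if (needsyn_fin)		return (0); /* state change pending */
	if (ts_stale)			return (0); /* PAWS-stale */
	if (in_recovery)		return (0); /* recovery is slow path */
	if (sacked != 0)		return (0); /* holes to account for */
	return (1);
}

int main(void)
{
	printf("fastack=%d\n", fastack_ok(1500, 1000, 2000, 0, 0, 0, 0));
	return (0);
}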
12844 * Return value of 1, the TCB is unlocked and most
12866 * this is an acceptable SYN segment initialize tp->rcv_nxt and in rack_do_syn_sent()
12867 * tp->irs if seg contains ack then advance tp->snd_una if seg in rack_do_syn_sent()
12874 (SEQ_LEQ(th->th_ack, tp->iss) || in rack_do_syn_sent()
12875 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_sent()
12895 tp->irs = th->th_seq; in rack_do_syn_sent()
12897 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_sent()
12907 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_sent()
12909 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_sent()
12911 tp->rcv_adv += min(tp->rcv_wnd, in rack_do_syn_sent()
12912 TCP_MAXWIN << tp->rcv_scale); in rack_do_syn_sent()
12917 if ((tp->t_flags & TF_FASTOPEN) && in rack_do_syn_sent()
12918 (tp->snd_una != tp->snd_max)) { in rack_do_syn_sent()
12920 if (SEQ_LT(th->th_ack, tp->snd_max)) in rack_do_syn_sent()
12929 rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_syn_sent()
12930 tp->t_flags |= TF_DELACK; in rack_do_syn_sent()
12932 rack->r_wanted_output = 1; in rack_do_syn_sent()
12933 tp->t_flags |= TF_ACKNOW; in rack_do_syn_sent()
12938 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_syn_sent()
12944 * ack-processing since the in rack_do_syn_sent()
12945 * data stream in our send-map in rack_do_syn_sent()
12951 tp->snd_una++; in rack_do_syn_sent()
12952 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { in rack_do_syn_sent()
12961 rsm = tqhash_min(rack->r_ctl.tqh); in rack_do_syn_sent()
12963 if (rsm->r_flags & RACK_HAS_SYN) { in rack_do_syn_sent()
12964 rsm->r_flags &= ~RACK_HAS_SYN; in rack_do_syn_sent()
12965 rsm->r_start++; in rack_do_syn_sent()
12967 rack->r_ctl.rc_resend = rsm; in rack_do_syn_sent()
12973 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 in rack_do_syn_sent()
12975 tp->t_starttime = ticks; in rack_do_syn_sent()
12976 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_sent()
12978 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_sent()
12988 * Received initial SYN in SYN-SENT[*] state => simultaneous in rack_do_syn_sent()
12991 * half-synchronized. Otherwise, do 3-way handshake: in rack_do_syn_sent()
12992 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If in rack_do_syn_sent()
12995 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); in rack_do_syn_sent()
12999 * Advance th->th_seq to correspond to first data byte. If data, in rack_do_syn_sent()
13002 th->th_seq++; in rack_do_syn_sent()
13003 if (tlen > tp->rcv_wnd) { in rack_do_syn_sent()
13004 todrop = tlen - tp->rcv_wnd; in rack_do_syn_sent()
13005 m_adj(m, -todrop); in rack_do_syn_sent()
13006 tlen = tp->rcv_wnd; in rack_do_syn_sent()
13011 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_sent()
13012 tp->rcv_up = th->th_seq; in rack_do_syn_sent()
13020 /* For syn-sent we need to possibly update the rtt */ in rack_do_syn_sent()
13021 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_sent()
13025 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_sent()
13026 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_sent()
13027 tp->t_rttlow = t; in rack_do_syn_sent()
13028 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); in rack_do_syn_sent()
13035 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_sent()
13052 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_sent()
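For SYN-SENT (and, below, SYN-RECEIVED) the first RTT sample comes from the
timestamp echo: the millisecond tick difference is scaled to microseconds via
HPTS_USEC_IN_MSEC and folded into t_rttlow. A minimal sketch of that
conversion; ts_rtt_sample() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

#define HPTS_USEC_IN_MSEC 1000	/* matches the conversion used above */

static uint32_t
ts_rtt_sample(uint32_t now_msticks, uint32_t tsecr, uint32_t *rttlow)
{
	uint32_t t = (now_msticks - tsecr) * HPTS_USEC_IN_MSEC;

	/* Track the lowest sample seen, as the code above does. */
	if (*rttlow == 0 || *rttlow > t)
		*rttlow = t;
	return (t);
}

int main(void)
{
	uint32_t low = 0;

	printf("rtt=%u us, low=%u us\n", ts_rtt_sample(105, 100, &low), low);
	return (0);
}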
13068 * Return value of 1, the TCB is unlocked and most
13082 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_recv()
13085 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_syn_recv()
13088 (SEQ_LEQ(th->th_ack, tp->snd_una) || in rack_do_syn_recv()
13089 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_recv()
13094 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13107 /* non-initial SYN is ignored */ in rack_do_syn_recv()
13108 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || in rack_do_syn_recv()
13109 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || in rack_do_syn_recv()
13110 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { in rack_do_syn_recv()
13124 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_syn_recv()
13125 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_syn_recv()
13130 * In the SYN-RECEIVED state, validate that the packet belongs to in rack_do_syn_recv()
13136 if (SEQ_LT(th->th_seq, tp->irs)) { in rack_do_syn_recv()
13158 if ((to->to_flags & TOF_TS) != 0 && in rack_do_syn_recv()
13159 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_syn_recv()
13160 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_syn_recv()
13162 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_syn_recv()
13163 tp->ts_recent = to->to_tsval; in rack_do_syn_recv()
13165 tp->snd_wnd = tiwin; in rack_do_syn_recv()
13168 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_syn_recv()
13169 * is on (half-synchronized state), then queue data for later in rack_do_syn_recv()
13173 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
13180 if (tp->t_flags & TF_SONOTCONN) { in rack_do_syn_recv()
13181 tp->t_flags &= ~TF_SONOTCONN; in rack_do_syn_recv()
13185 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_recv()
13187 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_recv()
13190 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> in rack_do_syn_recv()
13191 * FIN-WAIT-1 in rack_do_syn_recv()
13193 tp->t_starttime = ticks; in rack_do_syn_recv()
13194 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { in rack_do_syn_recv()
13195 tcp_fastopen_decrement_counter(tp->t_tfo_pending); in rack_do_syn_recv()
13196 tp->t_tfo_pending = NULL; in rack_do_syn_recv()
13198 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_recv()
13200 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_recv()
13211 if (!(tp->t_flags & TF_FASTOPEN)) in rack_do_syn_recv()
13219 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) in rack_do_syn_recv()
13220 tp->snd_una++; in rack_do_syn_recv()
13228 if (tp->t_flags & TF_WAKESOR) { in rack_do_syn_recv()
13229 tp->t_flags &= ~TF_WAKESOR; in rack_do_syn_recv()
13234 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_recv()
13235 /* For syn-recv we need to possibly update the rtt */ in rack_do_syn_recv()
13236 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_recv()
13240 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_recv()
13241 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_recv()
13242 tp->t_rttlow = t; in rack_do_syn_recv()
13243 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); in rack_do_syn_recv()
13250 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_recv()
13267 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_recv()
13282 * Return value of 1, the TCB is unlocked and most
13297 * uni-directional data xfer. If the packet has no control flags, in rack_do_established()
13298 * is in-sequence, the window didn't change and we're not in rack_do_established()
13302 * waiting for space. If the length is non-zero and the ack didn't in rack_do_established()
13303 * move, we're the receiver side. If we're getting packets in-order in rack_do_established()
13306 * hidden state-flags are also off. Since we check for in rack_do_established()
13309 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_established()
13310 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && in rack_do_established()
13313 __predict_true(th->th_seq == tp->rcv_nxt)) { in rack_do_established()
13316 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { in rack_do_established()
13329 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_established()
13344 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_established()
13345 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_established()
13366 if ((to->to_flags & TOF_TS) != 0 && in rack_do_established()
13367 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_established()
13368 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_established()
13370 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_established()
13371 tp->ts_recent = to->to_tsval; in rack_do_established()
13374 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_established()
13375 * is on (half-synchronized state), then queue data for later in rack_do_established()
13379 if (tp->t_flags & TF_NEEDSYN) { in rack_do_established()
13383 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_established()
13385 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_established()
13398 if (sbavail(&so->so_snd)) { in rack_do_established()
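Every state handler above repeats the same two-part timestamp discipline:
drop PAWS-stale segments (TSTMP_LT(tsval, ts_recent)), then record tsval only
from a segment that covers last_ack_sent. A sketch of the pair; note the
kernel's coverage test also accounts for a FIN byte and window bounds, which
this toy omits:

#include <stdint.h>
#include <stdio.h>

#define TSTMP_LT(a, b) ((int32_t)((a) - (b)) < 0)
#define SEQ_LEQ(a, b)  ((int32_t)((a) - (b)) <= 0)

static int
ts_check_and_update(uint32_t tsval, uint32_t *ts_recent,
    uint32_t seq, uint32_t tlen, uint32_t last_ack_sent)
{
	if (*ts_recent && TSTMP_LT(tsval, *ts_recent))
		return (0);	/* PAWS: drop after sending a dup ACK */
	if (SEQ_LEQ(seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seq + tlen))
		*ts_recent = tsval;	/* segment covers last_ack_sent */
	return (1);
}

int main(void)
{
	uint32_t tsr = 50;

	printf("%d tsr=%u\n", ts_check_and_update(60, &tsr, 100, 500, 100), tsr);
	printf("%d tsr=%u\n", ts_check_and_update(40, &tsr, 600, 500, 600), tsr);
	return (0);
}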
13411 * Return value of 1, the TCB is unlocked and most
13425 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_close_wait()
13439 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_close_wait()
13440 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_close_wait()
13461 if ((to->to_flags & TOF_TS) != 0 && in rack_do_close_wait()
13462 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_close_wait()
13463 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_close_wait()
13465 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_close_wait()
13466 tp->ts_recent = to->to_tsval; in rack_do_close_wait()
13469 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_close_wait()
13470 * is on (half-synchronized state), then queue data for later in rack_do_close_wait()
13474 if (tp->t_flags & TF_NEEDSYN) { in rack_do_close_wait()
13478 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_close_wait()
13480 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_close_wait()
13493 if (sbavail(&so->so_snd)) { in rack_do_close_wait()
13495 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_close_wait()
13511 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_data_after_close()
13512 if (rack->rc_allow_data_af_clo == 0) { in rack_check_data_after_close()
13515 /* tcp_close will kill the inp pre-log the Reset */ in rack_check_data_after_close()
13522 if (sbavail(&so->so_snd) == 0) in rack_check_data_after_close()
13526 tp->rcv_nxt = th->th_seq + *tlen; in rack_check_data_after_close()
13527 tp->t_flags2 |= TF2_DROP_AF_DATA; in rack_check_data_after_close()
13528 rack->r_wanted_output = 1; in rack_check_data_after_close()
13534 * Return value of 1, the TCB is unlocked and most
13550 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_1()
13564 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_1()
13565 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_1()
13576 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_1()
13593 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_1()
13594 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_1()
13595 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_1()
13597 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_1()
13598 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_1()
13601 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_1()
13602 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_1()
13606 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_1()
13609 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_1()
13611 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_1()
13634 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fin_wait_1()
13643 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_1()
13645 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_1()
13656 * Return value of 1, the TCB is unlocked and most
13672 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_closing()
13686 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_closing()
13687 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_closing()
13708 if ((to->to_flags & TOF_TS) != 0 && in rack_do_closing()
13709 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_closing()
13710 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_closing()
13712 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_closing()
13713 tp->ts_recent = to->to_tsval; in rack_do_closing()
13716 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_closing()
13717 * is on (half-synchronized state), then queue data for later in rack_do_closing()
13721 if (tp->t_flags & TF_NEEDSYN) { in rack_do_closing()
13724 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_closing()
13726 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_closing()
13744 if (sbavail(&so->so_snd)) { in rack_do_closing()
13746 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_closing()
13757 * Return value of 1, the TCB is unlocked and most
13773 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_lastack()
13787 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_lastack()
13788 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_lastack()
13810 if ((to->to_flags & TOF_TS) != 0 && in rack_do_lastack()
13811 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_lastack()
13812 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_lastack()
13814 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_lastack()
13815 tp->ts_recent = to->to_tsval; in rack_do_lastack()
13818 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_lastack()
13819 * is on (half-synchronized state), then queue data for later in rack_do_lastack()
13823 if (tp->t_flags & TF_NEEDSYN) { in rack_do_lastack()
13826 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_lastack()
13828 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_lastack()
13846 if (sbavail(&so->so_snd)) { in rack_do_lastack()
13848 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_lastack()
13859 * Return value of 1, the TCB is unlocked and most
13876 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_2()
13890 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_2()
13891 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_2()
13902 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_2()
13919 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_2()
13920 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_2()
13921 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_2()
13923 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_2()
13924 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_2()
13927 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_2()
13928 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_2()
13932 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_2()
13935 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_2()
13937 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_2()
13950 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_2()
13952 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_2()
13965 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; in rack_clear_rate_sample()
13966 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; in rack_clear_rate_sample()
13967 rack->r_ctl.rack_rs.rs_rtt_tot = 0; in rack_clear_rate_sample()
13978 if (rack->rc_hybrid_mode && in rack_set_pace_segments()
13979 (rack->r_ctl.rc_pace_max_segs != 0) && in rack_set_pace_segments()
13981 (rack->r_ctl.rc_last_sft != NULL)) { in rack_set_pace_segments()
13982 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; in rack_set_pace_segments()
13986 orig_min = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
13987 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
13988 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; in rack_set_pace_segments()
13989 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) in rack_set_pace_segments()
13991 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); in rack_set_pace_segments()
13992 if (rack->use_fixed_rate || rack->rc_force_max_seg) { in rack_set_pace_segments()
13993 if (user_max != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
13996 if (rack->rc_force_max_seg) { in rack_set_pace_segments()
13997 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
13998 } else if (rack->use_fixed_rate) { in rack_set_pace_segments()
14000 if ((rack->r_ctl.crte == NULL) || in rack_set_pace_segments()
14001 (bw_est != rack->r_ctl.crte->rate)) { in rack_set_pace_segments()
14002 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
14008 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_set_pace_segments()
14013 rack->r_ctl.rc_pace_min_segs); in rack_set_pace_segments()
14014 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( in rack_set_pace_segments()
14016 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_set_pace_segments()
14018 } else if (rack->rc_always_pace) { in rack_set_pace_segments()
14019 if (rack->r_ctl.gp_bw || in rack_set_pace_segments()
14020 rack->r_ctl.init_rate) { in rack_set_pace_segments()
14025 orig = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
14032 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, in rack_set_pace_segments()
14034 ctf_fixed_maxseg(rack->rc_tp)); in rack_set_pace_segments()
14036 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
14037 if (orig != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
14039 } else if ((rack->r_ctl.gp_bw == 0) && in rack_set_pace_segments()
14040 (rack->r_ctl.rc_pace_max_segs == 0)) { in rack_set_pace_segments()
14046 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); in rack_set_pace_segments()
14049 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { in rack_set_pace_segments()
14051 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; in rack_set_pace_segments()
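rack_set_pace_segments() keeps the pacing minimum pinned to the current MSS
and, in the fixed-rate and forced-max modes, derives the maximum from the
user's segment multiplier before clamping to the largest burst one pacing
send may carry. A sketch of that policy; the 65535-byte cap stands in for
PACE_MAX_IP_BYTES, whose exact value this listing does not show:

#include <stdint.h>
#include <stdio.h>

#define PACE_MAX_IP_BYTES 65535	/* illustrative stand-in for the real cap */

static void
set_pace_segments(uint32_t maxseg, uint32_t user_set_max_segs,
    uint32_t *pace_min, uint32_t *pace_max)
{
	*pace_min = maxseg;			/* min follows the MSS */
	*pace_max = maxseg * user_set_max_segs;	/* user-driven burst size */
	if (*pace_max > PACE_MAX_IP_BYTES)
		*pace_max = PACE_MAX_IP_BYTES;	/* one-datagram clamp */
}

int main(void)
{
	uint32_t mn, mx;

	set_pace_segments(1448, 40, &mn, &mx);
	printf("min=%u max=%u\n", mn, mx);
	return (0);
}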
14071 if (rack->r_is_v6) { in rack_init_fsb_block()
14072 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in rack_init_fsb_block()
14073 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14074 if (tp->t_port) { in rack_init_fsb_block()
14075 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14077 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14078 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14079 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14080 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14083 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); in rack_init_fsb_block()
14084 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14086 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14087 tp->t_port, in rack_init_fsb_block()
14088 ip6, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14089 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); in rack_init_fsb_block()
14094 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); in rack_init_fsb_block()
14095 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14096 if (tp->t_port) { in rack_init_fsb_block()
14097 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14099 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
14100 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
14101 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14102 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14105 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14106 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); in rack_init_fsb_block()
14108 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14109 tp->t_port, in rack_init_fsb_block()
14110 ip, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14111 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; in rack_init_fsb_block()
14114 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), in rack_init_fsb_block()
14115 (long)TCP_MAXWIN << tp->rcv_scale); in rack_init_fsb_block()
14116 rack->r_fsb_inited = 1; in rack_init_fsb_block()
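The fast-send block prepares its header template once, so the hot path only
patches sequence numbers and lengths. The header length computed above
reduces to a small sum; a sketch, assuming the usual FreeBSD header sizes
(40-byte ip6_hdr, 20-byte ip, 20-byte tcphdr, 8-byte udphdr) rather than
including the kernel structures:

#include <stdio.h>

static unsigned int
fsb_hdr_len(int is_v6, int udp_tunneled)
{
	unsigned int len = (is_v6 ? 40 : 20) + 20;	/* IP + TCP */

	if (udp_tunneled)
		len += 8;	/* UDP encapsulation header */
	return (len);
}

int main(void)
{
	printf("v6+udp=%u v4=%u\n", fsb_hdr_len(1, 1), fsb_hdr_len(0, 0));
	return (0);
}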
14127 …rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct ud… in rack_init_fsb()
14129 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); in rack_init_fsb()
14131 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, in rack_init_fsb()
14133 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { in rack_init_fsb()
14136 rack->r_fsb_inited = 0; in rack_init_fsb()
14145 * 20 - Initial round setup in rack_log_hystart_event()
14146 * 21 - Rack declares a new round. in rack_log_hystart_event()
14150 tp = rack->rc_tp; in rack_log_hystart_event()
14156 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_log_hystart_event()
14157 log.u_bbr.flex2 = rack->r_ctl.roundends; in rack_log_hystart_event()
14159 log.u_bbr.flex4 = tp->snd_max; in rack_log_hystart_event()
14162 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hystart_event()
14163 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hystart_event()
14165 &tptosocket(tp)->so_rcv, in rack_log_hystart_event()
14166 &tptosocket(tp)->so_snd, in rack_log_hystart_event()
14175 rack->rack_deferred_inited = 1; in rack_deferred_init()
14176 rack->r_ctl.roundends = tp->snd_max; in rack_deferred_init()
14177 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; in rack_deferred_init()
14178 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_deferred_init()
14192 * 1 - Use full sized retransmits i.e. limit in rack_init_retransmit_value()
14196 * 2 - Use pacer min granularity as a guide to in rack_init_retransmit_value()
14204 * 0 - The rack default 1 MSS (anything not 0/1/2 in rack_init_retransmit_value()
14209 rack->full_size_rxt = 1; in rack_init_retransmit_value()
14210 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
14212 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14213 rack->shape_rxt_to_pacing_min = 1; in rack_init_retransmit_value()
14215 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14216 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
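The three retransmit-sizing policies described in the comments above map onto
two flags; anything other than 1 or 2 falls back to the default one-MSS
retransmit. A direct sketch of that mapping (struct rxt_policy is
illustrative):

#include <stdio.h>

struct rxt_policy { int full_size_rxt; int shape_rxt_to_pacing_min; };

static struct rxt_policy
init_retransmit_value(int val)
{
	struct rxt_policy p = { 0, 0 };

	if (val == 1)
		p.full_size_rxt = 1;		/* full pacer-sized rxts */
	else if (val == 2)
		p.shape_rxt_to_pacing_min = 1;	/* pacer min granularity */
	/* else: rack default of one MSS */
	return (p);
}

int main(void)
{
	for (int v = 0; v <= 2; v++) {
		struct rxt_policy p = init_retransmit_value(v);

		printf("val=%d full=%d shape=%d\n", v,
		    p.full_size_rxt, p.shape_rxt_to_pacing_min);
	}
	return (0);
}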
14226 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_chg_info()
14249 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_chg_query()
14250 switch (reqr->req) { in rack_chg_query()
14252 if ((reqr->req_param == tp->snd_max) || in rack_chg_query()
14253 (tp->snd_max == tp->snd_una)){ in rack_chg_query()
14257 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); in rack_chg_query()
14259 /* Can't find that seq -- unlikely */ in rack_chg_query()
14262 reqr->sendmap_start = rsm->r_start; in rack_chg_query()
14263 reqr->sendmap_end = rsm->r_end; in rack_chg_query()
14264 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; in rack_chg_query()
14265 reqr->sendmap_fas = rsm->r_fas; in rack_chg_query()
14266 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) in rack_chg_query()
14267 reqr->sendmap_send_cnt = SNDMAP_NRTX; in rack_chg_query()
14268 for(i=0; i<reqr->sendmap_send_cnt; i++) in rack_chg_query()
14269 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; in rack_chg_query()
14270 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; in rack_chg_query()
14271 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; in rack_chg_query()
14272 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; in rack_chg_query()
14273 reqr->sendmap_dupacks = rsm->r_dupack; in rack_chg_query()
14275 rsm->r_start, in rack_chg_query()
14276 rsm->r_end, in rack_chg_query()
14277 rsm->r_flags); in rack_chg_query()
14281 if (rack->r_ctl.rc_hpts_flags == 0) { in rack_chg_query()
14285 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; in rack_chg_query()
14286 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_chg_query()
14287 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; in rack_chg_query()
14289 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_chg_query()
14290 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; in rack_chg_query()
14293 rack->r_ctl.rc_hpts_flags, in rack_chg_query()
14294 rack->r_ctl.rc_last_output_to, in rack_chg_query()
14295 rack->r_ctl.rc_timer_exp); in rack_chg_query()
14300 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; in rack_chg_query()
14301 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; in rack_chg_query()
14303 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; in rack_chg_query()
14304 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; in rack_chg_query()
14305 reqr->rack_rtt = rack->rc_rack_rtt; in rack_chg_query()
14306 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; in rack_chg_query()
14307 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; in rack_chg_query()
14309 reqr->rack_sacked = rack->r_ctl.rc_sacked; in rack_chg_query()
14310 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; in rack_chg_query()
14311 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; in rack_chg_query()
14312 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; in rack_chg_query()
14313 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; in rack_chg_query()
14314 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; in rack_chg_query()
14316 reqr->rack_tlp_out = rack->rc_tlp_in_progress; in rack_chg_query()
14317 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; in rack_chg_query()
14318 if (rack->rc_in_persist) { in rack_chg_query()
14319 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; in rack_chg_query()
14320 reqr->rack_in_persist = 1; in rack_chg_query()
14322 reqr->rack_time_went_idle = 0; in rack_chg_query()
14323 reqr->rack_in_persist = 0; in rack_chg_query()
14325 if (rack->r_wanted_output) in rack_chg_query()
14326 reqr->rack_wanted_output = 1; in rack_chg_query()
14328 reqr->rack_wanted_output = 0; in rack_chg_query()
14332 return (-EINVAL); in rack_chg_query()
14351 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_switch_failed()
14353 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_switch_failed()
14354 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14356 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14357 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_switch_failed()
14358 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_switch_failed()
14359 if (tp->t_in_hpts > IHPTS_NONE) { in rack_switch_failed()
14364 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_switch_failed()
14365 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_switch_failed()
14366 toval = rack->r_ctl.rc_last_output_to - cts; in rack_switch_failed()
14371 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_switch_failed()
14372 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_switch_failed()
14373 toval = rack->r_ctl.rc_timer_exp - cts; in rack_switch_failed()
14392 * to not refer to tp->t_fb_ptr. This has the old rack in rack_init_outstanding()
14398 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init_outstanding()
14406 rsm->r_no_rtt_allowed = 1; in rack_init_outstanding()
14407 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14408 rsm->r_rtr_cnt = 1; in rack_init_outstanding()
14409 rsm->r_rtr_bytes = 0; in rack_init_outstanding()
14410 if (tp->t_flags & TF_SENTFIN) in rack_init_outstanding()
14411 rsm->r_flags |= RACK_HAS_FIN; in rack_init_outstanding()
14412 rsm->r_end = tp->snd_max; in rack_init_outstanding()
14413 if (tp->snd_una == tp->iss) { in rack_init_outstanding()
14415 rsm->r_flags |= RACK_HAS_SYN; in rack_init_outstanding()
14416 rsm->r_start = tp->iss; in rack_init_outstanding()
14417 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); in rack_init_outstanding()
14419 rsm->r_start = tp->snd_una; in rack_init_outstanding()
14420 rsm->r_dupack = 0; in rack_init_outstanding()
14421 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14422 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14423 if (rsm->m) { in rack_init_outstanding()
14424 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14425 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14427 rsm->orig_m_len = 0; in rack_init_outstanding()
14428 rsm->orig_t_space = 0; in rack_init_outstanding()
14432 * This can happen if we have a stand-alone FIN or in rack_init_outstanding()
14435 rsm->m = NULL; in rack_init_outstanding()
14436 rsm->orig_m_len = 0; in rack_init_outstanding()
14437 rsm->orig_t_space = 0; in rack_init_outstanding()
14438 rsm->soff = 0; in rack_init_outstanding()
14441 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14446 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14448 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14449 rsm->r_in_tmap = 1; in rack_init_outstanding()
14456 at = tp->snd_una; in rack_init_outstanding()
14457 while (at != tp->snd_max) { in rack_init_outstanding()
14461 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) in rack_init_outstanding()
14473 rsm->r_dupack = qr.sendmap_dupacks; in rack_init_outstanding()
14474 rsm->r_start = qr.sendmap_start; in rack_init_outstanding()
14475 rsm->r_end = qr.sendmap_end; in rack_init_outstanding()
14477 rsm->r_fas = qr.sendmap_end; in rack_init_outstanding()
14479 rsm->r_fas = rsm->r_start - tp->snd_una; in rack_init_outstanding()
14485 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; in rack_init_outstanding()
14486 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; in rack_init_outstanding()
14487 rsm->r_rtr_cnt = qr.sendmap_send_cnt; in rack_init_outstanding()
14488 rsm->r_ack_arrival = qr.sendmap_ack_arrival; in rack_init_outstanding()
14489 for (i=0 ; i<rsm->r_rtr_cnt; i++) in rack_init_outstanding()
14490 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; in rack_init_outstanding()
14491 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14492 (rsm->r_start - tp->snd_una), &rsm->soff); in rack_init_outstanding()
14493 if (rsm->m) { in rack_init_outstanding()
14494 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14495 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14497 rsm->orig_m_len = 0; in rack_init_outstanding()
14498 rsm->orig_t_space = 0; in rack_init_outstanding()
14501 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14506 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14508 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_init_outstanding()
14509 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_init_outstanding()
14510 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > in rack_init_outstanding()
14511 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { in rack_init_outstanding()
14518 rsm->r_in_tmap = 1; in rack_init_outstanding()
14523 if (rsm->r_in_tmap == 0) { in rack_init_outstanding()
14527 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14528 rsm->r_in_tmap = 1; in rack_init_outstanding()
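/*
 * The loop above positions un-acked entries in the time-ordered tmap
 * by last-transmit time, so the oldest outstanding send sorts first.
 */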
14531 if ((rack->r_ctl.rc_sacklast == NULL) || in rack_init_outstanding()
14532 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { in rack_init_outstanding()
14533 rack->r_ctl.rc_sacklast = rsm; in rack_init_outstanding()
14537 rsm->r_start, in rack_init_outstanding()
14538 rsm->r_end, in rack_init_outstanding()
14539 rsm->r_flags); in rack_init_outstanding()
14560 * will be tp->t_fb_ptr. If its a stack switch that in rack_init()
14564 if (ptr == &tp->t_fb_ptr) in rack_init()
14580 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); in rack_init()
14581 if (rack->r_ctl.tqh == NULL) { in rack_init()
14585 tqhash_init(rack->r_ctl.tqh); in rack_init()
14586 TAILQ_INIT(&rack->r_ctl.rc_free); in rack_init()
14587 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_init()
14588 rack->rc_tp = tp; in rack_init()
14589 rack->rc_inp = inp; in rack_init()
14591 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; in rack_init()
14608 rack->rc_new_rnd_needed = 1; in rack_init()
14609 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; in rack_init()
14612 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; in rack_init()
14613 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; in rack_init()
14614 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; in rack_init()
14616 rack->rc_pace_to_cwnd = 1; in rack_init()
14618 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; in rack_init()
14620 rack->use_rack_rr = 1; in rack_init()
14622 rack->rc_pace_dnd = 1; in rack_init()
14625 tp->t_delayed_ack = 1; in rack_init()
14627 tp->t_delayed_ack = 0; in rack_init()
14630 tp->t_flags2 |= TF2_TCP_ACCOUNTING; in rack_init()
14633 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; in rack_init()
14634 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); in rack_init()
14635 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); in rack_init()
14636 if (rack->r_ctl.pcm_s == NULL) { in rack_init()
14637 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_init()
14640 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; in rack_init()
14642 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; in rack_init()
14643 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; in rack_init()
14645 rack->rack_enable_scwnd = 1; in rack_init()
14646 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_init()
14647 rack->rc_user_set_max_segs = rack_hptsi_segments; in rack_init()
14648 rack->r_ctl.max_reduction = rack_max_reduce; in rack_init()
14649 rack->rc_force_max_seg = 0; in rack_init()
14650 TAILQ_INIT(&rack->r_ctl.opt_list); in rack_init()
14651 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; in rack_init()
14652 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; in rack_init()
14654 rack->rack_hibeta = 1; in rack_init()
14657 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; in rack_init()
14658 rack->r_ctl.saved_hibeta = rack_hibeta_setting; in rack_init()
14661 rack->r_ctl.saved_hibeta = 50; in rack_init()
14666 * will never have all 1's in ms :-) in rack_init()
14668 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; in rack_init()
14669 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; in rack_init()
14670 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; in rack_init()
14671 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; in rack_init()
14672 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; in rack_init()
14673 rack->r_ctl.rc_highest_us_rtt = 0; in rack_init()
14674 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; in rack_init()
14675 rack->pcm_enabled = rack_pcm_is_enabled; in rack_init()
14677 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_init()
14678 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); in rack_init()
14680 rack->r_use_cmp_ack = 1; in rack_init()
14682 rack->rack_no_prr = 1; in rack_init()
14684 rack->rc_gp_no_rec_chg = 1; in rack_init()
14686 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_init()
14687 rack->rc_always_pace = 1; in rack_init()
14688 if (rack->rack_hibeta) in rack_init()
14691 rack->rc_always_pace = 0; in rack_init()
14692 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) in rack_init()
14693 rack->r_mbuf_queue = 1; in rack_init()
14695 rack->r_mbuf_queue = 0; in rack_init()
14698 rack->r_limit_scw = 1; in rack_init()
14700 rack->r_limit_scw = 0; in rack_init()
14702 rack->rc_labc = V_tcp_abc_l_var; in rack_init()
14704 rack->r_use_hpts_min = 1; in rack_init()
14705 if (tp->snd_una != 0) { in rack_init()
14706 rack->rc_sendvars_notset = 0; in rack_init()
14714 * syn-cache. This means none of the in rack_init()
14718 rack->rc_sendvars_notset = 1; in rack_init()
14721 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; in rack_init()
14722 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; in rack_init()
14723 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; in rack_init()
14724 rack->r_ctl.rc_min_to = rack_min_to; in rack_init()
14725 microuptime(&rack->r_ctl.act_rcv_time); in rack_init()
14726 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; in rack_init()
14727 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; in rack_init()
14729 rack->r_up_only = 1; in rack_init()
14732 rack->rc_gp_dyn_mul = 1; in rack_init()
14734 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_init()
14736 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_init()
14737 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; in rack_init()
14739 rack->rc_skip_timely = 1; in rack_init()
14741 if (rack->rc_skip_timely) { in rack_init()
14742 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_init()
14743 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_init()
14744 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_init()
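/*
 * With timely skipped the gains stay fixed: 90% in recovery,
 * 100% in congestion avoidance and 250% in slow start.
 */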
14746 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_init()
14747 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14748 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14750 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, in rack_init()
14752 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_init()
14753 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_init()
14754 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_init()
14755 rack->r_ctl.rc_went_idle_time = us_cts; in rack_init()
14756 rack->r_ctl.rc_time_probertt_starts = 0; in rack_init()
14758 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; in rack_init()
14760 rack->r_ctl.gate_to_fs = 1; in rack_init()
14761 rack->r_ctl.gp_gain_req = rack_gp_gain_req; in rack_init()
14767 rack->rc_rack_tmr_std_based = 1; in rack_init()
14771 rack->rc_rack_use_dsack = 1; in rack_init()
14775 rack->r_ctl.req_measurements = rack_req_measurements; in rack_init()
14777 rack->r_ctl.req_measurements = 1; in rack_init()
14779 rack->rack_hdw_pace_ena = 1; in rack_init()
14781 rack->r_rack_hw_rate_caps = 1; in rack_init()
14783 rack->rack_rec_nonrxt_use_cr = 1; in rack_init()
14792 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_init()
14794 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_init()
14796 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_init()
14804 tp->t_flags &= ~TF_GPUTINPROG; in rack_init()
14805 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14806 (tp->t_state != TCPS_TIME_WAIT)) { in rack_init()
14811 if (SEQ_GT(tp->snd_max, tp->iss)) in rack_init()
14812 snt = tp->snd_max - tp->iss; in rack_init()
14823 if (tp->snd_cwnd < iwin) in rack_init()
14824 tp->snd_cwnd = iwin; in rack_init()
14845 tp->snd_ssthresh = 0xffffffff; in rack_init()
14856 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14857 (tp->t_state != TCPS_TIME_WAIT) && in rack_init()
14859 (tp->snd_una != tp->snd_max)) { in rack_init()
14868 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_init()
14869 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_init()
14871 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_init()
14872 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_init()
14873 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_init()
14879 * they are non-zero. They are kept with a 5 in rack_init()
14884 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); in rack_init()
14885 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { in rack_init()
14887 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init()
14897 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14899 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; in rack_init()
14900 rack->r_ctl.num_dsack = qr.rack_num_dsacks; in rack_init()
14901 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; in rack_init()
14902 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; in rack_init()
14903 rack->rc_rack_rtt = qr.rack_rtt; in rack_init()
14904 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; in rack_init()
14905 rack->r_ctl.rc_sacked = qr.rack_sacked; in rack_init()
14906 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; in rack_init()
14907 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; in rack_init()
14908 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; in rack_init()
14909 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; in rack_init()
14910 rack->r_ctl.rc_prr_out = qr.rack_prr_out; in rack_init()
14912 rack->rc_tlp_in_progress = 1; in rack_init()
14913 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; in rack_init()
14915 rack->rc_tlp_in_progress = 0; in rack_init()
14916 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_init()
14919 rack->rc_srtt_measure_made = 1; in rack_init()
14921 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; in rack_init()
14923 if (rack->r_ctl.rc_scw) { in rack_init()
14924 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_init()
14925 rack->rack_scwnd_is_idle = 1; in rack_init()
14928 rack->r_ctl.persist_lost_ends = 0; in rack_init()
14929 rack->probe_not_answered = 0; in rack_init()
14930 rack->forced_ack = 0; in rack_init()
14931 tp->t_rxtshift = 0; in rack_init()
14932 rack->rc_in_persist = 1; in rack_init()
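/*
 * Seed t_rxtcur: the computed RTO plus the timer slop, clamped
 * between rack_rto_min and rack_rto_max.
 */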
14933 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_init()
14934 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_init()
14937 rack->r_wanted_output = 1; in rack_init()
14946 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14949 * non-zero return means we have a timer('s) in rack_init()
14955 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; in rack_init()
14957 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; in rack_init()
14959 tov = qr.timer_pacing_to - us_cts; in rack_init()
14964 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; in rack_init()
14967 tov = qr.timer_timer_exp - us_cts; in rack_init()
14973 rack->r_ctl.rc_hpts_flags, in rack_init()
14974 rack->r_ctl.rc_last_output_to, in rack_init()
14975 rack->r_ctl.rc_timer_exp); in rack_init()
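/*
 * Pending timers from the old stack are restored with their
 * remaining time (tov), so the stack switch does not silently
 * drop a pacing deadline or timer expiry.
 */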
14981 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); in rack_init()
14985 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, in rack_init()
14994 if ((tp->t_state == TCPS_CLOSED) || in rack_handoff_ok()
14995 (tp->t_state == TCPS_LISTEN)) { in rack_handoff_ok()
14999 if ((tp->t_state == TCPS_SYN_SENT) || in rack_handoff_ok()
15000 (tp->t_state == TCPS_SYN_RECEIVED)) { in rack_handoff_ok()
15007 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { in rack_handoff_ok()
15020 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required) { in rack_handoff_ok()
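/*
 * Net effect of the checks above: closed/listen and both SYN states
 * can hand off; a sent FIN with more than one byte still unacked
 * cannot; otherwise SACK must have been negotiated unless
 * rack_sack_not_required allows non-SACK connections.
 */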
15034 if (tp->t_fb_ptr) { in rack_fini()
15040 tp->t_flags &= ~TF_FORCEDATA; in rack_fini()
15041 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fini()
15050 if (rack->r_ctl.rc_scw) { in rack_fini()
15053 if (rack->r_limit_scw) in rack_fini()
15054 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); in rack_fini()
15057 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, in rack_fini()
15058 rack->r_ctl.rc_scw_index, in rack_fini()
15060 rack->r_ctl.rc_scw = NULL; in rack_fini()
15063 if (rack->r_ctl.fsb.tcp_ip_hdr) { in rack_fini()
15064 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); in rack_fini()
15065 rack->r_ctl.fsb.tcp_ip_hdr = NULL; in rack_fini()
15066 rack->r_ctl.fsb.th = NULL; in rack_fini()
15068 if (rack->rc_always_pace == 1) { in rack_fini()
15072 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { in rack_fini()
15075 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); in rack_fini()
15076 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_fini()
15080 if (rack->r_ctl.crte != NULL) { in rack_fini()
15081 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_fini()
15082 rack->rack_hdrw_pacing = 0; in rack_fini()
15083 rack->r_ctl.crte = NULL; in rack_fini()
15090 * get each one and free it like a cum-ack would and in rack_fini()
15093 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15095 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_fini()
15096 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15098 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15100 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15102 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_fini()
15103 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15104 rack->rc_free_cnt--; in rack_fini()
15107 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15109 if (rack->r_ctl.pcm_s != NULL) { in rack_fini()
15110 free(rack->r_ctl.pcm_s, M_TCPPCM); in rack_fini()
15111 rack->r_ctl.pcm_s = NULL; in rack_fini()
15112 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_fini()
15113 rack->r_ctl.pcm_i.cnt = 0; in rack_fini()
15115 if ((rack->r_ctl.rc_num_maps_alloced > 0) && in rack_fini()
15122 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; in rack_fini()
15123 log.u_bbr.flex2 = rack->rc_free_cnt; in rack_fini()
15125 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fini()
15126 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15128 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15135 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), in rack_fini()
15138 rack->r_ctl.rc_num_maps_alloced)); in rack_fini()
15139 rack->rc_free_cnt = 0; in rack_fini()
15140 free(rack->r_ctl.tqh, M_TCPFSB); in rack_fini()
15141 rack->r_ctl.tqh = NULL; in rack_fini()
15142 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); in rack_fini()
15143 tp->t_fb_ptr = NULL; in rack_fini()
15146 tp->snd_nxt = tp->snd_max; in rack_fini()
15152 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { in rack_set_state()
15153 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; in rack_set_state()
15155 switch (tp->t_state) { in rack_set_state()
15157 rack->r_state = TCPS_SYN_SENT; in rack_set_state()
15158 rack->r_substate = rack_do_syn_sent; in rack_set_state()
15161 rack->r_state = TCPS_SYN_RECEIVED; in rack_set_state()
15162 rack->r_substate = rack_do_syn_recv; in rack_set_state()
15166 rack->r_state = TCPS_ESTABLISHED; in rack_set_state()
15167 rack->r_substate = rack_do_established; in rack_set_state()
15170 rack->r_state = TCPS_CLOSE_WAIT; in rack_set_state()
15171 rack->r_substate = rack_do_close_wait; in rack_set_state()
15175 rack->r_state = TCPS_FIN_WAIT_1; in rack_set_state()
15176 rack->r_substate = rack_do_fin_wait_1; in rack_set_state()
15180 rack->r_state = TCPS_CLOSING; in rack_set_state()
15181 rack->r_substate = rack_do_closing; in rack_set_state()
15185 rack->r_state = TCPS_LAST_ACK; in rack_set_state()
15186 rack->r_substate = rack_do_lastack; in rack_set_state()
15189 rack->r_state = TCPS_FIN_WAIT_2; in rack_set_state()
15190 rack->r_substate = rack_do_fin_wait_2; in rack_set_state()
15198 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_set_state()
15199 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_state()
15215 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_audit()
15216 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_timer_audit()
15222 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15226 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) in rack_timer_audit()
15228 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_audit()
15229 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && in rack_timer_audit()
15236 if (tp->t_flags & TF_DELACK) { in rack_timer_audit()
15241 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timer_audit()
15242 (tp->t_state <= TCPS_CLOSING)) && in rack_timer_audit()
15244 (tp->snd_max == tp->snd_una)) { in rack_timer_audit()
15249 if (SEQ_GT(tp->snd_max, tp->snd_una) && in rack_timer_audit()
15273 if (tcp_in_hpts(rack->rc_tp)) { in rack_timer_audit()
15274 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_timer_audit()
15278 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_timer_audit()
15279 rack->r_early = 1; in rack_timer_audit()
15280 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_timer_audit()
15282 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_audit()
15284 tcp_hpts_remove(rack->rc_tp); in rack_timer_audit()
15286 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
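/*
 * Window update rules: accept the advertised window when the segment
 * is newer (snd_wl1 < seq), equally new but acking more (snd_wl2 < ack),
 * or identical but advertising a larger window; a shrink is only taken
 * from a segment carrying the ack already recorded in snd_wl2.
 */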
15294 if ((SEQ_LT(tp->snd_wl1, seq) || in rack_do_win_updates()
15295 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || in rack_do_win_updates()
15296 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { in rack_do_win_updates()
15298 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) in rack_do_win_updates()
15300 tp->snd_wnd = tiwin; in rack_do_win_updates()
15302 tp->snd_wl1 = seq; in rack_do_win_updates()
15303 tp->snd_wl2 = ack; in rack_do_win_updates()
15304 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15305 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15306 rack->r_wanted_output = 1; in rack_do_win_updates()
15307 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { in rack_do_win_updates()
15308 tp->snd_wnd = tiwin; in rack_do_win_updates()
15310 tp->snd_wl1 = seq; in rack_do_win_updates()
15311 tp->snd_wl2 = ack; in rack_do_win_updates()
15316 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15317 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15319 if ((rack->rc_in_persist != 0) && in rack_do_win_updates()
15320 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_do_win_updates()
15321 rack->r_ctl.rc_pace_min_segs))) { in rack_do_win_updates()
15325 if ((rack->rc_in_persist == 0) && in rack_do_win_updates()
15326 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_do_win_updates()
15327 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_win_updates()
15328 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_do_win_updates()
15329 sbavail(&tptosocket(tp)->so_snd) && in rack_do_win_updates()
15330 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_do_win_updates()
15337 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); in rack_do_win_updates()
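/*
 * One threshold, min(half the largest rwnd seen, one pacing minimum
 * segment), governs both checks above: leave persist once the peer
 * opens at least that much, and enter it when the window falls below
 * that while more data is queued than the window can take.
 */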
15345 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_input_packet()
15358 if (SEQ_GT(ae->ack, tp->snd_una)) { in rack_log_input_packet()
15359 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); in rack_log_input_packet()
15361 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); in rack_log_input_packet()
15365 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_input_packet()
15366 if (rack->rack_no_prr == 0) in rack_log_input_packet()
15367 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_log_input_packet()
15370 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_input_packet()
15372 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_input_packet()
15373 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_log_input_packet()
15374 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_log_input_packet()
15375 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_log_input_packet()
15376 log.u_bbr.pkts_out = tp->t_maxseg; in rack_log_input_packet()
15377 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_input_packet()
15379 log.u_bbr.lost = ae->flags; in rack_log_input_packet()
15382 if (ae->flags & TSTMP_HDWR) { in rack_log_input_packet()
15385 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15386 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15390 } else if (ae->flags & TSTMP_LRO) { in rack_log_input_packet()
15393 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15394 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15401 log.u_bbr.delRate = ae->timestamp; in rack_log_input_packet()
15403 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_log_input_packet()
15405 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_log_input_packet()
15407 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_log_input_packet()
15411 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_log_input_packet()
15413 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_log_input_packet()
15414 log.u_bbr.rttProp = tcp_req->timestamp; in rack_log_input_packet()
15415 log.u_bbr.cur_del_rate = tcp_req->start; in rack_log_input_packet()
15416 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_log_input_packet()
15420 log.u_bbr.bw_inuse = tcp_req->end; in rack_log_input_packet()
15422 log.u_bbr.flex6 = tcp_req->start_seq; in rack_log_input_packet()
15423 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_log_input_packet()
15425 log.u_bbr.epoch = tcp_req->end_seq; in rack_log_input_packet()
15431 th->th_seq = ae->seq; in rack_log_input_packet()
15432 th->th_ack = ae->ack; in rack_log_input_packet()
15433 th->th_win = ae->win; in rack_log_input_packet()
15435 th->th_sport = inp->inp_fport; in rack_log_input_packet()
15436 th->th_dport = inp->inp_lport; in rack_log_input_packet()
15437 tcp_set_flags(th, ae->flags); in rack_log_input_packet()
15439 if (ae->flags & HAS_TSTMP) { in rack_log_input_packet()
15443 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); in rack_log_input_packet()
15453 val = htonl(ae->ts_value); in rack_log_input_packet()
15456 val = htonl(ae->ts_echo); in rack_log_input_packet()
15460 th->th_off = (sizeof(struct tcphdr) >> 2); in rack_log_input_packet()
15469 * snd_una was advanced and then un-advancing it so that the in rack_log_input_packet()
15472 if (tp->snd_una != high_seq) { in rack_log_input_packet()
15473 orig_snd_una = tp->snd_una; in rack_log_input_packet()
15474 tp->snd_una = high_seq; in rack_log_input_packet()
15479 &tptosocket(tp)->so_rcv, in rack_log_input_packet()
15480 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, in rack_log_input_packet()
15483 tp->snd_una = orig_snd_una; in rack_log_input_packet()
15494 * A persist or keep-alive was forced out, update our in rack_handle_probe_response()
15496 * When a subsequent keep-alive or persist times out in rack_handle_probe_response()
15502 * will clear the probe_not_answered flag i.e. cum-ack in rack_handle_probe_response()
15506 rack->forced_ack = 0; in rack_handle_probe_response()
15507 rack->rc_tp->t_rxtshift = 0; in rack_handle_probe_response()
15508 if ((rack->rc_in_persist && in rack_handle_probe_response()
15509 (tiwin == rack->rc_tp->snd_wnd)) || in rack_handle_probe_response()
15510 (rack->rc_in_persist == 0)) { in rack_handle_probe_response()
15525 if (rack->rc_in_persist) in rack_handle_probe_response()
15527 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; in rack_handle_probe_response()
15530 if (rack->probe_not_answered == 0) { in rack_handle_probe_response()
15552 rack->r_ctl.roundends = tp->snd_max; in rack_new_round_starts()
15553 rack->rc_new_rnd_needed = 0; in rack_new_round_starts()
15554 rack_log_hystart_event(rack, tp->snd_max, 4); in rack_new_round_starts()
15562 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pcm()
15569 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pcm()
15575 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; in rack_log_pcm()
15576 log.u_bbr.bbr_substate = rack->pcm_needed; in rack_log_pcm()
15578 log.u_bbr.bbr_substate |= rack->pcm_in_progress; in rack_log_pcm()
15580 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ in rack_log_pcm()
15581 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, in rack_log_pcm()
15596 rack->r_ctl.current_round++; in rack_new_round_setup()
15598 rack->rc_new_rnd_needed = 1; in rack_new_round_setup()
15599 if ((rack->pcm_enabled == 1) && in rack_new_round_setup()
15600 (rack->pcm_needed == 0) && in rack_new_round_setup()
15601 (rack->pcm_in_progress == 0)) { in rack_new_round_setup()
15609 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_new_round_setup()
15610 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_new_round_setup()
15611 rack->pcm_needed = 1; in rack_new_round_setup()
15612 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15614 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15617 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_new_round_setup()
15619 if (CC_ALGO(tp)->newround != NULL) { in rack_new_round_setup()
15620 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); in rack_new_round_setup()
15625 * that we are not just pushing on slow-start and just in rack_new_round_setup()
15627 * boost in b/w during the initial slow-start. in rack_new_round_setup()
15629 if (rack->dgp_on && in rack_new_round_setup()
15630 (rack->rc_initial_ss_comp == 0) && in rack_new_round_setup()
15631 (tp->snd_cwnd < tp->snd_ssthresh) && in rack_new_round_setup()
15632 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && in rack_new_round_setup()
15633 (rack->r_ctl.gp_rnd_thresh > 0) && in rack_new_round_setup()
15634 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { in rack_new_round_setup()
15644 rack->rc_initial_ss_comp = 1; in rack_new_round_setup()
15646 if (tcp_bblogging_on(rack->rc_tp)) { in rack_new_round_setup()
15652 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_new_round_setup()
15653 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_new_round_setup()
15654 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; in rack_new_round_setup()
15655 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; in rack_new_round_setup()
15656 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15661 if ((rack->r_ctl.gate_to_fs == 1) && in rack_new_round_setup()
15662 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { in rack_new_round_setup()
15663 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15665 tp->snd_ssthresh = tp->snd_cwnd - 1; in rack_new_round_setup()
15667 rack->r_fast_output = 0; in rack_new_round_setup()
15678 * A) It moves the cum-ack forward in rack_do_compressed_ack_processing()
15679 * B) It is behind the cum-ack. in rack_do_compressed_ack_processing()
15680 * C) It is a window-update ack. in rack_do_compressed_ack_processing()
15681 * D) It is a dup-ack. in rack_do_compressed_ack_processing()
15683 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES in rack_do_compressed_ack_processing()
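 * The classification maps onto ae->ack_val_set: a forward cum-ack is
 * ACK_CUMACK, an old ack ACK_BEHIND, a same-ack window change
 * ACK_RWND, and the rest ACK_DUPACK; the per-class handling below
 * keys off that value.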
15708 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_compressed_ack_processing()
15709 if (rack->gp_ready && in rack_do_compressed_ack_processing()
15710 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) in rack_do_compressed_ack_processing()
15713 if (rack->r_state != tp->t_state) in rack_do_compressed_ack_processing()
15715 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15716 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_compressed_ack_processing()
15725 bytes = tp->gput_ack - tp->gput_seq; in rack_do_compressed_ack_processing()
15726 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_compressed_ack_processing()
15727 bytes += tp->gput_seq - tp->snd_una; in rack_do_compressed_ack_processing()
15728 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_compressed_ack_processing()
15734 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_compressed_ack_processing()
15735 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_compressed_ack_processing()
15736 tp->gput_seq, in rack_do_compressed_ack_processing()
15738 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_compressed_ack_processing()
15742 to->to_flags = 0; in rack_do_compressed_ack_processing()
15743 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), in rack_do_compressed_ack_processing()
15744 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); in rack_do_compressed_ack_processing()
15745 cnt = m->m_len / sizeof(struct tcp_ackent); in rack_do_compressed_ack_processing()
15747 high_seq = tp->snd_una; in rack_do_compressed_ack_processing()
15748 the_win = tp->snd_wnd; in rack_do_compressed_ack_processing()
15749 win_seq = tp->snd_wl1; in rack_do_compressed_ack_processing()
15750 win_upd_ack = tp->snd_wl2; in rack_do_compressed_ack_processing()
15753 rack->r_ctl.rc_rcvtime = cts; in rack_do_compressed_ack_processing()
15755 if ((rack->rc_gp_dyn_mul) && in rack_do_compressed_ack_processing()
15756 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15757 (rack->rc_always_pace)) { in rack_do_compressed_ack_processing()
15767 if (ae->flags & TH_FIN) in rack_do_compressed_ack_processing()
15776 tiwin = ae->win << tp->snd_scale; in rack_do_compressed_ack_processing()
15777 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_compressed_ack_processing()
15778 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_compressed_ack_processing()
15780 if (SEQ_LT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15782 ae->ack_val_set = ACK_BEHIND; in rack_do_compressed_ack_processing()
15783 } else if (SEQ_GT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15785 ae->ack_val_set = ACK_CUMACK; in rack_do_compressed_ack_processing()
15786 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)) { in rack_do_compressed_ack_processing()
15788 ae->ack_val_set = ACK_DUPACK; in rack_do_compressed_ack_processing()
15791 ae->ack_val_set = ACK_RWND; in rack_do_compressed_ack_processing()
15794 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); in rack_do_compressed_ack_processing()
15796 if (ae->flags & HAS_TSTMP) { in rack_do_compressed_ack_processing()
15798 to->to_flags = TOF_TS; in rack_do_compressed_ack_processing()
15799 ae->ts_echo -= tp->ts_offset; in rack_do_compressed_ack_processing()
15800 to->to_tsecr = ae->ts_echo; in rack_do_compressed_ack_processing()
15801 to->to_tsval = ae->ts_value; in rack_do_compressed_ack_processing()
15807 if (TSTMP_GT(ae->ts_echo, ms_cts)) in rack_do_compressed_ack_processing()
15808 to->to_tsecr = 0; in rack_do_compressed_ack_processing()
15809 if (tp->ts_recent && in rack_do_compressed_ack_processing()
15810 TSTMP_LT(ae->ts_value, tp->ts_recent)) { in rack_do_compressed_ack_processing()
15811 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { in rack_do_compressed_ack_processing()
15815 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15816 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15823 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && in rack_do_compressed_ack_processing()
15824 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { in rack_do_compressed_ack_processing()
15825 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_compressed_ack_processing()
15826 tp->ts_recent = ae->ts_value; in rack_do_compressed_ack_processing()
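/*
 * The checks above zero an echoed timestamp from the future, discard
 * (per PAWS) a segment whose ts_value is older than ts_recent, and
 * refresh ts_recent from the segment matching last_ack_sent.
 */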
15830 to->to_flags = 0; in rack_do_compressed_ack_processing()
15833 if (tp->t_idle_reduce && in rack_do_compressed_ack_processing()
15834 (tp->snd_max == tp->snd_una) && in rack_do_compressed_ack_processing()
15835 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_compressed_ack_processing()
15839 tp->t_rcvtime = ticks; in rack_do_compressed_ack_processing()
15841 if (tcp_ecn_input_segment(tp, ae->flags, 0, in rack_do_compressed_ack_processing()
15842 tcp_packets_this_ack(tp, ae->ack), in rack_do_compressed_ack_processing()
15843 ae->codepoint)) in rack_do_compressed_ack_processing()
15844 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); in rack_do_compressed_ack_processing()
15847 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15848 tp->tcp_cnt_counters[ae->ack_val_set]++; in rack_do_compressed_ack_processing()
15855 * The non-compressed path through the code has this in rack_do_compressed_ack_processing()
15862 if (ae->ack_val_set == ACK_BEHIND) { in rack_do_compressed_ack_processing()
15865 * or it could be a keep-alive or persists in rack_do_compressed_ack_processing()
15867 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { in rack_do_compressed_ack_processing()
15868 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_compressed_ack_processing()
15869 if (rack->r_ctl.rc_reorder_ts == 0) in rack_do_compressed_ack_processing()
15870 rack->r_ctl.rc_reorder_ts = 1; in rack_do_compressed_ack_processing()
15872 } else if (ae->ack_val_set == ACK_DUPACK) { in rack_do_compressed_ack_processing()
15874 rack_strike_dupack(rack, ae->ack); in rack_do_compressed_ack_processing()
15875 } else if (ae->ack_val_set == ACK_RWND) { in rack_do_compressed_ack_processing()
15877 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15878 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15879 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15880 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15881 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15883 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15885 if (rack->forced_ack) { in rack_do_compressed_ack_processing()
15887 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15892 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15893 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15898 if (SEQ_GT(ae->ack, tp->snd_max)) { in rack_do_compressed_ack_processing()
15903 if ((tp->t_flags & TF_ACKNOW) == 0) { in rack_do_compressed_ack_processing()
15905 if (tp->t_flags & TF_ACKNOW) in rack_do_compressed_ack_processing()
15906 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15911 if (tiwin != tp->snd_wnd) { in rack_do_compressed_ack_processing()
15912 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15913 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15919 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15920 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); in rack_do_compressed_ack_processing()
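/* Ceiling division: e.g. an ack advancing 2 * segsiz + 1 bytes counts as three acks in. */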
15923 high_seq = ae->ack; in rack_do_compressed_ack_processing()
15925 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15926 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15927 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15928 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15929 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15931 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15933 rack_process_to_cumack(tp, rack, ae->ack, cts, to, in rack_do_compressed_ack_processing()
15934 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15938 if (rack->rc_dsack_round_seen) { in rack_do_compressed_ack_processing()
15940 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { in rack_do_compressed_ack_processing()
15942 rack->rc_dsack_round_seen = 0; in rack_do_compressed_ack_processing()
15953 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15954 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15955 if (ae->ack_val_set == ACK_CUMACK) in rack_do_compressed_ack_processing()
15956 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15965 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { in rack_do_compressed_ack_processing()
15967 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); in rack_do_compressed_ack_processing()
15968 } else if (rack->rc_has_collapsed) in rack_do_compressed_ack_processing()
15970 if ((rack->r_collapse_point_valid) && in rack_do_compressed_ack_processing()
15971 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) in rack_do_compressed_ack_processing()
15972 rack->r_collapse_point_valid = 0; in rack_do_compressed_ack_processing()
15973 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
15986 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && in rack_do_compressed_ack_processing()
15987 (rack->rc_new_rnd_needed == 0) && in rack_do_compressed_ack_processing()
15997 * since cum-ack moved forward. in rack_do_compressed_ack_processing()
15999 rack->probe_not_answered = 0; in rack_do_compressed_ack_processing()
16000 if (tp->t_flags & TF_NEEDSYN) { in rack_do_compressed_ack_processing()
16002 * T/TCP: Connection was half-synchronized, and our SYN has in rack_do_compressed_ack_processing()
16004 * to non-starred state, increment snd_una for ACK of SYN, in rack_do_compressed_ack_processing()
16007 tp->t_flags &= ~TF_NEEDSYN; in rack_do_compressed_ack_processing()
16008 tp->snd_una++; in rack_do_compressed_ack_processing()
16009 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
16011 if (acked > sbavail(&so->so_snd)) in rack_do_compressed_ack_processing()
16012 acked_amount = sbavail(&so->so_snd); in rack_do_compressed_ack_processing()
16013 if (IN_FASTRECOVERY(tp->t_flags) && in rack_do_compressed_ack_processing()
16014 (rack->rack_no_prr == 0)) in rack_do_compressed_ack_processing()
16016 if (IN_RECOVERY(tp->t_flags)) { in rack_do_compressed_ack_processing()
16017 if (SEQ_LT(high_seq, tp->snd_recover) && in rack_do_compressed_ack_processing()
16018 (SEQ_LT(high_seq, tp->snd_max))) { in rack_do_compressed_ack_processing()
16024 } else if ((rack->rto_from_rec == 1) && in rack_do_compressed_ack_processing()
16025 SEQ_GEQ(high_seq, tp->snd_recover)) { in rack_do_compressed_ack_processing()
16028 * and never re-entered recovery. The timeout(s) in rack_do_compressed_ack_processing()
16032 rack->rto_from_rec = 0; in rack_do_compressed_ack_processing()
16034 /* Handle the rack-log-ack part (sendmap) */ in rack_do_compressed_ack_processing()
16035 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16037 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16038 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
16051 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16054 if ((tp->t_flags & TF_PREVVALID) && in rack_do_compressed_ack_processing()
16055 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_do_compressed_ack_processing()
16056 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
16057 if (tp->t_rxtshift == 1 && in rack_do_compressed_ack_processing()
16058 (int)(ticks - tp->t_badrxtwin) < 0) in rack_do_compressed_ack_processing()
16074 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_compressed_ack_processing()
16076 p_cwnd += tp->snd_cwnd; in rack_do_compressed_ack_processing()
16079 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { in rack_do_compressed_ack_processing()
16080 /* Must be non-newreno (cubic) getting too ahead of itself */ in rack_do_compressed_ack_processing()
16081 tp->snd_cwnd = p_cwnd; in rack_do_compressed_ack_processing()
16084 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_do_compressed_ack_processing()
16085 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16087 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_do_compressed_ack_processing()
16089 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_do_compressed_ack_processing()
16094 tp->t_acktime = ticks; in rack_do_compressed_ack_processing()
16095 rack_log_progress_event(rack, tp, tp->t_acktime, in rack_do_compressed_ack_processing()
16098 tp->t_rxtshift = 0; in rack_do_compressed_ack_processing()
16099 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_do_compressed_ack_processing()
16100 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_do_compressed_ack_processing()
16101 rack->rc_tlp_in_progress = 0; in rack_do_compressed_ack_processing()
16102 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_do_compressed_ack_processing()
16104 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_do_compressed_ack_processing()
16105 tp->snd_recover = tp->snd_una; in rack_do_compressed_ack_processing()
16106 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) in rack_do_compressed_ack_processing()
16107 tp->snd_nxt = tp->snd_max; in rack_do_compressed_ack_processing()
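/*
 * A forward cum-ack resets backoff: rxtshift goes to zero, t_rxtcur
 * is re-seeded, any TLP in flight is forgotten, and snd_recover and
 * snd_nxt are pulled forward as needed.
 */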
16112 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_do_compressed_ack_processing()
16113 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16114 tp->snd_wl2 = high_seq; in rack_do_compressed_ack_processing()
16115 tp->t_dupacks = 0; in rack_do_compressed_ack_processing()
16117 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
16118 (rack->in_probe_rtt == 0) && in rack_do_compressed_ack_processing()
16119 rack->rc_gp_dyn_mul && in rack_do_compressed_ack_processing()
16120 rack->rc_always_pace) { in rack_do_compressed_ack_processing()
16124 if (tp->snd_una == tp->snd_max) { in rack_do_compressed_ack_processing()
16125 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
16126 rack->r_ctl.retran_during_recovery = 0; in rack_do_compressed_ack_processing()
16127 rack->rc_suspicious = 0; in rack_do_compressed_ack_processing()
16128 rack->r_ctl.dsack_byte_cnt = 0; in rack_do_compressed_ack_processing()
16129 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_do_compressed_ack_processing()
16130 if (rack->r_ctl.rc_went_idle_time == 0) in rack_do_compressed_ack_processing()
16131 rack->r_ctl.rc_went_idle_time = 1; in rack_do_compressed_ack_processing()
16133 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_do_compressed_ack_processing()
16134 tp->t_acktime = 0; in rack_do_compressed_ack_processing()
16136 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16137 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16138 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_compressed_ack_processing()
16139 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16140 (sbavail(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16141 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_do_compressed_ack_processing()
16147 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16148 /* tcp_close will kill the inp pre-log the Reset */ in rack_do_compressed_ack_processing()
16153 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16154 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16155 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16168 * We would normally do drop-with-reset which would in rack_do_compressed_ack_processing()
16179 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16180 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16181 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
16189 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
16198 * We don't change to fin-wait-2 if we have our fin acked in rack_do_compressed_ack_processing()
16206 if (sbavail(&so->so_snd)) { in rack_do_compressed_ack_processing()
16207 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16209 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_compressed_ack_processing()
16218 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16219 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16220 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16231 switch(tp->t_state) { in rack_do_compressed_ack_processing()
16236 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16237 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16238 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16251 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16252 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16253 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16266 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16267 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16268 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16272 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
16285 if (rack->r_fast_output) { in rack_do_compressed_ack_processing()
16294 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16295 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16296 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16303 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16304 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16321 if ((rack->r_wanted_output != 0) || in rack_do_compressed_ack_processing()
16322 (rack->r_fast_output != 0) || in rack_do_compressed_ack_processing()
16323 (tp->t_flags & TF_ACKNOW)) { in rack_do_compressed_ack_processing()
16333 if (tp->t_flags2 & TF2_HPTS_CALLS) in rack_do_compressed_ack_processing()
16334 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_compressed_ack_processing()
16339 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_compressed_ack_processing()
16361 * cts - is the current time from tv (caller gets ts) in microseconds. in rack_do_segment_nounlock()
16362 * ms_cts - is the current time from tv in milliseconds. in rack_do_segment_nounlock()
16363 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. in rack_do_segment_nounlock()
16386 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_segment_nounlock()
16387 if (rack->rack_deferred_inited == 0) { in rack_do_segment_nounlock()
16398 * can happen in the non-LRO path where we are pacing and in rack_do_segment_nounlock()
16403 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16408 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16412 if ((rack->rc_always_pace == 1) && in rack_do_segment_nounlock()
16413 (rack->rc_ack_can_sendout_data == 0) && in rack_do_segment_nounlock()
16414 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_do_segment_nounlock()
16415 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { in rack_do_segment_nounlock()
16422 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; in rack_do_segment_nounlock()
16423 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { in rack_do_segment_nounlock()
16435 optlen = (th->th_off << 2) - sizeof(struct tcphdr); in rack_do_segment_nounlock()
16461 rack->r_ctl.gp_bw, in rack_do_segment_nounlock()
16467 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16472 nsegs = m->m_pkthdr.lro_nsegs; in rack_do_segment_nounlock()
16479 if ((m->m_flags & M_TSTMP) || in rack_do_segment_nounlock()
16480 (m->m_flags & M_TSTMP_LRO)) { in rack_do_segment_nounlock()
16482 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_segment_nounlock()
16483 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_segment_nounlock()
16485 rack->r_ctl.act_rcv_time = *tv; in rack_do_segment_nounlock()
16489 * Unscale the window into a 32-bit value. For the SYN_SENT state in rack_do_segment_nounlock()
16492 tiwin = th->th_win << tp->snd_scale; in rack_do_segment_nounlock()
16521 (th->th_off << 2) - sizeof(struct tcphdr), in rack_do_segment_nounlock()
16523 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", in rack_do_segment_nounlock()
16525 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", in rack_do_segment_nounlock()
16527 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { in rack_do_segment_nounlock()
16535 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_segment_nounlock()
16536 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_segment_nounlock()
16545 bytes = tp->gput_ack - tp->gput_seq; in rack_do_segment_nounlock()
16546 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_segment_nounlock()
16547 bytes += tp->gput_seq - tp->snd_una; in rack_do_segment_nounlock()
16548 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_segment_nounlock()
16554 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_segment_nounlock()
16555 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_segment_nounlock()
16556 tp->gput_seq, in rack_do_segment_nounlock()
16558 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_segment_nounlock()
16561 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_segment_nounlock()
16567 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_segment_nounlock()
16568 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); in rack_do_segment_nounlock()
16570 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); in rack_do_segment_nounlock()
16574 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_do_segment_nounlock()
16575 if (rack->rack_no_prr == 0) in rack_do_segment_nounlock()
16576 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_do_segment_nounlock()
16579 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_do_segment_nounlock()
16581 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_do_segment_nounlock()
16582 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_do_segment_nounlock()
16583 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_do_segment_nounlock()
16584 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_do_segment_nounlock()
16585 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_do_segment_nounlock()
16586 log.u_bbr.flex3 = m->m_flags; in rack_do_segment_nounlock()
16587 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_do_segment_nounlock()
16594 if (m->m_flags & M_TSTMP) { in rack_do_segment_nounlock()
16600 } else if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment_nounlock()
16609 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; in rack_do_segment_nounlock()
16611 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_do_segment_nounlock()
16613 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_do_segment_nounlock()
16615 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_do_segment_nounlock()
16619 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16621 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16622 log.u_bbr.rttProp = tcp_req->timestamp; in rack_do_segment_nounlock()
16623 log.u_bbr.cur_del_rate = tcp_req->start; in rack_do_segment_nounlock()
16624 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_do_segment_nounlock()
16628 log.u_bbr.bw_inuse = tcp_req->end; in rack_do_segment_nounlock()
16630 log.u_bbr.flex6 = tcp_req->start_seq; in rack_do_segment_nounlock()
16631 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_do_segment_nounlock()
16633 log.u_bbr.epoch = tcp_req->end_seq; in rack_do_segment_nounlock()
16637 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, in rack_do_segment_nounlock()
16642 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16651 * If a segment with the ACK-bit set arrives in the SYN-SENT state in rack_do_segment_nounlock()
16654 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && in rack_do_segment_nounlock()
16655 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_segment_nounlock()
16670 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && in rack_do_segment_nounlock()
16678 * Segment received on connection. Reset idle time and keep-alive in rack_do_segment_nounlock()
16682 if (tp->t_idle_reduce && in rack_do_segment_nounlock()
16683 (tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16684 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_segment_nounlock()
16688 tp->t_rcvtime = ticks; in rack_do_segment_nounlock()
16690 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); in rack_do_segment_nounlock()
16692 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_segment_nounlock()
16693 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_segment_nounlock()
16699 tcp_packets_this_ack(tp, th->th_ack), in rack_do_segment_nounlock()
16701 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); in rack_do_segment_nounlock()
16709 to.to_tsecr -= tp->ts_offset; in rack_do_segment_nounlock()
16713 if ((rack->r_rcvpath_rtt_up == 1) && in rack_do_segment_nounlock()
16715 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { in rack_do_segment_nounlock()
16726 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) in rack_do_segment_nounlock()
16727 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); in rack_do_segment_nounlock()
16728 rack->r_rcvpath_rtt_up = 0; in rack_do_segment_nounlock()
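/*
 * The receive-path RTT probe above disarms once the peer echoes a
 * timestamp at or past the value recorded when it was armed; the
 * cts delta since last_time_of_arm_rcv is the measured round trip.
 */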
16739 if (rack->r_state == 0) { in rack_do_segment_nounlock()
16741 KASSERT(rack->rc_inp != NULL, in rack_do_segment_nounlock()
16742 ("%s: rack->rc_inp unexpectedly NULL", __func__)); in rack_do_segment_nounlock()
16743 if (rack->rc_inp == NULL) { in rack_do_segment_nounlock()
16744 rack->rc_inp = inp; in rack_do_segment_nounlock()
16754 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { in rack_do_segment_nounlock()
16758 (tp->t_flags & TF_REQ_SCALE)) { in rack_do_segment_nounlock()
16759 tp->t_flags |= TF_RCVD_SCALE; in rack_do_segment_nounlock()
16760 tp->snd_scale = to.to_wscale; in rack_do_segment_nounlock()
16762 tp->t_flags &= ~TF_REQ_SCALE; in rack_do_segment_nounlock()
16767 tp->snd_wnd = th->th_win; in rack_do_segment_nounlock()
16770 (tp->t_flags & TF_REQ_TSTMP)) { in rack_do_segment_nounlock()
16771 tp->t_flags |= TF_RCVD_TSTMP; in rack_do_segment_nounlock()
16772 tp->ts_recent = to.to_tsval; in rack_do_segment_nounlock()
16773 tp->ts_recent_age = cts; in rack_do_segment_nounlock()
16775 tp->t_flags &= ~TF_REQ_TSTMP; in rack_do_segment_nounlock()
16779 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_do_segment_nounlock()
16781 tp->t_flags &= ~TF_SACK_PERMIT; in rack_do_segment_nounlock()
16782 if (tp->t_flags & TF_FASTOPEN) { in rack_do_segment_nounlock()
16789 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_do_segment_nounlock()
16802 * TF_SACK_PERMIT is set and the sack-not-required is clear. in rack_do_segment_nounlock()
16803 * The code now does do dup-ack counting so if you don't in rack_do_segment_nounlock()
16809 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_do_segment_nounlock()
16811 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, in rack_do_segment_nounlock()
16819 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); in rack_do_segment_nounlock()
16823 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_segment_nounlock()
16824 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16825 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16826 (rack->rc_always_pace)) { in rack_do_segment_nounlock()
16831 if ((rack->forced_ack) && in rack_do_segment_nounlock()
16837 * always. All other times (timers etc) we must have a rack-state in rack_do_segment_nounlock()
16840 rack->r_ctl.rc_rcvtime = cts; in rack_do_segment_nounlock()
16841 if (rack->r_state != tp->t_state) in rack_do_segment_nounlock()
16843 if (SEQ_GT(th->th_ack, tp->snd_una) && in rack_do_segment_nounlock()
16844 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) in rack_do_segment_nounlock()
16846 prev_state = rack->r_state; in rack_do_segment_nounlock()
16848 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && in rack_do_segment_nounlock()
16849 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || in rack_do_segment_nounlock()
16850 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { in rack_do_segment_nounlock()
16852 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); in rack_do_segment_nounlock()
16854 retval = (*rack->r_substate) (m, th, so, in rack_do_segment_nounlock()
16859 * If retval is 1 the tcb is unlocked and most likely the tp in rack_do_segment_nounlock()
16863 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16864 (rack->rc_always_pace) && in rack_do_segment_nounlock()
16865 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16866 rack->in_probe_rtt && in rack_do_segment_nounlock()
16867 (rack->r_ctl.rc_time_probertt_starts == 0)) { in rack_do_segment_nounlock()
16874 if (rack->set_pacing_done_a_iw == 0) { in rack_do_segment_nounlock()
16876 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { in rack_do_segment_nounlock()
16878 rack->set_pacing_done_a_iw = 1; in rack_do_segment_nounlock()
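Quick arithmetic on the guard above (the MSS value is illustrative, not from this excerpt):

/*
 * (snd_una - iss) > 10 * ctf_fixed_maxseg(tp): with a typical
 * 1448-byte MSS that is 14,480 bytes cumulatively acked past the
 * ISS, i.e. roughly one initial window delivered, after which the
 * one-time post-IW pacing adjustment fires (set_pacing_done_a_iw).
 */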
16889 * use of 0xf here since we only have 11 counters (0 - 0xa) and in rack_do_segment_nounlock()
16897 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_segment_nounlock()
16898 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); in rack_do_segment_nounlock()
16903 if ((rack->r_wanted_output != 0) || in rack_do_segment_nounlock()
16904 (tp->t_flags & TF_ACKNOW) || in rack_do_segment_nounlock()
16905 (rack->r_fast_output != 0)) { in rack_do_segment_nounlock()
16918 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { in rack_do_segment_nounlock()
16922 (tcp_in_hpts(rack->rc_tp) == 0)) { in rack_do_segment_nounlock()
16932 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) in rack_do_segment_nounlock()
16933 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_segment_nounlock()
16944 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && in rack_do_segment_nounlock()
16945 (rack->rc_new_rnd_needed == 0) && in rack_do_segment_nounlock()
16951 rack_new_round_setup(tp, rack, tp->snd_una); in rack_do_segment_nounlock()
16954 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && in rack_do_segment_nounlock()
16955 (SEQ_GT(tp->snd_max, tp->snd_una) || in rack_do_segment_nounlock()
16956 (tp->t_flags & TF_DELACK) || in rack_do_segment_nounlock()
16957 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_do_segment_nounlock()
16958 (tp->t_state <= TCPS_CLOSING)))) { in rack_do_segment_nounlock()
16960 if ((tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16961 ((tp->t_flags & TF_DELACK) == 0) && in rack_do_segment_nounlock()
16962 (tcp_in_hpts(rack->rc_tp)) && in rack_do_segment_nounlock()
16963 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_segment_nounlock()
16969 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_do_segment_nounlock()
16971 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_do_segment_nounlock()
16972 rack->r_early = 1; in rack_do_segment_nounlock()
16973 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_do_segment_nounlock()
16976 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_do_segment_nounlock()
16993 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_segment_nounlock()
16999 rack->r_wanted_output = 0; in rack_do_segment_nounlock()
17015 if (!STAILQ_EMPTY(&tp->t_inqueue)) { in rack_do_segment()
17021 if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment()
17040 /* Return the next guy to be re-transmitted */ in tcp_rack_output()
17041 if (tqhash_empty(rack->r_ctl.tqh)) { in tcp_rack_output()
17044 if (tp->t_flags & TF_SENTFIN) { in tcp_rack_output()
17049 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in tcp_rack_output()
17050 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { in tcp_rack_output()
17053 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { in tcp_rack_output()
17061 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && in tcp_rack_output()
17062 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17069 if (rsm->r_flags & RACK_ACKED) { in tcp_rack_output()
17072 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && in tcp_rack_output()
17073 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17078 idx = rsm->r_rtr_cnt - 1; in tcp_rack_output()
17079 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; in tcp_rack_output()
17086 if ((tsused - ts_low) < thresh) { in tcp_rack_output()
17090 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in tcp_rack_output()
17091 ((rsm->r_flags & RACK_SACK_PASSED))) { in tcp_rack_output()
17093 * We have passed the dup-ack threshold <or> in tcp_rack_output()
17096 * it is only the dup-ack threshold that in tcp_rack_output()
17100 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); in tcp_rack_output()
17101 rack->r_fast_output = 0; in tcp_rack_output()
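For orientation, a compact userland-style restatement of the eligibility test tcp_rack_output() applies above; the function name and the literal dup-ack threshold are illustrative, not the kernel's:

#include <stdint.h>

/* Sketch: a segment becomes retransmittable once "thresh" usec have
 * elapsed since its last transmit AND either the dup-ack threshold
 * was reached or a later-sent segment was SACKed past it. */
static int
rsm_rxt_eligible(uint32_t tsused, uint32_t ts_low, uint32_t thresh,
    int dupacks, int sack_passed)
{
	if ((tsused - ts_low) < thresh)
		return (0);	/* too soon; let the rack timer run */
	return ((dupacks >= 3) || sack_passed);
}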
17112 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pacing_delay_calc()
17133 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; in rack_log_pacing_delay_calc()
17134 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; in rack_log_pacing_delay_calc()
17135 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_pacing_delay_calc()
17136 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_pacing_delay_calc()
17137 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; in rack_log_pacing_delay_calc()
17139 log.u_bbr.use_lt_bw |= rack->r_late; in rack_log_pacing_delay_calc()
17141 log.u_bbr.use_lt_bw |= rack->r_early; in rack_log_pacing_delay_calc()
17143 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_pacing_delay_calc()
17145 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_pacing_delay_calc()
17147 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_pacing_delay_calc()
17149 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_pacing_delay_calc()
17151 log.u_bbr.use_lt_bw |= rack->gp_ready; in rack_log_pacing_delay_calc()
17153 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; in rack_log_pacing_delay_calc()
17154 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; in rack_log_pacing_delay_calc()
17155 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; in rack_log_pacing_delay_calc()
17158 if (rack->r_ctl.gp_bw == 0) in rack_log_pacing_delay_calc()
17163 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; in rack_log_pacing_delay_calc()
17164 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; in rack_log_pacing_delay_calc()
17166 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { in rack_log_pacing_delay_calc()
17175 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pacing_delay_calc()
17176 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; in rack_log_pacing_delay_calc()
17178 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_pacing_delay_calc()
17180 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_pacing_delay_calc()
17182 log.u_bbr.bbr_state = rack->dgp_on; in rack_log_pacing_delay_calc()
17184 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; in rack_log_pacing_delay_calc()
17186 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_pacing_delay_calc()
17187 &rack->rc_inp->inp_socket->so_rcv, in rack_log_pacing_delay_calc()
17188 &rack->rc_inp->inp_socket->so_snd, in rack_log_pacing_delay_calc()
17199 user_max = rack->rc_user_set_max_segs * mss; in rack_get_pacing_len()
17200 if (rack->rc_force_max_seg) { in rack_get_pacing_len()
17203 if (rack->use_fixed_rate && in rack_get_pacing_len()
17204 ((rack->r_ctl.crte == NULL) || in rack_get_pacing_len()
17205 (bw != rack->r_ctl.crte->rate))) { in rack_get_pacing_len()
17210 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_len()
17215 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, in rack_get_pacing_len()
17216 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_len()
17219 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { in rack_get_pacing_len()
17220 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) in rack_get_pacing_len()
17221 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; in rack_get_pacing_len()
17223 if (rack->r_ctl.rc_user_set_min_segs && in rack_get_pacing_len()
17224 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) in rack_get_pacing_len()
17225 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; in rack_get_pacing_len()
17238 * nearly zero, maybe because of a time-out? in rack_arrive_at_discounted_rate()
17239 * Let's drop back to the lt-bw. in rack_arrive_at_discounted_rate()
17245 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { in rack_arrive_at_discounted_rate()
17250 if (rack->rack_hibeta == 0) { in rack_arrive_at_discounted_rate()
17254 reduced_win = window_input * rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17256 gain = rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17288 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17289 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) in pace_to_fill_cwnd()
17291 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17293 if (rack->r_ctl.rc_last_us_rtt == 0) in pace_to_fill_cwnd()
17295 if (rack->rc_pace_fill_if_rttin_range && in pace_to_fill_cwnd()
17296 (rack->r_ctl.rc_last_us_rtt >= in pace_to_fill_cwnd()
17297 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { in pace_to_fill_cwnd()
17301 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17304 * first let's calculate the b/w based on the last us-rtt in pace_to_fill_cwnd()
17307 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in pace_to_fill_cwnd()
17308 if (rack->rc_fillcw_apply_discount) { in pace_to_fill_cwnd()
17317 if (fill_bw > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17318 fill_bw = rack->rc_tp->snd_wnd; in pace_to_fill_cwnd()
17321 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in pace_to_fill_cwnd()
17323 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17324 fill_bw = rack->r_ctl.fillcw_cap; in pace_to_fill_cwnd()
17329 * We want to limit fill-cw to some multiplier in pace_to_fill_cwnd()
17343 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in pace_to_fill_cwnd()
17356 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, in pace_to_fill_cwnd()
17369 rack->r_via_fill_cw = 1; in pace_to_fill_cwnd()
17370 if (rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17371 (rack->r_ctl.crte != NULL)) { in pace_to_fill_cwnd()
17374 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in pace_to_fill_cwnd()
17379 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17388 } else if ((rack->r_ctl.crte == NULL) && in pace_to_fill_cwnd()
17389 (rack->rack_hdrw_pacing == 0) && in pace_to_fill_cwnd()
17390 (rack->rack_hdw_pace_ena) && in pace_to_fill_cwnd()
17391 rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17392 (rack->rack_attempt_hdwr_pace == 0) && in pace_to_fill_cwnd()
17393 (rack->rc_inp->inp_route.ro_nh != NULL) && in pace_to_fill_cwnd()
17394 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in pace_to_fill_cwnd()
17401 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in pace_to_fill_cwnd()
17410 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { in pace_to_fill_cwnd()
17411 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in pace_to_fill_cwnd()
17413 fill_bw = rack->r_ctl.bw_rate_cap; in pace_to_fill_cwnd()
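To make the fill-cw rate concrete (the usec-per-second scaling multiply is elided from this excerpt), a worked example with round numbers:

/*
 * fill_bw = window_bytes * 1,000,000 / rc_last_us_rtt
 * e.g. a 1,250,000-byte window over a 10,000 usec (10 ms) RTT:
 *     1250000 * 1000000 / 10000 = 125,000,000 B/s  (~1 Gb/s)
 * which the code above then clamps by fillcw_cap, the highest
 * hardware rate, and bw_rate_cap.
 */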
17441 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_delay()
17445 if (rack->rc_always_pace == 0) { in rack_get_pacing_delay()
17461 if (rack->r_ctl.rc_rack_min_rtt) in rack_get_pacing_delay()
17462 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_get_pacing_delay()
17464 srtt = max(tp->t_srtt, 1); in rack_get_pacing_delay()
17465 if (rack->r_ctl.rc_rack_largest_cwnd) in rack_get_pacing_delay()
17466 cwnd = rack->r_ctl.rc_rack_largest_cwnd; in rack_get_pacing_delay()
17468 cwnd = rack->r_ctl.cwnd_to_use; in rack_get_pacing_delay()
17488 slot -= reduce; in rack_get_pacing_delay()
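My reading of the elided arithmetic in this legacy (non-rate) path, stated with the variable names used above:

/*
 * The send is spread across one srtt at a cwnd's worth per srtt:
 *     rate = cwnd / srtt;     (bytes per unit time)
 *     slot = len / rate;      (time this send takes to drain)
 * and "reduce" is trimmed off so the pacer re-arms before the
 * window runs dry.
 */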
17494 if (rack->rc_pace_to_cwnd) { in rack_get_pacing_delay()
17498 rack->rc_ack_can_sendout_data = 1; in rack_get_pacing_delay()
17503 /* RRS: We insert a non-paced call to stats here for len */ in rack_get_pacing_delay()
17511 if ((rack->r_rr_config == 1) && rsm) { in rack_get_pacing_delay()
17512 return (rack->r_ctl.rc_min_to); in rack_get_pacing_delay()
17514 if (rack->use_fixed_rate) { in rack_get_pacing_delay()
17516 } else if ((rack->r_ctl.init_rate == 0) && in rack_get_pacing_delay()
17517 (rack->r_ctl.gp_bw == 0)) { in rack_get_pacing_delay()
17520 } else if (rack->dgp_on) { in rack_get_pacing_delay()
17526 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in rack_get_pacing_delay()
17529 if (rate_wanted > rack->rc_tp->snd_wnd) in rack_get_pacing_delay()
17530 rate_wanted = rack->rc_tp->snd_wnd; in rack_get_pacing_delay()
17533 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in rack_get_pacing_delay()
17536 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, in rack_get_pacing_delay()
17537 rack->r_ctl.cwnd_to_use, in rack_get_pacing_delay()
17539 rack->r_ctl.rc_last_us_rtt, in rack_get_pacing_delay()
17543 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { in rack_get_pacing_delay()
17552 segs = (len + segsiz - 1) / segsiz; in rack_get_pacing_delay()
17554 * We need the diff between 1514 bytes (e-mtu with e-hdr) in rack_get_pacing_delay()
17560 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); in rack_get_pacing_delay()
17561 if (rack->r_is_v6) { in rack_get_pacing_delay()
17579 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17584 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17585 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17586 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17587 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17590 if (rack->r_ctl.crte && in rack_get_pacing_delay()
17591 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17597 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17604 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17605 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17606 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17607 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17610 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { in rack_get_pacing_delay()
17615 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17616 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17617 /* Lets re-allow attempting to setup pacing */ in rack_get_pacing_delay()
17618 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17619 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17624 prev_fill = rack->r_via_fill_cw; in rack_get_pacing_delay()
17625 if ((rack->rc_pace_to_cwnd) && in rack_get_pacing_delay()
17627 (rack->dgp_on == 1) && in rack_get_pacing_delay()
17628 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17629 (rack->in_probe_rtt == 0) && in rack_get_pacing_delay()
17630 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_get_pacing_delay()
17636 /* Re-check to make sure we are not exceeding our max b/w */ in rack_get_pacing_delay()
17637 if ((rack->r_ctl.crte != NULL) && in rack_get_pacing_delay()
17638 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17644 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17651 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17652 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17653 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17654 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17655 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_get_pacing_delay()
17659 if ((rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_pacing_delay()
17660 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_pacing_delay()
17661 if ((rack->rack_hdw_pace_ena) && in rack_get_pacing_delay()
17663 (rack->rack_hdrw_pacing == 0) && in rack_get_pacing_delay()
17664 (rack->rack_attempt_hdwr_pace == 0)) { in rack_get_pacing_delay()
17669 rack->rack_attempt_hdwr_pace = 1; in rack_get_pacing_delay()
17670 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, in rack_get_pacing_delay()
17671 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17674 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17675 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17676 rack->rack_hdrw_pacing = 1; in rack_get_pacing_delay()
17677 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, in rack_get_pacing_delay()
17678 pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17679 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17681 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17683 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17687 } else if (rack->rack_hdrw_pacing && in rack_get_pacing_delay()
17688 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { in rack_get_pacing_delay()
17692 if (rack->r_up_only && in rack_get_pacing_delay()
17693 (rate_wanted < rack->r_ctl.crte->rate)) { in rack_get_pacing_delay()
17698 * previous | this-time in rack_get_pacing_delay()
17699 * A) 0 | 0 -- fill_cw not in the picture in rack_get_pacing_delay()
17700 * B) 1 | 0 -- we were doing a fill-cw but now are not in rack_get_pacing_delay()
17701 * C) 1 | 1 -- all rates from fill_cw in rack_get_pacing_delay()
17702 * D) 0 | 1 -- we were doing non-fill and now we are filling in rack_get_pacing_delay()
17709 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) in rack_get_pacing_delay()
17712 if ((rate_wanted > rack->r_ctl.crte->rate) || in rack_get_pacing_delay()
17713 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { in rack_get_pacing_delay()
17721 bw_est, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17723 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17724 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17725 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17726 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17727 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17730 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, in rack_get_pacing_delay()
17731 rack->rc_tp, in rack_get_pacing_delay()
17732 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17735 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17741 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17742 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17746 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17748 } else if (nrte != rack->r_ctl.crte) { in rack_get_pacing_delay()
17749 rack->r_ctl.crte = nrte; in rack_get_pacing_delay()
17750 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, in rack_get_pacing_delay()
17751 segsiz, pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17752 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17754 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17756 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17760 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17762 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17764 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17770 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17771 (rack->rack_hdrw_pacing == 0)) { in rack_get_pacing_delay()
17782 if (rack->rc_tp->t_srtt) in rack_get_pacing_delay()
17783 srtt = rack->rc_tp->t_srtt; in rack_get_pacing_delay()
17796 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { in rack_get_pacing_delay()
17800 * of gas or we are mis-estimating the time in rack_get_pacing_delay()
17806 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; in rack_get_pacing_delay()
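An illustration of the ENOBUF back-off above; the multiplier value here is hypothetical:

/*
 * With crte->time_between = 50 usec per packet at the hardware rate
 * and a boost multiplier of 2, the pacing slot is padded by ~100
 * usec, giving the NIC queue room to drain before the next burst.
 */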
17822 if (tp->t_state < TCPS_ESTABLISHED) { in rack_start_gp_measurement()
17829 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_start_gp_measurement()
17836 if (sbavail(&tptosocket(tp)->so_snd) < in rack_start_gp_measurement()
17843 tp->t_flags |= TF_GPUTINPROG; in rack_start_gp_measurement()
17844 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17845 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_start_gp_measurement()
17846 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_start_gp_measurement()
17847 tp->gput_seq = startseq; in rack_start_gp_measurement()
17848 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17849 if (rack->in_probe_rtt) in rack_start_gp_measurement()
17850 rack->measure_saw_probe_rtt = 1; in rack_start_gp_measurement()
17851 else if ((rack->measure_saw_probe_rtt) && in rack_start_gp_measurement()
17852 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_start_gp_measurement()
17853 rack->measure_saw_probe_rtt = 0; in rack_start_gp_measurement()
17854 if (rack->rc_gp_filled) in rack_start_gp_measurement()
17855 tp->gput_ts = rack->r_ctl.last_cumack_advance; in rack_start_gp_measurement()
17860 tp->gput_ts = tcp_get_usecs(&tv); in rack_start_gp_measurement()
17861 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17867 * initial-window's worth of data to in rack_start_gp_measurement()
17871 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { in rack_start_gp_measurement()
17872 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17873 tp->gput_ack = startseq + max(rc_init_window(rack), in rack_start_gp_measurement()
17876 tp->gput_seq, in rack_start_gp_measurement()
17877 tp->gput_ack, in rack_start_gp_measurement()
17879 tp->gput_ts, in rack_start_gp_measurement()
17880 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17884 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17893 if (rack->r_ctl.rc_app_limited_cnt == 0) { in rack_start_gp_measurement()
17896 * the tp->gput_ts is correctly set based on in rack_start_gp_measurement()
17900 my_rsm = tqhash_min(rack->r_ctl.tqh); in rack_start_gp_measurement()
17902 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17907 if (rack->r_ctl.rc_first_appl == NULL) { in rack_start_gp_measurement()
17922 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17926 * after that (after the app-limited). in rack_start_gp_measurement()
17928 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); in rack_start_gp_measurement()
17930 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) in rack_start_gp_measurement()
17932 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17935 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); in rack_start_gp_measurement()
17940 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17943 * the last is the app-limited one. in rack_start_gp_measurement()
17948 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
17950 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
17956 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
17957 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17962 tp->gput_seq = my_rsm->r_end; in rack_start_gp_measurement()
17967 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17978 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17979 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17980 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17981 if ((rack->r_ctl.cleared_app_ack == 1) && in rack_start_gp_measurement()
17982 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { in rack_start_gp_measurement()
17988 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17989 rack->r_ctl.cleared_app_ack = 0; in rack_start_gp_measurement()
17992 tp->gput_seq, in rack_start_gp_measurement()
17993 tp->gput_ack, in rack_start_gp_measurement()
17995 tp->gput_ts, in rack_start_gp_measurement()
17996 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18001 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
18008 * idle or if this is the first send. Let's in rack_start_gp_measurement()
18013 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
18014 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
18015 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
18017 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); in rack_start_gp_measurement()
18019 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
18020 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
18025 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
18026 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
18028 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { in rack_start_gp_measurement()
18030 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
18034 * TSNH unless we have some send-map limit, in rack_start_gp_measurement()
18041 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
18045 tp->gput_seq, in rack_start_gp_measurement()
18046 tp->gput_ack, in rack_start_gp_measurement()
18048 tp->gput_ts, in rack_start_gp_measurement()
18049 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18051 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
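Once the markers armed here are crossed by the cumulative ack, the goodput sample reduces to bytes over elapsed time; a rough worked example (numbers illustrative):

/*
 *     gp_bw ~= (ack_seq - gput_seq) / (ack_time - gput_ts)
 * e.g. 1,500,000 bytes acked across 100,000 usec is 15,000,000 B/s
 * (15 MB/s), which then feeds the goodput estimate used for pacing.
 */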
18061 if (tp->snd_wnd > cwnd_to_use) in rack_what_can_we_send()
18064 sendwin = tp->snd_wnd; in rack_what_can_we_send()
18065 if (ctf_outstanding(tp) >= tp->snd_wnd) { in rack_what_can_we_send()
18066 /* We never want to go over our peer's rcv-window */ in rack_what_can_we_send()
18071 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_what_can_we_send()
18076 * >= tp->snd_wnd). in rack_what_can_we_send()
18080 len = sendwin - flight; in rack_what_can_we_send()
18081 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { in rack_what_can_we_send()
18083 len = tp->snd_wnd - ctf_outstanding(tp); in rack_what_can_we_send()
18090 len = avail - sb_offset; in rack_what_can_we_send()
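Condensing the clamps applied above into one expression (an equivalent reading, not the literal code):

/*
 * len = min(sendwin - flight,                  cwnd/rwnd headroom
 *           tp->snd_wnd - ctf_outstanding(tp), peer-window headroom
 *           avail - sb_offset);                data actually queued
 * with len forced to 0 whenever outstanding already fills snd_wnd.
 */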
18101 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_fsb()
18106 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_fsb()
18111 log.u_bbr.flex5 = tp->rcv_numsacks; in rack_log_fsb()
18112 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_log_fsb()
18114 log.u_bbr.flex8 = rack->r_fsb_inited; in rack_log_fsb()
18115 log.u_bbr.applimited = rack->r_fast_output; in rack_log_fsb()
18123 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_fsb()
18124 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, in rack_log_fsb()
18154 if (hw_tls && (m->m_flags & M_EXTPG)) in rack_fo_base_copym()
18155 tls = m->m_epg_tls; in rack_fo_base_copym()
18169 if (m->m_flags & M_EXTPG) in rack_fo_base_copym()
18170 ntls = m->m_epg_tls; in rack_fo_base_copym()
18186 mlen = min(len, m->m_len - off); in rack_fo_base_copym()
18196 if (m->m_flags & M_EXTPG) { in rack_fo_base_copym()
18217 mlen = (seglimit - frags - 1) * fragsize; in rack_fo_base_copym()
18224 seglimit -= frags; in rack_fo_base_copym()
18228 n = m_get(M_NOWAIT, m->m_type); in rack_fo_base_copym()
18232 n->m_len = mlen; in rack_fo_base_copym()
18234 len_cp += n->m_len; in rack_fo_base_copym()
18235 if (m->m_flags & (M_EXT | M_EXTPG)) { in rack_fo_base_copym()
18236 n->m_data = m->m_data + off; in rack_fo_base_copym()
18240 (u_int)n->m_len); in rack_fo_base_copym()
18242 len -= n->m_len; in rack_fo_base_copym()
18244 m = m->m_next; in rack_fo_base_copym()
18245 np = &n->m_next; in rack_fo_base_copym()
18246 if (len || (soff == smb->m_len)) { in rack_fo_base_copym()
18258 fsb->m = smb; in rack_fo_base_copym()
18259 fsb->off = soff; in rack_fo_base_copym()
18267 fsb->o_m_len = smb->m_len; in rack_fo_base_copym()
18268 fsb->o_t_len = M_TRAILINGROOM(smb); in rack_fo_base_copym()
18278 fsb->o_m_len = 0; in rack_fo_base_copym()
18279 fsb->o_t_len = 0; in rack_fo_base_copym()
18301 m = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18302 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { in rack_fo_m_copym()
18309 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), in rack_fo_m_copym()
18314 rack->r_ctl.fsb.o_t_len, in rack_fo_m_copym()
18315 rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18316 m->m_len)); in rack_fo_m_copym()
18317 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); in rack_fo_m_copym()
18318 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); in rack_fo_m_copym()
18320 if (m->m_len < rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18325 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), in rack_fo_m_copym()
18327 m, m->m_len, in rack_fo_m_copym()
18328 rack, rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18329 rack->r_ctl.fsb.off)); in rack_fo_m_copym()
18331 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) in rack_fo_m_copym()
18332 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); in rack_fo_m_copym()
18334 rack->r_ctl.fsb.off = 0; in rack_fo_m_copym()
18335 rack->r_ctl.fsb.o_m_len = m->m_len; in rack_fo_m_copym()
18337 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18342 soff = rack->r_ctl.fsb.off; in rack_fo_m_copym()
18345 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", in rack_fo_m_copym()
18347 rack, *plen, m, m->m_len)); in rack_fo_m_copym()
18350 *s_mb = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18352 &rack->r_ctl.fsb, in rack_fo_m_copym()
18353 seglimit, segsize, rack->r_ctl.fsb.hw_tls); in rack_fo_m_copym()
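Why the trailing-room comparison above detects appends; this is my reading of the invariant the KASSERTs enforce:

/*
 * sbappend() into the same tail mbuf consumes trailing room, so
 *     appended = fsb.o_t_len - M_TRAILINGROOM(m);
 * must equal the growth of m->m_len. Folding it into o_m_len (and
 * re-snapshotting o_t_len) keeps the cached fsb.off valid without
 * re-walking the socket buffer.
 */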
18367 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_log_queue_level()
18368 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_log_queue_level()
18371 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_queue_level()
18374 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_log_queue_level()
18375 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_log_queue_level()
18376 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_log_queue_level()
18380 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_log_queue_level()
18382 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_queue_level()
18398 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_check_queue_level()
18404 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_check_queue_level()
18425 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); in rack_check_queue_level()
18430 /* TSNH -- KASSERT? */ in rack_check_queue_level()
18436 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_check_queue_level()
18439 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_check_queue_level()
18440 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_check_queue_level()
18441 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_check_queue_level()
18445 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_check_queue_level()
18448 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_check_queue_level()
18491 if (rack->r_is_v6) { in rack_fast_rsm_output()
18492 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18497 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18500 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_rsm_output()
18505 rsm->r_flags |= RACK_TLP; in rack_fast_rsm_output()
18508 rsm->r_flags &= ~RACK_TLP; in rack_fast_rsm_output()
18510 startseq = rsm->r_start; in rack_fast_rsm_output()
18511 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_rsm_output()
18512 inp = rack->rc_inp; in rack_fast_rsm_output()
18514 flags = tcp_outflags[tp->t_state]; in rack_fast_rsm_output()
18518 if (rsm->r_flags & RACK_HAS_FIN) { in rack_fast_rsm_output()
18526 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_rsm_output()
18527 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_rsm_output()
18528 to.to_tsecr = tp->ts_recent; in rack_fast_rsm_output()
18532 /* TCP-MD5 (RFC2385). */ in rack_fast_rsm_output()
18533 if (tp->t_flags & TF_SIGNATURE) in rack_fast_rsm_output()
18538 udp = rack->r_ctl.fsb.udp; in rack_fast_rsm_output()
18541 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_rsm_output()
18542 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18543 else if (rack->rc_user_set_max_segs) in rack_fast_rsm_output()
18544 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_rsm_output()
18547 if ((tp->t_flags & TF_TSO) && in rack_fast_rsm_output()
18550 (tp->t_port == 0)) in rack_fast_rsm_output()
18560 m->m_data += max_linkhdr; in rack_fast_rsm_output()
18561 m->m_len = hdrlen; in rack_fast_rsm_output()
18562 th = rack->r_ctl.fsb.th; in rack_fast_rsm_output()
18571 if_hw_tsomax = tp->t_tsomax; in rack_fast_rsm_output()
18572 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_rsm_output()
18573 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_rsm_output()
18580 max_len = (if_hw_tsomax - hdrlen - in rack_fast_rsm_output()
18602 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_rsm_output()
18605 th->th_seq = htonl(rsm->r_start); in rack_fast_rsm_output()
18606 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_rsm_output()
18614 if ((rsm->r_flags & RACK_HAD_PUSH) && in rack_fast_rsm_output()
18615 (len == (rsm->r_end - rsm->r_start))) in rack_fast_rsm_output()
18617 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_rsm_output()
18618 if (th->th_win == 0) { in rack_fast_rsm_output()
18619 tp->t_sndzerowin++; in rack_fast_rsm_output()
18620 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_rsm_output()
18622 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_rsm_output()
18623 if (rsm->r_flags & RACK_TLP) { in rack_fast_rsm_output()
18631 tp->t_sndrexmitpack++; in rack_fast_rsm_output()
18636 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_fast_rsm_output()
18639 if (rsm->m == NULL) in rack_fast_rsm_output()
18641 if (rsm->m && in rack_fast_rsm_output()
18642 ((rsm->orig_m_len != rsm->m->m_len) || in rack_fast_rsm_output()
18643 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_fast_rsm_output()
18647 …m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxse… in rack_fast_rsm_output()
18657 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_rsm_output()
18661 if (rack->r_is_v6) in rack_fast_rsm_output()
18662 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_rsm_output()
18664 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_rsm_output()
18665 udp->uh_ulen = htons(ulen); in rack_fast_rsm_output()
18667 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_rsm_output()
18668 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_rsm_output()
18669 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_rsm_output()
18671 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_rsm_output()
18672 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_rsm_output()
18673 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_rsm_output()
18675 if (rack->r_is_v6) { in rack_fast_rsm_output()
18676 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_rsm_output()
18677 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_rsm_output()
18682 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_rsm_output()
18683 ip->ip_tos |= ect; in rack_fast_rsm_output()
18686 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18694 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */ in rack_fast_rsm_output()
18704 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_rsm_output()
18714 if (rack->r_is_v6) { in rack_fast_rsm_output()
18715 if (tp->t_port) { in rack_fast_rsm_output()
18716 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_rsm_output()
18717 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18718 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_rsm_output()
18719 th->th_sum = htons(0); in rack_fast_rsm_output()
18722 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_rsm_output()
18723 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18724 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_rsm_output()
18735 if (tp->t_port) { in rack_fast_rsm_output()
18736 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_rsm_output()
18737 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18738 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18739 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_rsm_output()
18740 th->th_sum = htons(0); in rack_fast_rsm_output()
18743 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_rsm_output()
18744 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18745 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18746 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_rsm_output()
18750 KASSERT(ip->ip_v == IPVERSION, in rack_fast_rsm_output()
18751 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_rsm_output()
18758 * via either fast-path). in rack_fast_rsm_output()
18762 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_rsm_output()
18763 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_rsm_output()
18766 if (rack->r_is_v6) { in rack_fast_rsm_output()
18767 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18768 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_rsm_output()
18769 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_rsm_output()
18770 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18772 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18780 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_rsm_output()
18781 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18782 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_rsm_output()
18783 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18784 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_rsm_output()
18785 ip->ip_off |= htons(IP_DF); in rack_fast_rsm_output()
18788 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18794 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18797 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_rsm_output()
18799 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18802 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18807 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18808 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18811 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_rsm_output()
18813 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_rsm_output()
18815 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18818 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_fast_rsm_output()
18819 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18821 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_fast_rsm_output()
18824 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18825 if (rack->rack_no_prr) in rack_fast_rsm_output()
18828 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18829 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18830 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18833 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18834 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18836 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18843 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_rsm_output()
18845 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18846 if (rsm->r_rtr_cnt > 0) { in rack_fast_rsm_output()
18851 log.u_bbr.flex5 = rsm->r_fas; in rack_fast_rsm_output()
18852 log.u_bbr.bbr_substate = rsm->r_bas; in rack_fast_rsm_output()
18859 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_rsm_output()
18861 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18864 log.u_bbr.delRate = rsm->r_flags; in rack_fast_rsm_output()
18866 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18874 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18879 if (rack->r_is_v6) { in rack_fast_rsm_output()
18880 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_rsm_output()
18881 &inp->inp_route6, in rack_fast_rsm_output()
18889 &inp->inp_route, in rack_fast_rsm_output()
18895 lgb->tlb_errno = error; in rack_fast_rsm_output()
18899 tp->snd_nxt = tp->snd_max; in rack_fast_rsm_output()
18902 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18903 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18904 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18905 rack->r_early = 0; in rack_fast_rsm_output()
18906 rack->r_late = 0; in rack_fast_rsm_output()
18907 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18909 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), in rack_fast_rsm_output()
18910 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); in rack_fast_rsm_output()
18912 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18913 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18917 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); in rack_fast_rsm_output()
18919 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18920 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18921 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18922 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18924 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18925 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18927 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
18929 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_rsm_output()
18930 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18931 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_fast_rsm_output()
18932 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18938 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_rsm_output()
18942 if (tp->t_rtttime == 0) { in rack_fast_rsm_output()
18943 tp->t_rtttime = ticks; in rack_fast_rsm_output()
18944 tp->t_rtseq = startseq; in rack_fast_rsm_output()
18949 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18950 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18951 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18954 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18955 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18956 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18957 rack->rc_enobuf++; in rack_fast_rsm_output()
18960 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18962 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
18971 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
18972 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_rsm_output()
18973 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_rsm_output()
18974 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
18982 return (-1); in rack_fast_rsm_output()
18993 * delay (e.g. trans-continental/oceanic links). Setting the in rack_sndbuf_autoscale()
19015 tp = rack->rc_tp; in rack_sndbuf_autoscale()
19016 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
19017 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
19018 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { in rack_sndbuf_autoscale()
19019 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && in rack_sndbuf_autoscale()
19020 sbused(&so->so_snd) >= in rack_sndbuf_autoscale()
19021 (so->so_snd.sb_hiwat / 8 * 7) && in rack_sndbuf_autoscale()
19022 sbused(&so->so_snd) < V_tcp_autosndbuf_max && in rack_sndbuf_autoscale()
19023 sendwin >= (sbused(&so->so_snd) - in rack_sndbuf_autoscale()
19024 (tp->snd_max - tp->snd_una))) { in rack_sndbuf_autoscale()
19026 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; in rack_sndbuf_autoscale()
19031 scaleup += so->so_snd.sb_hiwat; in rack_sndbuf_autoscale()
19035 so->so_snd.sb_flags &= ~SB_AUTOSIZE; in rack_sndbuf_autoscale()
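The growth step above in round numbers; the increment percentage is hypothetical since the sysctl default is not shown in this excerpt:

/*
 * scaleup = (rack_autosndbuf_inc * sb_hiwat) / 100;
 * e.g. a 5% increment on a 65,536-byte sb_hiwat asks for ~3.2 KB
 * more (rounded and capped by the autosndbuf maximum), and
 * SB_AUTOSIZE is cleared once the reservation is refused.
 */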
19050 * the max-burst). We know how much to send and all the info we in rack_fast_output()
19080 if (rack->r_is_v6) { in rack_fast_output()
19081 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19087 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19091 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_output()
19095 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
19096 startseq = tp->snd_max; in rack_fast_output()
19097 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
19098 inp = rack->rc_inp; in rack_fast_output()
19099 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19101 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
19102 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_output()
19103 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_output()
19104 to.to_tsecr = tp->ts_recent; in rack_fast_output()
19108 /* TCP-MD5 (RFC2385). */ in rack_fast_output()
19109 if (tp->t_flags & TF_SIGNATURE) in rack_fast_output()
19114 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
19117 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
19118 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19119 else if (rack->rc_user_set_max_segs) in rack_fast_output()
19120 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
19123 if ((tp->t_flags & TF_TSO) && in rack_fast_output()
19126 (tp->t_port == 0)) in rack_fast_output()
19137 m->m_data += max_linkhdr; in rack_fast_output()
19138 m->m_len = hdrlen; in rack_fast_output()
19139 th = rack->r_ctl.fsb.th; in rack_fast_output()
19148 if_hw_tsomax = tp->t_tsomax; in rack_fast_output()
19149 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_output()
19150 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_output()
19157 max_len = (if_hw_tsomax - hdrlen - in rack_fast_output()
19179 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_output()
19182 sb_offset = tp->snd_max - tp->snd_una; in rack_fast_output()
19183 th->th_seq = htonl(tp->snd_max); in rack_fast_output()
19184 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_output()
19185 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
19186 if (th->th_win == 0) { in rack_fast_output()
19187 tp->t_sndzerowin++; in rack_fast_output()
19188 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_output()
19190 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_output()
19191 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_fast_output()
19195 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_fast_output()
19198 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
19202 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19213 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19214 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19218 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_output()
19222 if (rack->r_is_v6) in rack_fast_output()
19223 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_output()
19225 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_output()
19226 udp->uh_ulen = htons(ulen); in rack_fast_output()
19228 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_output()
19229 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_output()
19230 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_output()
19232 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_output()
19233 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_output()
19234 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_output()
19236 if (rack->r_is_v6) { in rack_fast_output()
19237 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_output()
19238 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_output()
19244 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_output()
19245 ip->ip_tos |= ect; in rack_fast_output()
19250 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */ in rack_fast_output()
19260 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_output()
19270 if (rack->r_is_v6) { in rack_fast_output()
19271 if (tp->t_port) { in rack_fast_output()
19272 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_output()
19273 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19274 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_output()
19275 th->th_sum = htons(0); in rack_fast_output()
19278 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_output()
19279 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19280 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_output()
19291 if (tp->t_port) { in rack_fast_output()
19292 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_output()
19293 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19294 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19295 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_output()
19296 th->th_sum = htons(0); in rack_fast_output()
19299 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_output()
19300 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19301 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19302 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_output()
19306 KASSERT(ip->ip_v == IPVERSION, in rack_fast_output()
19307 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_output()
19314 * via either fast-path). in rack_fast_output()
19318 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_output()
19319 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_output()
19322 if (rack->r_is_v6) { in rack_fast_output()
19323 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19324 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_output()
19325 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_output()
19326 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19328 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19336 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_output()
19337 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19338 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_output()
19339 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19340 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_output()
19341 ip->ip_off |= htons(IP_DF); in rack_fast_output()
19344 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19348 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_output()
19350 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19353 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19357 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19358 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19361 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_output()
19363 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_output()
19365 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19369 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19373 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19374 if (rack->rack_no_prr) in rack_fast_output()
19377 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19378 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19379 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19382 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19383 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19385 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19389 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_output()
19391 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19393 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19396 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19401 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_output()
19407 if (rack->r_is_v6) { in rack_fast_output()
19408 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_output()
19409 &inp->inp_route6, in rack_fast_output()
19419 &inp->inp_route, in rack_fast_output()
19424 lgb->tlb_errno = error; in rack_fast_output()
19431 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19432 rack->rc_hw_nobuf = 0; in rack_fast_output()
19433 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19434 rack->r_early = 0; in rack_fast_output()
19435 rack->r_late = 0; in rack_fast_output()
19436 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19438 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19440 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); in rack_fast_output()
19441 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19442 rack->lt_bw_up = 1; in rack_fast_output()
19444 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19452 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19453 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19455 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19456 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19457 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz);
if (tp->snd_una == tp->snd_max) {
rack->r_ctl.rc_tlp_rxt_last_time = cts;
tp->t_acktime = ticks;
tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
rack->forced_ack = 0; /* If we send something zap the FA flag */
if ((tp->t_flags & TF_GPUTINPROG) == 0)
rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
tp->snd_max += len;
tp->snd_nxt = tp->snd_max;
if (rack->rc_new_rnd_needed) {
rack_new_round_starts(tp, rack, tp->snd_max);
counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
if (len <= rack->r_ctl.fsb.left_to_send)
rack->r_ctl.fsb.left_to_send -= len;
rack->r_ctl.fsb.left_to_send = 0;
if (rack->r_ctl.fsb.left_to_send < segsiz) {
rack->r_fast_output = 0;
rack->r_ctl.fsb.left_to_send = 0;
SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket);
SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket);
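/*
 * Arm a classic RTT sample if none is in flight: t_rtttime records
 * when startseq went out, and the sample completes when t_rtseq is
 * acked.
 */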
if (tp->t_rtttime == 0) {
tp->t_rtttime = ticks;
tp->t_rtseq = startseq;
if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
max_val -= len;
th = rack->r_ctl.fsb.th;
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
rack->r_fast_output = 0;
return (-1);
/* rack_setup_fast_output() */
rack->r_fast_output = 1;
rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
rack->r_ctl.fsb.tcp_flags = flags;
rack->r_ctl.fsb.left_to_send = orig_len - len;
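/*
 * left_to_send is what remains of this send-buffer block; the checks
 * below give up on fast output when less than a full pacing burst
 * (pace_max_seg) remains, and otherwise keep it a multiple of the
 * burst size via rounddown().
 */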
if (rack->r_ctl.fsb.left_to_send < pace_max_seg) {
rack->r_fast_output = 0;
rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg);
rack->r_ctl.fsb.hw_tls = 1;
rack->r_ctl.fsb.hw_tls = 0;
KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
(tp->snd_max - tp->snd_una)));
if (rack->r_ctl.fsb.left_to_send < segsiz)
rack->r_fast_output = 0;
if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
rack->r_ctl.fsb.rfo_apply_push = 1;
rack->r_ctl.fsb.rfo_apply_push = 0;
/* rack_get_hpts_pacing_min_for_bw() */
maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC);
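/*
 * rack_check_collapsed(): walk forward from last_collapse_point through
 * segments marked RACK_RWND_COLLAPSED, skipping ones already SACKed and
 * stopping once past high_collapse_point; a segment is retransmitted
 * only after the RACK reorder threshold (rack_calc_thresh_rack()) has
 * expired since its last send.
 */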
rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
rack->r_collapse_point_valid = 0;
if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
if (rsm->r_flags & RACK_ACKED) {
rack->r_ctl.last_collapse_point = rsm->r_end;
if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
rack->r_ctl.high_collapse_point)) {
rack->r_collapse_point_valid = 0;
thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1);
if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
rack_log_collapse(rack, rsm->r_start,
(cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
thresh, __LINE__, 6, rsm->r_flags, rsm);
rack_log_collapse(rack, rsm->r_start,
(cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
thresh, __LINE__, 7, rsm->r_flags, rsm);
/* rack_validate_sizes() */
if ((rack->full_size_rxt == 0) &&
(rack->shape_rxt_to_pacing_min == 0) &&
} else if (rack->shape_rxt_to_pacing_min &&
rack->gp_ready) {
/* rack_output() */
rack = (struct tcp_rack *)tp->t_fb_ptr;
hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
tp->t_flags2 &= ~TF2_HPTS_CALLS;
if (tp->t_flags & TF_TOE) {
if (rack->rack_deferred_inited == 0) {
if ((tp->t_flags & TF_FASTOPEN) &&
(tp->t_state == TCPS_SYN_RECEIVED) &&
SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
(rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
if (rack->r_state) {
isipv6 = rack->r_is_v6;
isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
tcp_in_hpts(rack->rc_tp)) {
if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
delayed = cts - rack->r_ctl.rc_last_output_to;
if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
if (rack->rc_in_persist) {
if (tcp_in_hpts(rack->rc_tp) == 0) {
if ((rack->rc_ack_required == 1) &&
(rack->r_timer_override == 0)) {
if (tcp_in_hpts(rack->rc_tp) == 0) {
if ((rack->r_timer_override) ||
(rack->rc_ack_can_sendout_data) ||
(tp->t_state < TCPS_ESTABLISHED)) {
rack->rc_ack_can_sendout_data = 0;
if (tcp_in_hpts(rack->rc_tp))
tcp_hpts_remove(rack->rc_tp);
} else if (tcp_in_hpts(rack->rc_tp)) {
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
tp->tcp_cnt_counters[SND_BLOCKED]++;
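/*
 * Pacer punctuality accounting: a wakeup after the scheduled
 * rc_last_output_to accumulates into rc_agg_delayed (we are "late"),
 * a premature wakeup accumulates into rc_agg_early, so later pacing
 * slots can be adjusted; non-paced connections keep both at zero.
 */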
if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
early = rack->r_ctl.rc_last_output_to - cts;
if (delayed && (rack->rc_always_pace == 1)) {
rack->r_ctl.rc_agg_delayed += delayed;
rack->r_late = 1;
} else if (early && (rack->rc_always_pace == 1)) {
rack->r_ctl.rc_agg_early += early;
rack->r_early = 1;
} else if (rack->rc_always_pace == 0) {
/* Non-paced we are not late */
rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0;
rack->r_early = rack->r_late = 0;
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
rack->r_wanted_output = 0;
rack->r_timer_override = 0;
if ((tp->t_state != rack->r_state) &&
TCPS_HAVEESTABLISHED(tp->t_state)) {
if ((rack->r_fast_output) &&
(tp->rcv_numsacks == 0)) {
inp = rack->rc_inp;
so = inp->inp_socket;
sb = &so->so_snd;
inp = rack->rc_inp;
if ((tp->t_flags & TF_FASTOPEN) &&
((tp->t_state == TCPS_SYN_RECEIVED) ||
(tp->t_state == TCPS_SYN_SENT)) &&
SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
(tp->t_rxtshift == 0)) { /* not a retransmit */
idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
if (tp->t_idle_reduce) {
if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
tp->t_flags &= ~TF_LASTIDLE;
if (tp->t_flags & TF_MORETOCOME) {
tp->t_flags |= TF_LASTIDLE;
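/*
 * Returning from an idle period: tot_idle measures how long the
 * connection sat with everything acked. When not in probe-rtt, the
 * probe-rtt reference clocks are reset so the idle stretch does not
 * count toward the probe-rtt schedule.
 */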
if ((tp->snd_una == tp->snd_max) &&
rack->r_ctl.rc_went_idle_time &&
(cts > rack->r_ctl.rc_went_idle_time)) {
tot_idle = (cts - rack->r_ctl.rc_went_idle_time);
if (rack->in_probe_rtt == 0) {
rack->r_ctl.rc_lower_rtt_us_cts = cts;
rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
(rack->r_ctl.fsb.tcp_ip_hdr) &&
(rack->r_fsb_inited == 0) &&
(rack->r_state != TCPS_CLOSED))
rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]);
if (rack->rc_sendvars_notset == 1) {
rack->rc_sendvars_notset = 0;
* Make sure any TCP timers (keep-alive) are not running.
if ((rack->rack_no_prr == 1) &&
(rack->rc_always_pace == 0)) {
* no-pacing enabled and prr is turned off that
rack->rack_no_prr = 0;
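/*
 * PCM scheduling: idle time is converted into round-trip equivalents
 * (tot_idle / srtt) and added to the rounds elapsed since the last
 * measurement; once rack_pcm_every_n_rounds worth have passed,
 * pcm_needed is set and a measurement burst of pcm_max_seg is arranged
 * below.
 */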
if ((rack->pcm_enabled == 1) &&
(rack->pcm_needed == 0) &&
if (tp->t_srtt)
rtts_idle = tot_idle / tp->t_srtt;
rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round;
rack->r_ctl.pcm_idle_rounds += rtts_idle;
if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) {
rack->pcm_needed = 1;
rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round);
segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
if (rack->r_ctl.rc_pace_max_segs == 0)
pace_max_seg = rack->rc_user_set_max_segs * segsiz;
pace_max_seg = rack->r_ctl.rc_pace_max_segs;
if (TCPS_HAVEESTABLISHED(tp->t_state) &&
(rack->r_ctl.pcm_max_seg == 0)) {
rack->r_ctl.pcm_max_seg = rc_init_window(rack);
if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) {
rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10;
if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) {
if (tp->snd_wnd > ctf_outstanding(tp))
rw_avail = tp->snd_wnd - ctf_outstanding(tp);
if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked))
cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
if ((cwa >= rack->r_ctl.pcm_max_seg) &&
(rw_avail > rack->r_ctl.pcm_max_seg)) {
pace_max_seg = rack->r_ctl.pcm_max_seg;
rack->r_fast_output = 0;
cwa, rack->r_ctl.pcm_max_seg, rw_avail);
sb_offset = tp->snd_max - tp->snd_una;
cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
flags = tcp_outflags[tp->t_state];
while (rack->rc_free_cnt < rack_free_cache) {
so = inp->inp_socket;
sb = &so->so_snd;
TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
rack->rc_free_cnt++;
SOCK_SENDBUF_LOCK(inp->inp_socket);
so = inp->inp_socket;
sb = &so->so_snd;
if (rack->r_ctl.rc_resend) {
rsm = rack->r_ctl.rc_resend;
rack->r_ctl.rc_resend = NULL;
len = rsm->r_end - rsm->r_start;
KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
rsm->r_start, tp->snd_una, tp, rack, rsm));
sb_offset = rsm->r_start - tp->snd_una;
} else if (rack->r_collapse_point_valid &&
tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
rack->r_ctl.last_collapse_point = rsm->r_end;
if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
rack->r_ctl.high_collapse_point))
rack->r_collapse_point_valid = 0;
len = rsm->r_end - rsm->r_start;
sb_offset = rsm->r_start - tp->snd_una;
if ((!IN_FASTRECOVERY(tp->t_flags)) &&
((rsm->r_flags & RACK_MUST_RXT) == 0) &&
((tp->t_flags & TF_WASFRECOVERY) == 0)) {
/* Enter recovery if not induced by a time-out */
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
if (SEQ_LT(rsm->r_start, tp->snd_una)) {
tp, rack, rsm, rsm->r_start, tp->snd_una);
len = rsm->r_end - rsm->r_start;
KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
rsm->r_start, tp->snd_una, tp, rack, rsm));
sb_offset = rsm->r_start - tp->snd_una;
} else if (rack->r_ctl.rc_tlpsend) {
rsm = rack->r_ctl.rc_tlpsend;
rsm->r_flags |= RACK_TLP;
rack->r_ctl.rc_tlpsend = NULL;
tlen = rsm->r_end - rsm->r_start;
KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
rsm->r_start, tp->snd_una, tp, rack, rsm));
sb_offset = rsm->r_start - tp->snd_una;
cwin = min(tp->snd_wnd, tlen);
if (rack->r_must_retran &&
(SEQ_GT(tp->snd_max, tp->snd_una)) &&
* a) This is a non-sack connection, we had a time-out
sendwin = min(tp->snd_wnd, tp->snd_cwnd);
flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
so = inp->inp_socket;
sb = &so->so_snd;
* outstanding/not-acked should be marked.
rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
rack->r_must_retran = 0;
rack->r_ctl.rc_out_at_rto = 0;
so = inp->inp_socket;
sb = &so->so_snd;
if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
rack->r_must_retran = 0;
rack->r_ctl.rc_out_at_rto = 0;
len = rsm->r_end - rsm->r_start;
sb_offset = rsm->r_start - tp->snd_una;
if ((rack->full_size_rxt == 0) &&
(rack->shape_rxt_to_pacing_min == 0) &&
else if (rack->shape_rxt_to_pacing_min &&
rack->gp_ready) {
(rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
if (!rack->alloc_limit_reported) {
rack->alloc_limit_reported = 1;
so = inp->inp_socket;
sb = &so->so_snd;
if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
len--;
if (rsm && rack->r_fsb_inited &&
((rsm->r_flags & RACK_HAS_FIN) == 0)) {
so = inp->inp_socket;
sb = &so->so_snd;
if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
rack->rack_enable_scwnd) {
if (rack->gp_ready &&
(rack->rack_attempted_scwnd == 0) &&
(rack->r_ctl.rc_scw == NULL) &&
tp->t_lib) {
rack->rack_attempted_scwnd = 1;
rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
&rack->r_ctl.rc_scw_index,
if (rack->r_ctl.rc_scw &&
(rack->rack_scwnd_is_idle == 1) &&
sbavail(&so->so_snd)) {
tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
rack->rack_scwnd_is_idle = 0;
if (rack->r_ctl.rc_scw) {
rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
rack->r_ctl.rc_scw_index,
tp->snd_cwnd, tp->snd_wnd, segsiz);
if (tp->t_flags & TF_NEEDFIN)
if (tp->t_flags & TF_NEEDSYN)
end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
(TCPS_HAVEESTABLISHED(tp->t_state) ||
(tp->t_flags & TF_FASTOPEN))) {
if (SEQ_GT(tp->snd_max, tp->snd_una) && avail)
sb_offset = tp->snd_max - tp->snd_una;
if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
if (rack->r_ctl.rc_tlp_new_data) {
if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
if (tp->snd_wnd > sb_offset)
len = tp->snd_wnd - sb_offset;
len = rack->r_ctl.rc_tlp_new_data;
rack->r_ctl.rc_tlp_new_data = 0;
if ((rack->r_ctl.crte == NULL) &&
IN_FASTRECOVERY(tp->t_flags) &&
(rack->full_size_rxt == 0) &&
(rack->shape_rxt_to_pacing_min == 0) &&
} else if (rack->shape_rxt_to_pacing_min &&
rack->gp_ready) {
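/*
 * PRR clamp: new data sent during recovery is limited both by the
 * peer's window (outstanding + credit must stay within snd_wnd) and by
 * the PRR send count rc_prr_sndcnt.
 */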
outstanding = tp->snd_max - tp->snd_una;
if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
if (tp->snd_wnd > outstanding) {
len = tp->snd_wnd - outstanding;
len = avail - sb_offset;
len = avail - sb_offset;
if (len > rack->r_ctl.rc_prr_sndcnt) {
len = rack->r_ctl.rc_prr_sndcnt;
* let us send a lot as well :-)
if (rack->r_ctl.rc_prr_sendalot == 0) {
leftinsb = sbavail(sb) - sb_offset;
} else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
!(tp->t_flags & TF_FASTOPEN)) {
* SYN-SENT state and if segment contains data and if we don't know
SEQ_GT(tp->snd_max, tp->snd_una) &&
(tp->t_rxtshift == 0))) {
if ((tp->t_flags & TF_FASTOPEN) &&
(tp->t_state == TCPS_SYN_RECEIVED))
if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
* - When retransmitting SYN|ACK on a passively-created socket
* - When retransmitting SYN on an actively created socket
* - When sending a zero-length cookie (cookie request) on an
* - When the socket is in the CLOSED state (RST is being sent)
if ((tp->t_flags & TF_FASTOPEN) &&
(((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
((tp->t_state == TCPS_SYN_SENT) &&
(tp->t_tfo_client_cookie_len == 0)) ||
/* Without fast-open there should never be data sent on a SYN */
if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
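/*
 * A closed (zero) send window with nothing in flight means window
 * probing is the persist timer's job, so enter persist here rather
 * than leaving a retransmit timer armed.
 */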
if ((tp->snd_wnd == 0) &&
(TCPS_HAVEESTABLISHED(tp->t_state)) &&
(tp->snd_una == tp->snd_max) &&
rack_enter_persist(tp, rack, cts, tp->snd_una);
if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
(TCPS_HAVEESTABLISHED(tp->t_state)) &&
(len < (int)(sbavail(sb) - sb_offset))) {
if (tp->snd_max == tp->snd_una) {
rack_enter_persist(tp, rack, cts, tp->snd_una);
(ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
(len < (int)(sbavail(sb) - sb_offset)) &&
} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
(ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
(len < (int)(sbavail(sb) - sb_offset)) &&
(TCPS_HAVEESTABLISHED(tp->t_state))) {
} else if ((rack->r_ctl.crte != NULL) &&
(tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
(ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
(len < (int)(sbavail(sb) - sb_offset))) {
* defeats the point of hw-pacing (i.e. to help us get
* presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
* Pre-calculate here as we save another lookup into the darknesses
if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
(tp->t_port == 0) &&
((tp->t_flags & TF_SIGNATURE) == 0) &&
outstanding = tp->snd_max - tp->snd_una;
if (tp->t_flags & TF_SENTFIN) {
outstanding--;
if ((rsm->r_flags & RACK_HAS_FIN) == 0)
recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
(long)TCP_MAXWIN << tp->rcv_scale);
* conditions when len is non-zero:
* - We have a full segment (or more with TSO)
* - This is the last buffer in a write()/send() and we are either
*   idle or running NODELAY
* - we've timed out (e.g. persist timer)
* - we have more than 1/2 the maximum send window's worth of data
*   (receiver may have limited the window size)
* - we need to retransmit
if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
(idle || (tp->t_flags & TF_NODELAY)) &&
(tp->t_flags & TF_NOPUSH) == 0) {
if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
* pending (it will get piggy-backed on it) or the remote side
* already has done a half-close and won't send more data. Skip
* this if the connection is in T/TCP half-open state.
if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
!(tp->t_flags & TF_DELACK) &&
!TCPS_HAVERCVDFIN(tp->t_state)) {
* tp->rcv_scale.
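/*
 * Pure window updates are only volunteered when they are worth
 * sending: adv is the growth beyond the previously advertised edge
 * (rcv_adv), and the checks below suppress updates smaller than a
 * quarter of the receive buffer unless the remaining window or the
 * buffer itself is small.
 */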
if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
oldwin = (tp->rcv_adv - tp->rcv_nxt);
adv -= oldwin;
if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
(adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
so->so_rcv.sb_hiwat <= 8 * segsiz)) {
if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
* is also a catch-all for the retransmit timer timeout case.
if (tp->t_flags & TF_ACKNOW) {
if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
(tp->snd_max == tp->snd_una)) {
if ((tp->t_flags & TF_FASTOPEN) == 0 &&
(sbused(sb) == (tp->snd_max - tp->snd_una)) &&
((tp->snd_max - tp->snd_una) <= segsiz)) {
* the peer wait for the delayed-ack timer to run off
rack->r_ctl.fsb.recwin = recwin;
rack->r_fsb_inited &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((IN_RECOVERY(tp->t_flags)) == 0) &&
(rack->r_must_retran == 0) &&
((tp->t_flags & TF_NEEDFIN) == 0) &&
((orig_len - len) >= segsiz) &&
rack->r_fast_output = 0;
if (SEQ_GT(tp->snd_max, tp->snd_nxt))
tp->snd_nxt = tp->snd_max;
uint32_t seq = tp->gput_ack;
rsm = tqhash_max(rack->r_ctl.tqh);
* Mark the last sent that we just-returned (hinting
rsm->r_just_ret = 1;
rack->r_ctl.rc_agg_delayed = 0;
rack->r_early = 0;
rack->r_late = 0;
rack->r_ctl.rc_agg_early = 0;
min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
minseg)) >= tp->snd_wnd) {
if (IN_FASTRECOVERY(tp->t_flags))
rack->r_ctl.rc_prr_sndcnt = 0;
/* We are limited by what's available -- app limited */
if (IN_FASTRECOVERY(tp->t_flags))
rack->r_ctl.rc_prr_sndcnt = 0;
((tp->t_flags & TF_NODELAY) == 0) &&
* don't send. Another app-limited case.
} else if (tp->t_flags & TF_NOPUSH) {
} else if (IN_FASTRECOVERY(tp->t_flags) &&
(rack->rack_no_prr == 0) &&
(rack->r_ctl.rc_prr_sndcnt < segsiz)) {
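/*
 * Nothing was sent: clamp any in-progress goodput measurement at
 * snd_max and, when the send buffer has run dry, tag the newest
 * sendmap entry RACK_APP_LIMITED so the measurement code can discount
 * the application-limited stretch.
 */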
if ((tp->t_flags & TF_GPUTINPROG) &&
SEQ_GT(tp->gput_ack, tp->snd_max)) {
tp->gput_ack = tp->snd_max;
if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
tp->t_flags &= ~TF_GPUTINPROG;
rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
rack->r_ctl.rc_gp_srtt /*flex1*/,
tp->gput_seq,
rsm = tqhash_max(rack->r_ctl.tqh);
if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
if (rack->r_ctl.rc_app_limited_cnt == 0)
rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
if (rack->r_ctl.rc_end_appl)
rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
rack->r_ctl.rc_end_appl = rsm;
rsm->r_flags |= RACK_APP_LIMITED;
rack->r_ctl.rc_app_limited_cnt++;
rack->r_ctl.rc_app_limited_cnt, seq,
tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
if ((tp->snd_max == tp->snd_una) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
(sbavail(sb) > tp->snd_wnd) &&
(tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
/* Yes, let's make sure to move to persist before timer-start */
rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
rack->r_ctl.rc_scw) {
tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
rack->rack_scwnd_is_idle = 1;
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_DATA]++;
tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_LIMITED]++;
tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
if ((rack->r_ctl.crte != NULL) &&
((rack->rc_hw_nobuf == 1) ||
rack->r_ctl.rc_agg_delayed = 0;
rack->r_ctl.rc_agg_early = 0;
rack->r_early = 0;
rack->r_late = 0;
if (TCPS_HAVEESTABLISHED(tp->t_state) &&
(sbused(sb) == (tp->snd_max - tp->snd_una)) &&
((tp->snd_max - tp->snd_una) <= segsiz)) {
* the peer wait for the delayed-ack timer to run off
(rack->pcm_in_progress == 0) &&
(rack->r_ctl.pcm_max_seg > 0) &&
(len >= rack->r_ctl.pcm_max_seg)) {
rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag);
rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag);
tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
* be snd_max-1 else it's snd_max.
rack_seq = tp->iss;
(tp->t_flags & TF_SENTFIN))
rack_seq = tp->snd_max - 1;
rack_seq = tp->snd_max;
rack_seq = rsm->r_start;
* established connection segments. Options for SYN-ACK segments
if ((tp->t_flags & TF_NOOPT) == 0) {
to.to_mss = tcp_mssopt(&inp->inp_inc);
if (tp->t_port)
to.to_mss -= V_tcp_udp_tunneling_overhead;
if ((tp->t_flags & TF_FASTOPEN) &&
(tp->t_rxtshift == 0)) {
if (tp->t_state == TCPS_SYN_RECEIVED) {
(u_int8_t *)&tp->t_tfo_cookie.server;
} else if (tp->t_state == TCPS_SYN_SENT) {
tp->t_tfo_client_cookie_len;
tp->t_tfo_cookie.client;
if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
to.to_wscale = tp->request_r_scale;
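/*
 * Timestamp option, plus the receive-path RTT probe: when idle and the
 * guards below pass, the current ms timestamp is remembered and
 * rack_seq is backed up by one (see rack_seq-- below), apparently so
 * that the peer's immediate ACK of the stale sequence can time the
 * receive path (editor's reading of the "ack-probe" comment).
 */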
if ((tp->t_flags & TF_RCVD_TSTMP) ||
((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
if ((rack->r_rcvpath_rtt_up == 1) &&
(ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) {
* our ack-probe.
to.to_tsval = ts_to_use + tp->ts_offset;
to.to_tsecr = tp->ts_recent;
(TCPS_HAVEESTABLISHED(tp->t_state)) &&
((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) &&
(tp->snd_una == tp->snd_max) &&
(rack->r_ctl.current_round != 0) &&
(rack->r_rcvpath_rtt_up == 0)) {
rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts;
rack->r_ctl.last_time_of_arm_rcv = cts;
rack->r_rcvpath_rtt_up = 1;
rack_seq--;
if (tp->rfbuf_ts == 0 &&
(so->so_rcv.sb_flags & SB_AUTOSIZE)) {
tp->rfbuf_ts = ms_cts;
if (tp->t_flags & TF_SACK_PERMIT) {
else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
tp->rcv_numsacks > 0) {
to.to_nsacks = tp->rcv_numsacks;
to.to_sacks = (u_char *)tp->sackblks;
/* TCP-MD5 (RFC2385). */
if (tp->t_flags & TF_SIGNATURE)
if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
if (tp->t_port) {
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_FAIL]++;
tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
if (inp->inp_options)
ipoptlen = inp->inp_options->m_len -
if (len + optlen + ipoptlen > tp->t_maxseg) {
if_hw_tsomax = tp->t_tsomax;
if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
max_len = (if_hw_tsomax - hdrlen -
max_len = (tp->t_maxseg - optlen);
len -= moff;
if (tp->t_flags & TF_NEEDFIN) {
if (optlen + ipoptlen >= tp->t_maxseg) {
len = tp->t_maxseg - optlen - ipoptlen;
if ((sbused(sb) == (tp->snd_max - tp->snd_una)) &&
((tp->snd_max - tp->snd_una) <= segsiz)) {
* the peer wait for the delayed-ack timer to run off
hw_tls = tp->t_nic_ktls_xmit != 0;
m->m_data += max_linkhdr;
m->m_len = hdrlen;
if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
m->m_len += len;
m->m_next = tcp_m_copym(
if (len <= (tp->t_maxseg - optlen)) {
if (m->m_next == NULL) {
if (rsm && (rsm->r_flags & RACK_TLP)) {
tp->t_sndrexmitpack++;
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
if (tp->t_flags & TF_ACKNOW)
m->m_data += max_linkhdr;
m->m_len = hdrlen;
m->m_pkthdr.rcvif = (struct ifnet *)0;
if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
th = rack->r_ctl.fsb.th;
udp = rack->r_ctl.fsb.udp;
ulen = hdrlen + len - sizeof(struct ip6_hdr);
ulen = hdrlen + len - sizeof(struct ip);
udp->uh_ulen = htons(ulen);
if (tp->t_port) {
udp->uh_sport = htons(V_tcp_udp_tunneling_port);
udp->uh_dport = tp->t_port;
ulen = hdrlen + len - sizeof(struct ip6_hdr);
udp->uh_ulen = htons(ulen);
tcpip_fillheaders(inp, tp->t_port, ip6, th);
if (tp->t_port) {
udp->uh_sport = htons(V_tcp_udp_tunneling_port);
udp->uh_dport = tp->t_port;
ulen = hdrlen + len - sizeof(struct ip);
udp->uh_ulen = htons(ulen);
tcpip_fillheaders(inp, tp->t_port, ip, th);
if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
if (TCPS_HAVERCVDSYN(tp->t_state) &&
(tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
if ((tp->t_state == TCPS_SYN_RECEIVED) &&
(tp->t_flags2 & TF2_ECN_SND_ECE))
tp->t_flags2 &= ~TF2_ECN_SND_ECE;
ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
ip6->ip6_flow |= htonl(ect << 20);
ip->ip_tos &= ~IPTOS_ECN_MASK;
ip->ip_tos |= ect;
th->th_seq = htonl(rack_seq);
th->th_ack = htonl(tp->rcv_nxt);
if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
th->th_win = htons((u_short)
(min(sbspace(&so->so_rcv), TCP_MAXWIN)));
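/*
 * Round the advertised window up to a multiple of 1 << rcv_scale so
 * the right shift below cannot shrink it: e.g. with rcv_scale == 7,
 * recwin = 1000 becomes 1024 and is advertised as 8 (8 << 7 == 1024).
 */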
recwin = roundup2(recwin, 1 << tp->rcv_scale);
th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
* Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
if (th->th_win == 0) {
tp->t_sndzerowin++;
tp->t_flags |= TF_RXWIN0SENT;
tp->t_flags &= ~TF_RXWIN0SENT;
tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
(u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
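/*
 * Checksum offload setup: the host fills in only the pseudo-header
 * sum (in_pseudo()/in6_cksum_pseudo()) and sets CSUM_TCP/CSUM_UDP (or
 * the _IPV6 variants); csum_data tells the driver where the checksum
 * field lives inside the header.
 */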
if (tp->t_port) {
m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
th->th_sum = htons(0);
m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
th->th_sum = in6_cksum_pseudo(ip6,
if (tp->t_port) {
m->m_pkthdr.csum_flags = CSUM_UDP;
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
th->th_sum = htons(0);
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
th->th_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
KASSERT(ip->ip_v == IPVERSION,
("%s: IP version incorrect: %d", __func__, ip->ip_v));
KASSERT(len > tp->t_maxseg - optlen,
m->m_pkthdr.csum_flags |= CSUM_TSO;
m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
if ((rack->r_ctl.crte != NULL) &&
(rack->rc_hw_nobuf == 0) &&
if (tcp_bblogging_on(rack->rc_tp)) {
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr)
log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
if (rsm->r_flags & RACK_RWND_COLLAPSED) {
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
log.u_bbr.pkts_out = tp->t_maxseg;
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
if (rsm && (rsm->r_rtr_cnt > 0)) {
log.u_bbr.flex5 = rsm->r_fas;
log.u_bbr.bbr_substate = rsm->r_bas;
log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
log.u_bbr.delRate = rsm->r_flags;
log.u_bbr.delRate |= rack->r_must_retran;
log.u_bbr.delRate = rack->r_must_retran;
lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
* m->m_pkthdr.len should have been set before cksum calculation,
rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
tp->t_flags2 |= TF2_PLPMTU_PMTUD;
tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
if (tp->t_state == TCPS_SYN_SENT)
inp->in6p_outputopts,
&inp->inp_route6,
if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
mtu = inp->inp_route6.ro_nh->nh_mtu;
ip->ip_len = htons(m->m_pkthdr.len);
if (inp->inp_vflag & INP_IPV6PROTO)
ip->ip_ttl = in6_selecthlim(inp, NULL);
rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
tp->t_flags2 |= TF2_PLPMTU_PMTUD;
if (tp->t_port == 0 || len < V_tcp_minmss) {
ip->ip_off |= htons(IP_DF);
tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
if (tp->t_state == TCPS_SYN_SENT)
inp->inp_options,
&inp->inp_route,
if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
mtu = inp->inp_route.ro_nh->nh_mtu;
lgb->tlb_errno = error;
rack->pcm_in_progress = 1;
rack->pcm_needed = 0;
rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag);
if (rack->lt_bw_up == 0) {
rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
rack->r_ctl.lt_seq = tp->snd_una;
rack->lt_bw_up = 1;
} else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
rack->r_ctl.lt_seq = tp->snd_una;
if (tmark > rack->r_ctl.lt_timemark) {
rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
rack->r_ctl.lt_timemark = tmark;
rack->forced_ack = 0; /* If we send something zap the FA flag */
rack->rc_last_sent_tlp_past_cumack = 0;
rack->rc_last_sent_tlp_seq_valid = 1;
rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
if (rack->rc_hw_nobuf) {
rack->rc_hw_nobuf = 0;
rack->r_ctl.rc_agg_delayed = 0;
rack->r_early = 0;
rack->r_late = 0;
rack->r_ctl.rc_agg_early = 0;
rack->rc_gp_saw_rec = 1;
if (cwnd_to_use > tp->snd_ssthresh) {
rack->rc_gp_saw_ca = 1;
rack->rc_gp_saw_ss = 1;
if (TCPS_HAVEESTABLISHED(tp->t_state) &&
(tp->t_flags & TF_SACK_PERMIT) &&
tp->rcv_numsacks > 0)
counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
if ((rack->rack_no_prr == 0) &&
if (rack->r_ctl.rc_prr_sndcnt >= len)
rack->r_ctl.rc_prr_sndcnt -= len;
rack->r_ctl.rc_prr_sndcnt = 0;
rsm->r_flags &= ~RACK_TLP;
(tp->snd_una == tp->snd_max))
rack->r_ctl.rc_tlp_rxt_last_time = cts;
tcp_seq startseq = tp->snd_max;
rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
rack->rc_tlp_in_progress = 0;
rack->r_ctl.rc_tlp_cnt_out = 0;
rack->rc_tlp_in_progress = 1;
rack->r_ctl.rc_tlp_cnt_out++;
if ((tp->snd_una == tp->snd_max) && (len > 0)) {
tp->t_acktime = ticks;
((tp->t_flags & TF_SENTSYN) == 0)) {
tp->snd_max++;
tp->t_flags |= TF_SENTSYN;
((tp->t_flags & TF_SENTFIN) == 0)) {
tp->snd_max++;
tp->t_flags |= TF_SENTFIN;
tp->snd_max += len;
if (rack->rc_new_rnd_needed) {
rack_new_round_starts(tp, rack, tp->snd_max);
if (tp->t_rtttime == 0) {
tp->t_rtttime = ticks;
tp->t_rtseq = startseq;
((tp->t_flags & TF_GPUTINPROG) == 0))
if (rack->r_fast_output && len) {
if (rack->r_ctl.fsb.left_to_send > len)
rack->r_ctl.fsb.left_to_send -= len;
rack->r_ctl.fsb.left_to_send = 0;
if (rack->r_ctl.fsb.left_to_send < segsiz)
rack->r_fast_output = 0;
if (rack->r_fast_output) {
rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
((pace_max_seg - len) > segsiz)) {
n_len = (orig_len - len);
orig_len -= len;
pace_max_seg -= len;
sb_offset = tp->snd_max - tp->snd_una;
/* Re-lock for the next spin */
((orig_len - len) > segsiz)) {
n_len = (orig_len - len);
orig_len -= len;
sb_offset = tp->snd_max - tp->snd_una;
/* Re-lock for the next spin */
rack->r_ctl.rc_agg_delayed = 0;
rack->r_early = 0;
rack->r_late = 0;
rack->r_ctl.rc_agg_early = 0;
tp->t_softerror = error;
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_FAIL]++;
tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
if (rack->r_ctl.crte != NULL) {
tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
if (tcp_bblogging_on(rack->rc_tp))
tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
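/*
 * ENOBUFS backoff: wait (1 + rc_enobuf) milliseconds before the next
 * send attempt, growing linearly with each consecutive failure, with
 * the failure count capped at 0x7f.
 */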
slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
if (rack->rc_enobuf < 0x7f)
rack->rc_enobuf++;
if (rack->r_ctl.crte != NULL) {
tcp_rl_log_enobuf(rack->r_ctl.crte);
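/*
 * EMSGSIZE: the path MTU shrank underneath us. TSO is turned off for
 * the resync, the new route MTU is folded in via tcp_mss_update(), and
 * (in the elided code) a smaller MSS presumably forces the outstanding
 * data to be re-sent at the new size.
 */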
tp->t_flags &= ~TF_TSO;
saved_mtu = tp->t_maxseg;
tcp_mss_update(tp, -1, mtu, NULL, NULL);
if (saved_mtu > tp->t_maxseg) {
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_FAIL]++;
tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
if (TCPS_HAVERCVDSYN(tp->t_state)) {
tp->t_softerror = error;
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_FAIL]++;
tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
rack->rc_enobuf = 0;
if (IN_FASTRECOVERY(tp->t_flags) && rsm)
rack->r_ctl.retran_during_recovery += len;
if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
tp->rcv_adv = tp->rcv_nxt + recwin;
tp->last_ack_sent = tp->rcv_nxt;
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
rack->r_ent_rec_ns = 0;
if (rack->r_must_retran) {
rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
rack->r_must_retran = 0;
rack->r_ctl.rc_out_at_rto = 0;
} else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
rack->r_must_retran = 0;
rack->r_ctl.rc_out_at_rto = 0;
rack->r_ctl.fsb.recwin = recwin;
if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
/* set the rack tcb into slot N */
rack->r_fsb_inited &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((IN_RECOVERY(tp->t_flags)) == 0) &&
(rack->r_must_retran == 0) &&
((tp->t_flags & TF_NEEDFIN) == 0) &&
((orig_len - len) >= segsiz) &&
rack->r_fast_output = 0;
(rack->r_must_retran == 0) &&
rack->r_fsb_inited &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((IN_RECOVERY(tp->t_flags)) == 0) &&
((tp->t_flags & TF_NEEDFIN) == 0) &&
((orig_len - len) >= segsiz) &&
if (rack->r_fast_output) {
if (SEQ_GT(tp->snd_max, tp->snd_nxt))
tp->snd_nxt = tp->snd_max;
crtsc = get_cyclecount() - ts_val;
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_DATA]++;
tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_ACK]++;
tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
/* rack_update_seg() */
orig_val = rack->r_ctl.rc_pace_max_segs;
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
if (orig_val != rack->r_ctl.rc_pace_max_segs)
/* rack_mtu_change() */
rack = (struct tcp_rack *)tp->t_fb_ptr;
if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
rack->r_fast_output = 0;
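/*
 * After an MTU reduction every in-flight segment may now be oversized:
 * snapshot the flight in rc_out_at_rto/rc_snd_max_at_rto, set
 * r_must_retran, and tag each sendmap entry RACK_MUST_RXT|RACK_PMTU_CHG;
 * the sack filter is cleared, apparently because old SACK blocks may
 * no longer line up with the re-segmented data.
 */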
rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
rack->r_ctl.rc_sacked);
rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
rack->r_must_retran = 1;
TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);
sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
tp->snd_nxt = tp->snd_max;
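/*
 * rack_set_dgp(): switch the connection to DGP (dynamic goodput
 * pacing, judging from the knobs set below): always-on pacing filling
 * to the cwnd, PRR disabled, compressed-ack and shared-cwnd processing
 * enabled, fixed-rate pacing off, and recovery paced at 90% of goodput
 * (rack_per_of_gp_rec = 90).
 */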
if (rack->dgp_on == 1)
if ((rack->use_fixed_rate == 1) &&
(rack->rc_always_pace == 1)) {
if (rack->rc_always_pace == 1) {
rack->r_ctl.pacing_method |= RACK_DGP_PACING;
rack->rc_fillcw_apply_discount = 0;
rack->dgp_on = 1;
rack->rc_always_pace = 1;
rack->rc_pace_dnd = 1;
rack->use_fixed_rate = 0;
if (rack->gp_ready)
rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
rack->rack_attempt_hdwr_pace = 0;
rack->full_size_rxt = 1;
rack->shape_rxt_to_pacing_min = 0;
rack->r_use_cmp_ack = 1;
if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
rack->r_use_cmp_ack)
rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
rack->rack_enable_scwnd = 1;
rack->rc_gp_dyn_mul = 1;
rack->r_ctl.rack_per_of_gp_ca = 100;
rack->r_rr_config = 3;
rack->r_ctl.rc_no_push_at_mrtt = 2;
rack->rc_pace_to_cwnd = 1;
rack->rc_pace_fill_if_rttin_range = 0;
rack->rtt_limit_mul = 0;
rack->rack_no_prr = 1;
rack->r_limit_scw = 1;
rack->r_ctl.rack_per_of_gp_rec = 90;
/* rack_set_profile() */
* fill-cw the same settings that profile5 does
* to replace DGP. It then gets the max(dgp-rate, fillcw(discounted)).
22700 rack->rc_fillcw_apply_discount = 1; in rack_set_profile()
22703 if (rack->rc_always_pace == 1) { in rack_set_profile()
22707 rack->dgp_on = 0; in rack_set_profile()
22708 rack->rc_hybrid_mode = 0; in rack_set_profile()
22709 rack->use_fixed_rate = 0; in rack_set_profile()
22713 rack->rc_pace_to_cwnd = 1; in rack_set_profile()
22715 rack->rc_pace_to_cwnd = 0; in rack_set_profile()
22718 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_set_profile()
22719 rack->rc_always_pace = 1; in rack_set_profile()
22720 if (rack->rack_hibeta) in rack_set_profile()
22723 rack->rc_always_pace = 0; in rack_set_profile()
22726 rack->rc_rack_tmr_std_based = 1; in rack_set_profile()
22730 rack->rc_rack_use_dsack = 1; in rack_set_profile()
22733 rack->r_use_cmp_ack = 1; in rack_set_profile()
22735 rack->r_use_cmp_ack = 0; in rack_set_profile()
22737 rack->rack_no_prr = 1; in rack_set_profile()
22739 rack->rack_no_prr = 0; in rack_set_profile()
22741 rack->rc_gp_no_rec_chg = 1; in rack_set_profile()
22743 rack->rc_gp_no_rec_chg = 0; in rack_set_profile()
22744 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { in rack_set_profile()
22745 rack->r_mbuf_queue = 1; in rack_set_profile()
22746 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) in rack_set_profile()
22747 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_profile()
22748 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22750 rack->r_mbuf_queue = 0; in rack_set_profile()
22751 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22754 rack->rack_enable_scwnd = 1; in rack_set_profile()
22756 rack->rack_enable_scwnd = 0; in rack_set_profile()
22759 rack->rc_gp_dyn_mul = 1; in rack_set_profile()
22761 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_set_profile()
22763 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_set_profile()
22764 rack->rc_gp_dyn_mul = 0; in rack_set_profile()
22766 rack->r_rr_config = 0; in rack_set_profile()
22767 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_set_profile()
22768 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_profile()
22769 rack->rtt_limit_mul = 0; in rack_set_profile()
22772 rack->rack_hdw_pace_ena = 1; in rack_set_profile()
22774 rack->rack_hdw_pace_ena = 0; in rack_set_profile()
22776 rack->rack_no_prr = 1; in rack_set_profile()
22778 rack->rack_no_prr = 0; in rack_set_profile()
22780 rack->r_limit_scw = 1; in rack_set_profile()
22782 rack->r_limit_scw = 0; in rack_set_profile()
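/*
 * Sketch of how a profile might be selected from userspace, assuming
 * a FreeBSD kernel with the rack stack available and TCP_RACK_PROFILE
 * exposed in <netinet/tcp.h>; error handling is trimmed for brevity
 * and profile 1 selecting the DGP setup is an assumption drawn from
 * rack_set_dgp() being the profile-1 path above.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

static int
use_rack_profile(int fd, int profile)
{
#if defined(TCP_FUNCTION_BLK) && defined(TCP_RACK_PROFILE)
	struct tcp_function_set fs;

	/* Switch this connection onto the rack stack first... */
	memset(&fs, 0, sizeof(fs));
	strncpy(fs.function_set_name, "rack",
	    sizeof(fs.function_set_name) - 1);
	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
	    &fs, sizeof(fs)) == -1)
		return (-1);
	/* ...then pick a canned profile (e.g. 1 for the DGP setup). */
	return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE,
	    &profile, sizeof(profile)));
#else
	(void)fd; (void)profile;
	return (-1);
#endif
}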
22798 * No space available -- fail out. in rack_add_deferred_option()
22802 dol->optname = sopt_name; in rack_add_deferred_option()
22803 dol->optval = loptval; in rack_add_deferred_option()
22804 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); in rack_add_deferred_option()
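/*
 * Standalone model of the deferred-option queue: options that cannot
 * be applied yet are appended to a TAILQ and replayed later in
 * arrival order. Uses sys/queue.h like the kernel; struct def_opt is
 * an illustrative stand-in for the kernel's deferred-option entry.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct def_opt {
	TAILQ_ENTRY(def_opt) next;
	int optname;
	uint64_t optval;
};
TAILQ_HEAD(def_opt_head, def_opt);

int
main(void)
{
	struct def_opt_head head = TAILQ_HEAD_INITIALIZER(head);
	struct def_opt *dol, *sdol;

	for (int name = 1; name <= 3; name++) {
		dol = malloc(sizeof(*dol));
		if (dol == NULL)
			break;
		dol->optname = name;
		dol->optval = name * 100;
		TAILQ_INSERT_TAIL(&head, dol, next);	/* defer */
	}
	/* Later: replay and free in FIFO order, as the kernel does. */
	TAILQ_FOREACH_SAFE(dol, &head, next, sdol) {
		TAILQ_REMOVE(&head, dol, next);
		printf("apply opt %d val %llu\n", dol->optname,
		    (unsigned long long)dol->optval);
		free(dol);
	}
	return (0);
}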
22820 rack->use_fixed_rate = 0; in process_hybrid_pacing()
22821 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; in process_hybrid_pacing()
22822 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; in process_hybrid_pacing()
22823 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; in process_hybrid_pacing()
22825 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); in process_hybrid_pacing()
22827 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22829 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; in process_hybrid_pacing()
22834 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; in process_hybrid_pacing()
22836 seq = sft->start_seq; in process_hybrid_pacing()
22837 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { in process_hybrid_pacing()
22839 if (rack->rc_hybrid_mode) { in process_hybrid_pacing()
22841 rack->rc_tp->tcp_hybrid_stop++; in process_hybrid_pacing()
22846 if (rack->dgp_on == 0) { in process_hybrid_pacing()
22854 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22863 if (rack->rc_hybrid_mode == 0) { in process_hybrid_pacing()
22866 rack->r_ctl.pacing_method |= RACK_REG_PACING; in process_hybrid_pacing()
22867 rack->rc_hybrid_mode = 1; in process_hybrid_pacing()
22871 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { in process_hybrid_pacing()
22876 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in process_hybrid_pacing()
22880 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; in process_hybrid_pacing()
22881 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) in process_hybrid_pacing()
22882 sft->cspr = hybrid->cspr; in process_hybrid_pacing()
22884 sft->cspr = 0; in process_hybrid_pacing()
22885 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) in process_hybrid_pacing()
22886 sft->hint_maxseg = hybrid->hint_maxseg; in process_hybrid_pacing()
22888 sft->hint_maxseg = 0; in process_hybrid_pacing()
22889 rack->rc_tp->tcp_hybrid_start++; in process_hybrid_pacing()
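/*
 * Sketch of a client-side request enabling hybrid pacing for one byte
 * range. This assumes the tcp_hybrid_req layout and the flag names
 * visible above (hybrid_flags, cspr, hint_maxseg) are exposed by
 * <netinet/tcp.h> on this kernel, and that the embedded request
 * carries start/end byte offsets; treat the field names as
 * assumptions, not a definitive API.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdint.h>
#include <string.h>

static int
hybrid_pace_range(int fd, uint64_t start, uint64_t end, uint64_t cspr)
{
#ifdef TCP_HYBRID_PACING
	struct tcp_hybrid_req hybrid;

	memset(&hybrid, 0, sizeof(hybrid));
	hybrid.req.start = start;	/* first byte of the ranged send */
	hybrid.req.end = end;		/* one past the last byte */
	hybrid.cspr = cspr;		/* client-suggested pacing rate */
	hybrid.hybrid_flags = TCP_HYBRID_PACING_ENABLE |
	    TCP_HYBRID_PACING_CSPR;
	return (setsockopt(fd, IPPROTO_TCP, TCP_HYBRID_PACING,
	    &hybrid, sizeof(hybrid)));
#else
	(void)fd; (void)start; (void)end; (void)cspr;
	return (-1);
#endif
}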
22901 si->bytes_transmitted = tp->t_sndbytes; in rack_stack_information()
22902 si->bytes_retransmitted = tp->t_snd_rxt_bytes; in rack_stack_information()
22933 rack->rc_rack_tmr_std_based = 1; in rack_process_option()
22935 rack->rc_rack_tmr_std_based = 0; in rack_process_option()
22938 rack->rc_rack_use_dsack = 1; in rack_process_option()
22940 rack->rc_rack_use_dsack = 0; in rack_process_option()
22947 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_process_option()
22950 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; in rack_process_option()
22952 rack->r_ctl.pace_len_divisor = optval; in rack_process_option()
22958 rack->rack_hibeta = 1; in rack_process_option()
22964 rack->r_ctl.saved_hibeta = optval; in rack_process_option()
22965 if (rack->rc_pacing_cc_set) in rack_process_option()
22967 rack->r_ctl.rc_saved_beta = optval; in rack_process_option()
22969 if (rack->rc_pacing_cc_set == 0) in rack_process_option()
22972 rack->rack_hibeta = 0; in rack_process_option()
22973 if (rack->rc_pacing_cc_set) in rack_process_option()
22982 rack->r_ctl.timer_slop = optval; in rack_process_option()
22983 if (rack->rc_tp->t_srtt) { in rack_process_option()
22988 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_option()
22990 rack->r_ctl.timer_slop); in rack_process_option()
22995 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_process_option()
23000 if (rack->rc_pacing_cc_set) { in rack_process_option()
23009 if (CC_ALGO(tp)->ctl_output != NULL) in rack_process_option()
23010 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_process_option()
23018 rack->r_ctl.rc_saved_beta_ecn = optval; in rack_process_option()
23024 if (rack->gp_ready) { in rack_process_option()
23029 rack->defer_options = 1; in rack_process_option()
23031 rack->defer_options = 0; in rack_process_option()
23036 rack->r_ctl.req_measurements = optval; in rack_process_option()
23043 rack->r_use_labc_for_rec = 1; in rack_process_option()
23045 rack->r_use_labc_for_rec = 0; in rack_process_option()
23050 rack->rc_labc = optval; in rack_process_option()
23057 rack->r_up_only = 1; in rack_process_option()
23059 rack->r_up_only = 0; in rack_process_option()
23063 rack->r_ctl.fillcw_cap = loptval; in rack_process_option()
23067 if ((rack->dgp_on == 1) && in rack_process_option()
23068 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23080 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23082 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23084 rack->r_ctl.bw_rate_cap = loptval; in rack_process_option()
23091 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { in rack_process_option()
23099 rack->r_ctl.side_chan_dis_mask = optval; in rack_process_option()
23101 rack->r_ctl.side_chan_dis_mask = 0; in rack_process_option()
23109 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { in rack_process_option()
23112 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { in rack_process_option()
23113 rack->r_use_cmp_ack = 1; in rack_process_option()
23114 rack->r_mbuf_queue = 1; in rack_process_option()
23115 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23117 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_process_option()
23118 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_process_option()
23123 rack->r_limit_scw = 1; in rack_process_option()
23125 rack->r_limit_scw = 0; in rack_process_option()
23133 rack->rc_pace_to_cwnd = 0; in rack_process_option()
23135 rack->rc_pace_to_cwnd = 1; in rack_process_option()
23140 rack->rc_pace_fill_if_rttin_range = 1; in rack_process_option()
23141 rack->rtt_limit_mul = optval; in rack_process_option()
23143 rack->rc_pace_fill_if_rttin_range = 0; in rack_process_option()
23144 rack->rtt_limit_mul = 0; in rack_process_option()
23150 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_process_option()
23152 rack->r_ctl.rc_no_push_at_mrtt = optval; in rack_process_option()
23159 rack->rack_enable_scwnd = 0; in rack_process_option()
23161 rack->rack_enable_scwnd = 1; in rack_process_option()
23164 /* Now, do we use the LRO mbuf-queue feature? */ in rack_process_option()
23166 if (optval || rack->r_use_cmp_ack) in rack_process_option()
23167 rack->r_mbuf_queue = 1; in rack_process_option()
23169 rack->r_mbuf_queue = 0; in rack_process_option()
23170 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23171 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23173 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23178 rack->rack_rec_nonrxt_use_cr = 0; in rack_process_option()
23180 rack->rack_rec_nonrxt_use_cr = 1; in rack_process_option()
23185 rack->rack_no_prr = 0; in rack_process_option()
23187 rack->rack_no_prr = 1; in rack_process_option()
23189 rack->no_prr_addback = 1; in rack_process_option()
23195 rack->cspr_is_fcc = 1; in rack_process_option()
23197 rack->cspr_is_fcc = 0; in rack_process_option()
23202 rack->rc_gp_dyn_mul = 0; in rack_process_option()
23204 rack->rc_gp_dyn_mul = 1; in rack_process_option()
23210 rack->r_ctl.rack_per_of_gp_ca = optval; in rack_process_option()
23223 rack->rack_tlp_threshold_use = optval; in rack_process_option()
23228 rack->r_ctl.rc_tlp_cwnd_reduce = optval; in rack_process_option()
23237 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23242 if (rack->rc_always_pace) { in rack_process_option()
23246 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23247 rack->rc_always_pace = 1; in rack_process_option()
23248 if (rack->rack_hibeta) in rack_process_option()
23256 if (rack->rc_always_pace == 1) { in rack_process_option()
23260 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23261 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23263 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23273 rack->r_ctl.init_rate = val; in rack_process_option()
23274 if (rack->rc_always_pace) in rack_process_option()
23283 rack->rc_force_max_seg = 1; in rack_process_option()
23285 rack->rc_force_max_seg = 0; in rack_process_option()
23289 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); in rack_process_option()
23295 if ((rack->dgp_on == 1) && in rack_process_option()
23296 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23298 * If we set a max-seg and are doing DGP then in rack_process_option()
23309 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23311 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23314 rack->rc_user_set_max_segs = optval; in rack_process_option()
23316 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; in rack_process_option()
23322 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23326 if (rack->dgp_on) { in rack_process_option()
23334 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23335 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23336 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23337 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23338 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23339 rack->use_fixed_rate = 1; in rack_process_option()
23340 if (rack->rack_hibeta) in rack_process_option()
23343 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23344 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23345 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23352 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23356 if (rack->dgp_on) { in rack_process_option()
23364 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23365 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23366 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23367 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23368 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23369 rack->use_fixed_rate = 1; in rack_process_option()
23370 if (rack->rack_hibeta) in rack_process_option()
23373 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23374 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23375 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23382 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23386 if (rack->dgp_on) { in rack_process_option()
23394 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23395 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23396 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23397 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23398 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23399 rack->use_fixed_rate = 1; in rack_process_option()
23400 if (rack->rack_hibeta) in rack_process_option()
23403 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23404 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23405 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23410 rack->r_ctl.rack_per_of_gp_rec = optval; in rack_process_option()
23412 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23413 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23414 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23428 rack->r_ctl.rack_per_of_gp_ca = ca; in rack_process_option()
23430 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23431 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23432 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23446 rack->r_ctl.rack_per_of_gp_ss = ss; in rack_process_option()
23448 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23449 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23450 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23456 rack->r_rr_config = optval; in rack_process_option()
23458 rack->r_rr_config = 0; in rack_process_option()
23462 rack->rc_pace_dnd = 1; in rack_process_option()
23464 rack->rc_pace_dnd = 0; in rack_process_option()
23469 if (rack->r_rack_hw_rate_caps == 0) in rack_process_option()
23470 rack->r_rack_hw_rate_caps = 1; in rack_process_option()
23474 rack->r_rack_hw_rate_caps = 0; in rack_process_option()
23481 rack->r_ctl.rack_per_upper_bound_ca = val; in rack_process_option()
23483 rack->r_ctl.rack_per_upper_bound_ss = val; in rack_process_option()
23488 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; in rack_process_option()
23490 rack->r_ctl.gate_to_fs = 1; in rack_process_option()
23492 rack->r_ctl.gate_to_fs = 0; in rack_process_option()
23495 rack->r_ctl.use_gp_not_last = 1; in rack_process_option()
23497 rack->r_ctl.use_gp_not_last = 0; in rack_process_option()
23504 rack->r_ctl.gp_gain_req = v; in rack_process_option()
23508 rack->rc_initial_ss_comp = 1; in rack_process_option()
23509 rack->r_ctl.gp_rnd_thresh = 0; in rack_process_option()
23514 rack->r_ctl.rc_split_limit = optval; in rack_process_option()
23519 if (rack->rack_hdrw_pacing == 0) { in rack_process_option()
23520 rack->rack_hdw_pace_ena = 1; in rack_process_option()
23521 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23525 rack->rack_hdw_pace_ena = 0; in rack_process_option()
23527 if (rack->r_ctl.crte != NULL) { in rack_process_option()
23528 rack->rack_hdrw_pacing = 0; in rack_process_option()
23529 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23530 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_process_option()
23531 rack->r_ctl.crte = NULL; in rack_process_option()
23540 rack->r_ctl.rc_prr_sendalot = optval; in rack_process_option()
23543 /* Minimum time between rack timeouts, in ms */ in rack_process_option()
23545 rack->r_ctl.rc_min_to = optval; in rack_process_option()
23550 rack->r_ctl.rc_early_recovery_segs = optval; in rack_process_option()
23555 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_process_option()
23557 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_process_option()
23559 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_process_option()
23561 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_process_option()
23569 rack->r_ctl.rc_reorder_shift = optval; in rack_process_option()
23576 rack->r_ctl.rc_reorder_fade = optval; in rack_process_option()
23582 rack->r_ctl.rc_tlp_threshold = optval; in rack_process_option()
23589 rack->use_rack_rr = 1; in rack_process_option()
23591 rack->use_rack_rr = 0; in rack_process_option()
23594 /* RACK added ms, i.e. rack-rtt + reord + N */ in rack_process_option()
23596 rack->r_ctl.rc_pkt_delay = optval; in rack_process_option()
23601 tp->t_delayed_ack = 0; in rack_process_option()
23603 tp->t_delayed_ack = 1; in rack_process_option()
23604 if (tp->t_flags & TF_DELACK) { in rack_process_option()
23605 tp->t_flags &= ~TF_DELACK; in rack_process_option()
23606 tp->t_flags |= TF_ACKNOW; in rack_process_option()
23620 rack->r_ctl.rc_rate_sample_method = optval; in rack_process_option()
23625 rack->r_use_hpts_min = 1; in rack_process_option()
23627 * Must be between 2 and 80% to be a reduction, else in rack_process_option()
23631 rack->r_ctl.max_reduction = optval; in rack_process_option()
23634 rack->r_use_hpts_min = 0; in rack_process_option()
23639 rack->rc_gp_no_rec_chg = 1; in rack_process_option()
23641 rack->rc_gp_no_rec_chg = 0; in rack_process_option()
23646 rack->rc_skip_timely = 1; in rack_process_option()
23647 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_process_option()
23648 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_process_option()
23649 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_process_option()
23651 rack->rc_skip_timely = 0; in rack_process_option()
23656 rack->use_lesser_lt_bw = 0; in rack_process_option()
23657 rack->dis_lt_bw = 1; in rack_process_option()
23659 rack->use_lesser_lt_bw = 1; in rack_process_option()
23660 rack->dis_lt_bw = 0; in rack_process_option()
23662 rack->use_lesser_lt_bw = 0; in rack_process_option()
23663 rack->dis_lt_bw = 0; in rack_process_option()
23669 rack->rc_allow_data_af_clo = 1; in rack_process_option()
23671 rack->rc_allow_data_af_clo = 0; in rack_process_option()
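/*
 * Most of the cases above reduce to a 32-bit on/off or scalar optval.
 * A representative userspace call, assuming the rack stack is already
 * selected on fd and TCP_RACK_PACE_ALWAYS is exposed by
 * <netinet/tcp.h>:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
rack_enable_pacing(int fd)
{
#ifdef TCP_RACK_PACE_ALWAYS
	int on = 1;

	return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
	    &on, sizeof(on)));
#else
	(void)fd;
	return (-1);
#endif
}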
23686 * apply a read-lock to the parent (we are already in rack_inherit()
23697 if (par->t_fb != tp->t_fb) { in rack_inherit()
23703 dest = (struct tcp_rack *)tp->t_fb_ptr; in rack_inherit()
23704 src = (struct tcp_rack *)par->t_fb_ptr; in rack_inherit()
23710 /* Now copy out anything we wish to inherit, i.e. things set via socket options */ in rack_inherit()
23712 if ((src->dgp_on) && (dest->dgp_on == 0)) { in rack_inherit()
23718 if (dest->full_size_rxt != src->full_size_rxt) { in rack_inherit()
23719 dest->full_size_rxt = src->full_size_rxt; in rack_inherit()
23722 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { in rack_inherit()
23723 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; in rack_inherit()
23727 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { in rack_inherit()
23728 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; in rack_inherit()
23731 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { in rack_inherit()
23732 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; in rack_inherit()
23736 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { in rack_inherit()
23737 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; in rack_inherit()
23741 if (src->rack_hibeta != dest->rack_hibeta) { in rack_inherit()
23743 if (src->rack_hibeta) { in rack_inherit()
23744 dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta; in rack_inherit()
23745 dest->rack_hibeta = 1; in rack_inherit()
23747 dest->rack_hibeta = 0; in rack_inherit()
23751 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { in rack_inherit()
23752 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; in rack_inherit()
23756 if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) { in rack_inherit()
23757 dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn; in rack_inherit()
23762 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { in rack_inherit()
23763 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; in rack_inherit()
23767 if (dest->r_up_only != src->r_up_only) { in rack_inherit()
23768 dest->r_up_only = src->r_up_only; in rack_inherit()
23772 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { in rack_inherit()
23773 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; in rack_inherit()
23777 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { in rack_inherit()
23778 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; in rack_inherit()
23783 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { in rack_inherit()
23784 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; in rack_inherit()
23788 if (dest->r_limit_scw != src->r_limit_scw) { in rack_inherit()
23789 dest->r_limit_scw = src->r_limit_scw; in rack_inherit()
23793 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { in rack_inherit()
23794 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; in rack_inherit()
23797 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { in rack_inherit()
23798 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; in rack_inherit()
23801 if (dest->rtt_limit_mul != src->rtt_limit_mul) { in rack_inherit()
23802 dest->rtt_limit_mul = src->rtt_limit_mul; in rack_inherit()
23806 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { in rack_inherit()
23807 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; in rack_inherit()
23811 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { in rack_inherit()
23812 dest->rack_enable_scwnd = src->rack_enable_scwnd; in rack_inherit()
23816 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { in rack_inherit()
23817 dest->r_use_cmp_ack = src->r_use_cmp_ack; in rack_inherit()
23821 if (dest->r_mbuf_queue != src->r_mbuf_queue) { in rack_inherit()
23822 dest->r_mbuf_queue = src->r_mbuf_queue; in rack_inherit()
23826 if (dest->r_mbuf_queue != src->r_mbuf_queue) { in rack_inherit()
23827 dest->r_mbuf_queue = src->r_mbuf_queue; in rack_inherit()
23830 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { in rack_inherit()
23831 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_inherit()
23833 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_inherit()
23835 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_inherit()
23836 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_inherit()
23839 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { in rack_inherit()
23840 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; in rack_inherit()
23844 if (dest->rack_no_prr != src->rack_no_prr) { in rack_inherit()
23845 dest->rack_no_prr = src->rack_no_prr; in rack_inherit()
23848 if (dest->no_prr_addback != src->no_prr_addback) { in rack_inherit()
23849 dest->no_prr_addback = src->no_prr_addback; in rack_inherit()
23853 if (dest->cspr_is_fcc != src->cspr_is_fcc) { in rack_inherit()
23854 dest->cspr_is_fcc = src->cspr_is_fcc; in rack_inherit()
23858 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { in rack_inherit()
23859 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; in rack_inherit()
23862 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23863 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23867 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { in rack_inherit()
23868 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; in rack_inherit()
23873 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { in rack_inherit()
23874 dest->r_ctl.init_rate = src->r_ctl.init_rate; in rack_inherit()
23878 if (dest->rc_force_max_seg != src->rc_force_max_seg) { in rack_inherit()
23879 dest->rc_force_max_seg = src->rc_force_max_seg; in rack_inherit()
23883 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { in rack_inherit()
23884 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; in rack_inherit()
23889 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { in rack_inherit()
23890 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; in rack_inherit()
23893 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { in rack_inherit()
23894 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; in rack_inherit()
23897 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { in rack_inherit()
23898 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; in rack_inherit()
23902 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { in rack_inherit()
23903 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; in rack_inherit()
23906 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23907 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23911 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { in rack_inherit()
23912 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; in rack_inherit()
23916 if (dest->r_rr_config != src->r_rr_config) { in rack_inherit()
23917 dest->r_rr_config = src->r_rr_config; in rack_inherit()
23921 if (dest->rc_pace_dnd != src->rc_pace_dnd) { in rack_inherit()
23922 dest->rc_pace_dnd = src->rc_pace_dnd; in rack_inherit()
23926 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { in rack_inherit()
23927 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; in rack_inherit()
23931 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { in rack_inherit()
23932 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; in rack_inherit()
23935 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { in rack_inherit()
23936 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; in rack_inherit()
23940 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { in rack_inherit()
23941 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; in rack_inherit()
23944 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { in rack_inherit()
23945 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; in rack_inherit()
23948 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { in rack_inherit()
23949 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; in rack_inherit()
23952 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { in rack_inherit()
23953 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; in rack_inherit()
23957 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { in rack_inherit()
23958 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; in rack_inherit()
23961 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { in rack_inherit()
23962 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; in rack_inherit()
23966 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { in rack_inherit()
23967 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; in rack_inherit()
23971 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { in rack_inherit()
23972 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; in rack_inherit()
23976 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { in rack_inherit()
23977 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; in rack_inherit()
23981 if (par->t_ccv.flags != tp->t_ccv.flags) { in rack_inherit()
23983 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_inherit()
23984 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_inherit()
23986 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_inherit()
23988 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_inherit()
23990 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_inherit()
23994 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { in rack_inherit()
23995 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; in rack_inherit()
23999 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { in rack_inherit()
24000 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; in rack_inherit()
24004 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { in rack_inherit()
24005 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; in rack_inherit()
24009 if (dest->use_rack_rr != src->use_rack_rr) { in rack_inherit()
24010 dest->use_rack_rr = src->use_rack_rr; in rack_inherit()
24014 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { in rack_inherit()
24015 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; in rack_inherit()
24020 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { in rack_inherit()
24021 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; in rack_inherit()
24025 if (dest->r_use_hpts_min != src->r_use_hpts_min) { in rack_inherit()
24026 dest->r_use_hpts_min = src->r_use_hpts_min; in rack_inherit()
24029 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { in rack_inherit()
24030 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; in rack_inherit()
24034 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { in rack_inherit()
24035 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; in rack_inherit()
24038 if (dest->rc_skip_timely != src->rc_skip_timely) { in rack_inherit()
24039 dest->rc_skip_timely = src->rc_skip_timely; in rack_inherit()
24043 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { in rack_inherit()
24044 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; in rack_inherit()
24048 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { in rack_inherit()
24049 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; in rack_inherit()
24052 if (dest->dis_lt_bw != src->dis_lt_bw) { in rack_inherit()
24053 dest->dis_lt_bw = src->dis_lt_bw; in rack_inherit()
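/*
 * Because rack_inherit() copies these fields from the listener's
 * rack state, tuning can be done once on the listening socket and
 * every accepted connection picks it up. A sketch, assuming rack is
 * already the stack on the listener and TCP_RACK_NO_PRR is exposed:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
accept_tuned(int lfd)
{
#ifdef TCP_RACK_NO_PRR
	int one = 1;

	/* Set once on the listener... */
	(void)setsockopt(lfd, IPPROTO_TCP, TCP_RACK_NO_PRR,
	    &one, sizeof(one));
#endif
	/* ...each accepted connection inherits the setting. */
	return (accept(lfd, NULL, NULL));
}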
24066 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { in rack_apply_deferred_options()
24067 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_apply_deferred_options()
24069 s_optval = (uint32_t)dol->optval; in rack_apply_deferred_options()
24070 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); in rack_apply_deferred_options()
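/*
 * Usage sketch for option deferral: with TCP_DEFER_OPTIONS enabled,
 * options set before goodput measurements are ready are queued and
 * later replayed through rack_apply_deferred_options() above.
 * Assumes TCP_DEFER_OPTIONS is exposed by <netinet/tcp.h>.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
defer_then_set(int fd, int optname, int optval)
{
#ifdef TCP_DEFER_OPTIONS
	int on = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_OPTIONS,
	    &on, sizeof(on)) == -1)
		return (-1);
	/* Queued now, applied when the stack is ready. */
	return (setsockopt(fd, IPPROTO_TCP, optname,
	    &optval, sizeof(optval)));
#else
	(void)fd; (void)optname; (void)optval;
	return (-1);
#endif
}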
24081 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_hw_tls_change()
24083 rack->r_ctl.fsb.hw_tls = 1; in rack_hw_tls_change()
24085 rack->r_ctl.fsb.hw_tls = 0; in rack_hw_tls_change()
24103 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_wake_check()
24104 if (rack->r_ctl.rc_hpts_flags) { in rack_wake_check()
24106 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) { in rack_wake_check()
24110 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) in rack_wake_check()
24112 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { in rack_wake_check()
24116 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) in rack_wake_check()
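/*
 * The TSTMP_GEQ() comparisons above are wraparound-safe serial
 * arithmetic: the unsigned difference is reinterpreted as signed, so
 * a timestamp that wrapped past 2^32 still compares as "later".
 * Standalone demo using the same form as the kernel macro:
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_TSTMP_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)

int
main(void)
{
	uint32_t timer_exp = 0xfffffff0u;	/* just before wrap */
	uint32_t cts = 0x00000010u;		/* just after wrap  */

	/* 0x10 is "later" than 0xfffffff0 despite being smaller. */
	printf("expired: %d\n", MODEL_TSTMP_GEQ(cts, timer_exp));
	return (0);
}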
24150 * socket option arguments. When it re-acquires the lock after the copy, it
24166 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_set_sockopt()
24172 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_set_sockopt()
24175 switch (sopt->sopt_level) { in rack_set_sockopt()
24178 MPASS(inp->inp_vflag & INP_IPV6PROTO); in rack_set_sockopt()
24179 switch (sopt->sopt_name) { in rack_set_sockopt()
24189 switch (sopt->sopt_name) { in rack_set_sockopt()
24194 ip->ip_tos = rack->rc_inp->inp_ip_tos; in rack_set_sockopt()
24200 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; in rack_set_sockopt()
24208 switch (sopt->sopt_name) { in rack_set_sockopt()
24209 case SO_PEERPRIO: /* SC-URL:bs */ in rack_set_sockopt()
24211 if (inp->inp_socket) { in rack_set_sockopt()
24212 rack->client_bufferlvl = inp->inp_socket->so_peerprio; in rack_set_sockopt()
24220 switch (sopt->sopt_name) { in rack_set_sockopt()
24237 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ in rack_set_sockopt()
24238 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ in rack_set_sockopt()
24298 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_set_sockopt()
24299 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { in rack_set_sockopt()
24302 * We truncate it down to 32 bits for the socket-option trace this in rack_set_sockopt()
24306 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { in rack_set_sockopt()
24316 if (tp->t_fb != &__tcp_rack) { in rack_set_sockopt()
24320 if (rack->defer_options && (rack->gp_ready == 0) && in rack_set_sockopt()
24321 (sopt->sopt_name != TCP_DEFER_OPTIONS) && in rack_set_sockopt()
24322 (sopt->sopt_name != TCP_HYBRID_PACING) && in rack_set_sockopt()
24323 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && in rack_set_sockopt()
24324 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && in rack_set_sockopt()
24325 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { in rack_set_sockopt()
24327 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { in rack_set_sockopt()
24336 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); in rack_set_sockopt()
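/*
 * TCP_PACING_RATE_CAP and TCP_FILLCW_RATE_CAP are the 64-bit options
 * special-cased above; they must be passed as a uint64_t. A sketch
 * capping pacing at 100 Mbit/s, assuming the cap is expressed in
 * bytes per second (rack's internal bandwidth unit):
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdint.h>

static int
cap_pacing(int fd)
{
#ifdef TCP_PACING_RATE_CAP
	uint64_t cap = 12500000;	/* bytes per second */

	return (setsockopt(fd, IPPROTO_TCP, TCP_PACING_RATE_CAP,
	    &cap, sizeof(cap)));
#else
	(void)fd;
	return (-1);
#endif
}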
24348 ti->tcpi_state = tp->t_state; in rack_fill_info()
24349 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) in rack_fill_info()
24350 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; in rack_fill_info()
24351 if (tp->t_flags & TF_SACK_PERMIT) in rack_fill_info()
24352 ti->tcpi_options |= TCPI_OPT_SACK; in rack_fill_info()
24353 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { in rack_fill_info()
24354 ti->tcpi_options |= TCPI_OPT_WSCALE; in rack_fill_info()
24355 ti->tcpi_snd_wscale = tp->snd_scale; in rack_fill_info()
24356 ti->tcpi_rcv_wscale = tp->rcv_scale; in rack_fill_info()
24358 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) in rack_fill_info()
24359 ti->tcpi_options |= TCPI_OPT_ECN; in rack_fill_info()
24360 if (tp->t_flags & TF_FASTOPEN) in rack_fill_info()
24361 ti->tcpi_options |= TCPI_OPT_TFO; in rack_fill_info()
24363 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; in rack_fill_info()
24365 ti->tcpi_rtt = tp->t_srtt; in rack_fill_info()
24366 ti->tcpi_rttvar = tp->t_rttvar; in rack_fill_info()
24367 ti->tcpi_rto = tp->t_rxtcur; in rack_fill_info()
24368 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; in rack_fill_info()
24369 ti->tcpi_snd_cwnd = tp->snd_cwnd; in rack_fill_info()
24371 * FreeBSD-specific extension fields for tcp_info. in rack_fill_info()
24373 ti->tcpi_rcv_space = tp->rcv_wnd; in rack_fill_info()
24374 ti->tcpi_rcv_nxt = tp->rcv_nxt; in rack_fill_info()
24375 ti->tcpi_snd_wnd = tp->snd_wnd; in rack_fill_info()
24376 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */ in rack_fill_info()
24377 ti->tcpi_snd_nxt = tp->snd_nxt; in rack_fill_info()
24378 ti->tcpi_snd_mss = tp->t_maxseg; in rack_fill_info()
24379 ti->tcpi_rcv_mss = tp->t_maxseg; in rack_fill_info()
24380 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; in rack_fill_info()
24381 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; in rack_fill_info()
24382 ti->tcpi_snd_zerowin = tp->t_sndzerowin; in rack_fill_info()
24383 ti->tcpi_total_tlp = tp->t_sndtlppack; in rack_fill_info()
24384 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; in rack_fill_info()
24385 ti->tcpi_rttmin = tp->t_rttlow; in rack_fill_info()
24387 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); in rack_fill_info()
24390 if (tp->t_flags & TF_TOE) { in rack_fill_info()
24391 ti->tcpi_options |= TCPI_OPT_TOE; in rack_fill_info()
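/*
 * The fields rack_fill_info() populates are read back through the
 * standard TCP_INFO getsockopt; a minimal reader (srtt/rttvar are in
 * the units the stack exports, microseconds on FreeBSD):
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void
print_rtt(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("srtt %u rttvar %u cwnd %u\n",
		    ti.tcpi_rtt, ti.tcpi_rttvar, ti.tcpi_snd_cwnd);
}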
24412 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_get_sockopt()
24417 switch (sopt->sopt_name) { in rack_get_sockopt()
24432 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) in rack_get_sockopt()
24434 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24435 optval = rack->r_ctl.rc_saved_beta; in rack_get_sockopt()
24442 if (tp->t_ccv.cc_data) in rack_get_sockopt()
24443 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; in rack_get_sockopt()
24456 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) in rack_get_sockopt()
24458 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24459 optval = rack->r_ctl.rc_saved_beta_ecn; in rack_get_sockopt()
24466 if (tp->t_ccv.cc_data) in rack_get_sockopt()
24467 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; in rack_get_sockopt()
24474 if (rack->rc_rack_tmr_std_based) { in rack_get_sockopt()
24477 if (rack->rc_rack_use_dsack) { in rack_get_sockopt()
24483 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_get_sockopt()
24485 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) in rack_get_sockopt()
24487 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) in rack_get_sockopt()
24498 optval = rack->rack_hibeta; in rack_get_sockopt()
24501 optval = rack->defer_options; in rack_get_sockopt()
24504 optval = rack->r_ctl.req_measurements; in rack_get_sockopt()
24507 optval = rack->r_use_labc_for_rec; in rack_get_sockopt()
24510 optval = rack->rc_labc; in rack_get_sockopt()
24513 optval = rack->r_up_only; in rack_get_sockopt()
24516 loptval = rack->r_ctl.fillcw_cap; in rack_get_sockopt()
24519 loptval = rack->r_ctl.bw_rate_cap; in rack_get_sockopt()
24526 optval = rack->r_ctl.side_chan_dis_mask; in rack_get_sockopt()
24533 optval = rack->r_use_cmp_ack; in rack_get_sockopt()
24536 optval = rack->rc_pace_to_cwnd; in rack_get_sockopt()
24539 optval = rack->r_ctl.rc_no_push_at_mrtt; in rack_get_sockopt()
24542 optval = rack->rack_enable_scwnd; in rack_get_sockopt()
24545 optval = rack->rack_rec_nonrxt_use_cr; in rack_get_sockopt()
24548 if (rack->rack_no_prr == 1) in rack_get_sockopt()
24550 else if (rack->no_prr_addback == 1) in rack_get_sockopt()
24556 if (rack->dis_lt_bw) { in rack_get_sockopt()
24559 } else if (rack->use_lesser_lt_bw) { in rack_get_sockopt()
24571 /* Now, do we use the LRO mbuf-queue feature? */ in rack_get_sockopt()
24572 optval = rack->r_mbuf_queue; in rack_get_sockopt()
24575 optval = rack->cspr_is_fcc; in rack_get_sockopt()
24578 optval = rack->rc_gp_dyn_mul; in rack_get_sockopt()
24585 optval = rack->r_ctl.rc_tlp_cwnd_reduce; in rack_get_sockopt()
24588 val = rack->r_ctl.init_rate; in rack_get_sockopt()
24595 optval = rack->rc_force_max_seg; in rack_get_sockopt()
24598 optval = rack->r_ctl.rc_user_set_min_segs; in rack_get_sockopt()
24602 optval = rack->rc_user_set_max_segs; in rack_get_sockopt()
24606 optval = rack->rc_always_pace; in rack_get_sockopt()
24610 optval = rack->r_ctl.rc_prr_sendalot; in rack_get_sockopt()
24613 /* Minimum time between rack timeouts, in ms */ in rack_get_sockopt()
24614 optval = rack->r_ctl.rc_min_to; in rack_get_sockopt()
24617 optval = rack->r_ctl.rc_split_limit; in rack_get_sockopt()
24621 optval = rack->r_ctl.rc_early_recovery_segs; in rack_get_sockopt()
24625 optval = rack->r_ctl.rc_reorder_shift; in rack_get_sockopt()
24628 if (rack->r_ctl.gp_rnd_thresh) { in rack_get_sockopt()
24631 v = rack->r_ctl.gp_gain_req; in rack_get_sockopt()
24633 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); in rack_get_sockopt()
24634 if (rack->r_ctl.gate_to_fs == 1) in rack_get_sockopt()
24641 optval = rack->r_ctl.rc_reorder_fade; in rack_get_sockopt()
24645 optval = rack->use_rack_rr; in rack_get_sockopt()
24648 optval = rack->r_rr_config; in rack_get_sockopt()
24651 optval = rack->r_rack_hw_rate_caps; in rack_get_sockopt()
24654 optval = rack->rack_hdw_pace_ena; in rack_get_sockopt()
24658 optval = rack->r_ctl.rc_tlp_threshold; in rack_get_sockopt()
24661 /* RACK added ms, i.e. rack-rtt + reord + N */ in rack_get_sockopt()
24662 optval = rack->r_ctl.rc_pkt_delay; in rack_get_sockopt()
24665 optval = rack->rack_tlp_threshold_use; in rack_get_sockopt()
24668 optval = rack->rc_pace_dnd; in rack_get_sockopt()
24671 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; in rack_get_sockopt()
24674 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; in rack_get_sockopt()
24677 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; in rack_get_sockopt()
24680 optval = rack->r_ctl.rack_per_upper_bound_ss; in rack_get_sockopt()
24682 optval |= rack->r_ctl.rack_per_upper_bound_ca; in rack_get_sockopt()
24685 optval = rack->r_ctl.rack_per_of_gp_ca; in rack_get_sockopt()
24688 optval = rack->r_ctl.rack_per_of_gp_ss; in rack_get_sockopt()
24691 optval = rack->r_ctl.pace_len_divisor; in rack_get_sockopt()
24694 optval = rack->r_ctl.rc_rate_sample_method; in rack_get_sockopt()
24697 optval = tp->t_delayed_ack; in rack_get_sockopt()
24700 optval = rack->rc_allow_data_af_clo; in rack_get_sockopt()
24703 optval = rack->r_limit_scw; in rack_get_sockopt()
24706 if (rack->r_use_hpts_min) in rack_get_sockopt()
24707 optval = rack->r_ctl.max_reduction; in rack_get_sockopt()
24712 optval = rack->rc_gp_no_rec_chg; in rack_get_sockopt()
24715 optval = rack->rc_skip_timely; in rack_get_sockopt()
24718 optval = rack->r_ctl.timer_slop; in rack_get_sockopt()
24726 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_get_sockopt()
24727 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) in rack_get_sockopt()
24738 if (sopt->sopt_dir == SOPT_SET) { in rack_ctloutput()
24740 } else if (sopt->sopt_dir == SOPT_GET) { in rack_ctloutput()
24743 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); in rack_ctloutput()
24814 printf("Failed to register rack module -- err:%d\n", err); in tcp_addrack()