Lines Matching refs:rack

442 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int lin…
453 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
455 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
456 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
462 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack);
471 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_overr…
477 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
484 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
488 rack_find_high_nonack(struct tcp_rack *rack,
490 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
491 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
495 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
498 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, u…
501 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
515 static uint64_t rack_get_gp_est(struct tcp_rack *rack);
519 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
521 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
525 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
531 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
533 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
535 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
538 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
541 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
581 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64…
583 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
585 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
590 rack_set_profile(struct tcp_rack *rack, int prof);
592 rack_apply_deferred_options(struct tcp_rack *rack);
597 rack_get_lt_bw(struct tcp_rack *rack) in rack_get_lt_bw() argument
602 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
603 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
604 if (rack->lt_bw_up) { in rack_get_lt_bw()
607 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
608 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
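    (From the matches above, rack_get_lt_bw() turns two running counters, bytes delivered and time spent measuring, into a long-term bandwidth, and while a measurement interval is still open (lt_bw_up) it extends both counters to the current moment. A minimal standalone sketch of that arithmetic, assuming the usual bytes-per-second scaling; the struct and helper names here are illustrative, only the fields mirror the matches:)

	#include <stdint.h>

	/* Sketch only: a trimmed-down view of the lt_bw bookkeeping. */
	struct lt_bw_state {
		uint64_t lt_bw_time;	/* accumulated measurement time, usecs */
		uint64_t lt_bw_bytes;	/* bytes delivered over that time */
		uint64_t lt_timemark;	/* usec stamp when the open interval began */
		uint32_t lt_seq;	/* snd_una when the open interval began */
		int	 lt_bw_up;	/* non-zero while an interval is running */
	};

	/* Bandwidth in bytes/sec: bytes / time, extended by the open interval. */
	static uint64_t
	lt_bw_estimate(const struct lt_bw_state *s, uint32_t snd_una,
	    uint64_t now_usec)
	{
		uint64_t tim = s->lt_bw_time;
		uint64_t bytes = s->lt_bw_bytes;

		if (s->lt_bw_up) {
			bytes += (uint64_t)(snd_una - s->lt_seq);
			tim += (now_usec - s->lt_timemark);
		}
		if (tim == 0)
			return (0);
		return ((bytes * 1000000ULL) / tim);	/* usecs -> bytes/sec */
	}
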
617 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) in rack_swap_beta_values() argument
625 tp = rack->rc_tp; in rack_swap_beta_values()
630 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
664 opt.val = rack->r_ctl.rc_saved_beta.beta; in rack_swap_beta_values()
671 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_swap_beta_values()
678 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); in rack_swap_beta_values()
680 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
691 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; in rack_swap_beta_values()
692 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_swap_beta_values()
694 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
696 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
698 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
699 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
707 rack_set_cc_pacing(struct tcp_rack *rack) in rack_set_cc_pacing() argument
709 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
715 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
716 rack_swap_beta_values(rack, 3); in rack_set_cc_pacing()
720 rack_undo_cc_pacing(struct tcp_rack *rack) in rack_undo_cc_pacing() argument
722 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
728 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
729 rack_swap_beta_values(rack, 4); in rack_undo_cc_pacing()
733 rack_remove_pacing(struct tcp_rack *rack) in rack_remove_pacing() argument
735 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
736 rack_undo_cc_pacing(rack); in rack_remove_pacing()
737 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
739 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
741 rack->rc_always_pace = 0; in rack_remove_pacing()
742 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
743 rack->dgp_on = 0; in rack_remove_pacing()
744 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
745 rack->use_fixed_rate = 0; in rack_remove_pacing()
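    (The functions above implement a save/swap/restore protocol for the newreno beta values: rack_swap_beta_values() exchanges the saved pair with the one active in the CC module (through CC socket options in the real code), and rc_pacing_cc_set guards rack_set_cc_pacing()/rack_undo_cc_pacing() so the swap is applied and reverted exactly once; rack_remove_pacing() then clears the pacing flags. A self-contained sketch of the invariant, with the CC side reduced to a plain struct; all names here are illustrative:)

	#include <stdint.h>
	#include <string.h>

	struct beta_pair {
		uint32_t beta;		/* multiplicative decrease on loss */
		uint32_t beta_ecn;	/* multiplicative decrease on ECN mark */
	};

	struct cc_pacing {
		struct beta_pair active;	/* stands in for the CC module */
		struct beta_pair saved;		/* rc_saved_beta in the matches */
		int rc_pacing_cc_set;		/* is the swap currently applied? */
	};

	/* Exchange saved and active; calling this twice is a no-op overall. */
	static void
	swap_beta_values(struct cc_pacing *p)
	{
		struct beta_pair old = p->active;

		p->active = p->saved;
		memcpy(&p->saved, &old, sizeof(old));
	}

	static void
	set_cc_pacing(struct cc_pacing *p)
	{
		if (p->rc_pacing_cc_set)	/* already swapped in */
			return;
		p->rc_pacing_cc_set = 1;
		swap_beta_values(p);
	}

	static void
	undo_cc_pacing(struct cc_pacing *p)
	{
		if (p->rc_pacing_cc_set == 0)	/* nothing to undo */
			return;
		p->rc_pacing_cc_set = 0;
		swap_beta_values(p);		/* restores the original pair */
	}
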
749 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, in rack_log_gpset() argument
752 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
758 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
760 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
762 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
765 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
766 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
768 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
769 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
770 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
771 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
778 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
779 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
780 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
1977 rc_init_window(struct tcp_rack *rack) in rc_init_window() argument
1979 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
1984 rack_get_fixed_pacing_bw(struct tcp_rack *rack) in rack_get_fixed_pacing_bw() argument
1986 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
1987 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
1988 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
1989 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
1991 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
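    (rack_get_fixed_pacing_bw() is a three-way selector: the recovery rate while in fast recovery, the slow-start rate while cwnd is below ssthresh, and the congestion-avoidance rate otherwise. A sketch of the same decision outside the kernel; the struct and names are illustrative:)

	#include <stdint.h>

	struct fixed_rates {
		uint64_t rate_rec;	/* bytes/sec in fast recovery */
		uint64_t rate_ss;	/* bytes/sec in slow start */
		uint64_t rate_ca;	/* bytes/sec in congestion avoidance */
	};

	static uint64_t
	fixed_pacing_bw(const struct fixed_rates *r, int in_fastrecovery,
	    uint32_t cwnd, uint32_t ssthresh)
	{
		if (in_fastrecovery)
			return (r->rate_rec);
		if (cwnd < ssthresh)
			return (r->rate_ss);
		return (r->rate_ca);
	}
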
1995 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, in rack_log_hybrid_bw() argument
2013 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2021 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2023 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
2038 log.u_bbr.delRate = rack_get_gp_est(rack); in rack_log_hybrid_bw()
2039 lt_bw = rack_get_lt_bw(rack); in rack_log_hybrid_bw()
2048 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
2050 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
2051 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
2054 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
2081 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
2085 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
2086 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
2099 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
2101 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
2103 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
2105 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
2107 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
2108 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
2109 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
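    (In rack_log_hybrid_bw(), and in several loggers below, consecutive "|=" lines into bbr_state or flex7 look redundant only because the intervening "log.u_bbr.bbr_state <<= 1;" shifts do not contain the string "rack" and so are elided from this listing. The packing idiom, spelled out as a sketch rather than the kernel code:)

	#include <stdint.h>

	/* Pack four one-bit flags, most significant first, into one byte. */
	static uint8_t
	pack_bbr_state(int always_pace, int dgp_on, int hybrid_mode,
	    int fixed_rate)
	{
		uint8_t state;

		state = (uint8_t)(always_pace != 0);
		state <<= 1;
		state |= (uint8_t)(dgp_on != 0);
		state <<= 1;
		state |= (uint8_t)(hybrid_mode != 0);
		state <<= 1;
		state |= (uint8_t)(fixed_rate != 0);
		return (state);
	}
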
2119 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) in rack_log_hybrid_sends() argument
2121 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
2138 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2139 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2150 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2174 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2175 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2183 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2185 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2187 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2189 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2192 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2193 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2194 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2202 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) in rack_compensate_for_linerate() argument
2207 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2208 if (rack->r_is_v6){ in rack_compensate_for_linerate()
2219 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
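    (rack_compensate_for_linerate() scales a goodput-based bandwidth up toward an on-the-wire rate: it builds the per-segment overhead (TCP header, IPv4 or IPv6 header, 14-byte Ethernet header) on top of the payload and multiplies the estimate by wire bytes per segment over payload bytes per segment. A sketch of the scaling, assuming standard header sizes; constant names are mine:)

	#include <stdint.h>

	#define TCP_HDR_LEN	20u	/* base TCP header */
	#define IPV4_HDR_LEN	20u
	#define IPV6_HDR_LEN	40u
	#define ETHER_HDR_LEN	14u	/* 6 + 6 + 2 */

	static uint64_t
	compensate_for_linerate(uint64_t bw, uint32_t payload_segsiz, int is_v6)
	{
		uint64_t wire;

		wire = payload_segsiz + TCP_HDR_LEN + ETHER_HDR_LEN;
		wire += is_v6 ? IPV6_HDR_LEN : IPV4_HDR_LEN;
		/* goodput bytes/sec -> approximate bytes/sec on the wire */
		return ((bw * wire) / payload_segsiz);
	}
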
2227 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) in rack_rate_cap_bw() argument
2234 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2237 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2238 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2246 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2251 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2253 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2257 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2260 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2262 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2272 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2273 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2276 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2278 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2287 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2288 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2295 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2297 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2303 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2305 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2313 calcbw = rack_compensate_for_linerate(rack, calcbw); in rack_rate_cap_bw()
2315 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2316 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2318 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2322 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2323 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2324 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2325 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2327 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2330 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2339 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2341 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2342 rack->rc_catch_up && in rack_rate_cap_bw()
2343 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2344 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2346 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2350 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2351 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2352 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2353 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2357 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2358 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
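    (rack_rate_cap_bw() enforces a client-requested rate cap, and in hybrid catch-up mode recomputes it from the remaining work: bytes left in the request (end_seq minus snd_una) divided by the time left until the deadline, i.e. the minimum rate that still meets the deadline. When the deadline, the remaining length, or the remaining time is gone, the matches show bw_rate_cap being zeroed (uncapped). A sketch of the catch-up arithmetic, assuming microsecond timestamps; the real code uses serial-number (SEQ_GT) sequence comparisons:)

	#include <stdint.h>

	/*
	 * Minimum bytes/sec needed to send the rest of the request before
	 * the deadline; 0 means "no cap", mirroring the failure paths.
	 */
	static uint64_t
	catch_up_rate(uint32_t snd_una, uint32_t end_seq,
	    uint64_t now_usec, uint64_t deadline_usec)
	{
		uint64_t lenleft, timeleft;

		if (deadline_usec <= now_usec)
			return (0);	/* deadline already passed */
		timeleft = deadline_usec - now_usec;
		if (end_seq <= snd_una)
			return (0);	/* nothing left to send */
		lenleft = end_seq - snd_una;
		return ((lenleft * 1000000ULL) / timeleft);
	}

    (The computed rate then replaces bw_rate_cap after linerate compensation, and any *bw above the cap is clipped to it, as the tail of the function shows.)
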
2365 rack_get_gp_est(struct tcp_rack *rack) in rack_get_gp_est() argument
2369 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2383 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2386 lt_bw = rack_get_lt_bw(rack); in rack_get_gp_est()
2395 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2396 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2399 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2407 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2408 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2415 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2417 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2420 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
2422 if (rack->dis_lt_bw) { in rack_get_gp_est()
2427 lt_bw = rack_get_lt_bw(rack); in rack_get_gp_est()
2430 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2432 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
2454 ret_bw = rack_compensate_for_linerate(rack, ret_bw); in rack_get_gp_est()
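    (rack_get_gp_est() degrades gracefully when no goodput measurement exists yet: a configured init_rate if set, otherwise the initial window divided by the smoothed RTT. Once samples arrive, gp_bw is an accumulating sum averaged over num_measurements until RACK_REQ_AVG of them exist, with the long-term bandwidth blended in (lesser or greater per use_lesser_lt_bw) and the result linerate-compensated. A sketch of the pre-measurement fallback, assuming srtt is kept in microseconds:)

	#include <stdint.h>

	/* Pre-measurement guess: initial window over smoothed RTT. */
	static uint64_t
	initial_bw_guess(uint64_t init_rate, uint32_t initwnd_bytes,
	    uint64_t srtt_usec)
	{
		uint64_t bw;

		if (init_rate != 0)	/* operator-configured starting rate */
			return (init_rate);
		if (srtt_usec == 0)	/* no RTT sample yet: no sane guess */
			return (0);
		bw = initwnd_bytes;
		bw *= 1000000ULL;	/* bytes per srtt -> bytes per second */
		bw /= srtt_usec;
		return (bw);
	}
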
2460 rack_get_bw(struct tcp_rack *rack) in rack_get_bw() argument
2464 if (rack->use_fixed_rate) { in rack_get_bw()
2466 return (rack_get_fixed_pacing_bw(rack)); in rack_get_bw()
2468 bw = rack_get_gp_est(rack); in rack_get_bw()
2473 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) in rack_get_output_gain() argument
2475 if (rack->use_fixed_rate) { in rack_get_output_gain()
2477 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2478 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2479 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2480 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2483 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2484 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2487 } else if (rack->rack_no_prr && in rack_get_output_gain()
2488 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2497 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2502 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2503 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2505 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
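    (rack_get_output_gain() returns a percentage: 100 for fixed-rate pacing, the probe-RTT gain when in probe RTT (for non-retransmissions), the recovery gain during fast recovery with carve-outs for rack_rec_nonrxt_use_cr and no-PRR cases, and otherwise the slow-start or congestion-avoidance gain depending on cwnd versus ssthresh. A condensed sketch of the priority order, with the recovery carve-outs collapsed into the per_rec check:)

	#include <stdint.h>

	struct gp_gains {
		uint16_t per_probertt;	/* gain while in probe RTT, percent */
		uint16_t per_rec;	/* gain in fast recovery */
		uint16_t per_ss;	/* gain in slow start */
		uint16_t per_ca;	/* gain in congestion avoidance */
	};

	static uint16_t
	output_gain(const struct gp_gains *g, int use_fixed_rate,
	    int in_probe_rtt, int in_fastrecovery,
	    uint32_t cwnd, uint32_t ssthresh)
	{
		if (use_fixed_rate)
			return (100);		/* fixed rate paces as-is */
		if (in_probe_rtt)
			return (g->per_probertt);
		if (in_fastrecovery && g->per_rec)
			return (g->per_rec);
		if (cwnd < ssthresh)
			return (g->per_ss);
		return (g->per_ca);
	}
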
2509 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t f… in rack_log_dsack_event() argument
2520 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2525 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2527 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2529 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2530 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2531 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2535 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2538 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2539 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2540 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2541 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2542 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2549 rack_log_hdwr_pacing(struct tcp_rack *rack, in rack_log_hdwr_pacing() argument
2553 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2562 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2563 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2564 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2565 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2566 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2579 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2580 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2582 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2583 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2584 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2585 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2586 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2589 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2590 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2591 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2592 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2593 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2594 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2601 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) in rack_get_output_bw() argument
2609 gain = (uint64_t)rack_get_output_gain(rack, rsm); in rack_get_output_bw()
2615 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2617 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2619 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2624 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2627 rack_log_hdwr_pacing(rack, in rack_get_output_bw()
2634 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2635 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2636 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2637 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2638 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2646 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
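    (rack_get_output_bw() applies the gain as a straight percentage and, when hardware pacing is in play with r_rack_hw_rate_caps set, clips the estimate to the highest rate the NIC's rate table offers (tcp_hw_highest_rate()/tcp_hw_highest_rate_ifp() in the matches), reporting the clip through *capped. A sketch of that step under those assumptions:)

	#include <stdint.h>

	static uint64_t
	apply_gain_and_hw_cap(uint64_t bw, uint16_t gain_percent,
	    uint64_t hw_high_rate /* 0 = no hardware rate table */, int *capped)
	{
		uint64_t bw_est;

		bw_est = (bw * gain_percent) / 100;
		if (hw_high_rate != 0 && bw_est > hw_high_rate) {
			bw_est = hw_high_rate;	/* NIC can't pace faster */
			if (capped != NULL)
				*capped = 1;
		}
		return (bw_est);
	}
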
2662 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t t… in rack_log_retran_reason() argument
2664 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2688 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2690 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2691 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2692 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2693 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2694 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2695 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2696 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2697 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2698 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2705 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) in rack_log_to_start() argument
2707 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2712 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2714 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2716 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2717 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2718 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2720 if (rack->rack_no_prr) in rack_log_to_start()
2723 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2724 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2726 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2727 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2728 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2729 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2730 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2731 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2732 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2734 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2735 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2737 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2738 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2739 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2740 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2741 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2748 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) in rack_log_to_event() argument
2750 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2755 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2757 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2758 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2763 if (rack->rack_no_prr) in rack_log_to_event()
2766 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2768 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2769 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2770 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2771 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2772 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2774 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2775 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2776 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2777 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2784 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, in rack_log_map_chg() argument
2790 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2796 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2819 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2820 if (rack->rack_no_prr) in rack_log_map_chg()
2823 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2824 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2826 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2827 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2828 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2829 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2836 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, in rack_log_rtt_upd() argument
2843 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2846 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2847 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2848 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2849 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2851 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2852 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2854 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2855 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2856 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2865 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2871 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2873 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2875 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2877 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2879 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2881 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2883 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2885 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2886 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2887 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2888 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2889 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2890 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2891 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2896 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2897 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2906 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) in rack_log_rtt_sample() argument
2913 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2920 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2923 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2924 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2925 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2926 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2933 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2935 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2939 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2942 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2943 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2945 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2946 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2947 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2948 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2955 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time… in rack_log_rtt_sample_calc() argument
2957 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2969 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2971 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2972 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2973 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2974 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
2982 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) in rack_log_rtt_sendmap() argument
2984 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
2996 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
2998 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
2999 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
3000 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
3001 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
3009 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int lin… in rack_log_progress_event() argument
3011 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
3016 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
3023 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
3024 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
3025 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
3026 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
3027 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
3029 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
3031 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
3032 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
3039 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timev… in rack_log_type_bbrsnd() argument
3041 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
3045 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
3047 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
3050 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
3051 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
3053 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
3054 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
3056 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
3057 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
3058 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
3059 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
3060 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
3061 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
3062 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
3069 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_… in rack_log_doseg_done() argument
3071 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
3079 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
3080 if (rack->rack_no_prr) in rack_log_doseg_done()
3083 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
3085 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
3086 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
3088 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
3090 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
3091 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
3092 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
3094 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
3095 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
3097 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
3098 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
3099 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
3100 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
3101 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
3103 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
3104 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
3105 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
3106 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
3107 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
3108 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
3109 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
3110 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
3117 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, u… in rack_log_type_pacing_sizes() argument
3119 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
3124 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
3125 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
3128 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
3132 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
3133 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3134 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3135 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3136 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3144 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, in rack_log_type_just_return() argument
3147 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3152 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3154 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3156 if (rack->rack_no_prr) in rack_log_type_just_return()
3159 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3161 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3164 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3165 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3166 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3167 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3168 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3169 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3171 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3172 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3173 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3174 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3181 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, in rack_log_to_cancel() argument
3184 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3190 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3193 if (rack->rack_no_prr) in rack_log_to_cancel()
3196 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3197 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3200 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3203 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3204 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3205 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3206 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3208 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3209 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3210 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3211 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3218 rack_log_alt_to_to_cancel(struct tcp_rack *rack, in rack_log_alt_to_to_cancel() argument
3224 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3242 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3243 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3244 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3251 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) in rack_log_to_processing() argument
3253 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3260 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3261 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3263 if (rack->rack_no_prr) in rack_log_to_processing()
3266 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3267 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3268 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3269 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3271 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3272 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3273 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3274 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3281 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) in rack_log_to_prr() argument
3283 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3288 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3289 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3290 if (rack->rack_no_prr) in rack_log_to_prr()
3293 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3294 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3295 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3296 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3301 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3302 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3304 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3305 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3306 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3307 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3376 rack_alloc(struct tcp_rack *rack) in rack_alloc() argument
3385 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3386 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3387 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3389 rack->rc_free_cnt--; in rack_alloc()
3399 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3407 if (rack->rc_free_cnt) { in rack_alloc()
3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3411 rack->rc_free_cnt--; in rack_alloc()
3418 rack_alloc_full_limit(struct tcp_rack *rack) in rack_alloc_full_limit() argument
3421 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3423 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3424 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3429 return (rack_alloc(rack)); in rack_alloc_full_limit()
3434 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) in rack_alloc_limit() argument
3440 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3441 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3443 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3444 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3452 rsm = rack_alloc(rack); in rack_alloc_limit()
3455 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3461 rack_free_trim(struct tcp_rack *rack) in rack_free_trim() argument
3469 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3470 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3471 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3472 rack->rc_free_cnt--; in rack_free_trim()
3473 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
3479 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) in rack_free() argument
3482 if (rack->r_ctl.rc_app_limited_cnt > 0) { in rack_free()
3483 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3488 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3490 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3491 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); in rack_free()
3492 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3493 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3494 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3496 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3498 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3499 rack->r_ctl.rc_resend = NULL; in rack_free()
3500 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3501 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3502 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3503 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3504 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3505 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3508 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3509 rack_free_trim(rack); in rack_free()
3511 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3512 rack->rc_free_cnt++; in rack_free()
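    (rack_alloc(), rack_free(), and rack_free_trim() together form a small per-connection cache of sendmap entries: frees push onto the head of rc_free, allocations pop from it while more than rack_free_cache entries are cached, and the trimmer returns the excess, taken from the tail, to the allocator. rack_free() also detaches the entry from every pointer that may still reference it (rc_resend, rc_tlpsend, rc_sacklast, the app-limited chain) before caching it. A userland sketch of the cache itself using <sys/queue.h>; the head must be TAILQ_INIT()ed before use:)

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/queue.h>

	struct sendmap {
		TAILQ_ENTRY(sendmap) r_tnext;
		/* ... sequence range, flags, timestamps ... */
	};
	TAILQ_HEAD(free_head, sendmap);

	struct map_cache {
		struct free_head rc_free;
		uint32_t rc_free_cnt;
		uint32_t free_cache_min;	/* keep at least this many */
	};

	static struct sendmap *
	map_alloc(struct map_cache *c)
	{
		struct sendmap *rsm;

		if (c->rc_free_cnt > c->free_cache_min) {
			rsm = TAILQ_FIRST(&c->rc_free);	/* reuse cached */
			TAILQ_REMOVE(&c->rc_free, rsm, r_tnext);
			c->rc_free_cnt--;
			return (rsm);
		}
		/* zone allocation in-kernel; caller must check for NULL */
		return (malloc(sizeof(struct sendmap)));
	}

	static void
	map_free(struct map_cache *c, struct sendmap *rsm)
	{
		/* real code first clears rc_resend/rc_tlpsend/etc. pointers */
		TAILQ_INSERT_HEAD(&c->rc_free, rsm, r_tnext);
		c->rc_free_cnt++;
	}

	static void
	map_free_trim(struct map_cache *c)
	{
		struct sendmap *rsm;

		while (c->rc_free_cnt > c->free_cache_min) {
			rsm = TAILQ_LAST(&c->rc_free, free_head);
			TAILQ_REMOVE(&c->rc_free, rsm, r_tnext);
			c->rc_free_cnt--;
			free(rsm);
		}
	}
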
3516 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) in rack_get_measure_window() argument
3521 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3523 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3559 bw = rack_get_bw(rack); in rack_get_measure_window()
3595 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quali… in rack_enough_for_measurement() argument
3631 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3633 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3637 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3638 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3647 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3648 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3649 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
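    (rack_enough_for_measurement() accepts a goodput sample only if enough data was covered, at least the larger of the initial window and MIN_GP_WIN segments past gput_seq (or the first app-limited point was acked), and enough time elapsed, rack_min_srtts worth of the goodput SRTT, all while not in recovery. A sketch of the two-part test, with the app-limited early-accept path omitted:)

	#include <stdint.h>

	static int
	enough_for_measurement(uint32_t th_ack, uint32_t gput_seq,
	    uint32_t min_bytes,	/* max(init window, MIN_GP_WIN * segsiz) */
	    uint64_t elapsed_usec, uint64_t gp_srtt_usec, uint32_t min_srtts,
	    int in_recovery)
	{
		if ((th_ack - gput_seq) < min_bytes)
			return (0);	/* not enough data covered yet */
		if (elapsed_usec < (gp_srtt_usec * min_srtts))
			return (0);	/* not enough time elapsed yet */
		return (in_recovery == 0);
	}
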
3664 rack_log_timely(struct tcp_rack *rack, in rack_log_timely() argument
3668 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3674 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3676 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3678 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3680 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3681 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3682 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3683 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3684 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3685 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3690 log.u_bbr.rttProp = rack_get_bw(rack); in rack_log_timely()
3692 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3694 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3695 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3696 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3697 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3699 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3701 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3703 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3704 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3705 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3706 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3707 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3714 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) in rack_bw_can_be_raised() argument
3745 rack_log_timely(rack, mult, cur_bw, 0, 0, in rack_bw_can_be_raised()
3753 rack_log_timely(rack, mult, cur_bw, 0, 0, in rack_bw_can_be_raised()
3768 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, in rack_bw_can_be_raised()
3780 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, in rack_bw_can_be_raised()
3787 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) in rack_validate_multipliers_at_or_above100() argument
3794 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3796 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3798 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3799 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3801 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3802 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3807 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) in rack_validate_multipliers_at_or_below_100() argument
3809 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3810 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3812 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3813 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3818 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est,… in rack_increase_bw_mul() argument
3824 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3836 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3837 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3844 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3849 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3851 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3852 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3853 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, in rack_increase_bw_mul()
3854 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3856 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3860 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3861 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3862 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3863 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3864 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3866 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3867 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3868 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, in rack_increase_bw_mul()
3869 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3871 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3875 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3876 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3877 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3878 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3879 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3881 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3882 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, in rack_increase_bw_mul()
3883 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3885 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3888 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3889 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3890 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3891 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3892 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3896 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3898 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3899 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3901 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3903 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3904 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
3906 rack_log_timely(rack, logged, plus, 0, 0, in rack_increase_bw_mul()
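    (rack_increase_bw_mul() raises whichever per-of-gp multiplier matched the phase the last measurement saw, recovery first, then CA, then SS, but only when rack_bw_can_be_raised() agrees that the previous raise actually produced more bandwidth; each raise is clamped to the 16-bit field and to the configured upper bound unless the bottom was dragged. A sketch of one raise-and-clamp step:)

	#include <stdint.h>

	static uint16_t
	raise_gp_multiplier(uint16_t cur_per, uint16_t plus,
	    uint16_t upper_bound /* 0 = none */, int dragged_bottom)
	{
		uint32_t calc = (uint32_t)cur_per + plus;

		if (calc > 0xffff)
			calc = 0xffff;	/* multiplier field is 16 bits */
		if (upper_bound != 0 && dragged_bottom == 0 &&
		    calc > upper_bound)
			calc = upper_bound;	/* respect the ceiling */
		return ((uint16_t)calc);
	}
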
3911 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) in rack_get_decrease() argument
3943 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3954 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) in rack_decrease_highrtt() argument
3967 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3973 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
3979 rack_log_timely(rack, in rack_decrease_highrtt()
3991 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) in rack_decrease_bw_mul() argument
3996 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
3998 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
4000 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
4001 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
4010 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
4013 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
4014 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4020 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4021 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
4022 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
4023 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
4025 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4028 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
4029 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4032 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
4035 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
4036 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4042 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4043 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
4044 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
4045 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
4048 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4057 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4058 rack_log_timely(rack, timely_says, in rack_decrease_bw_mul()
4062 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
4063 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4065 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
4068 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
4069 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4075 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4076 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
4077 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
4078 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
4080 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4090 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4091 rack_log_timely(rack, timely_says, in rack_decrease_bw_mul()
4095 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
4096 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4099 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
4100 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
4102 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
4103 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
4108 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, in rack_decrease_bw_mul()
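    (rack_decrease_bw_mul() computes two candidate reductions, one driven by an absolutely high RTT (rack_decrease_highrtt()) and one driven by the RTT trend (rack_get_decrease() over rtt_diff), appears to take the stronger of the two when timely reported a high RTT, and never lets a multiplier fall below rack_per_lower_bound. A sketch of that selection and floor, inferred from the elided lines between the matches:)

	#include <stdint.h>

	static uint16_t
	decrease_gp_multiplier(uint16_t highrtt_per, uint16_t trend_per,
	    int rtt_is_high, uint16_t lower_bound)
	{
		uint16_t val;

		if (rtt_is_high)
			/* stronger reduction = the smaller new percentage */
			val = (trend_per < highrtt_per) ? trend_per : highrtt_per;
		else
			val = trend_per;
		if (val < lower_bound)
			val = lower_bound;	/* never pace below the floor */
		return (val);
	}
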
4113 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, in rack_log_rtt_shrinks() argument
4116 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_shrinks()
4122 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_shrinks()
4123 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4124 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_rtt_shrinks()
4126 log.u_bbr.flex6 = rack->rc_highly_buffered; in rack_log_rtt_shrinks()
4128 log.u_bbr.flex6 |= rack->forced_ack; in rack_log_rtt_shrinks()
4130 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; in rack_log_rtt_shrinks()
4132 log.u_bbr.flex6 |= rack->in_probe_rtt; in rack_log_rtt_shrinks()
4134 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; in rack_log_rtt_shrinks()
4135 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; in rack_log_rtt_shrinks()
4136 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; in rack_log_rtt_shrinks()
4137 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; in rack_log_rtt_shrinks()
4140 log.u_bbr.delRate = rack_get_bw(rack); in rack_log_rtt_shrinks()
4141 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; in rack_log_rtt_shrinks()
4143 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; in rack_log_rtt_shrinks()
4144 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_shrinks()
4145 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_rtt_shrinks()
4146 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_shrinks()
4147 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_shrinks()
4148 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_rtt_shrinks()
4149 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4150 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_shrinks()
4151 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_log_rtt_shrinks()
4154 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; in rack_log_rtt_shrinks()
4155 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_shrinks()
4156 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_shrinks()
4157 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_shrinks()
4159 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_rtt_shrinks()
4164 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) in rack_set_prtt_target() argument
4168 bwdp = rack_get_bw(rack); in rack_set_prtt_target()
4171 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); in rack_set_prtt_target()
4172 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { in rack_set_prtt_target()
4178 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); in rack_set_prtt_target()
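    (rack_set_prtt_target() sizes the probe-RTT in-flight target as one bandwidth-delay product, current bandwidth times the filtered minimum RTT, rounded up to a whole segment and floored at rack_timely_min_segs segments. A sketch, assuming bandwidth in bytes/sec and RTT in microseconds:)

	#include <stdint.h>

	static uint32_t
	probertt_flight_target(uint64_t bw_bps, uint64_t min_rtt_usec,
	    uint32_t segsiz, uint32_t min_segs)
	{
		uint64_t bwdp;
		uint32_t target;

		bwdp = (bw_bps * min_rtt_usec) / 1000000ULL;	/* one BDP */
		/* roundup() to a whole segment */
		target = (uint32_t)((bwdp + segsiz - 1) / segsiz) * segsiz;
		if (target < (segsiz * min_segs))
			target = segsiz * min_segs;	/* keep a few segments */
		return (target);
	}
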
4183 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) in rack_enter_probertt() argument
4199 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_enter_probertt()
4200 if (rack->rc_gp_dyn_mul == 0) in rack_enter_probertt()
4203 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { in rack_enter_probertt()
4207 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_enter_probertt()
4208 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_enter_probertt()
4216 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_enter_probertt()
4217 rack->rc_tp->snd_una, __LINE__, in rack_enter_probertt()
4220 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_enter_probertt()
4221 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_enter_probertt()
4222 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_enter_probertt()
4223 rack->r_ctl.rc_pace_min_segs); in rack_enter_probertt()
4224 rack->in_probe_rtt = 1; in rack_enter_probertt()
4225 rack->measure_saw_probe_rtt = 1; in rack_enter_probertt()
4226 rack->r_ctl.rc_time_probertt_starts = 0; in rack_enter_probertt()
4227 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; in rack_enter_probertt()
4229 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_enter_probertt()
4231 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); in rack_enter_probertt()
4232 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_enter_probertt()
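/*
 * (Pieced together from the fragments) entering probe-RTT first completes
 * any goodput measurement in progress, then stamps
 * rc_time_probertt_entered, raises in_probe_rtt and measure_saw_probe_rtt,
 * and derives the drain target from the filtered minimum RTT when one is
 * available, else from the current GP srtt.
 */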
4237 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) in rack_exit_probertt() argument
4242 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_exit_probertt()
4243 rack->r_ctl.rc_pace_min_segs); in rack_exit_probertt()
4244 rack->in_probe_rtt = 0; in rack_exit_probertt()
4245 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_exit_probertt()
4246 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_exit_probertt()
4254 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_exit_probertt()
4255 rack->rc_tp->snd_una, __LINE__, in rack_exit_probertt()
4257 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in rack_exit_probertt()
4264 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; in rack_exit_probertt()
4273 rsm = tqhash_max(rack->r_ctl.tqh); in rack_exit_probertt()
4275 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_exit_probertt()
4276 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_exit_probertt()
4283 if (rack->r_ctl.rc_end_appl) in rack_exit_probertt()
4284 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_exit_probertt()
4285 rack->r_ctl.rc_end_appl = rsm; in rack_exit_probertt()
4288 rack->r_ctl.rc_app_limited_cnt++; in rack_exit_probertt()
4300 rack->rc_gp_incr = 0; in rack_exit_probertt()
4301 rack->rc_gp_bwred = 0; in rack_exit_probertt()
4302 rack->rc_gp_timely_inc_cnt = 0; in rack_exit_probertt()
4303 rack->rc_gp_timely_dec_cnt = 0; in rack_exit_probertt()
4306 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { in rack_exit_probertt()
4307 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; in rack_exit_probertt()
4308 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; in rack_exit_probertt()
4310 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { in rack_exit_probertt()
4311 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; in rack_exit_probertt()
4312 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; in rack_exit_probertt()
4318 rack->r_ctl.rc_rtt_diff = 0; in rack_exit_probertt()
4321 rack->rc_tp->t_bytes_acked = 0; in rack_exit_probertt()
4322 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_exit_probertt()
4334 rack_set_prtt_target(rack, segsiz, in rack_exit_probertt()
4335 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_exit_probertt()
4338 rack_set_prtt_target(rack, segsiz, in rack_exit_probertt()
4339 rack->r_ctl.rc_gp_srtt); in rack_exit_probertt()
4342 rack_set_prtt_target(rack, segsiz, in rack_exit_probertt()
4343 rack->r_ctl.rc_entry_gp_rtt); in rack_exit_probertt()
4348 sum = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4350 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); in rack_exit_probertt()
4358 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4365 setval = rack->r_ctl.rc_gp_srtt; in rack_exit_probertt()
4366 if (setval > rack->r_ctl.rc_entry_gp_rtt) in rack_exit_probertt()
4367 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4374 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_exit_probertt()
4376 rack_set_prtt_target(rack, segsiz, in rack_exit_probertt()
4381 ebdp = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4384 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; in rack_exit_probertt()
4386 setto = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4387 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); in rack_exit_probertt()
4388 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { in rack_exit_probertt()
4390 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; in rack_exit_probertt()
4393 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); in rack_exit_probertt()
4395 rack_log_rtt_shrinks(rack, us_cts, in rack_exit_probertt()
4396 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_exit_probertt()
4399 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; in rack_exit_probertt()
4400 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_exit_probertt()
4401 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_probertt()
4402 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_exit_probertt()
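/*
 * Shape of the cwnd restore at exit, as read from the lines above (the
 * branch conditions around ebdp are elided and therefore assumed):
 *
 *	setto = rc_target_probertt_flight [+ ebdp, on one branch];
 *	snd_cwnd = roundup(setto, segsiz);
 *	if (snd_cwnd < segsiz * rack_timely_min_segs)
 *		snd_cwnd = segsiz * rack_timely_min_segs;
 *	snd_ssthresh = snd_cwnd - 1;
 */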
4406 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) in rack_check_probe_rtt() argument
4410 if (rack->rc_gp_filled == 0) { in rack_check_probe_rtt()
4414 if (rack->in_probe_rtt) { in rack_check_probe_rtt()
4418 if (rack->r_ctl.rc_went_idle_time && in rack_check_probe_rtt()
4419 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { in rack_check_probe_rtt()
4423 rack_exit_probertt(rack, us_cts); in rack_check_probe_rtt()
4425 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && in rack_check_probe_rtt()
4426 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { in rack_check_probe_rtt()
4430 rack_log_rtt_shrinks(rack, us_cts, in rack_check_probe_rtt()
4431 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4433 rack_exit_probertt(rack, us_cts); in rack_check_probe_rtt()
4436 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); in rack_check_probe_rtt()
4437 if (rack->rc_highly_buffered) in rack_check_probe_rtt()
4438 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); in rack_check_probe_rtt()
4440 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); in rack_check_probe_rtt()
4441 …if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) … in rack_check_probe_rtt()
4446 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) in rack_check_probe_rtt()
4447 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; in rack_check_probe_rtt()
4450 calc /= max(rack->r_ctl.rc_gp_srtt, 1); in rack_check_probe_rtt()
4455 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4457 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; in rack_check_probe_rtt()
4459 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) in rack_check_probe_rtt()
4460 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4465 if (rack->r_ctl.rc_time_probertt_starts == 0) { in rack_check_probe_rtt()
4467 rack->rc_highly_buffered) || in rack_check_probe_rtt()
4468 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > in rack_check_probe_rtt()
4469 rack->r_ctl.rc_target_probertt_flight)) { in rack_check_probe_rtt()
4473 rack_log_rtt_shrinks(rack, us_cts, in rack_check_probe_rtt()
4474 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4476 rack->r_ctl.rc_time_probertt_starts = us_cts; in rack_check_probe_rtt()
4477 if (rack->r_ctl.rc_time_probertt_starts == 0) in rack_check_probe_rtt()
4478 rack->r_ctl.rc_time_probertt_starts = 1; in rack_check_probe_rtt()
4480 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_check_probe_rtt()
4485 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * in rack_check_probe_rtt()
4492 endtime += rack->r_ctl.rc_time_probertt_starts; in rack_check_probe_rtt()
4495 rack_exit_probertt(rack, us_cts); in rack_check_probe_rtt()
4498 } else if ((rack->rc_skip_timely == 0) && in rack_check_probe_rtt()
4499 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && in rack_check_probe_rtt()
4500 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { in rack_check_probe_rtt()
4502 rack_enter_probertt(rack, us_cts); in rack_check_probe_rtt()
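/*
 * Gating, as far as the fragments show: exit early once the post-idle
 * hold (rack_min_probertt_hold) or the hard bound
 * (rack_probe_rtt_safety_val) has elapsed; otherwise ramp
 * rack_per_of_gp_probertt down toward rack_per_of_gp_lowthresh while
 * draining, stamp rc_time_probertt_starts once flight reaches the
 * target, and re-enter probe-RTT after rack_time_between_probertt.
 */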
4507 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, in rack_update_multiplier() argument
4513 if ((rack->rc_gp_dyn_mul == 0) || in rack_update_multiplier()
4514 (rack->use_fixed_rate) || in rack_update_multiplier()
4515 (rack->in_probe_rtt) || in rack_update_multiplier()
4516 (rack->rc_always_pace == 0)) { in rack_update_multiplier()
4520 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; in rack_update_multiplier()
4521 cur_bw = rack_get_bw(rack); in rack_update_multiplier()
4523 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; in rack_update_multiplier()
4525 up_bnd += rack->r_ctl.last_gp_comp_bw; in rack_update_multiplier()
4527 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; in rack_update_multiplier()
4529 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; in rack_update_multiplier()
4530 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { in rack_update_multiplier()
4541 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, in rack_update_multiplier()
4543 if (rack->r_ctl.rc_no_push_at_mrtt > 1) in rack_update_multiplier()
4544 rack_validate_multipliers_at_or_below_100(rack); in rack_update_multiplier()
4545 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); in rack_update_multiplier()
4560 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, in rack_update_multiplier()
4562 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4563 if (rack->rc_gp_bwred == 0) { in rack_update_multiplier()
4565 rack->rc_gp_bwred = 1; in rack_update_multiplier()
4566 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4568 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { in rack_update_multiplier()
4574 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || in rack_update_multiplier()
4575 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || in rack_update_multiplier()
4583 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); in rack_update_multiplier()
4586 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, in rack_update_multiplier()
4589 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4591 rack->rc_gp_incr = 0; in rack_update_multiplier()
4592 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4610 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, in rack_update_multiplier()
4612 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4613 if (rack->rc_gp_saw_ss && in rack_update_multiplier()
4614 rack->r_ctl.rack_per_upper_bound_ss && in rack_update_multiplier()
4615 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { in rack_update_multiplier()
4622 if (rack->rc_gp_saw_ca && in rack_update_multiplier()
4623 rack->r_ctl.rack_per_upper_bound_ca && in rack_update_multiplier()
4624 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { in rack_update_multiplier()
4631 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4632 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4634 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { in rack_update_multiplier()
4635 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); in rack_update_multiplier()
4638 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, in rack_update_multiplier()
4647 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, in rack_update_multiplier()
4651 rack->rc_gp_incr = 0; in rack_update_multiplier()
4652 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4653 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && in rack_update_multiplier()
4657 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); in rack_update_multiplier()
4658 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4660 rack->rc_gp_incr = 0; in rack_update_multiplier()
4661 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4663 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); in rack_update_multiplier()
4665 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4666 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4667 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); in rack_update_multiplier()
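/*
 * Band computation (a sketch; the elided divides are assumed to treat
 * the mul_up/mul_down knobs as percentages):
 *
 *	up_bnd  = last_gp_comp_bw + (last_gp_comp_bw * rack_gp_per_bw_mul_up) / 100;
 *	low_bnd = last_gp_comp_bw - (last_gp_comp_bw * rack_gp_per_bw_mul_down) / 100;
 *
 * cur_bw under low_bnd takes the decrease leg, over up_bnd the increase
 * leg; in between, the rtt_diff trend and timely_says decide.
 */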
4673 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) in rack_make_timely_judgement() argument
4681 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * in rack_make_timely_judgement()
4685 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_make_timely_judgement()
4688 rack_log_timely(rack, timely_says, log_mult, in rack_make_timely_judgement()
4689 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4691 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4692 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4695 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4696 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4701 rack_log_timely(rack, timely_says, log_mult, in rack_make_timely_judgement()
4702 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4724 rack_log_timely(rack, timely_says, log_mult, in rack_make_timely_judgement()
4725 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); in rack_make_timely_judgement()
4729 rack_log_timely(rack, timely_says, log_mult, in rack_make_timely_judgement()
4730 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); in rack_make_timely_judgement()
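/*
 * Threshold reading (the divisor under rack_gp_rtt_minmul is elided from
 * the listing, so D below is an assumption):
 *
 *	rtt >= min_rtt * rack_gp_rtt_maxmul                  -> judge "too high"
 *	rtt <= min_rtt + (min_rtt * rack_gp_rtt_minmul) / D  -> judge "near floor"
 *	otherwise                                            -> follow rtt_diff
 */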
4791 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) in rack_clear_gp_marks() argument
4796 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_clear_gp_marks()
4798 rsm = tqhash_min(rack->r_ctl.tqh); in rack_clear_gp_marks()
4803 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_clear_gp_marks()
4809 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) in rack_tend_gp_marks() argument
4824 rsm = tqhash_min(rack->r_ctl.tqh); in rack_tend_gp_marks()
4829 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4837 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_tend_gp_marks()
4850 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4855 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4860 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64… in rack_log_gp_calc() argument
4862 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_gp_calc()
4874 log.u_bbr.delRate = rack->r_ctl.gp_bw; in rack_log_gp_calc()
4877 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gp_calc()
4878 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gp_calc()
4879 &rack->rc_inp->inp_socket->so_snd, in rack_log_gp_calc()
4881 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_gp_calc()
4886 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, in rack_do_goodput_measurement() argument
4895 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
4896 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_goodput_measurement()
4901 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) in rack_do_goodput_measurement()
4902 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; in rack_do_goodput_measurement()
4916 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); in rack_do_goodput_measurement()
4917 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); in rack_do_goodput_measurement()
4925 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, in rack_do_goodput_measurement()
4929 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { in rack_do_goodput_measurement()
4933 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, in rack_do_goodput_measurement()
4963 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; in rack_do_goodput_measurement()
4964 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; in rack_do_goodput_measurement()
4965 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; in rack_do_goodput_measurement()
4970 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, in rack_do_goodput_measurement()
4985 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4998 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5000 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { in rack_do_goodput_measurement()
5001 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, in rack_do_goodput_measurement()
5002 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5010 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; in rack_do_goodput_measurement()
5011 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5013 rack->r_ctl.rc_rtt_diff = new_rtt_diff; in rack_do_goodput_measurement()
5015 if (rack->measure_saw_probe_rtt == 0) { in rack_do_goodput_measurement()
5022 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); in rack_do_goodput_measurement()
5023 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); in rack_do_goodput_measurement()
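/*
 * (Reading of the two updates above) rc_rtt_diff is a 1/8-gain EWMA of
 * the change in GP srtt:
 *
 *	rtt_diff = rtt_diff - rtt_diff/8 + new_rtt_diff/8
 *	        ~= (7 * rtt_diff + new_rtt_diff) / 8
 */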
5026 timely_says = rack_make_timely_judgement(rack, in rack_do_goodput_measurement()
5027 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5028 rack->r_ctl.rc_rtt_diff, in rack_do_goodput_measurement()
5029 rack->r_ctl.rc_prev_gp_srtt in rack_do_goodput_measurement()
5033 if (bytes_ps > rack->r_ctl.last_max_bw) { in rack_do_goodput_measurement()
5043 rack_log_pacing_delay_calc(rack, bytes, reqbytes, in rack_do_goodput_measurement()
5044 bytes_ps, rack->r_ctl.last_max_bw, 0, in rack_do_goodput_measurement()
5046 bytes_ps = rack->r_ctl.last_max_bw; in rack_do_goodput_measurement()
5049 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5052 rack->r_ctl.gp_bw = bytes_ps; in rack_do_goodput_measurement()
5053 rack->rc_gp_filled = 1; in rack_do_goodput_measurement()
5054 rack->r_ctl.num_measurements = 1; in rack_do_goodput_measurement()
5055 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5057 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, in rack_do_goodput_measurement()
5058 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5061 if (tcp_in_hpts(rack->rc_tp) && in rack_do_goodput_measurement()
5062 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_goodput_measurement()
5071 tcp_hpts_remove(rack->rc_tp); in rack_do_goodput_measurement()
5072 rack->r_ctl.rc_hpts_flags = 0; in rack_do_goodput_measurement()
5073 rack->r_ctl.rc_last_output_to = 0; in rack_do_goodput_measurement()
5076 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { in rack_do_goodput_measurement()
5078 rack->r_ctl.gp_bw += bytes_ps; in rack_do_goodput_measurement()
5079 addpart = rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5080 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5081 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_do_goodput_measurement()
5083 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5085 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5102 if (rack->r_ctl.num_measurements < 0xff) { in rack_do_goodput_measurement()
5103 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5110 if (rack->r_ctl.rc_rack_min_rtt) in rack_do_goodput_measurement()
5111 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_do_goodput_measurement()
5131 if (rack->rc_gp_dyn_mul == 0) { in rack_do_goodput_measurement()
5132 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5134 if (subpart < (rack->r_ctl.gp_bw / 2)) { in rack_do_goodput_measurement()
5153 subpart = rack->r_ctl.gp_bw / 2; in rack_do_goodput_measurement()
5157 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); in rack_do_goodput_measurement()
5158 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5159 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5171 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5182 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; in rack_do_goodput_measurement()
5186 if ((rack->measure_saw_probe_rtt == 0) || in rack_do_goodput_measurement()
5187 (bytes_ps > rack->r_ctl.gp_bw)) { in rack_do_goodput_measurement()
5194 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); in rack_do_goodput_measurement()
5195 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5196 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5199 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5207 if ((rack->rc_initial_ss_comp == 0) && in rack_do_goodput_measurement()
5208 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { in rack_do_goodput_measurement()
5212 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5218 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5219 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_do_goodput_measurement()
5221 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5226 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || in rack_do_goodput_measurement()
5227 (rack->r_ctl.last_gpest == 0)) { in rack_do_goodput_measurement()
5234 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5235 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5236 } else if (gp_est >= rack->r_ctl.last_gpest) { in rack_do_goodput_measurement()
5243 gp_est /= rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5244 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { in rack_do_goodput_measurement()
5248 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5254 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5256 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; in rack_do_goodput_measurement()
5258 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5263 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5264 if (rack->r_ctl.use_gp_not_last == 1) in rack_do_goodput_measurement()
5265 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5267 rack->r_ctl.last_gpest = bytes_ps; in rack_do_goodput_measurement()
5271 if ((rack->gp_ready == 0) && in rack_do_goodput_measurement()
5272 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_do_goodput_measurement()
5274 rack->gp_ready = 1; in rack_do_goodput_measurement()
5275 if (rack->dgp_on || in rack_do_goodput_measurement()
5276 rack->rack_hibeta) in rack_do_goodput_measurement()
5277 rack_set_cc_pacing(rack); in rack_do_goodput_measurement()
5278 if (rack->defer_options) in rack_do_goodput_measurement()
5279 rack_apply_deferred_options(rack); in rack_do_goodput_measurement()
5281 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, in rack_do_goodput_measurement()
5282 rack_get_bw(rack), 22, did_add, NULL, quality); in rack_do_goodput_measurement()
5285 if ((rack->measure_saw_probe_rtt == 0) && in rack_do_goodput_measurement()
5286 rack->rc_gp_rtt_set) { in rack_do_goodput_measurement()
5287 if (rack->rc_skip_timely == 0) { in rack_do_goodput_measurement()
5288 rack_update_multiplier(rack, timely_says, bytes_ps, in rack_do_goodput_measurement()
5289 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5290 rack->r_ctl.rc_rtt_diff); in rack_do_goodput_measurement()
5293 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, in rack_do_goodput_measurement()
5294 rack_get_bw(rack), 3, line, NULL, quality); in rack_do_goodput_measurement()
5295 rack_log_pacing_delay_calc(rack, in rack_do_goodput_measurement()
5299 rack->r_ctl.gp_bw, /* delRate */ in rack_do_goodput_measurement()
5300 rack_get_lt_bw(rack), /* rttProp */ in rack_do_goodput_measurement()
5303 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5305 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; in rack_do_goodput_measurement()
5311 rack->rc_gp_rtt_set = 0; in rack_do_goodput_measurement()
5312 rack->rc_gp_saw_rec = 0; in rack_do_goodput_measurement()
5313 rack->rc_gp_saw_ca = 0; in rack_do_goodput_measurement()
5314 rack->rc_gp_saw_ss = 0; in rack_do_goodput_measurement()
5315 rack->rc_dragged_bottom = 0; in rack_do_goodput_measurement()
5346 if (rack->r_ctl.rc_first_appl && in rack_do_goodput_measurement()
5348 rack->r_ctl.rc_app_limited_cnt && in rack_do_goodput_measurement()
5349 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && in rack_do_goodput_measurement()
5350 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > in rack_do_goodput_measurement()
5351 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_do_goodput_measurement()
5357 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_do_goodput_measurement()
5358 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_do_goodput_measurement()
5359 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
5360 rack->app_limited_needs_set = 0; in rack_do_goodput_measurement()
5362 if (rack->in_probe_rtt) in rack_do_goodput_measurement()
5363 rack->measure_saw_probe_rtt = 1; in rack_do_goodput_measurement()
5364 else if ((rack->measure_saw_probe_rtt) && in rack_do_goodput_measurement()
5365 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_do_goodput_measurement()
5366 rack->measure_saw_probe_rtt = 0; in rack_do_goodput_measurement()
5367 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { in rack_do_goodput_measurement()
5369 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_do_goodput_measurement()
5372 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); in rack_do_goodput_measurement()
5378 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_do_goodput_measurement()
5399 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_do_goodput_measurement()
5400 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_do_goodput_measurement()
5417 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_do_goodput_measurement()
5421 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5424 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5426 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_do_goodput_measurement()
5436 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_do_goodput_measurement()
5438 rack_tend_gp_marks(tp, rack); in rack_do_goodput_measurement()
5439 rack_log_pacing_delay_calc(rack, in rack_do_goodput_measurement()
5444 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_do_goodput_measurement()
5447 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_do_goodput_measurement()
5453 rack_clear_gp_marks(tp, rack); in rack_do_goodput_measurement()
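/*
 * Lifecycle, pieced together from the fragments above: the first sample
 * seeds gp_bw directly (and flushes a pending pacer run from hpts);
 * samples up to RACK_REQ_AVG are summed and then averaged; after that
 * gp_bw becomes a weighted moving average, gp_bw = (gp_bw - subpart) +
 * addpart, with the weights scaled by measurement time (utim) against
 * srtt. Once req_measurements arrive, gp_ready is set, CC pacing and
 * deferred options are applied, and a new measurement window is armed if
 * enough non-app-limited data remains past th_ack.
 */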
5461 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, in rack_ack_received() argument
5471 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { in rack_ack_received()
5474 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); in rack_ack_received()
5481 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); in rack_ack_received()
5483 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { in rack_ack_received()
5492 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); in rack_ack_received()
5493 rack->r_ctl.lt_seq = tp->snd_max; in rack_ack_received()
5494 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_ack_received()
5495 if (tmark >= rack->r_ctl.lt_timemark) { in rack_ack_received()
5496 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_ack_received()
5498 rack->r_ctl.lt_timemark = tmark; in rack_ack_received()
5499 rack->lt_bw_up = 0; in rack_ack_received()
5503 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { in rack_ack_received()
5505 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); in rack_ack_received()
5516 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5517 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; in rack_ack_received()
5525 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || in rack_ack_received()
5526 (rack_client_low_buf && rack->client_bufferlvl && in rack_ack_received()
5527 (rack->client_bufferlvl < rack_client_low_buf))) in rack_ack_received()
5528 labc_to_use = rack->rc_labc; in rack_ack_received()
5531 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_ack_received()
5558 if (rack->r_must_retran) { in rack_ack_received()
5559 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { in rack_ack_received()
5564 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5565 rack->r_must_retran = 0; in rack_ack_received()
5573 if (acked <= rack->r_ctl.rc_out_at_rto){ in rack_ack_received()
5574 rack->r_ctl.rc_out_at_rto -= acked; in rack_ack_received()
5576 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5581 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); in rack_ack_received()
5583 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5584 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; in rack_ack_received()
5586 if ((rack->rc_initial_ss_comp == 0) && in rack_ack_received()
5592 rack->rc_initial_ss_comp = 1; in rack_ack_received()
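/*
 * (Inferred) on a cum-ack reaching snd_max the long-term b/w sample is
 * closed out much as in the persist path; a goodput measurement fires
 * when the ack covers enough of the window; byte-counted ABC switches to
 * rc_labc after recovery or for low-buffer clients; and r_must_retran
 * clears once everything outstanding at the RTO has been acked.
 */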
5599 struct tcp_rack *rack; in tcp_rack_partialack() local
5601 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_partialack()
5610 if ((rack->r_ctl.rc_prr_sndcnt > 0) || in tcp_rack_partialack()
5611 rack->rack_no_prr) in tcp_rack_partialack()
5612 rack->r_wanted_output = 1; in tcp_rack_partialack()
5616 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) in rack_exit_recovery() argument
5627 struct tcp_rack *rack; in rack_post_recovery() local
5632 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_post_recovery()
5646 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_post_recovery()
5659 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_post_recovery()
5664 if ((rack->rack_no_prr == 0) && in rack_post_recovery()
5665 (rack->no_prr_addback == 0) && in rack_post_recovery()
5666 (rack->r_ctl.rc_prr_sndcnt > 0)) { in rack_post_recovery()
5682 rack->r_ctl.rc_prr_sndcnt); in rack_post_recovery()
5684 rack->r_ctl.rc_prr_sndcnt = 0; in rack_post_recovery()
5685 rack_log_to_prr(rack, 1, 0, __LINE__); in rack_post_recovery()
5687 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); in rack_post_recovery()
5689 if (rack->r_ctl.dsack_persist) { in rack_post_recovery()
5690 rack->r_ctl.dsack_persist--; in rack_post_recovery()
5691 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_post_recovery()
5692 rack->r_ctl.num_dsack = 0; in rack_post_recovery()
5694 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); in rack_post_recovery()
5696 if (rack->rto_from_rec == 1) { in rack_post_recovery()
5697 rack->rto_from_rec = 0; in rack_post_recovery()
5698 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_post_recovery()
5699 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_post_recovery()
5701 rack_exit_recovery(tp, rack, 1); in rack_post_recovery()
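/*
 * (Inferred) leaving recovery may add unspent PRR credit (rc_prr_sndcnt)
 * back into cwnd unless no_prr_addback is set, decays the DSACK
 * persistence counter, and, when the RTO fired from inside recovery,
 * restores the larger saved rto_ssthresh.
 */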
5707 struct tcp_rack *rack; in rack_cong_signal() local
5720 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cong_signal()
5727 if (rack->rc_initial_ss_comp == 0) { in rack_cong_signal()
5729 rack->rc_initial_ss_comp = 1; in rack_cong_signal()
5731 rack->r_ctl.rc_prr_delivered = 0; in rack_cong_signal()
5732 rack->r_ctl.rc_prr_out = 0; in rack_cong_signal()
5733 rack->r_fast_output = 0; in rack_cong_signal()
5734 if (rack->rack_no_prr == 0) { in rack_cong_signal()
5735 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_cong_signal()
5736 rack_log_to_prr(rack, 2, in_rec_at_entry, line); in rack_cong_signal()
5738 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; in rack_cong_signal()
5753 rack->r_fast_output = 0; in rack_cong_signal()
5762 rack->r_fast_output = 0; in rack_cong_signal()
5764 rack_exit_recovery(tp, rack, 2); in rack_cong_signal()
5766 rack_log_to_prr(rack, 16, orig_cwnd, line); in rack_cong_signal()
5770 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / in rack_cong_signal()
5801 rack_log_to_prr(rack, 15, cwnd_enter, line); in rack_cong_signal()
5802 rack->r_ctl.dsack_byte_cnt = 0; in rack_cong_signal()
5803 rack->r_ctl.retran_during_recovery = 0; in rack_cong_signal()
5804 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; in rack_cong_signal()
5805 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; in rack_cong_signal()
5806 rack->r_ent_rec_ns = 1; in rack_cong_signal()
5811 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) in rack_cc_after_idle() argument
5823 i_cwnd = rc_init_window(rack); in rack_cc_after_idle()
5853 rack_find_lowest_rsm(struct tcp_rack *rack) in rack_find_lowest_rsm() argument
5862 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_find_lowest_rsm()
5873 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) in rack_find_high_nonack() argument
5884 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { in rack_find_high_nonack()
5894 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) in rack_calc_thresh_rack() argument
5917 if (rack->r_ctl.rc_reorder_ts) { in rack_calc_thresh_rack()
5918 if (rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5919 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { in rack_calc_thresh_rack()
5920 lro = cts - rack->r_ctl.rc_reorder_ts; in rack_calc_thresh_rack()
5932 if (lro > rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5934 rack->r_ctl.rc_reorder_ts = 0; in rack_calc_thresh_rack()
5944 if (rack->rc_rack_tmr_std_based == 0) { in rack_calc_thresh_rack()
5945 thresh = srtt + rack->r_ctl.rc_pkt_delay; in rack_calc_thresh_rack()
5950 if (lro && (rack->rc_rack_tmr_std_based == 0)) { in rack_calc_thresh_rack()
5952 if (rack->r_ctl.rc_reorder_shift) in rack_calc_thresh_rack()
5953 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); in rack_calc_thresh_rack()
5957 if (rack->rc_rack_use_dsack && in rack_calc_thresh_rack()
5959 (rack->r_ctl.num_dsack > 0)) { in rack_calc_thresh_rack()
5964 thresh += rack->r_ctl.num_dsack * (srtt >> 2); in rack_calc_thresh_rack()
5966 rack_log_dsack_event(rack, 4, line, srtt, thresh); in rack_calc_thresh_rack()
5977 rack_log_dsack_event(rack, 6, line, srtt, thresh); in rack_calc_thresh_rack()
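/*
 * A stand-alone sketch of the threshold math above (an assumption-laden
 * reconstruction for illustration, not code from this file):
 */
#include <stdint.h>

static uint32_t
sketch_rack_thresh(uint32_t srtt, uint32_t pkt_delay,
    int reorder_seen, uint32_t reorder_shift, uint32_t num_dsack)
{
	uint32_t thresh;

	thresh = srtt + pkt_delay;		/* base: srtt plus per-packet delay */
	if (reorder_seen && reorder_shift)
		thresh += (srtt >> reorder_shift); /* pad while reordering fades */
	if (num_dsack > 0)
		thresh += num_dsack * (srtt >> 2); /* pad per DSACK observed */
	return (thresh);
}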
5982 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, in rack_calc_thresh_tlp() argument
5991 if (rack->r_ctl.rc_tlp_threshold) in rack_calc_thresh_tlp()
5992 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); in rack_calc_thresh_tlp()
5997 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_calc_thresh_tlp()
5999 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { in rack_calc_thresh_tlp()
6001 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { in rack_calc_thresh_tlp()
6010 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { in rack_calc_thresh_tlp()
6038 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { in rack_calc_thresh_tlp()
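/*
 * (As read from the fragments) the TLP threshold starts from
 * srtt + srtt / rc_tlp_threshold when that knob is set; TLP_USE_ID
 * applies when at most one segment is outstanding (unacked minus SACKed
 * plus retransmitted holes <= segsiz), with the TWO_ONE and TWO_TWO
 * variants elided from this listing.
 */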
6066 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) in rack_grab_rtt() argument
6076 if (rack->rc_rack_rtt) in rack_grab_rtt()
6077 return (rack->rc_rack_rtt); in rack_grab_rtt()
6091 struct tcp_rack *rack; in rack_check_recovery_mode() local
6096 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_recovery_mode()
6097 if (tqhash_empty(rack->r_ctl.tqh)) { in rack_check_recovery_mode()
6100 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_check_recovery_mode()
6106 rsm = rack_find_lowest_rsm(rack); in rack_check_recovery_mode()
6111 srtt = rack_grab_rtt(tp, rack); in rack_check_recovery_mode()
6112 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); in rack_check_recovery_mode()
6125 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) in rack_get_persists_timer_val() argument
6133 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); in rack_get_persists_timer_val()
6134 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; in rack_get_persists_timer_val()
6140 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) in rack_timer_start() argument
6154 if (rack->t_timers_stopped) { in rack_timer_start()
6158 if (rack->rc_in_persist) { in rack_timer_start()
6160 return (rack_get_persists_timer_val(tp, rack)); in rack_timer_start()
6162 rack->rc_on_min_to = 0; in rack_timer_start()
6167 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6172 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6185 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) in rack_timer_start()
6186 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6194 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; in rack_timer_start()
6199 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6229 rsm = rack_find_lowest_rsm(rack); in rack_timer_start()
6248 if ((rack->use_rack_rr == 0) && in rack_timer_start()
6250 (rack->rack_no_prr == 0) && in rack_timer_start()
6251 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { in rack_timer_start()
6262 srtt = rack_grab_rtt(tp, rack); in rack_timer_start()
6263 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); in rack_timer_start()
6268 if (to < rack->r_ctl.rc_min_to) { in rack_timer_start()
6269 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6270 if (rack->r_rr_config == 3) in rack_timer_start()
6271 rack->rc_on_min_to = 1; in rack_timer_start()
6274 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6275 if (rack->r_rr_config == 3) in rack_timer_start()
6276 rack->rc_on_min_to = 1; in rack_timer_start()
6281 if ((rack->rc_tlp_in_progress != 0) && in rack_timer_start()
6282 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { in rack_timer_start()
6289 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_timer_start()
6301 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) in rack_timer_start()
6304 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6309 if ((rack->rc_srtt_measure_made == 0) && in rack_timer_start()
6329 (srtt < rack_grab_rtt(tp, rack))) { in rack_timer_start()
6330 srtt = rack_grab_rtt(tp, rack); in rack_timer_start()
6332 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); in rack_timer_start()
6336 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6337 rack_log_alt_to_to_cancel(rack, in rack_timer_start()
6341 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ in rack_timer_start()
6358 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; in rack_timer_start()
6360 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; in rack_timer_start()
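/*
 * (Summary, partly assumed) rack_timer_start() returns the next timeout
 * in usecs: the persist value when in persist; a RACK threshold timer
 * measured from the lowest outstanding send, floored at rc_min_to (and
 * noted via rc_on_min_to when r_rr_config == 3); or a TLP computed from
 * the last send, suppressed once rc_tlp_cnt_out reaches rack_tlp_limit.
 * The chosen kind lands in rc_hpts_flags as PACE_TMR_RACK or
 * PACE_TMR_TLP.
 */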
6368 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) in rack_enter_persist() argument
6370 if (rack->rc_in_persist == 0) { in rack_enter_persist()
6376 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, in rack_enter_persist()
6380 if (rack->r_ctl.rc_scw) { in rack_enter_persist()
6381 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_enter_persist()
6382 rack->rack_scwnd_is_idle = 1; in rack_enter_persist()
6385 rack->r_ctl.rc_went_idle_time = cts; in rack_enter_persist()
6386 if (rack->r_ctl.rc_went_idle_time == 0) in rack_enter_persist()
6387 rack->r_ctl.rc_went_idle_time = 1; in rack_enter_persist()
6388 if (rack->lt_bw_up) { in rack_enter_persist()
6392 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); in rack_enter_persist()
6393 rack->r_ctl.lt_seq = snd_una; in rack_enter_persist()
6394 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_enter_persist()
6395 if (tmark >= rack->r_ctl.lt_timemark) { in rack_enter_persist()
6396 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_enter_persist()
6398 rack->r_ctl.lt_timemark = tmark; in rack_enter_persist()
6399 rack->lt_bw_up = 0; in rack_enter_persist()
6400 rack->r_persist_lt_bw_off = 1; in rack_enter_persist()
6402 rack_timer_cancel(tp, rack, cts, __LINE__); in rack_enter_persist()
6403 rack->r_ctl.persist_lost_ends = 0; in rack_enter_persist()
6404 rack->probe_not_answered = 0; in rack_enter_persist()
6405 rack->forced_ack = 0; in rack_enter_persist()
6408 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_enter_persist()
6409 rack->rc_in_persist = 1; in rack_enter_persist()
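/*
 * (From the fragments) entering persist closes out the long-term b/w
 * sample: bytes are credited up to snd_una, lt_timemark advances,
 * lt_bw_up clears, and r_persist_lt_bw_off records that sampling must
 * resume when persist ends.
 */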
6414 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_exit_persist() argument
6416 if (tcp_in_hpts(rack->rc_tp)) { in rack_exit_persist()
6417 tcp_hpts_remove(rack->rc_tp); in rack_exit_persist()
6418 rack->r_ctl.rc_hpts_flags = 0; in rack_exit_persist()
6421 if (rack->r_ctl.rc_scw) { in rack_exit_persist()
6422 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_exit_persist()
6423 rack->rack_scwnd_is_idle = 0; in rack_exit_persist()
6426 if (rack->rc_gp_dyn_mul && in rack_exit_persist()
6427 (rack->use_fixed_rate == 0) && in rack_exit_persist()
6428 (rack->rc_always_pace)) { in rack_exit_persist()
6435 time_idle = cts - rack->r_ctl.rc_went_idle_time; in rack_exit_persist()
6439 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * in rack_exit_persist()
6449 if (rack->in_probe_rtt == 0) { in rack_exit_persist()
6450 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_persist()
6451 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6452 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6453 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6455 rack_exit_probertt(rack, us_cts); in rack_exit_persist()
6459 if (rack->r_persist_lt_bw_off) { in rack_exit_persist()
6461 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); in rack_exit_persist()
6462 rack->lt_bw_up = 1; in rack_exit_persist()
6463 rack->r_persist_lt_bw_off = 0; in rack_exit_persist()
6465 rack->rc_in_persist = 0; in rack_exit_persist()
6466 rack->r_ctl.rc_went_idle_time = 0; in rack_exit_persist()
6469 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_exit_persist()
6470 rack->r_ctl.rc_agg_delayed = 0; in rack_exit_persist()
6471 rack->r_early = 0; in rack_exit_persist()
6472 rack->r_late = 0; in rack_exit_persist()
6473 rack->r_ctl.rc_agg_early = 0; in rack_exit_persist()
6477 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, in rack_log_hpts_diag() argument
6480 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_hpts_diag()
6506 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hpts_diag()
6507 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hpts_diag()
6508 &rack->rc_inp->inp_socket->so_snd, in rack_log_hpts_diag()
6516 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) in rack_log_wakeup() argument
6518 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_wakeup()
6528 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_wakeup()
6529 &rack->rc_inp->inp_socket->so_rcv, in rack_log_wakeup()
6530 &rack->rc_inp->inp_socket->so_snd, in rack_log_wakeup()
6537 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, in rack_start_hpts_timer() argument
6558 stopped = rack->rc_tmr_stopped; in rack_start_hpts_timer()
6559 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_start_hpts_timer()
6560 left = rack->r_ctl.rc_timer_exp - cts; in rack_start_hpts_timer()
6562 rack->r_ctl.rc_timer_exp = 0; in rack_start_hpts_timer()
6563 rack->r_ctl.rc_hpts_flags = 0; in rack_start_hpts_timer()
6566 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); in rack_start_hpts_timer()
6567 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { in rack_start_hpts_timer()
6577 slot += rack->r_ctl.rc_agg_early; in rack_start_hpts_timer()
6578 rack->r_early = 0; in rack_start_hpts_timer()
6579 rack->r_ctl.rc_agg_early = 0; in rack_start_hpts_timer()
6581 if ((rack->r_late) && in rack_start_hpts_timer()
6582 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { in rack_start_hpts_timer()
6589 if (rack->r_ctl.rc_agg_delayed >= slot) { in rack_start_hpts_timer()
6598 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); in rack_start_hpts_timer()
6602 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); in rack_start_hpts_timer()
6606 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6607 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6610 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; in rack_start_hpts_timer()
6613 if (rack->r_ctl.rc_agg_delayed == 0) in rack_start_hpts_timer()
6614 rack->r_late = 0; in rack_start_hpts_timer()
6616 } else if (rack->r_late) { in rack_start_hpts_timer()
6620 max_red = (slot * rack->r_ctl.max_reduction) / 100; in rack_start_hpts_timer()
6621 if (max_red >= rack->r_ctl.rc_agg_delayed) { in rack_start_hpts_timer()
6622 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6623 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6626 rack->r_ctl.rc_agg_delayed -= max_red; in rack_start_hpts_timer()
6629 if ((rack->r_use_hpts_min == 1) && in rack_start_hpts_timer()
6631 (rack->dgp_on == 1)) { in rack_start_hpts_timer()
6643 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); in rack_start_hpts_timer()
6646 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; in rack_start_hpts_timer()
6652 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_start_hpts_timer()
6679 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; in rack_start_hpts_timer()
6680 if (rack->in_probe_rtt) { in rack_start_hpts_timer()
6693 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { in rack_start_hpts_timer()
6713 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; in rack_start_hpts_timer()
6715 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); in rack_start_hpts_timer()
6716 if ((rack->gp_ready == 0) && in rack_start_hpts_timer()
6717 (rack->use_fixed_rate == 0) && in rack_start_hpts_timer()
6719 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { in rack_start_hpts_timer()
6761 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6762 rack->r_ctl.rc_last_output_to = us_cts + slot; in rack_start_hpts_timer()
6777 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || in rack_start_hpts_timer()
6779 if (rack->r_rr_config != 3) in rack_start_hpts_timer()
6781 else if (rack->rc_pace_dnd) { in rack_start_hpts_timer()
6793 if (rack->rc_ack_can_sendout_data) { in rack_start_hpts_timer()
6803 if ((rack->use_rack_rr) && in rack_start_hpts_timer()
6804 (rack->r_rr_config < 2) && in rack_start_hpts_timer()
6812 rack_log_hpts_diag(rack, us_cts, &diag, &tv); in rack_start_hpts_timer()
6813 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); in rack_start_hpts_timer()
6817 rack_log_hpts_diag(rack, us_cts, &diag, &tv); in rack_start_hpts_timer()
6818 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); in rack_start_hpts_timer()
6829 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6832 rack_log_hpts_diag(rack, us_cts, &diag, &tv); in rack_start_hpts_timer()
6833 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); in rack_start_hpts_timer()
6839 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); in rack_start_hpts_timer()
6843 rack->rc_tmr_stopped = 0; in rack_start_hpts_timer()
6845 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); in rack_start_hpts_timer()
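/*
 * Slot accounting (assumed semantics): rc_agg_early accumulates time the
 * pacer ran ahead and is paid back by lengthening the next slot;
 * rc_agg_delayed accumulates lateness and is paid back by shortening it,
 * never below HPTS_TICKS_PER_SLOT, and with DGP plus hpts-min in effect
 * the reduction is capped at max_reduction percent of the slot.
 */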
6850 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) in rack_mark_lost() argument
6855 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); in rack_mark_lost()
6857 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mark_lost()
6867 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_mark_lost()
6884 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_timeout_rack() argument
6896 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_rack()
6897 rack_set_state(tp, rack); in rack_timeout_rack()
6898 rack->rc_on_min_to = 0; in rack_timeout_rack()
6900 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); in rack_timeout_rack()
6903 rack_mark_lost(tp, rack, rsm, cts); in rack_timeout_rack()
6904 rack->r_ctl.rc_resend = rsm; in rack_timeout_rack()
6905 rack->r_timer_override = 1; in rack_timeout_rack()
6906 if (rack->use_rack_rr) { in rack_timeout_rack()
6915 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timeout_rack()
6918 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; in rack_timeout_rack()
6921 rack_start_hpts_timer(rack, tp, cts, in rack_timeout_rack()
6976 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap … in rack_setup_offset_for_rsm() argument
6998 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
6999 (src_rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7003 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7004 (rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7018 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, in rack_clone_rsm() argument
7033 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); in rack_clone_rsm()
7058 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); in rack_clone_rsm()
7060 rack_setup_offset_for_rsm(rack, rsm, nrsm); in rack_clone_rsm()
7064 rack_merge_rsm(struct tcp_rack *rack, in rack_merge_rsm() argument
7078 rack_log_map_chg(rack->rc_tp, rack, NULL, in rack_merge_rsm()
7080 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); in rack_merge_rsm()
7087 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); in rack_merge_rsm()
7107 if (r_rsm == rack->r_ctl.rc_first_appl) in rack_merge_rsm()
7108 rack->r_ctl.rc_first_appl = l_rsm; in rack_merge_rsm()
7110 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); in rack_merge_rsm()
7142 rack_free(rack, r_rsm); in rack_merge_rsm()
7156 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) in rack_timeout_tlp() argument
7168 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_timeout_tlp()
7173 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); in rack_timeout_tlp()
7180 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); in rack_timeout_tlp()
7181 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_tlp()
7182 rack->r_might_revert = 0; in rack_timeout_tlp()
7183 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_tlp()
7185 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_tlp()
7186 rack_set_state(tp, rack); in rack_timeout_tlp()
7189 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { in rack_timeout_tlp()
7194 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { in rack_timeout_tlp()
7195 rack->r_ctl.dsack_persist--; in rack_timeout_tlp()
7196 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_tlp()
7197 rack->r_ctl.num_dsack = 0; in rack_timeout_tlp()
7199 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); in rack_timeout_tlp()
7202 (rack->r_ctl.rc_tlp_cnt_out == 1)) { in rack_timeout_tlp()
7212 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_tlp()
7213 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_tlp()
7221 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) in rack_timeout_tlp()
7239 if (rack->rack_no_prr == 0) { in rack_timeout_tlp()
7241 rack->r_ctl.rc_prr_sndcnt = amm; in rack_timeout_tlp()
7242 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7243 rack_log_to_prr(rack, 4, 0, __LINE__); in rack_timeout_tlp()
7250 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7254 rack->r_ctl.rc_tlpsend = NULL; in rack_timeout_tlp()
7265 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timeout_tlp()
7267 rsm = tqhash_max(rack->r_ctl.tqh); in rack_timeout_tlp()
7269 rsm = rack_find_high_nonack(rack, rsm); in rack_timeout_tlp()
7283 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) in rack_timeout_tlp()
7284 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); in rack_timeout_tlp()
7286 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_tlp()
7299 nrsm = rack_alloc_full_limit(rack); in rack_timeout_tlp()
7307 rack_clone_rsm(rack, nrsm, rsm, in rack_timeout_tlp()
7309 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); in rack_timeout_tlp()
7311 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_timeout_tlp()
7313 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_timeout_tlp()
7315 nrsm, insret, rack, rsm); in rack_timeout_tlp()
7319 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_timeout_tlp()
7324 rack->r_ctl.rc_tlpsend = rsm; in rack_timeout_tlp()
7328 rack->r_timer_override = 1; in rack_timeout_tlp()
7329 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7332 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
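/*
 * (Pieced together) the TLP timeout prefers probing with new data when
 * the window allows, crediting PRR (amm) when PRR is active; otherwise
 * it selects the highest non-SACKed segment, or the one just below the
 * collapse point, splitting an rsm via rack_clone_rsm() when only its
 * tail should be probed, then queues it in rc_tlpsend and sets
 * r_timer_override.
 */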
7345 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_timeout_delack() argument
7348 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); in rack_timeout_delack()
7352 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_timeout_delack()
7357 rack_send_ack_challange(struct tcp_rack *rack) in rack_send_ack_challange() argument
7361 t_template = tcpip_maketemplate(rack->rc_inp); in rack_send_ack_challange()
7363 if (rack->forced_ack == 0) { in rack_send_ack_challange()
7364 rack->forced_ack = 1; in rack_send_ack_challange()
7365 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); in rack_send_ack_challange()
7367 rack->probe_not_answered = 1; in rack_send_ack_challange()
7369 tcp_respond(rack->rc_tp, t_template->tt_ipgen, in rack_send_ack_challange()
7371 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); in rack_send_ack_challange()
7374 if (rack->rc_tp->t_flags & TF_DELACK) in rack_send_ack_challange()
7375 rack->rc_tp->t_flags &= ~TF_DELACK; in rack_send_ack_challange()
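/*
 * (From the fragment; the identifier is spelled this way in the source)
 * the probe builds a template from the inpcb and calls tcp_respond()
 * with a segment sequenced at snd_una - 1 to elicit an ACK, stamping
 * forced_ack_ts on the first probe, counting unanswered ones, and
 * clearing any pending delayed ACK so the forced one is not absorbed.
 */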
7391 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_timeout_persist() argument
7395 if (rack->rc_in_persist == 0) in rack_timeout_persist()
7399 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); in rack_timeout_persist()
7400 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7419 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7423 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && in rack_timeout_persist()
7425 rack_exit_persist(tp, rack, cts); in rack_timeout_persist()
7426 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; in rack_timeout_persist()
7435 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7439 if (rack_send_ack_challange(rack)) { in rack_timeout_persist()
7441 if (rack->probe_not_answered) { in rack_timeout_persist()
7443 rack->r_ctl.persist_lost_ends++; in rack_timeout_persist()
7451 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); in rack_timeout_persist()
7452 rack_start_hpts_timer(rack, tp, cts, in rack_timeout_persist()
7464 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_timeout_keepalive() argument
7468 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; in rack_timeout_keepalive()
7469 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); in rack_timeout_keepalive()
7492 rack_send_ack_challange(rack); in rack_timeout_keepalive()
7494 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); in rack_timeout_keepalive()
7514 struct tcp_rack *rack; in rack_remxt_tmr() local
7516 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_remxt_tmr()
7517 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); in rack_remxt_tmr()
7518 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); in rack_remxt_tmr()
7519 rack->r_timer_override = 1; in rack_remxt_tmr()
7520 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_remxt_tmr()
7521 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; in rack_remxt_tmr()
7522 rack->r_late = 0; in rack_remxt_tmr()
7523 rack->r_early = 0; in rack_remxt_tmr()
7524 rack->r_ctl.rc_agg_delayed = 0; in rack_remxt_tmr()
7525 rack->r_ctl.rc_agg_early = 0; in rack_remxt_tmr()
7526 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_remxt_tmr()
7527 rack_set_state(tp, rack); in rack_remxt_tmr()
7533 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7534 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7535 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7555 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7557 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_remxt_tmr()
7560 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_remxt_tmr()
7563 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_remxt_tmr()
7565 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); in rack_remxt_tmr()
7575 rack->r_ctl.rc_considered_lost = 0; in rack_remxt_tmr()
7577 rack->r_ctl.rc_sacked = 0; in rack_remxt_tmr()
7578 rack->r_ctl.rc_sacklast = NULL; in rack_remxt_tmr()
7580 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7581 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7582 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7583 rack->r_ctl.rc_prr_sndcnt = 0; in rack_remxt_tmr()
7584 rack_log_to_prr(rack, 6, 0, __LINE__); in rack_remxt_tmr()
7585 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7586 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7587 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7595 rack->r_must_retran = 1; in rack_remxt_tmr()
7596 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, in rack_remxt_tmr()
7597 rack->r_ctl.rc_sacked); in rack_remxt_tmr()
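
On retransmission timeout, rack_remxt_tmr() above resets the scoreboard: the sequence-ordered tqhash is walked and the time-ordered tmap rebuilt to match, SACK credit (rc_sacked, rc_considered_lost) is discarded since the RTO implies the scoreboard may be stale, and r_must_retran plus rc_out_at_rto record how much in-flight data must be resent before new data flows. A sketch of the list rebuild, assuming rsm_array stands in for the kernel's TQHASH_FOREACH walk:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/queue.h>      /* TAILQ_* macros, as the kernel uses */

    struct rsm {
            uint32_t start, end;
            TAILQ_ENTRY(rsm) tnext;
    };
    TAILQ_HEAD(tmap, rsm);

    /*
     * Re-thread every map entry onto the time-ordered list in sequence
     * order, the same INSERT_HEAD/INSERT_AFTER pattern as above.
     */
    static void
    rebuild_tmap(struct tmap *tm, struct rsm **rsm_array, size_t n)
    {
            struct rsm *trsm = NULL;

            TAILQ_INIT(tm);
            for (size_t i = 0; i < n; i++) {
                    if (trsm == NULL)
                            TAILQ_INSERT_HEAD(tm, rsm_array[i], tnext);
                    else
                            TAILQ_INSERT_AFTER(tm, trsm, rsm_array[i], tnext);
                    trsm = rsm_array[i];
            }
    }
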
7617 struct tcp_rack *rack; in rack_cc_conn_init() local
7620 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cc_conn_init()
7642 if (rc_init_window(rack) < tp->snd_cwnd) in rack_cc_conn_init()
7643 tp->snd_cwnd = rc_init_window(rack); in rack_cc_conn_init()
7651 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) in rack_timeout_rxt() argument
7667 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_rxt()
7668 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_rxt()
7674 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); in rack_timeout_rxt()
7677 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; in rack_timeout_rxt()
7678 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_rxt()
7679 rack->rc_ack_required = 1; in rack_timeout_rxt()
7680 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_rxt()
7682 (rack->rto_from_rec == 0)) { in rack_timeout_rxt()
7689 rack->rto_from_rec = 1; in rack_timeout_rxt()
7690 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; in rack_timeout_rxt()
7705 if (rack->r_ctl.dsack_persist) { in rack_timeout_rxt()
7706 rack->r_ctl.dsack_persist--; in rack_timeout_rxt()
7707 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_rxt()
7708 rack->r_ctl.num_dsack = 0; in rack_timeout_rxt()
7710 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); in rack_timeout_rxt()
7724 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_rxt()
7740 if ((rack->r_ctl.rc_resend == NULL) || in rack_timeout_rxt()
7741 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_timeout_rxt()
7792 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); in rack_timeout_rxt()
7927 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_timeout_rxt()
7937 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, ui… in rack_process_timers() argument
7940 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); in rack_process_timers()
7961 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_process_timers()
7962 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_process_timers()
7973 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) in rack_process_timers()
7978 rack->rc_on_min_to) { in rack_process_timers()
7991 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_process_timers()
7994 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_process_timers()
7996 rack_log_to_processing(rack, cts, ret, 0); in rack_process_timers()
8007 rack_log_to_processing(rack, cts, ret, 0); in rack_process_timers()
8016 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; in rack_process_timers()
8018 left = rack->r_ctl.rc_timer_exp - cts; in rack_process_timers()
8020 rack_log_to_processing(rack, cts, ret, left); in rack_process_timers()
8024 rack->rc_tmr_stopped = 0; in rack_process_timers()
8025 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; in rack_process_timers()
8027 ret = rack_timeout_delack(tp, rack, cts); in rack_process_timers()
8029 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8030 rack->r_fast_output = 0; in rack_process_timers()
8031 ret = rack_timeout_rack(tp, rack, cts); in rack_process_timers()
8033 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8034 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); in rack_process_timers()
8036 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8037 rack->r_fast_output = 0; in rack_process_timers()
8038 ret = rack_timeout_rxt(tp, rack, cts); in rack_process_timers()
8040 ret = rack_timeout_persist(tp, rack, cts); in rack_process_timers()
8042 ret = rack_timeout_keepalive(tp, rack, cts); in rack_process_timers()
8044 rack_log_to_processing(rack, cts, ret, timers); in rack_process_timers()
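
rack_process_timers() dispatches on the PACE_TMR_MASK bits of rc_hpts_flags: it returns early if output is pending or rc_timer_exp is still in the future, clears the mask, then runs exactly one handler (the RACK, TLP, and RXT legs also refresh rc_tlp_rxt_last_time and may clear r_fast_output first). A hedged sketch of the dispatch shape; the TMR_* values here are hypothetical, not the kernel's PACE_TMR_* constants:

    #include <stdint.h>

    #define TMR_DELACK  0x01    /* hypothetical flag values */
    #define TMR_RACK    0x02
    #define TMR_TLP     0x04
    #define TMR_RXT     0x08
    #define TMR_PERSIST 0x10
    #define TMR_KEEP    0x20
    #define TMR_MASK    0x3f

    static int
    dispatch_timer(uint32_t *hpts_flags, uint32_t cts, uint32_t timer_exp)
    {
            uint32_t timers = *hpts_flags & TMR_MASK;

            if (timers == 0)
                    return (0);         /* nothing armed */
            if (cts < timer_exp)
                    return (0);         /* not expired; kernel's TSTMP_LT
                                         * is the wrap-safe form of this */
            *hpts_flags &= ~TMR_MASK;   /* consume the armed timer */
            if (timers & TMR_DELACK)
                    return (1);         /* -> rack_timeout_delack() */
            if (timers & TMR_RACK)
                    return (2);         /* -> rack_timeout_rack() */
            if (timers & TMR_TLP)
                    return (3);         /* -> rack_timeout_tlp() */
            if (timers & TMR_RXT)
                    return (4);         /* -> rack_timeout_rxt() */
            if (timers & TMR_PERSIST)
                    return (5);         /* -> rack_timeout_persist() */
            return (6);                 /* -> rack_timeout_keepalive() */
    }
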
8049 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) in rack_timer_cancel() argument
8055 flags_on_entry = rack->r_ctl.rc_hpts_flags; in rack_timer_cancel()
8057 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_timer_cancel()
8058 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || in rack_timer_cancel()
8060 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8064 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_cancel()
8065 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); in rack_timer_cancel()
8067 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_timer_cancel()
8068 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_cancel()
8069 if (tcp_in_hpts(rack->rc_tp) && in rack_timer_cancel()
8070 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { in rack_timer_cancel()
8076 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8079 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); in rack_timer_cancel()
8082 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); in rack_timer_cancel()
8088 struct tcp_rack *rack; in rack_stopall() local
8090 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_stopall()
8091 rack->t_timers_stopped = 1; in rack_stopall()
8099 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) in rack_stop_all_timers() argument
8106 rack->rc_in_persist = 1; in rack_stop_all_timers()
8108 if (tcp_in_hpts(rack->rc_tp)) { in rack_stop_all_timers()
8109 tcp_hpts_remove(rack->rc_tp); in rack_stop_all_timers()
8114 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, in rack_update_rsm() argument
8126 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_update_rsm()
8129 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8139 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_update_rsm()
8140 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_update_rsm()
8141 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_update_rsm()
8142 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_update_rsm()
8144 rack->r_ctl.rc_considered_lost = 0; in rack_update_rsm()
8152 rsm->r_fas = ctf_flight_size(rack->rc_tp, in rack_update_rsm()
8153 rack->r_ctl.rc_sacked); in rack_update_rsm()
8157 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8160 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8165 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8170 if (rack->r_must_retran) in rack_update_rsm()
8171 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8172 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_update_rsm()
8177 rack->r_must_retran = 0; in rack_update_rsm()
8178 rack->r_ctl.rc_out_at_rto = 0; in rack_update_rsm()
8192 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, in rack_update_entry() argument
8211 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); in rack_update_entry()
8229 nrsm = rack_alloc_full_limit(rack); in rack_update_entry()
8244 rack_clone_rsm(rack, nrsm, rsm, c_end); in rack_update_entry()
8246 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); in rack_update_entry()
8248 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_update_entry()
8250 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_update_entry()
8252 nrsm, insret, rack, rsm); in rack_update_entry()
8256 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_update_entry()
8260 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); in rack_update_entry()
8262 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); in rack_update_entry()
8273 struct tcp_rack *rack; in rack_log_output() local
8315 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_output()
8350 rack->r_ctl.rc_prr_out += len; in rack_log_output()
8355 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); in rack_log_output()
8357 rsm = rack_alloc(rack); in rack_log_output()
8395 rsm->r_fas = (ctf_flight_size(rack->rc_tp, in rack_log_output()
8396 rack->r_ctl.rc_sacked) + in rack_log_output()
8398 if ((rack->rc_initial_ss_comp == 0) && in rack_log_output()
8399 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { in rack_log_output()
8400 rack->r_ctl.ss_hi_fs = rsm->r_fas; in rack_log_output()
8421 __func__, rack, s_moff, s_mb, rsm->soff)); in rack_log_output()
8432 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_log_output()
8434 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); in rack_log_output()
8436 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_log_output()
8438 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_log_output()
8440 nrsm, insret, rack, rsm); in rack_log_output()
8443 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_log_output()
8446 rack->r_ctl.pcm_i.send_time = cts; in rack_log_output()
8447 rack->r_ctl.pcm_i.eseq = rsm->r_end; in rack_log_output()
8449 if (rack->pcm_in_progress == 0) in rack_log_output()
8450 rack->r_ctl.pcm_i.sseq = rsm->r_start; in rack_log_output()
8459 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { in rack_log_output()
8462 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_log_output()
8480 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); in rack_log_output()
8489 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); in rack_log_output()
8492 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); in rack_log_output()
8505 nrsm = rack_alloc_full_limit(rack); in rack_log_output()
8507 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); in rack_log_output()
8514 rack_clone_rsm(rack, nrsm, rsm, seq_out); in rack_log_output()
8515 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); in rack_log_output()
8517 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_log_output()
8519 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_log_output()
8521 nrsm, insret, rack, rsm); in rack_log_output()
8525 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_log_output()
8529 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); in rack_log_output()
8547 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_log_output()
8553 rack, tp); in rack_log_output()
8573 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, in tcp_rack_xmit_timer() argument
8576 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8577 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { in tcp_rack_xmit_timer()
8578 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; in tcp_rack_xmit_timer()
8580 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8581 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { in tcp_rack_xmit_timer()
8582 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; in tcp_rack_xmit_timer()
8584 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in tcp_rack_xmit_timer()
8585 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) in tcp_rack_xmit_timer()
8586 rack->r_ctl.rc_gp_lowrtt = us_rtt; in tcp_rack_xmit_timer()
8587 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) in tcp_rack_xmit_timer()
8588 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in tcp_rack_xmit_timer()
8594 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { in tcp_rack_xmit_timer()
8608 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8609 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { in tcp_rack_xmit_timer()
8610 if (rack->r_ctl.rack_rs.confidence == 0) { in tcp_rack_xmit_timer()
8615 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8616 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8617 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8626 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8627 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8628 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8631 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); in tcp_rack_xmit_timer()
8632 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; in tcp_rack_xmit_timer()
8633 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; in tcp_rack_xmit_timer()
8634 rack->r_ctl.rack_rs.rs_rtt_cnt++; in tcp_rack_xmit_timer()
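
Each ACK may yield several RTT samples; tcp_rack_xmit_timer() folds them into the per-ACK summary rack_rs: running lowest/highest, a sum and count for the average, and a preferred microsecond sample where a confident measurement is not displaced by a low-confidence one. A simplified, self-contained accumulator in the same shape (the confidence tie-break here approximates the kernel conditions elided from the listing):

    #include <stdint.h>

    struct rtt_sum {                /* analogue of the rack_rs fields */
            int      empty;         /* RACK_RTT_EMPTY analogue */
            int32_t  lo, hi;
            uint64_t tot;
            int32_t  cnt;
            uint32_t us_rtt;        /* preferred microsecond sample */
            int      confidence;
    };

    static void
    rtt_sum_reset(struct rtt_sum *rs)
    {
            rs->empty = 1;
            rs->lo = rs->hi = 0;
            rs->tot = 0;
            rs->cnt = 0;
            rs->us_rtt = 0;
            rs->confidence = 0;
    }

    static void
    rtt_sum_add(struct rtt_sum *rs, int32_t rtt, uint32_t us_rtt,
        int confidence)
    {
            if (rs->empty || rtt < rs->lo)
                    rs->lo = rtt;
            if (rs->empty || rtt > rs->hi)
                    rs->hi = rtt;
            /*
             * Prefer the lower us_rtt, but do not let a low-confidence
             * sample displace a confident one.
             */
            if (rs->empty ||
                ((us_rtt < rs->us_rtt) && (confidence >= rs->confidence))) {
                    rs->us_rtt = us_rtt;
                    rs->confidence = confidence;
            }
            rs->tot += rtt;
            rs->cnt++;
            rs->empty = 0;
    }
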
8642 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) in tcp_rack_xmit_timer_commit() argument
8647 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) in tcp_rack_xmit_timer_commit()
8650 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { in tcp_rack_xmit_timer_commit()
8652 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; in tcp_rack_xmit_timer_commit()
8653 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { in tcp_rack_xmit_timer_commit()
8655 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; in tcp_rack_xmit_timer_commit()
8656 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { in tcp_rack_xmit_timer_commit()
8658 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / in tcp_rack_xmit_timer_commit()
8659 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); in tcp_rack_xmit_timer_commit()
8662 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); in tcp_rack_xmit_timer_commit()
8668 if (rack->rc_gp_rtt_set == 0) { in tcp_rack_xmit_timer_commit()
8673 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8674 rack->rc_gp_rtt_set = 1; in tcp_rack_xmit_timer_commit()
8675 } else if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8677 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); in tcp_rack_xmit_timer_commit()
8678 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; in tcp_rack_xmit_timer_commit()
8680 if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8685 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8686 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8688 if (rack->rc_highly_buffered == 0) { in tcp_rack_xmit_timer_commit()
8694 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { in tcp_rack_xmit_timer_commit()
8695 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, in tcp_rack_xmit_timer_commit()
8696 rack->r_ctl.rc_highest_us_rtt, in tcp_rack_xmit_timer_commit()
8697 rack->r_ctl.rc_lowest_us_rtt, in tcp_rack_xmit_timer_commit()
8699 rack->rc_highly_buffered = 1; in tcp_rack_xmit_timer_commit()
8703 if ((rack->r_ctl.rack_rs.confidence) || in tcp_rack_xmit_timer_commit()
8704 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { in tcp_rack_xmit_timer_commit()
8709 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8711 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8712 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8713 if (rack->r_ctl.rc_lowest_us_rtt == 0) in tcp_rack_xmit_timer_commit()
8714 rack->r_ctl.rc_lowest_us_rtt = 1; in tcp_rack_xmit_timer_commit()
8717 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_xmit_timer_commit()
8752 rack->rc_srtt_measure_made = 1; in tcp_rack_xmit_timer_commit()
8772 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8776 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8778 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8780 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in tcp_rack_xmit_timer_commit()
8793 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); in tcp_rack_xmit_timer_commit()
8794 rack_log_rtt_sample(rack, rtt); in tcp_rack_xmit_timer_commit()
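
The commit step above first selects one representative RTT per rc_rate_sample_method (USE_RTT_LOW, USE_RTT_HIGH, or USE_RTT_AVG as rs_rtt_tot / rs_rtt_cnt), then folds the preferred microsecond sample into the goodput smoothed RTT with a gain-1/8 EWMA, the same recurrence as the classic srtt estimator. A minimal self-contained example of that update (all surrounding bookkeeping omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Gain-1/8 EWMA, matching the rc_gp_srtt update above. */
    static uint32_t
    ewma_srtt(uint32_t srtt, uint32_t sample)
    {
            srtt -= srtt / 8;
            srtt += sample / 8;
            return (srtt);
    }

    int
    main(void)
    {
            uint32_t srtt = 40000;          /* 40 ms, in usec */

            /* One 80 ms sample moves the estimate 1/8 of the way. */
            srtt = ewma_srtt(srtt, 80000);
            printf("%u\n", srtt);           /* prints 45000 */
            return (0);
    }
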
8800 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) in rack_apply_updated_usrtt() argument
8807 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_apply_updated_usrtt()
8808 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, in rack_apply_updated_usrtt()
8812 rack_log_rtt_shrinks(rack, us_cts, old_rtt, in rack_apply_updated_usrtt()
8820 rack->rc_gp_dyn_mul && in rack_apply_updated_usrtt()
8821 (rack->use_fixed_rate == 0) && in rack_apply_updated_usrtt()
8822 (rack->rc_always_pace)) { in rack_apply_updated_usrtt()
8833 if ((rack->in_probe_rtt == 0) && in rack_apply_updated_usrtt()
8834 (rack->rc_skip_timely == 0) && in rack_apply_updated_usrtt()
8835 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { in rack_apply_updated_usrtt()
8836 rack_enter_probertt(rack, us_cts); in rack_apply_updated_usrtt()
8839 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_apply_updated_usrtt()
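
rack_apply_updated_usrtt() pushes each new microsecond RTT through a small windowed minimum filter (rc_gp_min_rtt) and, with dynamic multipliers on and fixed-rate pacing off, enters probe-RTT when no new floor has been seen for roughly rack_time_between_probertt. The kernel uses its own filter_small helpers; the following is only a generic sketch of a time-windowed minimum:

    #include <stdint.h>

    /*
     * Hypothetical windowed minimum: keep the smallest value seen and
     * when it was recorded; expire it after win_usecs so a stale floor
     * cannot persist forever.
     */
    struct win_min {
            uint32_t val;
            uint32_t stamp;     /* when the current minimum was set */
    };

    static uint32_t
    win_min_apply(struct win_min *f, uint32_t v, uint32_t now,
        uint32_t win_usecs)
    {
            if ((v <= f->val) || ((now - f->stamp) >= win_usecs)) {
                    f->val = v;         /* new minimum, or old one expired */
                    f->stamp = now;
            }
            return (f->val);
    }
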
8845 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, in rack_update_rtt() argument
8879 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8880 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8881 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8882 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8883 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8886 …if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)… in rack_update_rtt()
8887 …us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr… in rack_update_rtt()
8896 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_update_rtt()
8898 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); in rack_update_rtt()
8899 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8934 } else if (rack->app_limited_needs_set == 0) { in rack_update_rtt()
8939 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); in rack_update_rtt()
8940 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, in rack_update_rtt()
8946 if (rack->r_ctl.rc_tlp_cwnd_reduce) { in rack_update_rtt()
8950 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8951 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8954 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8955 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8956 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8957 rack->rc_rack_rtt = t; in rack_update_rtt()
8968 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_update_rtt()
8991 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) in rack_update_rtt()
8992 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9011 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9012 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9013 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9014 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9017 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9018 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9021 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
9022 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9023 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9024 rack->rc_rack_rtt = t; in rack_update_rtt()
9026 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); in rack_update_rtt()
9027 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, in rack_update_rtt()
9033 if (tcp_bblogging_on(rack->rc_tp)) { in rack_update_rtt()
9035 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); in rack_update_rtt()
9051 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9062 } else if (rack->r_ctl.rc_rack_min_rtt) { in rack_update_rtt()
9067 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
9068 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9069 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9070 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9071 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9074 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9075 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9078 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9079 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9080 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9081 rack->rc_rack_rtt = t; in rack_update_rtt()
9094 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) in rack_log_sack_passed() argument
9100 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); in rack_log_sack_passed()
9103 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, in rack_log_sack_passed()
9132 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_log_sack_passed()
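
rack_log_sack_passed() walks the time-ordered tmap backwards from a newly SACKed entry: everything sent earlier is flagged as SACK-passed, and entries whose send time is older than the RACK reordering threshold (from rack_calc_thresh_rack()) are added to rc_considered_lost. This time-based rule, not a dupack count, is what declares loss in RACK. A simplified sketch, assuming segs[] is ordered by send time with the newly SACKed entry at idx:

    #include <stddef.h>
    #include <stdint.h>

    struct sent_seg {
            uint32_t start, end;    /* [start, end) */
            uint64_t tx_time;       /* usec timestamp of (last) send */
            int      sack_passed;
            int      lost;
    };

    /* Returns the bytes newly considered lost. */
    static uint32_t
    mark_sack_passed(struct sent_seg *segs, size_t idx, uint64_t now,
        uint64_t thresh)
    {
            uint32_t lost_bytes = 0;

            for (size_t i = idx; i-- > 0; ) {
                    segs[i].sack_passed = 1;    /* sent before a SACKed seg */
                    if (segs[i].tx_time + thresh <= now) {
                            segs[i].lost = 1;   /* past the reorder window */
                            lost_bytes += segs[i].end - segs[i].start;
                    }
            }
            return (lost_bytes);                /* feeds rc_considered_lost */
    }
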
9150 struct tcp_rack *rack, in rack_need_set_test() argument
9184 rack->app_limited_needs_set = 0; in rack_need_set_test()
9185 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_need_set_test()
9226 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_need_set_test()
9240 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; in rack_need_set_test()
9243 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_need_set_test()
9245 rack->app_limited_needs_set = 1; in rack_need_set_test()
9252 if (rack->rc_gp_filled == 0) { in rack_need_set_test()
9253 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); in rack_need_set_test()
9255 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_need_set_test()
9262 if ((rack->in_probe_rtt == 0) && in rack_need_set_test()
9263 (rack->measure_saw_probe_rtt) && in rack_need_set_test()
9264 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_need_set_test()
9265 rack->measure_saw_probe_rtt = 0; in rack_need_set_test()
9266 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, in rack_need_set_test()
9268 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9269 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9271 if (rack->rc_gp_filled && in rack_need_set_test()
9273 max(rc_init_window(rack), (MIN_GP_WIN * in rack_need_set_test()
9277 ideal_amount = rack_get_measure_window(tp, rack); in rack_need_set_test()
9286 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_need_set_test()
9288 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9289 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9298 rack_tend_gp_marks(tp, rack); in rack_need_set_test()
9299 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); in rack_need_set_test()
9304 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) in is_rsm_inside_declared_tlp_block() argument
9306 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { in is_rsm_inside_declared_tlp_block()
9310 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { in is_rsm_inside_declared_tlp_block()
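
Despite the "inside" in its name, is_rsm_inside_declared_tlp_block() is a sequence-space overlap test: the rsm misses the declared TLP block only if it ends before the block starts or begins after it ends. Because TCP sequence numbers wrap, the comparisons must be the wrap-safe SEQ_LT/SEQ_GT rather than raw <. A self-contained equivalent:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe sequence comparisons, as in the kernel's tcp_seq.h. */
    #define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)
    #define SEQ_GT(a, b)    ((int32_t)((a) - (b)) > 0)

    static bool
    overlaps_tlp_block(uint32_t r_start, uint32_t r_end,
        uint32_t blk_start, uint32_t blk_end)
    {
            if (SEQ_LT(r_end, blk_start))
                    return (false);     /* entirely before the block */
            if (SEQ_GT(r_start, blk_end))
                    return (false);     /* entirely after the block */
            return (true);              /* some byte overlaps */
    }
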
9319 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, in rack_proc_sack_blk() argument
9344 rsm = tqhash_find(rack->r_ctl.tqh, start); in rack_proc_sack_blk()
9363 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9364 (is_rsm_inside_declared_tlp_block(rack, rsm))) { in rack_proc_sack_blk()
9371 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9375 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9376 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9377 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9378 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9380 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9381 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9382 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9383 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9386 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9387 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9388 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9389 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9390 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9414 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9449 tqhash_update_end(rack->r_ctl.tqh, rsm, start); in rack_proc_sack_blk()
9454 rack_setup_offset_for_rsm(rack, rsm, next); in rack_proc_sack_blk()
9480 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9481 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9488 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_proc_sack_blk()
9492 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); in rack_proc_sack_blk()
9493 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9494 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9496 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9501 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9502 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_proc_sack_blk()
9503 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9504 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9506 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9509 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9510 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9511 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9530 rack_log_sack_passed(tp, rack, nrsm, cts); in rack_proc_sack_blk()
9538 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); in rack_proc_sack_blk()
9542 rsm = tqhash_next(rack->r_ctl.tqh, next); in rack_proc_sack_blk()
9563 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); in rack_proc_sack_blk()
9572 rack_clone_rsm(rack, nrsm, rsm, start); in rack_proc_sack_blk()
9575 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9577 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9579 nrsm, insret, rack, rsm); in rack_proc_sack_blk()
9583 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9586 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); in rack_proc_sack_blk()
9596 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9600 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9609 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9634 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9635 (is_rsm_inside_declared_tlp_block(rack, rsm))) { in rack_proc_sack_blk()
9641 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9645 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9646 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9647 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9648 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9650 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9651 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9652 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9653 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9656 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9657 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9658 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9659 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9660 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9663 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); in rack_proc_sack_blk()
9671 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9672 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_proc_sack_blk()
9673 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9674 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9676 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9678 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9680 rack_log_sack_passed(tp, rack, rsm, cts); in rack_proc_sack_blk()
9684 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9685 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9686 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9688 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9689 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9690 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9692 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9694 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9697 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); in rack_proc_sack_blk()
9709 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9732 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9733 (is_rsm_inside_declared_tlp_block(rack, rsm))) { in rack_proc_sack_blk()
9739 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9743 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9744 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9745 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9746 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9748 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9749 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9750 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9751 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9754 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9755 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9756 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9757 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9758 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9766 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9799 tqhash_update_end(rack->r_ctl.tqh, prev, end); in rack_proc_sack_blk()
9836 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9837 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9839 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_proc_sack_blk()
9844 rack_setup_offset_for_rsm(rack, prev, rsm); in rack_proc_sack_blk()
9851 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); in rack_proc_sack_blk()
9852 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9853 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9855 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9860 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9861 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_proc_sack_blk()
9862 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9863 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9865 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9868 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9869 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9870 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9872 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); in rack_proc_sack_blk()
9881 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); in rack_proc_sack_blk()
9892 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9893 (is_rsm_inside_declared_tlp_block(rack, rsm))) { in rack_proc_sack_blk()
9899 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9903 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9904 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9905 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9906 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9908 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9909 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9910 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9911 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9914 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9915 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9916 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9917 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9918 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9940 rack_clone_rsm(rack, nrsm, rsm, end); in rack_proc_sack_blk()
9944 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9946 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9948 nrsm, insret, rack, rsm); in rack_proc_sack_blk()
9952 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9956 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); in rack_proc_sack_blk()
9957 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); in rack_proc_sack_blk()
9964 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9965 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_proc_sack_blk()
9966 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9967 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9969 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9971 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9974 rack_log_sack_passed(tp, rack, rsm, cts); in rack_proc_sack_blk()
9978 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9979 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9980 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9982 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9983 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9984 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9986 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9987 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); in rack_proc_sack_blk()
9989 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
10008 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10033 rsm = rack_merge_rsm(rack, rsm, next); in rack_proc_sack_blk()
10034 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10039 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10064 rsm = rack_merge_rsm(rack, prev, rsm); in rack_proc_sack_blk()
10065 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10076 nrsm = tqhash_find(rack->r_ctl.tqh, end); in rack_proc_sack_blk()
10077 *prsm = rack->r_ctl.rc_sacklast = nrsm; in rack_proc_sack_blk()
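
The MAP_SACK_M1..M5 tags logged above are the five ways a SACK block can land on the sendmap: clipping the front of an entry with the SACKed bytes merged into next (M1), starting mid-entry so the entry is split and its tail SACKed (M2), swallowing whole entries (M3), being absorbed by extending an already-SACKed prev (M4), or ending mid-entry so an un-SACKed tail is split off (M5); adjacent fully-SACKed entries are then re-merged via rack_merge_rsm() to keep the map small. A hedged sketch of the split primitive the M2/M5 cases rest on, mirroring rack_clone_rsm() plus tqhash_insert() in spirit only:

    #include <stdint.h>
    #include <stdlib.h>

    struct map_ent {
            uint32_t start, end;    /* [start, end) */
            int      sacked;
    };

    /*
     * Split *e at seq (start < seq < end); the original keeps the head
     * and the returned entry takes the tail. Allocation failure plays
     * the role of rack_alloc_limit() returning NULL.
     */
    static struct map_ent *
    split_entry(struct map_ent *e, uint32_t seq)
    {
            struct map_ent *tail = malloc(sizeof(*tail));

            if (tail == NULL)
                    return (NULL);
            tail->start = seq;
            tail->end = e->end;
            tail->sacked = e->sacked;
            e->end = seq;
            return (tail);
    }
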
10082 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) in rack_peer_reneges() argument
10089 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_peer_reneges()
10093 rack, rsm, rsm->r_flags); in rack_peer_reneges()
10099 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_peer_reneges()
10102 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); in rack_peer_reneges()
10106 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_peer_reneges()
10112 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); in rack_peer_reneges()
10118 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t f… in rack_rsm_sender_update() argument
10180 if (rack->app_limited_needs_set) in rack_rsm_sender_update()
10199 rack->r_ctl.rc_gp_cumack_ts) in rack_rsm_sender_update()
10202 rack->r_ctl.rc_gp_cumack_ts = ts; in rack_rsm_sender_update()
10203 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, in rack_rsm_sender_update()
10209 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t … in rack_process_to_cumack() argument
10218 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { in rack_process_to_cumack()
10223 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); in rack_process_to_cumack()
10227 rack->r_ctl.cleared_app_ack = 0; in rack_process_to_cumack()
10229 rack->r_wanted_output = 1; in rack_process_to_cumack()
10231 rack->r_ctl.last_cumack_advance = acktime; in rack_process_to_cumack()
10234 if ((rack->rc_last_tlp_acked_set == 1)&& in rack_process_to_cumack()
10235 (rack->rc_last_tlp_past_cumack == 1) && in rack_process_to_cumack()
10236 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { in rack_process_to_cumack()
10250 rack_log_dsack_event(rack, 9, __LINE__, in rack_process_to_cumack()
10251 rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10252 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10253 rack->rc_last_tlp_acked_set = 0; in rack_process_to_cumack()
10254 rack->rc_last_tlp_past_cumack = 0; in rack_process_to_cumack()
10255 } else if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10256 (rack->rc_last_tlp_past_cumack == 0) && in rack_process_to_cumack()
10257 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { in rack_process_to_cumack()
10261 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10264 if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10265 (rack->rc_last_sent_tlp_past_cumack == 1) && in rack_process_to_cumack()
10266 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { in rack_process_to_cumack()
10267 rack_log_dsack_event(rack, 9, __LINE__, in rack_process_to_cumack()
10268 rack->r_ctl.last_sent_tlp_seq, in rack_process_to_cumack()
10269 (rack->r_ctl.last_sent_tlp_seq + in rack_process_to_cumack()
10270 rack->r_ctl.last_sent_tlp_len)); in rack_process_to_cumack()
10271 rack->rc_last_sent_tlp_seq_valid = 0; in rack_process_to_cumack()
10272 rack->rc_last_sent_tlp_past_cumack = 0; in rack_process_to_cumack()
10273 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10274 (rack->rc_last_sent_tlp_past_cumack == 0) && in rack_process_to_cumack()
10275 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { in rack_process_to_cumack()
10279 rack->rc_last_sent_tlp_past_cumack = 1; in rack_process_to_cumack()
10282 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10300 tp->t_state, th_ack, rack, in rack_process_to_cumack()
10310 th_ack, tp->t_state, rack->r_state); in rack_process_to_cumack()
10314 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); in rack_process_to_cumack()
10328 if (rack->rc_last_tlp_acked_set && in rack_process_to_cumack()
10329 (is_rsm_inside_declared_tlp_block(rack, rsm))) { in rack_process_to_cumack()
10335 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10339 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_process_to_cumack()
10340 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10341 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10342 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10344 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_process_to_cumack()
10345 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10346 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10347 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10350 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10351 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10352 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10353 rack->rc_last_tlp_acked_set = 1; in rack_process_to_cumack()
10354 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10358 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_process_to_cumack()
10371 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_process_to_cumack()
10372 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); in rack_process_to_cumack()
10373 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_process_to_cumack()
10374 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_process_to_cumack()
10376 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10378 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); in rack_process_to_cumack()
10379 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; in rack_process_to_cumack()
10385 rack_rsm_sender_update(rack, tp, rsm, 4); in rack_process_to_cumack()
10386 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_process_to_cumack()
10388 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_process_to_cumack()
10397 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_process_to_cumack()
10406 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_process_to_cumack()
10408 rack->r_ctl.rc_reorder_ts = cts; in rack_process_to_cumack()
10409 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_to_cumack()
10410 rack->r_ctl.rc_reorder_ts = 1; in rack_process_to_cumack()
10411 if (rack->r_ent_rec_ns) { in rack_process_to_cumack()
10416 rack->r_might_revert = 1; in rack_process_to_cumack()
10418 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10420 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10439 if (rack->app_limited_needs_set && newly_acked) in rack_process_to_cumack()
10440 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); in rack_process_to_cumack()
10442 rack_free(rack, rsm); in rack_process_to_cumack()
10447 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10463 rack_peer_reneges(rack, rsm, th_ack); in rack_process_to_cumack()
10472 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); in rack_process_to_cumack()
10474 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); in rack_process_to_cumack()
10484 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), in rack_process_to_cumack()
10485 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); in rack_process_to_cumack()
10486 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) in rack_process_to_cumack()
10487 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; in rack_process_to_cumack()
10489 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10496 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); in rack_process_to_cumack()
10506 rack->r_ctl.rc_holes_rxt -= ack_am; in rack_process_to_cumack()
10515 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); in rack_process_to_cumack()
10524 rack_rsm_sender_update(rack, tp, rsm, 5); in rack_process_to_cumack()
10526 tqhash_trim(rack->r_ctl.tqh, th_ack); in rack_process_to_cumack()
10551 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_process_to_cumack()
10566 if (rack->app_limited_needs_set && in rack_process_to_cumack()
10568 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); in rack_process_to_cumack()
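
rack_process_to_cumack() retires the map under a cumulative ACK: entries wholly below th_ack are freed (MAP_FREE), a partially covered head is trimmed in place (tqhash_trim, MAP_TRIM_HEAD), the SACK/lost/rxt byte counters are kept consistent as bytes leave the map, and a SACKed block discovered below the new snd_una means the peer reneged and is handed to rack_peer_reneges(). A small sketch of the partial-trim step; struct map_ent and trim_head() are illustrative only:

    #include <stdint.h>

    struct map_ent {
            uint32_t start, end;    /* [start, end) */
    };

    /*
     * The cumulative ACK th_ack lands inside the first map entry, so
     * only its head is consumed (the tqhash_trim() step). Returns the
     * bytes newly cum-acked out of this entry.
     */
    static uint32_t
    trim_head(struct map_ent *e, uint32_t th_ack)
    {
            uint32_t acked = th_ack - e->start;

            e->start = th_ack;
            return (acked);
    }
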
10572 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) in rack_handle_might_revert() argument
10577 if (rack->r_might_revert) { in rack_handle_might_revert()
10588 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_handle_might_revert()
10601 rack->r_ent_rec_ns = 0; in rack_handle_might_revert()
10603 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; in rack_handle_might_revert()
10605 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); in rack_handle_might_revert()
10607 rack_exit_recovery(tp, rack, 3); in rack_handle_might_revert()
10608 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ in rack_handle_might_revert()
10619 rack->rto_from_rec = 0; in rack_handle_might_revert()
10620 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_handle_might_revert()
10621 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_handle_might_revert()
10625 rack->r_might_revert = 0; in rack_handle_might_revert()
10631 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) in rack_note_dsack() argument
10641 if ((rack->rc_last_tlp_acked_set ) && in rack_note_dsack()
10642 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && in rack_note_dsack()
10643 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { in rack_note_dsack()
10650 rack_log_dsack_event(rack, 7, __LINE__, start, end); in rack_note_dsack()
10654 if (rack->rc_last_sent_tlp_seq_valid) { in rack_note_dsack()
10655 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; in rack_note_dsack()
10656 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && in rack_note_dsack()
10662 rack_log_dsack_event(rack, 7, __LINE__, start, end); in rack_note_dsack()
10667 if (rack->rc_dsack_round_seen == 0) { in rack_note_dsack()
10668 rack->rc_dsack_round_seen = 1; in rack_note_dsack()
10669 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; in rack_note_dsack()
10670 rack->r_ctl.num_dsack++; in rack_note_dsack()
10671 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ in rack_note_dsack()
10672 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); in rack_note_dsack()
10679 rack->r_ctl.dsack_byte_cnt += am; in rack_note_dsack()
10680 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_note_dsack()
10681 rack->r_ctl.retran_during_recovery && in rack_note_dsack()
10682 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { in rack_note_dsack()
10687 rack->r_might_revert = 1; in rack_note_dsack()
10688 rack_handle_might_revert(rack->rc_tp, rack); in rack_note_dsack()
10689 rack->r_might_revert = 0; in rack_note_dsack()
10690 rack->r_ctl.retran_during_recovery = 0; in rack_note_dsack()
10691 rack->r_ctl.dsack_byte_cnt = 0; in rack_note_dsack()
10697 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) in do_rack_compute_pipe() argument
10700 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); in do_rack_compute_pipe()
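
do_rack_compute_pipe() is the single expression above: pipe = (snd_max - snd_una) - (rc_sacked + rc_considered_lost) + rc_holes_rxt, i.e. bytes outstanding, minus what has been SACKed or already written off as lost, plus retransmitted holes that are back in flight. A standalone equivalent:

    #include <stdint.h>

    /* Outstanding data the network is still believed to hold. */
    static uint32_t
    compute_pipe(uint32_t snd_max, uint32_t snd_una,
        uint32_t sacked, uint32_t considered_lost, uint32_t holes_rxt)
    {
            return (((snd_max - snd_una) -
                (sacked + considered_lost)) + holes_rxt);
    }
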
10712 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) in rack_update_prr() argument
10717 rack->r_ctl.rc_prr_delivered += changed; in rack_update_prr()
10719 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { in rack_update_prr()
10726 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10735 pipe = do_rack_compute_pipe(tp, rack, snd_una); in rack_update_prr()
10739 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; in rack_update_prr()
10740 if (rack->r_ctl.rc_prr_recovery_fs > 0) in rack_update_prr()
10741 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; in rack_update_prr()
10743 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10744 rack_log_to_prr(rack, 9, 0, __LINE__); in rack_update_prr()
10748 if (sndcnt > (long)rack->r_ctl.rc_prr_out) in rack_update_prr()
10749 sndcnt -= rack->r_ctl.rc_prr_out; in rack_update_prr()
10752 rack->r_ctl.rc_prr_sndcnt = sndcnt; in rack_update_prr()
10753 rack_log_to_prr(rack, 10, 0, __LINE__); in rack_update_prr()
10757 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) in rack_update_prr()
10758 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); in rack_update_prr()
10765 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); in rack_update_prr()
10766 rack_log_to_prr(rack, 11, 0, __LINE__); in rack_update_prr()
10768 rack->r_ctl.rc_prr_sndcnt = min(0, limit); in rack_update_prr()
10769 rack_log_to_prr(rack, 12, 0, __LINE__); in rack_update_prr()
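
rack_update_prr() is Proportional Rate Reduction (RFC 6937). While pipe exceeds ssthresh, sends are rationed so delivery shrinks proportionally: sndcnt = prr_delivered * ssthresh / RecoverFS - prr_out; once pipe drops below ssthresh, the slow-start-like limb tops up with min(ssthresh - pipe, limit), limit being bounded by newly delivered data. A self-contained sketch of the proportional limb (rc_prr_recovery_fs is RecoverFS, the flight size at recovery entry):

    #include <stdint.h>

    /* RFC 6937 proportional limb, as in the sndcnt computation above. */
    static uint32_t
    prr_sndcnt(uint64_t prr_delivered, uint64_t ssthresh,
        uint64_t recover_fs, uint64_t prr_out)
    {
            uint64_t sndcnt = 0;

            if (recover_fs > 0)
                    sndcnt = (prr_delivered * ssthresh) / recover_fs;
            if (sndcnt > prr_out)
                    sndcnt -= prr_out;  /* credit what was already sent */
            else
                    sndcnt = 0;
            return ((uint32_t)sndcnt);
    }
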
10779 struct tcp_rack *rack; in rack_log_ack() local
10795 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_ack()
10797 rsm = tqhash_min(rack->r_ctl.tqh); in rack_log_ack()
10800 segsiz = ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10809 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10813 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); in rack_log_ack()
10819 rack_process_to_cumack(tp, rack, th_ack, cts, to, in rack_log_ack()
10820 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_log_ack()
10824 rack_handle_might_revert(tp, rack); in rack_log_ack()
10831 changed += ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10858 was_tlp = rack_note_dsack(rack, sack.start, sack.end); in rack_log_ack()
10865 if (rack->rc_dsack_round_seen) { in rack_log_ack()
10867 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { in rack_log_ack()
10869 rack->rc_dsack_round_seen = 0; in rack_log_ack()
10870 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); in rack_log_ack()
10877 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, in rack_log_ack()
10879 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); in rack_log_ack()
10946 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_log_ack()
10954 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); in rack_log_ack()
10956 rack->r_wanted_output = 1; in rack_log_ack()
10970 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); in rack_log_ack()
10982 rsm = rack->r_ctl.rc_sacklast; in rack_log_ack()
10984 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); in rack_log_ack()
10986 rack->r_wanted_output = 1; in rack_log_ack()
11005 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_log_ack()
11008 rsm = tcp_rack_output(tp, rack, tsused); in rack_log_ack()
11019 if (rack->rack_no_prr == 0) { in rack_log_ack()
11020 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_log_ack()
11021 rack_log_to_prr(rack, 8, 0, __LINE__); in rack_log_ack()
11023 rack->r_timer_override = 1; in rack_log_ack()
11024 rack->r_early = 0; in rack_log_ack()
11025 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11028 (rack->r_rr_config == 3)) { in rack_log_ack()
11033 rack->r_timer_override = 1; in rack_log_ack()
11034 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_log_ack()
11035 rack->r_ctl.rc_resend = rsm; in rack_log_ack()
11038 (rack->rack_no_prr == 0) && in rack_log_ack()
11040 rack_update_prr(tp, rack, changed, th_ack); in rack_log_ack()
11041 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && in rack_log_ack()
11042 ((tcp_in_hpts(rack->rc_tp) == 0) && in rack_log_ack()
11043 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { in rack_log_ack()
11048 rack->r_early = 0; in rack_log_ack()
11049 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11050 rack->r_timer_override = 1; in rack_log_ack()
11056 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) in rack_strike_dupack() argument
11060 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_strike_dupack()
11086 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); in rack_strike_dupack()
11087 if (rack->r_ctl.rc_resend != NULL) { in rack_strike_dupack()
11088 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { in rack_strike_dupack()
11089 rack_cong_signal(rack->rc_tp, CC_NDUPACK, in rack_strike_dupack()
11092 rack->r_wanted_output = 1; in rack_strike_dupack()
11093 rack->r_timer_override = 1; in rack_strike_dupack()
11094 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); in rack_strike_dupack()
11097 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); in rack_strike_dupack()
11104 struct tcp_rack *rack, in rack_check_bottom_drag() argument
11142 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); in rack_check_bottom_drag()
11143 lt_bw = rack_get_lt_bw(rack); in rack_check_bottom_drag()
11144 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11145 rack_validate_multipliers_at_or_above100(rack); in rack_check_bottom_drag()
11146 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && in rack_check_bottom_drag()
11147 (rack->dis_lt_bw == 0) && in rack_check_bottom_drag()
11148 (rack->use_lesser_lt_bw == 0) && in rack_check_bottom_drag()
11154 if (rack->rc_gp_filled == 0) { in rack_check_bottom_drag()
11166 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11167 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11168 rack->rc_gp_filled = 1; in rack_check_bottom_drag()
11169 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11170 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11171 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11172 } else if (lt_bw > rack->r_ctl.gp_bw) { in rack_check_bottom_drag()
11173 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11174 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11175 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11176 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11177 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11179 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11180 if ((rack->gp_ready == 0) && in rack_check_bottom_drag()
11181 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_check_bottom_drag()
11183 rack->gp_ready = 1; in rack_check_bottom_drag()
11184 if (rack->dgp_on || in rack_check_bottom_drag()
11185 rack->rack_hibeta) in rack_check_bottom_drag()
11186 rack_set_cc_pacing(rack); in rack_check_bottom_drag()
11187 if (rack->defer_options) in rack_check_bottom_drag()
11188 rack_apply_deferred_options(rack); in rack_check_bottom_drag()
11194 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11199 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11201 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= in rack_check_bottom_drag()
11211 rack_validate_multipliers_at_or_above100(rack); in rack_check_bottom_drag()
11212 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11213 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11219 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, in rack_log_hybrid() argument
11224 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid()
11226 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0) in rack_log_hybrid()
11262 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid()
11271 log.u_bbr.flex7 = rack->rc_catch_up; in rack_log_hybrid()
11273 log.u_bbr.flex7 |= rack->rc_hybrid_mode; in rack_log_hybrid()
11275 log.u_bbr.flex7 |= rack->dgp_on; in rack_log_hybrid()
11283 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid()
11285 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid()
11287 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid()
11289 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid()
11291 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; in rack_log_hybrid()
11292 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; in rack_log_hybrid()
11293 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_hybrid()
11294 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; in rack_log_hybrid()
11295 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; in rack_log_hybrid()
11296 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; in rack_log_hybrid()
11297 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid()
11298 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid()
11299 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid()
11308 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) in rack_set_dgp_hybrid_mode() argument
11314 orig_ent = rack->r_ctl.rc_last_sft; in rack_set_dgp_hybrid_mode()
11315 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); in rack_set_dgp_hybrid_mode()
11318 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11319 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11320 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); in rack_set_dgp_hybrid_mode()
11327 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11328 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11329 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11330 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11331 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11333 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11335 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11336 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11338 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11339 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11345 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11346 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11347 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11348 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11350 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11351 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11356 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11357 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11368 tp = rack->rc_tp; in rack_set_dgp_hybrid_mode()
11369 if ((rack->r_ctl.rc_last_sft != NULL) && in rack_set_dgp_hybrid_mode()
11370 (rack->r_ctl.rc_last_sft == rc_cur)) { in rack_set_dgp_hybrid_mode()
11372 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11373 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); in rack_set_dgp_hybrid_mode()
11376 if (rack->rc_hybrid_mode == 0) { in rack_set_dgp_hybrid_mode()
11377 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11379 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11380 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11383 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); in rack_set_dgp_hybrid_mode()
11388 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11389 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11391 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11393 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11394 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11395 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11397 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11401 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; in rack_set_dgp_hybrid_mode()
11403 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11404 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { in rack_set_dgp_hybrid_mode()
11416 rack->rc_catch_up = 1; in rack_set_dgp_hybrid_mode()
11458 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11461 if (rack->r_ctl.client_suggested_maxseg != 0) { in rack_set_dgp_hybrid_mode()
11466 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_set_dgp_hybrid_mode()
11469 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11470 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11473 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); in rack_set_dgp_hybrid_mode()
11475 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11476 rack->r_ctl.last_tm_mark = rc_cur->timestamp; in rack_set_dgp_hybrid_mode()
11481 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) in rack_chk_req_and_hybrid_on_out() argument
11486 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11491 rack_set_dgp_hybrid_mode(rack, seq, len, cts); in rack_chk_req_and_hybrid_on_out()
11492 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11510 if (rack->rc_hybrid_mode) in rack_chk_req_and_hybrid_on_out()
11511 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); in rack_chk_req_and_hybrid_on_out()
11517 ent->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_chk_req_and_hybrid_on_out()
11518 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_chk_req_and_hybrid_on_out()
11524 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked… in rack_gain_for_fastoutput() argument
11547 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; in rack_gain_for_fastoutput()
11553 rack->r_ctl.fsb.left_to_send = new_total; in rack_gain_for_fastoutput()
11554 …KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_ma… in rack_gain_for_fastoutput()
11556 rack, rack->r_ctl.fsb.left_to_send, in rack_gain_for_fastoutput()
11557 sbavail(&rack->rc_inp->inp_socket->so_snd), in rack_gain_for_fastoutput()
11564 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) in rack_adjust_sendmap_head() argument
11597 snd_una = rack->rc_tp->snd_una; in rack_adjust_sendmap_head()
11600 rsm = tqhash_min(rack->r_ctl.tqh); in rack_adjust_sendmap_head()
11608 rack, sb, rsm)); in rack_adjust_sendmap_head()
11623 rack, rsm)); in rack_adjust_sendmap_head()
11654 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_adjust_sendmap_head()
11662 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) in rack_req_check_for_comp() argument
11667 if ((rack->rc_hybrid_mode == 0) && in rack_req_check_for_comp()
11668 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { in rack_req_check_for_comp()
11673 tcp_req_check_for_comp(rack->rc_tp, th_ack); in rack_req_check_for_comp()
11683 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11693 rack_log_hybrid(rack, th_ack, in rack_req_check_for_comp()
11695 rack_log_hybrid_sends(rack, ent, __LINE__); in rack_req_check_for_comp()
11698 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_req_check_for_comp()
11717 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); in rack_req_check_for_comp()
11723 if (ent == rack->r_ctl.rc_last_sft) { in rack_req_check_for_comp()
11724 rack->r_ctl.rc_last_sft = NULL; in rack_req_check_for_comp()
11725 if (rack->rc_hybrid_mode) { in rack_req_check_for_comp()
11726 rack->rc_catch_up = 0; in rack_req_check_for_comp()
11727 if (rack->cspr_is_fcc == 0) in rack_req_check_for_comp()
11728 rack->r_ctl.bw_rate_cap = 0; in rack_req_check_for_comp()
11730 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_req_check_for_comp()
11731 rack->r_ctl.client_suggested_maxseg = 0; in rack_req_check_for_comp()
11735 tcp_req_log_req_info(rack->rc_tp, ent, in rack_req_check_for_comp()
11738 tcp_req_free_a_slot(rack->rc_tp, ent); in rack_req_check_for_comp()
11739 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11761 struct tcp_rack *rack; in rack_process_ack() local
11768 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_ack()
11802 rack->r_wanted_output = 1; in rack_process_ack()
11808 rack->r_wanted_output = 1; in rack_process_ack()
11811 if (rack->gp_ready && in rack_process_ack()
11812 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_process_ack()
11820 if (rack->rc_in_persist) { in rack_process_ack()
11823 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11830 rack_strike_dupack(rack, th->th_ack); in rack_process_ack()
11844 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_process_ack()
11845 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_ack()
11846 rack->r_ctl.rc_reorder_ts = 1; in rack_process_ack()
11879 rack->probe_not_answered = 0; in rack_process_ack()
11900 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11901 rack->rc_tlp_in_progress = 0; in rack_process_ack()
11902 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_process_ack()
11907 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_process_ack()
11908 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11910 rack_req_check_for_comp(rack, th->th_ack); in rack_process_ack()
11948 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_process_ack()
11952 } else if ((rack->rto_from_rec == 1) && in rack_process_ack()
11960 rack->rto_from_rec = 0; in rack_process_ack()
11967 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); in rack_process_ack()
11992 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_process_ack()
11993 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_process_ack()
12004 (rack->use_fixed_rate == 0) && in rack_process_ack()
12005 (rack->in_probe_rtt == 0) && in rack_process_ack()
12006 rack->rc_gp_dyn_mul && in rack_process_ack()
12007 rack->rc_always_pace) { in rack_process_ack()
12009 rack_check_bottom_drag(tp, rack, so); in rack_process_ack()
12014 if (rack->r_ctl.rc_went_idle_time == 0) in rack_process_ack()
12015 rack->r_ctl.rc_went_idle_time = 1; in rack_process_ack()
12016 rack->r_ctl.retran_during_recovery = 0; in rack_process_ack()
12017 rack->r_ctl.dsack_byte_cnt = 0; in rack_process_ack()
12018 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); in rack_process_ack()
12021 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
12022 rack->rc_suspicious = 0; in rack_process_ack()
12024 rack->r_wanted_output = 1; in rack_process_ack()
12025 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_process_ack()
12049 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, in rack_log_collapse() argument
12052 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_collapse()
12061 log.u_bbr.flex5 = rack->r_must_retran; in rack_log_collapse()
12063 log.u_bbr.flex7 = rack->rc_has_collapsed; in rack_log_collapse()
12073 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_collapse()
12074 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_collapse()
12075 &rack->rc_inp->inp_socket->so_rcv, in rack_log_collapse()
12076 &rack->rc_inp->inp_socket->so_snd, in rack_log_collapse()
12083 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) in rack_collapsed_window() argument
12091 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_collapsed_window()
12092 if ((rack->rc_has_collapsed == 0) || in rack_collapsed_window()
12093 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) in rack_collapsed_window()
12095 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; in rack_collapsed_window()
12096 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; in rack_collapsed_window()
12097 rack->rc_has_collapsed = 1; in rack_collapsed_window()
12098 rack->r_collapse_point_valid = 1; in rack_collapsed_window()
12099 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); in rack_collapsed_window()
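
rack_collapsed_window() above records two sequence marks when the peer shrinks its window: the first byte past the new window (th_ack + snd_wnd) and how far transmission had already progressed (snd_max). A standalone sketch of that bookkeeping, with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model: note where the peer's shrunken window now ends. */
    struct collapse_info {
        uint32_t last_collapse_point; /* th_ack + snd_wnd at collapse time */
        uint32_t high_collapse_point; /* snd_max when the collapse was seen */
        int      has_collapsed;
    };

    static void
    note_collapse(struct collapse_info *ci, uint32_t th_ack,
        uint32_t snd_wnd, uint32_t snd_max)
    {
        ci->last_collapse_point = th_ack + snd_wnd; /* first seq past window */
        ci->high_collapse_point = snd_max;          /* already sent past it */
        ci->has_collapsed = 1;
    }

    int
    main(void)
    {
        struct collapse_info ci = { 0, 0, 0 };

        /* hypothetical: 1000 acked, window now 2000, 5000 already sent */
        note_collapse(&ci, 1000, 2000, 5000);
        printf("collapsed at %u, sent up to %u\n",
            ci.last_collapse_point, ci.high_collapse_point); /* 3000, 5000 */
        return 0;
    }
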
12103 rack_un_collapse_window(struct tcp_rack *rack, int line) in rack_un_collapse_window() argument
12110 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_un_collapse_window()
12111 rack->rc_has_collapsed = 0; in rack_un_collapse_window()
12112 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12115 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
12119 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { in rack_un_collapse_window()
12120 rack_log_collapse(rack, rsm->r_start, rsm->r_end, in rack_un_collapse_window()
12121 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); in rack_un_collapse_window()
12122 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); in rack_un_collapse_window()
12130 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
12132 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_un_collapse_window()
12134 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_un_collapse_window()
12136 nrsm, insret, rack, rsm); in rack_un_collapse_window()
12139 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, in rack_un_collapse_window()
12140 rack->r_ctl.last_collapse_point, __LINE__); in rack_un_collapse_window()
12142 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_un_collapse_window()
12153 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { in rack_un_collapse_window()
12156 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); in rack_un_collapse_window()
12162 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
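
rack_un_collapse_window() above re-opens the window and, when the collapse point falls strictly inside an existing sendmap entry, clones a new entry so the map splits cleanly at that point. A minimal sketch of such a split on a bare [start, end) range (a real rack_sendmap carries far more state than bounds):

    #include <stdint.h>
    #include <stdio.h>

    struct blk { uint32_t start, end; };   /* covers [start, end) */

    static int
    split_at(struct blk *b, uint32_t point, struct blk *right)
    {
        if (point <= b->start || point >= b->end)
            return 0;               /* point not strictly inside: no split */
        right->start = point;       /* new block covers [point, end) */
        right->end = b->end;
        b->end = point;             /* original shrinks to [start, point) */
        return 1;
    }

    int
    main(void)
    {
        struct blk b = { 1000, 4000 }, r;

        if (split_at(&b, 3000, &r))  /* hypothetical collapse point 3000 */
            printf("[%u,%u) and [%u,%u)\n", b.start, b.end, r.start, r.end);
        return 0;
    }
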
12166 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, in rack_handle_delayed_ack() argument
12170 rack_timer_cancel(tp, rack, in rack_handle_delayed_ack()
12171 rack->r_ctl.rc_rcvtime, __LINE__); in rack_handle_delayed_ack()
12174 rack->r_wanted_output = 1; in rack_handle_delayed_ack()
12180 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) in rack_validate_fo_sendwin_up() argument
12187 if (rack->r_fast_output) { in rack_validate_fo_sendwin_up()
12195 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { in rack_validate_fo_sendwin_up()
12199 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
12202 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; in rack_validate_fo_sendwin_up()
12203 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { in rack_validate_fo_sendwin_up()
12205 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
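
rack_validate_fo_sendwin_up() above keeps the fast-output budget (fsb.left_to_send, which rack_gain_for_fastoutput() grows as new data is acked) inside the peer's send window, and abandons the fast path once less than one full segment could go out. A standalone sketch of that clamping, under illustrative names ("out" is data already in flight):

    #include <stdint.h>

    struct fast_out { uint32_t left_to_send; int enabled; };

    static void
    validate_budget(struct fast_out *fo, uint32_t out, uint32_t snd_wnd,
        uint32_t maxseg)
    {
        if (!fo->enabled)
            return;
        if (out + fo->left_to_send > snd_wnd) {
            if (snd_wnd <= out) {       /* window fully consumed */
                fo->enabled = 0;
                return;
            }
            fo->left_to_send = snd_wnd - out;
            if (fo->left_to_send < maxseg)
                fo->enabled = 0;        /* not worth a fast-path send */
        }
    }

    int
    main(void)
    {
        struct fast_out fo = { 5000, 1 };

        validate_budget(&fo, 2000, 4000, 1448); /* window 4000, 2000 out */
        return (fo.enabled && fo.left_to_send == 2000) ? 0 : 1;
    }
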
12228 struct tcp_rack *rack; in rack_process_data() local
12232 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_data()
12243 rack_validate_fo_sendwin_up(tp, rack); in rack_process_data()
12248 rack->r_wanted_output = 1; in rack_process_data()
12252 rack_validate_fo_sendwin_up(tp, rack); in rack_process_data()
12259 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_process_data()
12260 else if (rack->rc_has_collapsed) in rack_process_data()
12261 rack_un_collapse_window(rack, __LINE__); in rack_process_data()
12262 if ((rack->r_collapse_point_valid) && in rack_process_data()
12263 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) in rack_process_data()
12264 rack->r_collapse_point_valid = 0; in rack_process_data()
12266 if ((rack->rc_in_persist != 0) && in rack_process_data()
12267 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_process_data()
12268 rack->r_ctl.rc_pace_min_segs))) { in rack_process_data()
12269 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); in rack_process_data()
12272 rack->r_wanted_output = 1; in rack_process_data()
12275 if ((rack->rc_in_persist == 0) && in rack_process_data()
12276 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_process_data()
12278 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_process_data()
12287 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_process_data()
12345 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); in rack_process_data()
12376 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_process_data()
12455 rack_timer_cancel(tp, rack, in rack_process_data()
12456 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12472 rack_timer_cancel(tp, rack, in rack_process_data()
12473 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12482 rack_timer_cancel(tp, rack, in rack_process_data()
12483 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12493 rack_timer_cancel(tp, rack, in rack_process_data()
12494 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12504 rack->r_wanted_output = 1; in rack_process_data()
12521 struct tcp_rack *rack; in rack_do_fastnewdata() local
12555 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_fastnewdata()
12619 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_do_fastnewdata()
12626 rack_handle_delayed_ack(tp, rack, tlen, 0); in rack_do_fastnewdata()
12628 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_fastnewdata()
12649 struct tcp_rack *rack; in rack_fastack() local
12675 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fastack()
12676 if (rack->r_ctl.rc_sacked) { in rack_fastack()
12681 if (rack->gp_ready && in rack_fastack()
12682 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_fastack()
12690 rack_validate_fo_sendwin_up(tp, rack); in rack_fastack()
12696 if ((rack->rc_in_persist != 0) && in rack_fastack()
12697 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_fastack()
12698 rack->r_ctl.rc_pace_min_segs))) { in rack_fastack()
12699 rack_exit_persist(tp, rack, cts); in rack_fastack()
12702 if ((rack->rc_in_persist == 0) && in rack_fastack()
12703 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_fastack()
12705 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_fastack()
12714 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); in rack_fastack()
12759 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); in rack_fastack()
12764 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_fastack()
12766 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_fastack()
12771 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_fastack()
12772 rack->rc_tlp_in_progress = 0; in rack_fastack()
12773 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_fastack()
12778 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_fastack()
12779 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12782 rack_req_check_for_comp(rack, th->th_ack); in rack_fastack()
12792 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_fastack()
12793 } else if (rack->rc_has_collapsed) in rack_fastack()
12794 rack_un_collapse_window(rack, __LINE__); in rack_fastack()
12795 if ((rack->r_collapse_point_valid) && in rack_fastack()
12796 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) in rack_fastack()
12797 rack->r_collapse_point_valid = 0; in rack_fastack()
12814 (rack->use_fixed_rate == 0) && in rack_fastack()
12815 (rack->in_probe_rtt == 0) && in rack_fastack()
12816 rack->rc_gp_dyn_mul && in rack_fastack()
12817 rack->rc_always_pace) { in rack_fastack()
12819 rack_check_bottom_drag(tp, rack, so); in rack_fastack()
12823 rack->r_ctl.retran_during_recovery = 0; in rack_fastack()
12824 rack->rc_suspicious = 0; in rack_fastack()
12825 rack->r_ctl.dsack_byte_cnt = 0; in rack_fastack()
12826 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_fastack()
12827 if (rack->r_ctl.rc_went_idle_time == 0) in rack_fastack()
12828 rack->r_ctl.rc_went_idle_time = 1; in rack_fastack()
12829 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); in rack_fastack()
12832 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12834 if (acked && rack->r_fast_output) in rack_fastack()
12835 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); in rack_fastack()
12837 rack->r_wanted_output = 1; in rack_fastack()
12856 struct tcp_rack *rack; in rack_do_syn_sent() local
12896 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_sent()
12927 rack_timer_cancel(tp, rack, in rack_do_syn_sent()
12928 rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_syn_sent()
12931 rack->r_wanted_output = 1; in rack_do_syn_sent()
12960 rsm = tqhash_min(rack->r_ctl.tqh); in rack_do_syn_sent()
12966 rack->r_ctl.rc_resend = rsm; in rack_do_syn_sent()
13027 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); in rack_do_syn_sent()
13028 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); in rack_do_syn_sent()
13029 tcp_rack_xmit_timer_commit(rack, tp); in rack_do_syn_sent()
13076 struct tcp_rack *rack; in rack_do_syn_recv() local
13081 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_recv()
13107 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || in rack_do_syn_recv()
13108 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || in rack_do_syn_recv()
13109 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { in rack_do_syn_recv()
13165 rack_validate_fo_sendwin_up(tp, rack); in rack_do_syn_recv()
13242 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); in rack_do_syn_recv()
13243 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); in rack_do_syn_recv()
13244 tcp_rack_xmit_timer_commit(rack, tp); in rack_do_syn_recv()
13292 struct tcp_rack *rack; in rack_do_established() local
13308 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_established()
13315 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { in rack_do_established()
13399 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); in rack_do_established()
13508 struct tcp_rack *rack; in rack_check_data_after_close() local
13510 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_data_after_close()
13511 if (rack->rc_allow_data_af_clo == 0) { in rack_check_data_after_close()
13527 rack->r_wanted_output = 1; in rack_check_data_after_close()
13962 rack_clear_rate_sample(struct tcp_rack *rack) in rack_clear_rate_sample() argument
13964 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; in rack_clear_rate_sample()
13965 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; in rack_clear_rate_sample()
13966 rack->r_ctl.rack_rs.rs_rtt_tot = 0; in rack_clear_rate_sample()
13970 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_overr… in rack_set_pace_segments() argument
13977 if (rack->rc_hybrid_mode && in rack_set_pace_segments()
13978 (rack->r_ctl.rc_pace_max_segs != 0) && in rack_set_pace_segments()
13980 (rack->r_ctl.rc_last_sft != NULL)) { in rack_set_pace_segments()
13981 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; in rack_set_pace_segments()
13985 orig_min = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
13986 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
13987 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; in rack_set_pace_segments()
13988 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) in rack_set_pace_segments()
13990 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); in rack_set_pace_segments()
13991 if (rack->use_fixed_rate || rack->rc_force_max_seg) { in rack_set_pace_segments()
13992 if (user_max != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
13995 if (rack->rc_force_max_seg) { in rack_set_pace_segments()
13996 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
13997 } else if (rack->use_fixed_rate) { in rack_set_pace_segments()
13998 bw_est = rack_get_bw(rack); in rack_set_pace_segments()
13999 if ((rack->r_ctl.crte == NULL) || in rack_set_pace_segments()
14000 (bw_est != rack->r_ctl.crte->rate)) { in rack_set_pace_segments()
14001 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
14007 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_set_pace_segments()
14012 rack->r_ctl.rc_pace_min_segs); in rack_set_pace_segments()
14013 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( in rack_set_pace_segments()
14015 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_set_pace_segments()
14017 } else if (rack->rc_always_pace) { in rack_set_pace_segments()
14018 if (rack->r_ctl.gp_bw || in rack_set_pace_segments()
14019 rack->r_ctl.init_rate) { in rack_set_pace_segments()
14023 bw_est = rack_get_bw(rack); in rack_set_pace_segments()
14024 orig = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
14028 rate_wanted = rack_get_gp_est(rack); in rack_set_pace_segments()
14031 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, in rack_set_pace_segments()
14033 ctf_fixed_maxseg(rack->rc_tp)); in rack_set_pace_segments()
14035 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
14036 if (orig != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
14038 } else if ((rack->r_ctl.gp_bw == 0) && in rack_set_pace_segments()
14039 (rack->r_ctl.rc_pace_max_segs == 0)) { in rack_set_pace_segments()
14045 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); in rack_set_pace_segments()
14048 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { in rack_set_pace_segments()
14050 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; in rack_set_pace_segments()
14053 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); in rack_set_pace_segments()
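
rack_set_pace_segments() above derives the pacing burst ceiling from the user's segment count (ctf_fixed_maxseg(tp) * rc_user_set_max_segs) and finally clamps whatever was chosen to PACE_MAX_IP_BYTES. A sketch of just that sizing rule, with an assumed value standing in for the ceiling:

    #include <stdint.h>
    #include <stdio.h>

    #define PACE_MAX_BYTES 65536u  /* assumed stand-in for PACE_MAX_IP_BYTES */

    static uint32_t
    pace_max_bytes(uint32_t maxseg, uint32_t user_set_max_segs)
    {
        uint32_t user_max = maxseg * user_set_max_segs;

        if (user_max > PACE_MAX_BYTES)      /* never past the IP ceiling */
            user_max = PACE_MAX_BYTES;
        return user_max;
    }

    int
    main(void)
    {
        /* hypothetical: 1448-byte MSS, 40-segment user cap -> 57920 */
        printf("%u\n", pace_max_bytes(1448, 40));
        return 0;
    }
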
14058 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) in rack_init_fsb_block() argument
14070 if (rack->r_is_v6) { in rack_init_fsb_block()
14071 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in rack_init_fsb_block()
14072 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14074 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14078 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14079 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14082 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); in rack_init_fsb_block()
14083 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14085 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14087 ip6, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14088 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); in rack_init_fsb_block()
14093 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); in rack_init_fsb_block()
14094 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
14096 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
14100 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
14101 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
14104 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
14105 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); in rack_init_fsb_block()
14107 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
14109 ip, rack->r_ctl.fsb.th); in rack_init_fsb_block()
14110 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; in rack_init_fsb_block()
14113 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), in rack_init_fsb_block()
14115 rack->r_fsb_inited = 1; in rack_init_fsb_block()
14119 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) in rack_init_fsb() argument
14126rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct ud… in rack_init_fsb()
14128 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); in rack_init_fsb()
14130 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, in rack_init_fsb()
14132 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { in rack_init_fsb()
14135 rack->r_fsb_inited = 0; in rack_init_fsb()
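
rack_init_fsb() above sizes the prebuilt fast-send header for the worst case: IPv6 + TCP (+ UDP when the connection is tunneled), or the IPv4 tcpiphdr overlay (+ UDP). A sketch of the arithmetic using the classic on-wire sizes as stand-ins for the kernel's sizeof() on the real structs:

    #include <stdio.h>

    #define IP6_HDR  40   /* struct ip6_hdr */
    #define TCPIP4   40   /* struct tcpiphdr: 20-byte IPv4 overlay + 20-byte TCP */
    #define TCP_HDR  20   /* struct tcphdr */
    #define UDP_HDR   8   /* struct udphdr, for tunneled TCP */

    int
    main(void)
    {
        printf("v6: %d bytes, v4: %d bytes (with UDP tunnel)\n",
            IP6_HDR + TCP_HDR + UDP_HDR, TCPIP4 + UDP_HDR);
        return 0;
    }
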
14140 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) in rack_log_hystart_event() argument
14149 tp = rack->rc_tp; in rack_log_hystart_event()
14155 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_log_hystart_event()
14156 log.u_bbr.flex2 = rack->r_ctl.roundends; in rack_log_hystart_event()
14161 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hystart_event()
14162 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hystart_event()
14172 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) in rack_deferred_init() argument
14174 rack->rack_deferred_inited = 1; in rack_deferred_init()
14175 rack->r_ctl.roundends = tp->snd_max; in rack_deferred_init()
14176 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; in rack_deferred_init()
14177 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_deferred_init()
14181 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) in rack_init_retransmit_value() argument
14208 rack->full_size_rxt = 1; in rack_init_retransmit_value()
14209 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
14211 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14212 rack->shape_rxt_to_pacing_min = 1; in rack_init_retransmit_value()
14214 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14215 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
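
rack_init_retransmit_value() above selects one of three retransmit sizing policies: full-sized retransmissions, retransmissions shaped down to the pacing minimum, or neither. The ctl values that pick each branch are not visible in these fragments, so the numbering below is assumed:

    #include <stdio.h>

    struct rxt_policy { int full_size_rxt, shape_rxt_to_pacing_min; };

    static struct rxt_policy
    pick_rxt_policy(int ctl)
    {
        struct rxt_policy p = { 0, 0 };

        if (ctl == 1)
            p.full_size_rxt = 1;            /* always full segments */
        else if (ctl == 2)
            p.shape_rxt_to_pacing_min = 1;  /* clamp to pacing minimum */
        /* any other value: leave both off */
        return p;
    }

    int
    main(void)
    {
        struct rxt_policy p = pick_rxt_policy(2);

        printf("full=%d shape=%d\n",
            p.full_size_rxt, p.shape_rxt_to_pacing_min);
        return 0;
    }
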
14220 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, in rack_log_chg_info() argument
14225 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_chg_info()
14243 struct tcp_rack *rack; in rack_chg_query() local
14248 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_chg_query()
14256 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); in rack_chg_query()
14273 rack_log_chg_info(tp, rack, 1, in rack_chg_query()
14280 if (rack->r_ctl.rc_hpts_flags == 0) { in rack_chg_query()
14284 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; in rack_chg_query()
14285 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_chg_query()
14286 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; in rack_chg_query()
14288 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_chg_query()
14289 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; in rack_chg_query()
14291 rack_log_chg_info(tp, rack, 2, in rack_chg_query()
14292 rack->r_ctl.rc_hpts_flags, in rack_chg_query()
14293 rack->r_ctl.rc_last_output_to, in rack_chg_query()
14294 rack->r_ctl.rc_timer_exp); in rack_chg_query()
14299 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; in rack_chg_query()
14300 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; in rack_chg_query()
14302 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; in rack_chg_query()
14303 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; in rack_chg_query()
14304 reqr->rack_rtt = rack->rc_rack_rtt; in rack_chg_query()
14305 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; in rack_chg_query()
14306 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; in rack_chg_query()
14308 reqr->rack_sacked = rack->r_ctl.rc_sacked; in rack_chg_query()
14309 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; in rack_chg_query()
14310 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; in rack_chg_query()
14311 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; in rack_chg_query()
14312 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; in rack_chg_query()
14313 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; in rack_chg_query()
14315 reqr->rack_tlp_out = rack->rc_tlp_in_progress; in rack_chg_query()
14316 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; in rack_chg_query()
14317 if (rack->rc_in_persist) { in rack_chg_query()
14318 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; in rack_chg_query()
14324 if (rack->r_wanted_output) in rack_chg_query()
14344 struct tcp_rack *rack; in rack_switch_failed() local
14350 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_switch_failed()
14352 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_switch_failed()
14356 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_switch_failed()
14363 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_switch_failed()
14364 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_switch_failed()
14365 toval = rack->r_ctl.rc_last_output_to - cts; in rack_switch_failed()
14370 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_switch_failed()
14371 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_switch_failed()
14372 toval = rack->r_ctl.rc_timer_exp - cts; in rack_switch_failed()
14381 rack_log_hpts_diag(rack, cts, &diag, &tv); in rack_switch_failed()
14385 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) in rack_init_outstanding() argument
14400 rsm = rack_alloc(rack); in rack_init_outstanding()
14406 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14420 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14421 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14440 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14442 insret, rack, rsm); in rack_init_outstanding()
14445 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14447 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14465 rsm = rack_alloc(rack); in rack_init_outstanding()
14490 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14500 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14502 insret, rack, rsm); in rack_init_outstanding()
14505 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14508 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_init_outstanding()
14526 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14530 if ((rack->r_ctl.rc_sacklast == NULL) || in rack_init_outstanding()
14531 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { in rack_init_outstanding()
14532 rack->r_ctl.rc_sacklast = rsm; in rack_init_outstanding()
14535 rack_log_chg_info(tp, rack, 3, in rack_init_outstanding()
14549 struct tcp_rack *rack = NULL; in rack_init() local
14578 rack = (struct tcp_rack *)*ptr; in rack_init()
14579 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); in rack_init()
14580 if (rack->r_ctl.tqh == NULL) { in rack_init()
14581 uma_zfree(rack_pcb_zone, rack); in rack_init()
14584 tqhash_init(rack->r_ctl.tqh); in rack_init()
14585 TAILQ_INIT(&rack->r_ctl.rc_free); in rack_init()
14586 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_init()
14587 rack->rc_tp = tp; in rack_init()
14588 rack->rc_inp = inp; in rack_init()
14590 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; in rack_init()
14592 rack_clear_rate_sample(rack); in rack_init()
14607 rack->rc_new_rnd_needed = 1; in rack_init()
14608 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; in rack_init()
14611 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; in rack_init()
14612 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; in rack_init()
14613 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; in rack_init()
14614 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; in rack_init()
14616 rack->rc_pace_to_cwnd = 1; in rack_init()
14618 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; in rack_init()
14620 rack->use_rack_rr = 1; in rack_init()
14622 rack->rc_pace_dnd = 1; in rack_init()
14633 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; in rack_init()
14634 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); in rack_init()
14635 rack->r_ctl.pcm_s = malloc(sz, M_TCPPCM, M_NOWAIT); in rack_init()
14636 if (rack->r_ctl.pcm_s == NULL) { in rack_init()
14637 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_init()
14640 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; in rack_init()
14642 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; in rack_init()
14643 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; in rack_init()
14645 rack->rack_enable_scwnd = 1; in rack_init()
14646 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_init()
14647 rack->rc_user_set_max_segs = rack_hptsi_segments; in rack_init()
14648 rack->r_ctl.max_reduction = rack_max_reduce; in rack_init()
14649 rack->rc_force_max_seg = 0; in rack_init()
14650 TAILQ_INIT(&rack->r_ctl.opt_list); in rack_init()
14651 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; in rack_init()
14652 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; in rack_init()
14654 rack->rack_hibeta = 1; in rack_init()
14657 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; in rack_init()
14658 rack->r_ctl.saved_hibeta = rack_hibeta_setting; in rack_init()
14661 rack->r_ctl.saved_hibeta = 50; in rack_init()
14668 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; in rack_init()
14669 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; in rack_init()
14670 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; in rack_init()
14671 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; in rack_init()
14672 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; in rack_init()
14673 rack->r_ctl.rc_highest_us_rtt = 0; in rack_init()
14674 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; in rack_init()
14675 rack->pcm_enabled = rack_pcm_is_enabled; in rack_init()
14677 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_init()
14678 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); in rack_init()
14680 rack->r_use_cmp_ack = 1; in rack_init()
14682 rack->rack_no_prr = 1; in rack_init()
14684 rack->rc_gp_no_rec_chg = 1; in rack_init()
14686 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_init()
14687 rack->rc_always_pace = 1; in rack_init()
14688 if (rack->rack_hibeta) in rack_init()
14689 rack_set_cc_pacing(rack); in rack_init()
14691 rack->rc_always_pace = 0; in rack_init()
14692 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) in rack_init()
14693 rack->r_mbuf_queue = 1; in rack_init()
14695 rack->r_mbuf_queue = 0; in rack_init()
14696 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_init()
14698 rack->r_limit_scw = 1; in rack_init()
14700 rack->r_limit_scw = 0; in rack_init()
14701 rack_init_retransmit_value(rack, rack_rxt_controls); in rack_init()
14702 rack->rc_labc = V_tcp_abc_l_var; in rack_init()
14704 rack->r_use_hpts_min = 1; in rack_init()
14706 rack->rc_sendvars_notset = 0; in rack_init()
14718 rack->rc_sendvars_notset = 1; in rack_init()
14721 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; in rack_init()
14722 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; in rack_init()
14723 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; in rack_init()
14724 rack->r_ctl.rc_min_to = rack_min_to; in rack_init()
14725 microuptime(&rack->r_ctl.act_rcv_time); in rack_init()
14726 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; in rack_init()
14727 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; in rack_init()
14729 rack->r_up_only = 1; in rack_init()
14732 rack->rc_gp_dyn_mul = 1; in rack_init()
14734 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_init()
14736 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_init()
14737 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; in rack_init()
14739 rack->rc_skip_timely = 1; in rack_init()
14741 if (rack->rc_skip_timely) { in rack_init()
14742 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_init()
14743 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_init()
14744 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_init()
14746 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_init()
14747 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14748 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14750 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, in rack_init()
14752 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_init()
14753 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_init()
14754 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_init()
14755 rack->r_ctl.rc_went_idle_time = us_cts; in rack_init()
14756 rack->r_ctl.rc_time_probertt_starts = 0; in rack_init()
14758 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; in rack_init()
14760 rack->r_ctl.gate_to_fs = 1; in rack_init()
14761 rack->r_ctl.gp_gain_req = rack_gp_gain_req; in rack_init()
14767 rack->rc_rack_tmr_std_based = 1; in rack_init()
14771 rack->rc_rack_use_dsack = 1; in rack_init()
14775 rack->r_ctl.req_measurements = rack_req_measurements; in rack_init()
14777 rack->r_ctl.req_measurements = 1; in rack_init()
14779 rack->rack_hdw_pace_ena = 1; in rack_init()
14781 rack->r_rack_hw_rate_caps = 1; in rack_init()
14783 rack->rack_rec_nonrxt_use_cr = 1; in rack_init()
14785 err = rack_init_fsb(tp, rack); in rack_init()
14799 rack_log_chg_info(tp, rack, 7, in rack_init()
14802 rack_set_profile(rack, rack_def_profile); in rack_init()
14815 iwin = rc_init_window(rack); in rack_init()
14854 rack_deferred_init(tp, rack); in rack_init()
14860 err = rack_init_outstanding(tp, rack, us_cts, *ptr); in rack_init()
14866 rack_stop_all_timers(tp, rack); in rack_init()
14868 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_init()
14872 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_init()
14884 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); in rack_init()
14888 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); in rack_init()
14899 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; in rack_init()
14900 rack->r_ctl.num_dsack = qr.rack_num_dsacks; in rack_init()
14901 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; in rack_init()
14902 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; in rack_init()
14903 rack->rc_rack_rtt = qr.rack_rtt; in rack_init()
14904 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; in rack_init()
14905 rack->r_ctl.rc_sacked = qr.rack_sacked; in rack_init()
14906 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; in rack_init()
14907 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; in rack_init()
14908 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; in rack_init()
14909 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; in rack_init()
14910 rack->r_ctl.rc_prr_out = qr.rack_prr_out; in rack_init()
14912 rack->rc_tlp_in_progress = 1; in rack_init()
14913 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; in rack_init()
14915 rack->rc_tlp_in_progress = 0; in rack_init()
14916 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_init()
14919 rack->rc_srtt_measure_made = 1; in rack_init()
14921 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; in rack_init()
14923 if (rack->r_ctl.rc_scw) { in rack_init()
14924 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_init()
14925 rack->rack_scwnd_is_idle = 1; in rack_init()
14928 rack->r_ctl.persist_lost_ends = 0; in rack_init()
14929 rack->probe_not_answered = 0; in rack_init()
14930 rack->forced_ack = 0; in rack_init()
14932 rack->rc_in_persist = 1; in rack_init()
14934 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_init()
14937 rack->r_wanted_output = 1; in rack_init()
14938 rack_log_chg_info(tp, rack, 6, in rack_init()
14955 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; in rack_init()
14957 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; in rack_init()
14964 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; in rack_init()
14972 rack_log_chg_info(tp, rack, 4, in rack_init()
14973 rack->r_ctl.rc_hpts_flags, in rack_init()
14974 rack->r_ctl.rc_last_output_to, in rack_init()
14975 rack->r_ctl.rc_timer_exp); in rack_init()
14981 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); in rack_init()
14985 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, in rack_init()
15036 struct tcp_rack *rack; in rack_fini() local
15041 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fini()
15042 rack_log_pacing_delay_calc(rack, in rack_fini()
15046 rack_get_gp_est(rack), /* delRate */ in rack_fini()
15047 rack_get_lt_bw(rack), /* rttProp */ in rack_fini()
15050 if (rack->r_ctl.rc_scw) { in rack_fini()
15053 if (rack->r_limit_scw) in rack_fini()
15054 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); in rack_fini()
15057 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, in rack_fini()
15058 rack->r_ctl.rc_scw_index, in rack_fini()
15060 rack->r_ctl.rc_scw = NULL; in rack_fini()
15063 if (rack->r_ctl.fsb.tcp_ip_hdr) { in rack_fini()
15064 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); in rack_fini()
15065 rack->r_ctl.fsb.tcp_ip_hdr = NULL; in rack_fini()
15066 rack->r_ctl.fsb.th = NULL; in rack_fini()
15068 if (rack->rc_always_pace == 1) { in rack_fini()
15069 rack_remove_pacing(rack); in rack_fini()
15072 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { in rack_fini()
15075 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); in rack_fini()
15076 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_fini()
15080 if (rack->r_ctl.crte != NULL) { in rack_fini()
15081 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_fini()
15082 rack->rack_hdrw_pacing = 0; in rack_fini()
15083 rack->r_ctl.crte = NULL; in rack_fini()
15093 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15095 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_fini()
15096 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15098 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15100 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15102 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_fini()
15103 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15104 rack->rc_free_cnt--; in rack_fini()
15107 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15109 if (rack->r_ctl.pcm_s != NULL) { in rack_fini()
15110 free(rack->r_ctl.pcm_s, M_TCPPCM); in rack_fini()
15111 rack->r_ctl.pcm_s = NULL; in rack_fini()
15112 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_fini()
15113 rack->r_ctl.pcm_i.cnt = 0; in rack_fini()
15115 if ((rack->r_ctl.rc_num_maps_alloced > 0) && in rack_fini()
15122 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; in rack_fini()
15123 log.u_bbr.flex2 = rack->rc_free_cnt; in rack_fini()
15125 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fini()
15126 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15128 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15135 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), in rack_fini()
15137 rack, in rack_fini()
15138 rack->r_ctl.rc_num_maps_alloced)); in rack_fini()
15139 rack->rc_free_cnt = 0; in rack_fini()
15140 free(rack->r_ctl.tqh, M_TCPFSB); in rack_fini()
15141 rack->r_ctl.tqh = NULL; in rack_fini()
15150 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) in rack_set_state() argument
15152 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { in rack_set_state()
15153 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; in rack_set_state()
15157 rack->r_state = TCPS_SYN_SENT; in rack_set_state()
15158 rack->r_substate = rack_do_syn_sent; in rack_set_state()
15161 rack->r_state = TCPS_SYN_RECEIVED; in rack_set_state()
15162 rack->r_substate = rack_do_syn_recv; in rack_set_state()
15165 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_set_state()
15166 rack->r_state = TCPS_ESTABLISHED; in rack_set_state()
15167 rack->r_substate = rack_do_established; in rack_set_state()
15170 rack->r_state = TCPS_CLOSE_WAIT; in rack_set_state()
15171 rack->r_substate = rack_do_close_wait; in rack_set_state()
15174 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_set_state()
15175 rack->r_state = TCPS_FIN_WAIT_1; in rack_set_state()
15176 rack->r_substate = rack_do_fin_wait_1; in rack_set_state()
15179 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_set_state()
15180 rack->r_state = TCPS_CLOSING; in rack_set_state()
15181 rack->r_substate = rack_do_closing; in rack_set_state()
15184 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_set_state()
15185 rack->r_state = TCPS_LAST_ACK; in rack_set_state()
15186 rack->r_substate = rack_do_lastack; in rack_set_state()
15189 rack->r_state = TCPS_FIN_WAIT_2; in rack_set_state()
15190 rack->r_substate = rack_do_fin_wait_2; in rack_set_state()
15198 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_set_state()
15199 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_state()
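
rack_set_state() above pairs each TCP state with a dedicated input handler stored in r_substate, so per-packet processing dispatches through one pointer instead of a per-packet state switch. A toy model of that dispatch (stub handlers; the kernel's take packet and ack arguments):

    #include <stdio.h>

    typedef void (*substate_fn)(void);

    static void do_syn_sent(void)    { puts("syn_sent"); }
    static void do_established(void) { puts("established"); }
    static void do_fin_wait_1(void)  { puts("fin_wait_1"); }

    enum state { SYN_SENT, ESTABLISHED, FIN_WAIT_1 };

    static substate_fn
    handler_for(enum state s)
    {
        switch (s) {
        case SYN_SENT:    return do_syn_sent;
        case ESTABLISHED: return do_established;
        case FIN_WAIT_1:  return do_fin_wait_1;
        }
        return NULL;
    }

    int
    main(void)
    {
        handler_for(ESTABLISHED)();  /* dispatch like rack->r_substate(...) */
        return 0;
    }
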
15204 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) in rack_timer_audit() argument
15215 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_audit()
15216 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_timer_audit()
15222 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15223 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); in rack_timer_audit()
15226 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) in rack_timer_audit()
15228 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_audit()
15241 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timer_audit()
15273 if (tcp_in_hpts(rack->rc_tp)) { in rack_timer_audit()
15274 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_timer_audit()
15278 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_timer_audit()
15279 rack->r_early = 1; in rack_timer_audit()
15280 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_timer_audit()
15282 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_audit()
15284 tcp_hpts_remove(rack->rc_tp); in rack_timer_audit()
15286 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15287 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); in rack_timer_audit()
15292 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t… in rack_do_win_updates() argument
15301 rack_validate_fo_sendwin_up(tp, rack); in rack_do_win_updates()
15306 rack->r_wanted_output = 1; in rack_do_win_updates()
15309 rack_validate_fo_sendwin_up(tp, rack); in rack_do_win_updates()
15319 if ((rack->rc_in_persist != 0) && in rack_do_win_updates()
15320 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_do_win_updates()
15321 rack->r_ctl.rc_pace_min_segs))) { in rack_do_win_updates()
15322 rack_exit_persist(tp, rack, cts); in rack_do_win_updates()
15325 if ((rack->rc_in_persist == 0) && in rack_do_win_updates()
15326 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_do_win_updates()
15328 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_do_win_updates()
15337 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); in rack_do_win_updates()
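
rack_do_win_updates() above (and the same test in rack_process_data() and rack_fastack()) gates persist entry and exit on one threshold: the offered window against min(half the largest window seen, the pacing minimum), with entry further requiring that nothing is outstanding or the window has collapsed. A standalone sketch of the threshold itself:

    #include <stdint.h>
    #include <stdio.h>

    /* The window must fall below this mark before persist is entered,
     * and climb back to it before persist is exited. Names illustrative. */
    static uint32_t
    persist_threshold(uint32_t high_rwnd, uint32_t pace_min_segs)
    {
        uint32_t half = high_rwnd / 2;

        return (half < pace_min_segs) ? half : pace_min_segs;
    }

    int
    main(void)
    {
        /* hypothetical: 64 KB peak window, 1448-byte pacing minimum */
        printf("threshold: %u bytes\n", persist_threshold(65536, 1448));
        return 0;  /* prints 1448 */
    }
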
15342 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, u… in rack_log_input_packet() argument
15345 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_input_packet()
15365 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_input_packet()
15366 if (rack->rack_no_prr == 0) in rack_log_input_packet()
15367 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_log_input_packet()
15370 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_input_packet()
15372 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_input_packet()
15373 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_log_input_packet()
15374 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_log_input_packet()
15375 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_log_input_packet()
15377 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_input_packet()
15490 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) in rack_handle_probe_response() argument
15506 rack->forced_ack = 0; in rack_handle_probe_response()
15507 rack->rc_tp->t_rxtshift = 0; in rack_handle_probe_response()
15508 if ((rack->rc_in_persist && in rack_handle_probe_response()
15509 (tiwin == rack->rc_tp->snd_wnd)) || in rack_handle_probe_response()
15510 (rack->rc_in_persist == 0)) { in rack_handle_probe_response()
15525 if (rack->rc_in_persist) in rack_handle_probe_response()
15527 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; in rack_handle_probe_response()
15530 if (rack->probe_not_answered == 0) { in rack_handle_probe_response()
15531 rack_apply_updated_usrtt(rack, us_rtt, us_cts); in rack_handle_probe_response()
15532 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); in rack_handle_probe_response()
15536 rack_apply_updated_usrtt(rack, us_rtt, us_cts); in rack_handle_probe_response()
15537 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); in rack_handle_probe_response()
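
rack_handle_probe_response() above turns an answered window probe into an RTT sample: the elapsed time since forced_ack_ts feeds tcp_rack_xmit_timer(), at high confidence only when the very first probe drew the answer. A sketch of the sample-and-confidence choice (confidence encoding here mirrors the 3-vs-0 arguments visible above, but is otherwise illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    probe_rtt_sample(uint32_t now_us, uint32_t forced_ack_ts,
        int probe_not_answered, int *confidence)
    {
        /* repeated, unanswered probes make the elapsed time suspect */
        *confidence = probe_not_answered ? 0 : 3;
        return now_us - forced_ack_ts;
    }

    int
    main(void)
    {
        int conf;
        uint32_t rtt = probe_rtt_sample(1500000, 1450000, 0, &conf);

        printf("rtt=%uus conf=%d\n", rtt, conf); /* 50000us at conf 3 */
        return 0;
    }
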
15544 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) in rack_new_round_starts() argument
15552 rack->r_ctl.roundends = tp->snd_max; in rack_new_round_starts()
15553 rack->rc_new_rnd_needed = 0; in rack_new_round_starts()
15554 rack_log_hystart_event(rack, tp->snd_max, 4); in rack_new_round_starts()
15559 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, in rack_log_pcm() argument
15562 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pcm()
15569 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pcm()
15575 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; in rack_log_pcm()
15576 log.u_bbr.bbr_substate = rack->pcm_needed; in rack_log_pcm()
15578 log.u_bbr.bbr_substate |= rack->pcm_in_progress; in rack_log_pcm()
15580 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ in rack_log_pcm()
15581 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, in rack_log_pcm()
15587 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) in rack_new_round_setup() argument
15595 rack_log_hystart_event(rack, high_seq, 21); in rack_new_round_setup()
15596 rack->r_ctl.current_round++; in rack_new_round_setup()
15598 rack->rc_new_rnd_needed = 1; in rack_new_round_setup()
15599 if ((rack->pcm_enabled == 1) && in rack_new_round_setup()
15600 (rack->pcm_needed == 0) && in rack_new_round_setup()
15601 (rack->pcm_in_progress == 0)) { in rack_new_round_setup()
15609 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_new_round_setup()
15610 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_new_round_setup()
15611 rack->pcm_needed = 1; in rack_new_round_setup()
15612 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15614 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15620 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); in rack_new_round_setup()
15629 if (rack->dgp_on && in rack_new_round_setup()
15630 (rack->rc_initial_ss_comp == 0) && in rack_new_round_setup()
15632 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && in rack_new_round_setup()
15633 (rack->r_ctl.gp_rnd_thresh > 0) && in rack_new_round_setup()
15634 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { in rack_new_round_setup()
15644 rack->rc_initial_ss_comp = 1; in rack_new_round_setup()
15646 if (tcp_bblogging_on(rack->rc_tp)) { in rack_new_round_setup()
15652 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_new_round_setup()
15653 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_new_round_setup()
15654 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; in rack_new_round_setup()
15655 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; in rack_new_round_setup()
15656 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15661 if ((rack->r_ctl.gate_to_fs == 1) && in rack_new_round_setup()
15662 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { in rack_new_round_setup()
15663 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15667 rack->r_fast_output = 0; in rack_new_round_setup()
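rack_new_round_setup() advances current_round at each round boundary and arms a PCM (path capacity measurement) once enough rounds, including credited idle rounds, have passed since the last one. A sketch of that gate under the names in the fragment (rack_pcm_every_n_rounds is a tunable):

#include <stdbool.h>
#include <stdint.h>

static bool
pcm_due(uint32_t current_round, uint32_t last_pcm_round,
    uint32_t pcm_idle_rounds, uint32_t every_n_rounds,
    bool enabled, bool needed, bool in_progress)
{
	uint32_t rnds;

	if (!enabled || needed || in_progress)
		return (false);		/* nothing to arm, or already armed */
	rnds = current_round - last_pcm_round;
	return ((rnds + pcm_idle_rounds) >= every_n_rounds);
}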
15694 struct tcp_rack *rack; in rack_do_compressed_ack_processing() local
15708 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_compressed_ack_processing()
15709 if (rack->gp_ready && in rack_do_compressed_ack_processing()
15710 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) in rack_do_compressed_ack_processing()
15713 if (rack->r_state != tp->t_state) in rack_do_compressed_ack_processing()
15714 rack_set_state(tp, rack); in rack_do_compressed_ack_processing()
15734 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_compressed_ack_processing()
15735 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_compressed_ack_processing()
15753 rack->r_ctl.rc_rcvtime = cts; in rack_do_compressed_ack_processing()
15755 if ((rack->rc_gp_dyn_mul) && in rack_do_compressed_ack_processing()
15756 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15757 (rack->rc_always_pace)) { in rack_do_compressed_ack_processing()
15759 rack_check_probe_rtt(rack, cts); in rack_do_compressed_ack_processing()
15765 rack_clear_rate_sample(rack); in rack_do_compressed_ack_processing()
15768 rack_log_pacing_delay_calc(rack, in rack_do_compressed_ack_processing()
15772 rack_get_gp_est(rack), /* delRate */ in rack_do_compressed_ack_processing()
15773 rack_get_lt_bw(rack), /* rttProp */ in rack_do_compressed_ack_processing()
15777 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_compressed_ack_processing()
15778 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_compressed_ack_processing()
15786 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ in rack_do_compressed_ack_processing()
15793 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); in rack_do_compressed_ack_processing()
15794 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); in rack_do_compressed_ack_processing()
15837 rack_cc_after_idle(rack, tp); in rack_do_compressed_ack_processing()
15868 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_compressed_ack_processing()
15869 if (rack->r_ctl.rc_reorder_ts == 0) in rack_do_compressed_ack_processing()
15870 rack->r_ctl.rc_reorder_ts = 1; in rack_do_compressed_ack_processing()
15874 rack_strike_dupack(rack, ae->ack); in rack_do_compressed_ack_processing()
15880 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15881 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15883 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15885 if (rack->forced_ack) { in rack_do_compressed_ack_processing()
15886 rack_handle_probe_response(rack, tiwin, in rack_do_compressed_ack_processing()
15887 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15895 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); in rack_do_compressed_ack_processing()
15906 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15915 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); in rack_do_compressed_ack_processing()
15928 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15929 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15931 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15933 rack_process_to_cumack(tp, rack, ae->ack, cts, to, in rack_do_compressed_ack_processing()
15934 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15936 rack_req_check_for_comp(rack, high_seq); in rack_do_compressed_ack_processing()
15938 if (rack->rc_dsack_round_seen) { in rack_do_compressed_ack_processing()
15940 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { in rack_do_compressed_ack_processing()
15942 rack->rc_dsack_round_seen = 0; in rack_do_compressed_ack_processing()
15943 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); in rack_do_compressed_ack_processing()
15949 tcp_rack_xmit_timer_commit(rack, tp); in rack_do_compressed_ack_processing()
15967 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); in rack_do_compressed_ack_processing()
15968 } else if (rack->rc_has_collapsed) in rack_do_compressed_ack_processing()
15969 rack_un_collapse_window(rack, __LINE__); in rack_do_compressed_ack_processing()
15970 if ((rack->r_collapse_point_valid) && in rack_do_compressed_ack_processing()
15971 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) in rack_do_compressed_ack_processing()
15972 rack->r_collapse_point_valid = 0; in rack_do_compressed_ack_processing()
15986 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && in rack_do_compressed_ack_processing()
15987 (rack->rc_new_rnd_needed == 0) && in rack_do_compressed_ack_processing()
15993 rack_new_round_setup(tp, rack, high_seq); in rack_do_compressed_ack_processing()
15999 rack->probe_not_answered = 0; in rack_do_compressed_ack_processing()
16014 (rack->rack_no_prr == 0)) in rack_do_compressed_ack_processing()
16015 rack_update_prr(tp, rack, acked_amount, high_seq); in rack_do_compressed_ack_processing()
16024 } else if ((rack->rto_from_rec == 1) && in rack_do_compressed_ack_processing()
16032 rack->rto_from_rec = 0; in rack_do_compressed_ack_processing()
16074 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_compressed_ack_processing()
16078 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); in rack_do_compressed_ack_processing()
16087 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_do_compressed_ack_processing()
16089 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); in rack_do_compressed_ack_processing()
16095 rack_log_progress_event(rack, tp, tp->t_acktime, in rack_do_compressed_ack_processing()
16100 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_do_compressed_ack_processing()
16101 rack->rc_tlp_in_progress = 0; in rack_do_compressed_ack_processing()
16102 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_do_compressed_ack_processing()
16112 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_do_compressed_ack_processing()
16113 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16117 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
16118 (rack->in_probe_rtt == 0) && in rack_do_compressed_ack_processing()
16119 rack->rc_gp_dyn_mul && in rack_do_compressed_ack_processing()
16120 rack->rc_always_pace) { in rack_do_compressed_ack_processing()
16122 rack_check_bottom_drag(tp, rack, so); in rack_do_compressed_ack_processing()
16126 rack->r_ctl.retran_during_recovery = 0; in rack_do_compressed_ack_processing()
16127 rack->rc_suspicious = 0; in rack_do_compressed_ack_processing()
16128 rack->r_ctl.dsack_byte_cnt = 0; in rack_do_compressed_ack_processing()
16129 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_do_compressed_ack_processing()
16130 if (rack->r_ctl.rc_went_idle_time == 0) in rack_do_compressed_ack_processing()
16131 rack->r_ctl.rc_went_idle_time = 1; in rack_do_compressed_ack_processing()
16132 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); in rack_do_compressed_ack_processing()
16136 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16137 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16138 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_compressed_ack_processing()
16147 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16207 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16285 if (rack->r_fast_output) { in rack_do_compressed_ack_processing()
16289 rack_gain_for_fastoutput(rack, tp, so, acked_amount); in rack_do_compressed_ack_processing()
16316 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); in rack_do_compressed_ack_processing()
16319 rack_handle_might_revert(tp, rack); in rack_do_compressed_ack_processing()
16321 if ((rack->r_wanted_output != 0) || in rack_do_compressed_ack_processing()
16322 (rack->r_fast_output != 0) || in rack_do_compressed_ack_processing()
16335 rack_free_trim(rack); in rack_do_compressed_ack_processing()
16339 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_compressed_ack_processing()
16340 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); in rack_do_compressed_ack_processing()
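Within the compressed-ack path above, a DSACK round is closed once the cumulative ACK reaches the recorded round end. A self-contained sketch of that check; SEQ_GEQ is the usual sequence-space compare:

#include <stdbool.h>
#include <stdint.h>

#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

static void
dsack_round_check(bool *round_seen, uint32_t ack, uint32_t round_end)
{
	if (*round_seen && SEQ_GEQ(ack, round_end))
		*round_seen = false;	/* this cumack ends the round */
}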
16369 struct tcp_rack *rack; in rack_do_segment_nounlock() local
16386 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_segment_nounlock()
16387 if (rack->rack_deferred_inited == 0) { in rack_do_segment_nounlock()
16394 rack_deferred_init(tp, rack); in rack_do_segment_nounlock()
16408 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16412 if ((rack->rc_always_pace == 1) && in rack_do_segment_nounlock()
16413 (rack->rc_ack_can_sendout_data == 0) && in rack_do_segment_nounlock()
16414 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_do_segment_nounlock()
16415 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { in rack_do_segment_nounlock()
16422 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; in rack_do_segment_nounlock()
16423 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { in rack_do_segment_nounlock()
16460 rack_log_pacing_delay_calc(rack, in rack_do_segment_nounlock()
16461 rack->r_ctl.gp_bw, in rack_do_segment_nounlock()
16464 rack_get_gp_est(rack), /* delRate */ in rack_do_segment_nounlock()
16465 rack_get_lt_bw(rack), /* rttProp */ in rack_do_segment_nounlock()
16482 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_segment_nounlock()
16483 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_segment_nounlock()
16485 rack->r_ctl.act_rcv_time = *tv; in rack_do_segment_nounlock()
16486 kern_prefetch(rack, &prev_state); in rack_do_segment_nounlock()
16554 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_segment_nounlock()
16555 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_segment_nounlock()
16561 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_segment_nounlock()
16574 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_do_segment_nounlock()
16575 if (rack->rack_no_prr == 0) in rack_do_segment_nounlock()
16576 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_do_segment_nounlock()
16579 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_do_segment_nounlock()
16581 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_do_segment_nounlock()
16582 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_do_segment_nounlock()
16583 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_do_segment_nounlock()
16584 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_do_segment_nounlock()
16585 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_do_segment_nounlock()
16587 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_do_segment_nounlock()
16642 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16643 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); in rack_do_segment_nounlock()
16686 rack_cc_after_idle(rack, tp); in rack_do_segment_nounlock()
16692 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_segment_nounlock()
16693 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_segment_nounlock()
16713 if ((rack->r_rcvpath_rtt_up == 1) && in rack_do_segment_nounlock()
16715 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { in rack_do_segment_nounlock()
16726 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) in rack_do_segment_nounlock()
16727 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); in rack_do_segment_nounlock()
16728 rack->r_rcvpath_rtt_up = 0; in rack_do_segment_nounlock()
16731 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); in rack_do_segment_nounlock()
16732 tcp_rack_xmit_timer_commit(rack, tp); in rack_do_segment_nounlock()
16739 if (rack->r_state == 0) { in rack_do_segment_nounlock()
16741 KASSERT(rack->rc_inp != NULL, in rack_do_segment_nounlock()
16743 if (rack->rc_inp == NULL) { in rack_do_segment_nounlock()
16744 rack->rc_inp = inp; in rack_do_segment_nounlock()
16768 rack_validate_fo_sendwin_up(tp, rack); in rack_do_segment_nounlock()
16819 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); in rack_do_segment_nounlock()
16823 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_segment_nounlock()
16824 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16825 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16826 (rack->rc_always_pace)) { in rack_do_segment_nounlock()
16828 rack_check_probe_rtt(rack, cts); in rack_do_segment_nounlock()
16830 rack_clear_rate_sample(rack); in rack_do_segment_nounlock()
16831 if ((rack->forced_ack) && in rack_do_segment_nounlock()
16833 rack_handle_probe_response(rack, tiwin, us_cts); in rack_do_segment_nounlock()
16840 rack->r_ctl.rc_rcvtime = cts; in rack_do_segment_nounlock()
16841 if (rack->r_state != tp->t_state) in rack_do_segment_nounlock()
16842 rack_set_state(tp, rack); in rack_do_segment_nounlock()
16844 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) in rack_do_segment_nounlock()
16846 prev_state = rack->r_state; in rack_do_segment_nounlock()
16852 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); in rack_do_segment_nounlock()
16854 retval = (*rack->r_substate) (m, th, so, in rack_do_segment_nounlock()
16863 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16864 (rack->rc_always_pace) && in rack_do_segment_nounlock()
16865 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16866 rack->in_probe_rtt && in rack_do_segment_nounlock()
16867 (rack->r_ctl.rc_time_probertt_starts == 0)) { in rack_do_segment_nounlock()
16872 rack_check_probe_rtt(rack, cts); in rack_do_segment_nounlock()
16874 if (rack->set_pacing_done_a_iw == 0) { in rack_do_segment_nounlock()
16878 rack->set_pacing_done_a_iw = 1; in rack_do_segment_nounlock()
16879 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_do_segment_nounlock()
16882 tcp_rack_xmit_timer_commit(rack, tp); in rack_do_segment_nounlock()
16903 if ((rack->r_wanted_output != 0) || in rack_do_segment_nounlock()
16905 (rack->r_fast_output != 0)) { in rack_do_segment_nounlock()
16916 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); in rack_do_segment_nounlock()
16917 rack_free_trim(rack); in rack_do_segment_nounlock()
16922 (tcp_in_hpts(rack->rc_tp) == 0)) { in rack_do_segment_nounlock()
16927 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); in rack_do_segment_nounlock()
16928 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); in rack_do_segment_nounlock()
16929 rack_free_trim(rack); in rack_do_segment_nounlock()
16944 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && in rack_do_segment_nounlock()
16945 (rack->rc_new_rnd_needed == 0) && in rack_do_segment_nounlock()
16951 rack_new_round_setup(tp, rack, tp->snd_una); in rack_do_segment_nounlock()
16954 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && in rack_do_segment_nounlock()
16957 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_do_segment_nounlock()
16962 (tcp_in_hpts(rack->rc_tp)) && in rack_do_segment_nounlock()
16963 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_segment_nounlock()
16969 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_do_segment_nounlock()
16971 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_do_segment_nounlock()
16972 rack->r_early = 1; in rack_do_segment_nounlock()
16973 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_do_segment_nounlock()
16976 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_do_segment_nounlock()
16988 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); in rack_do_segment_nounlock()
16993 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_segment_nounlock()
16997 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); in rack_do_segment_nounlock()
16999 rack->r_wanted_output = 0; in rack_do_segment_nounlock()
17034 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) in tcp_rack_output() argument
17041 if (tqhash_empty(rack->r_ctl.tqh)) { in tcp_rack_output()
17049 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in tcp_rack_output()
17050 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { in tcp_rack_output()
17056 rsm = rack_find_lowest_rsm(rack); in tcp_rack_output()
17061 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && in tcp_rack_output()
17077 srtt = rack_grab_rtt(tp, rack); in tcp_rack_output()
17080 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); in tcp_rack_output()
17100 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); in tcp_rack_output()
17101 rack->r_fast_output = 0; in tcp_rack_output()
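tcp_rack_output() retransmits on time rather than on duplicate-ACK count: the lowest outstanding rsm becomes due once the time since its last (re)send reaches the RACK threshold derived from SRTT. A sketch; simple_rack_thresh() is an illustrative stand-in for rack_calc_thresh_rack(), which also folds in observed reordering:

#include <stdbool.h>
#include <stdint.h>

static uint32_t
simple_rack_thresh(uint32_t srtt, unsigned int reord_shift)
{
	return (srtt + (srtt >> reord_shift));	/* srtt plus an allowance */
}

static bool
rack_rxt_due(uint32_t tsused, uint32_t ts_low, uint32_t thresh)
{
	/* tsused and ts_low share one microsecond timebase */
	return ((tsused - ts_low) >= thresh);
}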
17108 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, in rack_log_pacing_delay_calc() argument
17112 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pacing_delay_calc()
17133 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; in rack_log_pacing_delay_calc()
17134 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; in rack_log_pacing_delay_calc()
17135 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_pacing_delay_calc()
17136 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_pacing_delay_calc()
17137 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; in rack_log_pacing_delay_calc()
17139 log.u_bbr.use_lt_bw |= rack->r_late; in rack_log_pacing_delay_calc()
17141 log.u_bbr.use_lt_bw |= rack->r_early; in rack_log_pacing_delay_calc()
17143 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_pacing_delay_calc()
17145 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_pacing_delay_calc()
17147 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_pacing_delay_calc()
17149 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_pacing_delay_calc()
17151 log.u_bbr.use_lt_bw |= rack->gp_ready; in rack_log_pacing_delay_calc()
17153 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; in rack_log_pacing_delay_calc()
17154 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; in rack_log_pacing_delay_calc()
17155 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; in rack_log_pacing_delay_calc()
17158 if (rack->r_ctl.gp_bw == 0) in rack_log_pacing_delay_calc()
17161 log.u_bbr.cur_del_rate = rack_get_bw(rack); in rack_log_pacing_delay_calc()
17163 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; in rack_log_pacing_delay_calc()
17164 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; in rack_log_pacing_delay_calc()
17165 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); in rack_log_pacing_delay_calc()
17166 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { in rack_log_pacing_delay_calc()
17175 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pacing_delay_calc()
17176 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; in rack_log_pacing_delay_calc()
17178 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_pacing_delay_calc()
17180 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_pacing_delay_calc()
17182 log.u_bbr.bbr_state = rack->dgp_on; in rack_log_pacing_delay_calc()
17184 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; in rack_log_pacing_delay_calc()
17186 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_pacing_delay_calc()
17187 &rack->rc_inp->inp_socket->so_rcv, in rack_log_pacing_delay_calc()
17188 &rack->rc_inp->inp_socket->so_snd, in rack_log_pacing_delay_calc()
17195 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) in rack_get_pacing_len() argument
17199 user_max = rack->rc_user_set_max_segs * mss; in rack_get_pacing_len()
17200 if (rack->rc_force_max_seg) { in rack_get_pacing_len()
17203 if (rack->use_fixed_rate && in rack_get_pacing_len()
17204 ((rack->r_ctl.crte == NULL) || in rack_get_pacing_len()
17205 (bw != rack->r_ctl.crte->rate))) { in rack_get_pacing_len()
17210 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_len()
17215 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, in rack_get_pacing_len()
17216 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_len()
17219 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { in rack_get_pacing_len()
17220 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) in rack_get_pacing_len()
17221 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; in rack_get_pacing_len()
17223 if (rack->r_ctl.rc_user_set_min_segs && in rack_get_pacing_len()
17224 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) in rack_get_pacing_len()
17225 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; in rack_get_pacing_len()
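rack_get_pacing_len() starts from the burst size tcp_get_pacing_burst_size_w_divisor() picks for the rate, then raises it to any hybrid client-suggested floor and the user minimum; the user maximum computed at the top of the function presumably caps it in the elided lines. A sketch under those assumptions:

#include <stdint.h>

static uint32_t
clamp_pacing_len(uint32_t new_tso, uint32_t mss,
    uint32_t client_suggested_maxseg, uint32_t user_set_min_segs,
    uint32_t user_max)
{
	if (client_suggested_maxseg &&
	    ((client_suggested_maxseg * mss) > new_tso))
		new_tso = client_suggested_maxseg * mss;	/* hybrid floor */
	if (user_set_min_segs &&
	    ((user_set_min_segs * mss) > new_tso))
		new_tso = user_set_min_segs * mss;	/* user floor */
	if (user_max && (new_tso > user_max))
		new_tso = user_max;	/* assumed ceiling (not in fragment) */
	return (new_tso);
}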
17230 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, ui… in rack_arrive_at_discounted_rate() argument
17235 if (window_input < rc_init_window(rack)) { in rack_arrive_at_discounted_rate()
17241 reduced_win = rack_get_lt_bw(rack); in rack_arrive_at_discounted_rate()
17245 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { in rack_arrive_at_discounted_rate()
17250 if (rack->rack_hibeta == 0) { in rack_arrive_at_discounted_rate()
17254 reduced_win = window_input * rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17256 gain = rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17263 gain = rack_get_output_gain(rack, NULL); in rack_arrive_at_discounted_rate()
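rack_arrive_at_discounted_rate() scales the window input down while in recovery by the saved high-beta; the fragment shows the multiply, and the divide that turns a percentage back into bytes is elided, so it is assumed here. A hedged sketch:

#include <stdint.h>

static uint64_t
discounted_window(uint64_t window_input, uint8_t saved_hibeta)
{
	/* saved_hibeta assumed to be a percentage; the /100 is not
	 * visible in the fragment and is an assumption. */
	return ((window_input * saved_hibeta) / 100);
}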
17284 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, … in pace_to_fill_cwnd() argument
17288 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17289 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) in pace_to_fill_cwnd()
17291 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17293 if (rack->r_ctl.rc_last_us_rtt == 0) in pace_to_fill_cwnd()
17295 if (rack->rc_pace_fill_if_rttin_range && in pace_to_fill_cwnd()
17296 (rack->r_ctl.rc_last_us_rtt >= in pace_to_fill_cwnd()
17297 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { in pace_to_fill_cwnd()
17301 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17307 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in pace_to_fill_cwnd()
17308 if (rack->rc_fillcw_apply_discount) { in pace_to_fill_cwnd()
17311 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); in pace_to_fill_cwnd()
17317 if (fill_bw > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17318 fill_bw = rack->rc_tp->snd_wnd; in pace_to_fill_cwnd()
17321 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in pace_to_fill_cwnd()
17323 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17324 fill_bw = rack->r_ctl.fillcw_cap; in pace_to_fill_cwnd()
17335 gp = rack_get_gp_est(rack); in pace_to_fill_cwnd()
17336 lt_bw = rack_get_lt_bw(rack); in pace_to_fill_cwnd()
17343 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in pace_to_fill_cwnd()
17356 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, in pace_to_fill_cwnd()
17369 rack->r_via_fill_cw = 1; in pace_to_fill_cwnd()
17370 if (rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17371 (rack->r_ctl.crte != NULL)) { in pace_to_fill_cwnd()
17374 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in pace_to_fill_cwnd()
17379 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17381 rack_log_hdwr_pacing(rack, in pace_to_fill_cwnd()
17388 } else if ((rack->r_ctl.crte == NULL) && in pace_to_fill_cwnd()
17389 (rack->rack_hdrw_pacing == 0) && in pace_to_fill_cwnd()
17390 (rack->rack_hdw_pace_ena) && in pace_to_fill_cwnd()
17391 rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17392 (rack->rack_attempt_hdwr_pace == 0) && in pace_to_fill_cwnd()
17393 (rack->rc_inp->inp_route.ro_nh != NULL) && in pace_to_fill_cwnd()
17394 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in pace_to_fill_cwnd()
17401 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in pace_to_fill_cwnd()
17410 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { in pace_to_fill_cwnd()
17411 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in pace_to_fill_cwnd()
17413 fill_bw = rack->r_ctl.bw_rate_cap; in pace_to_fill_cwnd()
17424 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, in pace_to_fill_cwnd()
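pace_to_fill_cwnd() derives its fill rate as one window per RTT: take min(snd_cwnd, cwnd_to_use), never more than the peer's window, and divide by the last microsecond RTT. The scaling that makes the result bytes per second is implied by the microsecond divisor; the sketch below assumes it:

#include <stdint.h>

#define USEC_PER_SEC	1000000ULL

static uint64_t
fill_cw_rate(uint64_t snd_cwnd, uint64_t cwnd_to_use, uint64_t snd_wnd,
    uint64_t last_us_rtt, uint64_t fillcw_cap)
{
	uint64_t fill_bw = (snd_cwnd < cwnd_to_use) ? snd_cwnd : cwnd_to_use;

	if (fill_bw > snd_wnd)
		fill_bw = snd_wnd;	/* cannot outrun the receiver */
	fill_bw *= USEC_PER_SEC;	/* bytes/RTT -> bytes/sec */
	fill_bw /= last_us_rtt;
	if (fillcw_cap && (fill_bw >= fillcw_cap))
		fill_bw = fillcw_cap;	/* hard cap from the fragment */
	return (fill_bw);
}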
17432 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *r… in rack_get_pacing_delay() argument
17441 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_delay()
17445 if (rack->rc_always_pace == 0) { in rack_get_pacing_delay()
17461 if (rack->r_ctl.rc_rack_min_rtt) in rack_get_pacing_delay()
17462 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_get_pacing_delay()
17465 if (rack->r_ctl.rc_rack_largest_cwnd) in rack_get_pacing_delay()
17466 cwnd = rack->r_ctl.rc_rack_largest_cwnd; in rack_get_pacing_delay()
17468 cwnd = rack->r_ctl.cwnd_to_use; in rack_get_pacing_delay()
17494 if (rack->rc_pace_to_cwnd) { in rack_get_pacing_delay()
17497 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); in rack_get_pacing_delay()
17498 rack->rc_ack_can_sendout_data = 1; in rack_get_pacing_delay()
17499 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); in rack_get_pacing_delay()
17501 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); in rack_get_pacing_delay()
17511 if ((rack->r_rr_config == 1) && rsm) { in rack_get_pacing_delay()
17512 return (rack->r_ctl.rc_min_to); in rack_get_pacing_delay()
17514 if (rack->use_fixed_rate) { in rack_get_pacing_delay()
17515 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); in rack_get_pacing_delay()
17516 } else if ((rack->r_ctl.init_rate == 0) && in rack_get_pacing_delay()
17517 (rack->r_ctl.gp_bw == 0)) { in rack_get_pacing_delay()
17520 } else if (rack->dgp_on) { in rack_get_pacing_delay()
17521 bw_est = rack_get_bw(rack); in rack_get_pacing_delay()
17522 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); in rack_get_pacing_delay()
17526 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in rack_get_pacing_delay()
17527 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); in rack_get_pacing_delay()
17529 if (rate_wanted > rack->rc_tp->snd_wnd) in rack_get_pacing_delay()
17530 rate_wanted = rack->rc_tp->snd_wnd; in rack_get_pacing_delay()
17533 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in rack_get_pacing_delay()
17536 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, in rack_get_pacing_delay()
17537 rack->r_ctl.cwnd_to_use, in rack_get_pacing_delay()
17539 rack->r_ctl.rc_last_us_rtt, in rack_get_pacing_delay()
17543 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { in rack_get_pacing_delay()
17550 rack_rate_cap_bw(rack, &rate_wanted, &capped); in rack_get_pacing_delay()
17561 if (rack->r_is_v6) { in rack_get_pacing_delay()
17579 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17584 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17585 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17586 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17587 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17590 if (rack->r_ctl.crte && in rack_get_pacing_delay()
17591 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17597 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17604 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17605 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17606 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17607 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17610 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { in rack_get_pacing_delay()
17615 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17616 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17618 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17619 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17620 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17624 prev_fill = rack->r_via_fill_cw; in rack_get_pacing_delay()
17625 if ((rack->rc_pace_to_cwnd) && in rack_get_pacing_delay()
17627 (rack->dgp_on == 1) && in rack_get_pacing_delay()
17628 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17629 (rack->in_probe_rtt == 0) && in rack_get_pacing_delay()
17630 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_get_pacing_delay()
17635 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); in rack_get_pacing_delay()
17637 if ((rack->r_ctl.crte != NULL) && in rack_get_pacing_delay()
17638 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17644 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17651 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17652 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17653 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17654 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17655 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_get_pacing_delay()
17659 if ((rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_pacing_delay()
17660 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_pacing_delay()
17661 if ((rack->rack_hdw_pace_ena) && in rack_get_pacing_delay()
17663 (rack->rack_hdrw_pacing == 0) && in rack_get_pacing_delay()
17664 (rack->rack_attempt_hdwr_pace == 0)) { in rack_get_pacing_delay()
17669 rack->rack_attempt_hdwr_pace = 1; in rack_get_pacing_delay()
17670 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, in rack_get_pacing_delay()
17671 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17674 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17675 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17676 rack->rack_hdrw_pacing = 1; in rack_get_pacing_delay()
17677 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, in rack_get_pacing_delay()
17678 pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17679 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17680 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17681 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17683 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17687 } else if (rack->rack_hdrw_pacing && in rack_get_pacing_delay()
17688 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { in rack_get_pacing_delay()
17692 if (rack->r_up_only && in rack_get_pacing_delay()
17693 (rate_wanted < rack->r_ctl.crte->rate)) { in rack_get_pacing_delay()
17709 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) in rack_get_pacing_delay()
17712 if ((rate_wanted > rack->r_ctl.crte->rate) || in rack_get_pacing_delay()
17713 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { in rack_get_pacing_delay()
17720 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17721 bw_est, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17723 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17724 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17725 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17726 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17727 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17730 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, in rack_get_pacing_delay()
17731 rack->rc_tp, in rack_get_pacing_delay()
17732 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17735 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17741 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17742 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17743 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17746 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17748 } else if (nrte != rack->r_ctl.crte) { in rack_get_pacing_delay()
17749 rack->r_ctl.crte = nrte; in rack_get_pacing_delay()
17750 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, in rack_get_pacing_delay()
17751 segsiz, pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17752 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17753 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17754 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17756 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17760 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17761 rack_log_hdwr_pacing(rack, in rack_get_pacing_delay()
17762 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17764 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17770 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17771 (rack->rack_hdrw_pacing == 0)) { in rack_get_pacing_delay()
17782 if (rack->rc_tp->t_srtt) in rack_get_pacing_delay()
17783 srtt = rack->rc_tp->t_srtt; in rack_get_pacing_delay()
17787 … rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); in rack_get_pacing_delay()
17794 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); in rack_get_pacing_delay()
17796 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { in rack_get_pacing_delay()
17806 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; in rack_get_pacing_delay()
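The tail of rack_get_pacing_delay() converts a send length and wanted rate into a pacing slot; with a microsecond clock that is simply len/rate scaled to microseconds. A sketch of that conversion (the ENOBUF boost in the last line above then stretches the slot by a multiple of the hardware ring's time_between):

#include <stdint.h>

#define USEC_PER_SEC	1000000ULL

static uint32_t
pacing_slot_usec(uint32_t len, uint64_t rate_wanted)	/* bytes/sec */
{
	uint64_t lentim;

	if (rate_wanted == 0)
		return (0);	/* no estimate yet: caller falls back */
	lentim = ((uint64_t)len * USEC_PER_SEC) / rate_wanted;
	return ((uint32_t)lentim);
}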
17817 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, in rack_start_gp_measurement() argument
17837 max(rc_init_window(rack), in rack_start_gp_measurement()
17844 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17845 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_start_gp_measurement()
17846 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_start_gp_measurement()
17848 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17849 if (rack->in_probe_rtt) in rack_start_gp_measurement()
17850 rack->measure_saw_probe_rtt = 1; in rack_start_gp_measurement()
17851 else if ((rack->measure_saw_probe_rtt) && in rack_start_gp_measurement()
17852 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_start_gp_measurement()
17853 rack->measure_saw_probe_rtt = 0; in rack_start_gp_measurement()
17854 if (rack->rc_gp_filled) in rack_start_gp_measurement()
17855 tp->gput_ts = rack->r_ctl.last_cumack_advance; in rack_start_gp_measurement()
17861 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17871 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { in rack_start_gp_measurement()
17872 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17873 tp->gput_ack = startseq + max(rc_init_window(rack), in rack_start_gp_measurement()
17875 rack_log_pacing_delay_calc(rack, in rack_start_gp_measurement()
17880 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17883 rack_tend_gp_marks(tp, rack); in rack_start_gp_measurement()
17884 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17893 if (rack->r_ctl.rc_app_limited_cnt == 0) { in rack_start_gp_measurement()
17900 my_rsm = tqhash_min(rack->r_ctl.tqh); in rack_start_gp_measurement()
17907 if (rack->r_ctl.rc_first_appl == NULL) { in rack_start_gp_measurement()
17922 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17928 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); in rack_start_gp_measurement()
17932 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17957 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17967 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17978 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17979 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17980 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17981 if ((rack->r_ctl.cleared_app_ack == 1) && in rack_start_gp_measurement()
17982 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { in rack_start_gp_measurement()
17988 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17989 rack->r_ctl.cleared_app_ack = 0; in rack_start_gp_measurement()
17991 rack_log_pacing_delay_calc(rack, in rack_start_gp_measurement()
17996 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18000 rack_tend_gp_marks(tp, rack); in rack_start_gp_measurement()
18001 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
18013 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
18014 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
18015 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
18017 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); in rack_start_gp_measurement()
18019 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
18026 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
18041 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
18043 rack_tend_gp_marks(tp, rack); in rack_start_gp_measurement()
18044 rack_log_pacing_delay_calc(rack, in rack_start_gp_measurement()
18049 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18051 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
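rack_start_gp_measurement() arms a goodput measurement from startseq to an ACK target large enough to mean something: before any estimate exists, at least an initial window's worth; afterwards the computed measurement window. The second operand of the max is elided in the fragment, so the sketch assumes the measurement window:

#include <stdint.h>

static uint32_t
gp_ack_target(uint32_t startseq, int gp_filled, uint32_t init_window,
    uint32_t measure_window)
{
	if (gp_filled == 0) {
		/* no bw estimate yet: insist on at least an initial window */
		return (startseq + ((init_window > measure_window) ?
		    init_window : measure_window));
	}
	return (startseq + measure_window);
}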
18055 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, in rack_what_can_we_send() argument
18071 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_what_can_we_send()
18097 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, in rack_log_fsb() argument
18101 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_fsb()
18106 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_fsb()
18112 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_log_fsb()
18114 log.u_bbr.flex8 = rack->r_fsb_inited; in rack_log_fsb()
18115 log.u_bbr.applimited = rack->r_fast_output; in rack_log_fsb()
18116 log.u_bbr.bw_inuse = rack_get_bw(rack); in rack_log_fsb()
18117 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); in rack_log_fsb()
18123 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_fsb()
18295 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, in rack_fo_m_copym() argument
18301 m = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18302 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { in rack_fo_m_copym()
18309 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), in rack_fo_m_copym()
18312 rack, in rack_fo_m_copym()
18314 rack->r_ctl.fsb.o_t_len, in rack_fo_m_copym()
18315 rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18317 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); in rack_fo_m_copym()
18318 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); in rack_fo_m_copym()
18320 if (m->m_len < rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18325 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), in rack_fo_m_copym()
18328 rack, rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18329 rack->r_ctl.fsb.off)); in rack_fo_m_copym()
18331 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) in rack_fo_m_copym()
18332 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); in rack_fo_m_copym()
18334 rack->r_ctl.fsb.off = 0; in rack_fo_m_copym()
18335 rack->r_ctl.fsb.o_m_len = m->m_len; in rack_fo_m_copym()
18337 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18339 rack, m); in rack_fo_m_copym()
18342 soff = rack->r_ctl.fsb.off; in rack_fo_m_copym()
18347 rack, *plen, m, m->m_len)); in rack_fo_m_copym()
18350 *s_mb = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18352 &rack->r_ctl.fsb, in rack_fo_m_copym()
18353 seglimit, segsize, rack->r_ctl.fsb.hw_tls); in rack_fo_m_copym()
18359 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, in rack_log_queue_level() argument
18367 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_log_queue_level()
18368 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_log_queue_level()
18371 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_queue_level()
18374 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_log_queue_level()
18375 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_log_queue_level()
18376 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_log_queue_level()
18380 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_log_queue_level()
18382 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_queue_level()
18389 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, in rack_check_queue_level() argument
18398 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_check_queue_level()
18404 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_check_queue_level()
18425 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); in rack_check_queue_level()
18436 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_check_queue_level()
18439 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_check_queue_level()
18440 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_check_queue_level()
18441 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_check_queue_level()
18445 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_check_queue_level()
18448 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_check_queue_level()
18457 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, in rack_fast_rsm_output() argument
18491 if (rack->r_is_v6) { in rack_fast_rsm_output()
18492 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18497 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18511 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_rsm_output()
18512 inp = rack->rc_inp; in rack_fast_rsm_output()
18538 udp = rack->r_ctl.fsb.udp; in rack_fast_rsm_output()
18541 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_rsm_output()
18542 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18543 else if (rack->rc_user_set_max_segs) in rack_fast_rsm_output()
18544 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_rsm_output()
18562 th = rack->r_ctl.fsb.th; in rack_fast_rsm_output()
18617 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_rsm_output()
18661 if (rack->r_is_v6) in rack_fast_rsm_output()
18675 if (rack->r_is_v6) { in rack_fast_rsm_output()
18686 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18688 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); in rack_fast_rsm_output()
18714 if (rack->r_is_v6) { in rack_fast_rsm_output()
18766 if (rack->r_is_v6) { in rack_fast_rsm_output()
18767 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18781 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18794 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18799 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18802 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18807 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18808 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18815 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18819 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18824 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18825 if (rack->rack_no_prr) in rack_fast_rsm_output()
18828 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18829 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18830 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18833 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18834 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18835 log.u_bbr.bw_inuse = rack_get_bw(rack); in rack_fast_rsm_output()
18836 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18841 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); in rack_fast_rsm_output()
18845 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18861 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18866 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18874 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18876 rack_log_queue_level(tp, rack, len, tv, cts); in rack_fast_rsm_output()
18879 if (rack->r_is_v6) { in rack_fast_rsm_output()
18902 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18903 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18904 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18905 rack->r_early = 0; in rack_fast_rsm_output()
18906 rack->r_late = 0; in rack_fast_rsm_output()
18907 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18912 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18913 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18919 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18920 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18921 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18922 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18924 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18925 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18927 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
18930 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18932 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18949 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18950 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18951 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18952 rack_log_queue_level(tp, rack, len, tv, cts); in rack_fast_rsm_output()
18954 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18955 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18956 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18957 rack->rc_enobuf++; in rack_fast_rsm_output()
18960 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18962 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
18966 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); in rack_fast_rsm_output()
18968 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); in rack_fast_rsm_output()
18986 rack_sndbuf_autoscale(struct tcp_rack *rack) in rack_sndbuf_autoscale() argument
19015 tp = rack->rc_tp; in rack_sndbuf_autoscale()
19016 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
19017 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
19041 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, in rack_fast_output() argument
19080 if (rack->r_is_v6) { in rack_fast_output()
19081 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19087 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19095 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
19097 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
19098 inp = rack->rc_inp; in rack_fast_output()
19099 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19101 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
19114 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
19117 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
19118 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19119 else if (rack->rc_user_set_max_segs) in rack_fast_output()
19120 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
19139 th = rack->r_ctl.fsb.th; in rack_fast_output()
19185 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
19198 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
19202 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19213 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19214 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19222 if (rack->r_is_v6) in rack_fast_output()
19236 if (rack->r_is_v6) { in rack_fast_output()
19270 if (rack->r_is_v6) { in rack_fast_output()
19322 if (rack->r_is_v6) { in rack_fast_output()
19323 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19337 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19350 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19353 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19357 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19358 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19365 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19367 rack_log_queue_level(tp, rack, len, tv, cts); in rack_fast_output()
19369 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19373 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19374 if (rack->rack_no_prr) in rack_fast_output()
19377 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19378 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19379 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19382 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19383 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19384 log.u_bbr.bw_inuse = rack_get_bw(rack); in rack_fast_output()
19385 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19387 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); in rack_fast_output()
19391 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19393 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19396 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19407 if (rack->r_is_v6) { in rack_fast_output()
19431 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19432 rack->rc_hw_nobuf = 0; in rack_fast_output()
19433 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19434 rack->r_early = 0; in rack_fast_output()
19435 rack->r_late = 0; in rack_fast_output()
19436 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19438 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19440 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); in rack_fast_output()
19441 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19442 rack->lt_bw_up = 1; in rack_fast_output()
19444 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19452 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19453 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19455 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19456 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19457 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
19461 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); in rack_fast_output()
19463 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_fast_output()
19464 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); in rack_fast_output()
19468 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); in rack_fast_output()
19470 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_output()
19473 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); in rack_fast_output()
19476 if (rack->rc_new_rnd_needed) { in rack_fast_output()
19477 rack_new_round_starts(tp, rack, tp->snd_max); in rack_fast_output()
19488 if (len <= rack->r_ctl.fsb.left_to_send) in rack_fast_output()
19489 rack->r_ctl.fsb.left_to_send -= len; in rack_fast_output()
19491 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19492 if (rack->r_ctl.fsb.left_to_send < segsiz) { in rack_fast_output()
19493 rack->r_fast_output = 0; in rack_fast_output()
19494 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19496 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19497 rack_sndbuf_autoscale(rack); in rack_fast_output()
19498 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19505 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && in rack_fast_output()
19510 th = rack->r_ctl.fsb.th; in rack_fast_output()
19518 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); in rack_fast_output()
19519 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); in rack_fast_output()
19533 rack->r_fast_output = 0; in rack_fast_output()
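rack_fast_output() also maintains the long-term bandwidth sample: the first send arms lt_timemark/lt_seq, and later sends fold in bytes newly acked since lt_seq plus the time elapsed since lt_timemark. A standalone sketch of that bookkeeping:

#include <stdint.h>

struct lt_bw {
	uint64_t bytes;		/* accumulated acked bytes */
	uint64_t time;		/* accumulated usec */
	uint64_t timemark;	/* interval start (usec) */
	uint32_t seq;		/* snd_una at interval start */
};

static void
lt_bw_fold(struct lt_bw *lt, uint32_t snd_una, uint64_t tmark)
{
	lt->bytes += (snd_una - lt->seq);
	lt->seq = snd_una;
	if (tmark > lt->timemark) {
		lt->time += (tmark - lt->timemark);
		lt->timemark = tmark;
	}
}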
19538 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, in rack_setup_fast_output() argument
19544 rack->r_fast_output = 1; in rack_setup_fast_output()
19545 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_setup_fast_output()
19546 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_setup_fast_output()
19547 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_setup_fast_output()
19548 rack->r_ctl.fsb.tcp_flags = flags; in rack_setup_fast_output()
19549 rack->r_ctl.fsb.left_to_send = orig_len - len; in rack_setup_fast_output()
19550 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { in rack_setup_fast_output()
19552 rack->r_fast_output = 0; in rack_setup_fast_output()
19556 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); in rack_setup_fast_output()
19559 rack->r_ctl.fsb.hw_tls = 1; in rack_setup_fast_output()
19561 rack->r_ctl.fsb.hw_tls = 0; in rack_setup_fast_output()
19562 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), in rack_setup_fast_output()
19564 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), in rack_setup_fast_output()
19566 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_setup_fast_output()
19567 rack->r_fast_output = 0; in rack_setup_fast_output()
19569 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) in rack_setup_fast_output()
19570 rack->r_ctl.fsb.rfo_apply_push = 1; in rack_setup_fast_output()
19572 rack->r_ctl.fsb.rfo_apply_push = 0; in rack_setup_fast_output()
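
rack_setup_fast_output() above arms the prepared fast-send block only when the fast path is worth taking: at least one full pacer burst must remain, the remainder is rounded down to whole bursts, and a PUSH is scheduled when the block will drain everything the application has queued. A sketch of that gating, with stand-in names for the fsb fields (pace_max_seg is assumed nonzero):

#include <stdbool.h>
#include <stdint.h>

struct fast_send_block {
	uint32_t left_to_send;	/* bytes the fast path may still emit */
	bool     armed;		/* take the fast path on the next send? */
	bool     apply_push;	/* set PSH when the block drains the buffer */
};

static void
fsb_arm(struct fast_send_block *fsb, uint32_t orig_len, uint32_t sent_len,
    uint32_t pace_max_seg, uint32_t segsiz, uint32_t sb_remaining)
{
	fsb->left_to_send = orig_len - sent_len;
	if (fsb->left_to_send < pace_max_seg) {
		/* Not even one full pacer burst left: slow path only. */
		fsb->armed = false;
		fsb->left_to_send = 0;
		return;
	}
	/* Emit only whole pacer bursts from the fast path. */
	fsb->left_to_send -= fsb->left_to_send % pace_max_seg;
	fsb->armed = (fsb->left_to_send >= segsiz);
	/* PUSH when this block consumes all queued, unsent data. */
	fsb->apply_push = (fsb->left_to_send == sb_remaining);
}
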
19577 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) in rack_get_hpts_pacing_min_for_bw() argument
19583 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); in rack_get_hpts_pacing_min_for_bw()
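
rack_get_hpts_pacing_min_for_bw() is a one-line rate-times-time conversion: given the goodput estimate gp_bw in bytes per second and the pacer's minimum sleep in microseconds, the largest burst that still honors the rate is bw * min_time / USEC_PER_SEC. A sketch, with an assumed 250 usec minimum sleep in the worked example:

#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

/* bytes = (bytes/sec) * usec / (usec/sec) */
static uint32_t
pacing_min_burst(uint64_t bw_bytes_per_sec, uint64_t min_sleep_usec)
{
	return ((uint32_t)((bw_bytes_per_sec * min_sleep_usec) /
	    USEC_PER_SEC));
}

/*
 * Example: 12.5 MB/s (100 Mbit/s) with a 250 usec minimum sleep gives
 * 12500000 * 250 / 1000000 = 3125 bytes, roughly two 1448-byte
 * segments; any smaller burst would force the pacer to sleep for less
 * than its minimum to hold the rate.
 */
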
19589 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) in rack_check_collapsed() argument
19595 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_check_collapsed()
19598 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19602 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { in rack_check_collapsed()
19614 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_check_collapsed()
19616 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_check_collapsed()
19617 rack->r_ctl.high_collapse_point)) { in rack_check_collapsed()
19618 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19624 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); in rack_check_collapsed()
19626 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19632 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19639 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) in rack_validate_sizes() argument
19641 if ((rack->full_size_rxt == 0) && in rack_validate_sizes()
19642 (rack->shape_rxt_to_pacing_min == 0) && in rack_validate_sizes()
19645 } else if (rack->shape_rxt_to_pacing_min && in rack_validate_sizes()
19646 rack->gp_ready) { in rack_validate_sizes()
19650 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); in rack_validate_sizes()
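
rack_validate_sizes() (and the two inline copies of the same test later in rack_output()) picks a retransmit clamp: the classic policy caps a retransmission at one segment, full_size_rxt lifts the cap entirely, and shape_rxt_to_pacing_min, once a goodput estimate exists, caps it at the burst computed above. A sketch under those assumptions:

#include <stdbool.h>
#include <stdint.h>

static void
clamp_rxt_len(int32_t *len, int32_t segsiz, int32_t pace_min_burst,
    bool full_size_rxt, bool shape_to_pacing_min, bool gp_ready)
{
	if (full_size_rxt)
		return;			/* resend the whole run */
	if (shape_to_pacing_min && gp_ready) {
		/* Cap at what the minimum pacer sleep can carry. */
		if (*len > pace_min_burst)
			*len = pace_min_burst;
	} else if (!shape_to_pacing_min && (*len > segsiz)) {
		*len = segsiz;		/* classic one-segment retransmit */
	}
}
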
19685 struct tcp_rack *rack; in rack_output() local
19735 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_output()
19750 if (rack->rack_deferred_inited == 0) { in rack_output()
19757 rack_deferred_init(tp, rack); in rack_output()
19766 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ in rack_output()
19773 if (rack->r_state) { in rack_output()
19775 isipv6 = rack->r_is_v6; in rack_output()
19777 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; in rack_output()
19783 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && in rack_output()
19784 tcp_in_hpts(rack->rc_tp)) { in rack_output()
19789 rack_timer_cancel(tp, rack, cts, __LINE__); in rack_output()
19792 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19793 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { in rack_output()
19795 delayed = cts - rack->r_ctl.rc_last_output_to; in rack_output()
19800 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_output()
19803 retval = rack_process_timers(tp, rack, cts, hpts_calling, in rack_output()
19817 if (rack->rc_in_persist) { in rack_output()
19818 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19820 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); in rack_output()
19827 if ((rack->rc_ack_required == 1) && in rack_output()
19828 (rack->r_timer_override == 0)){ in rack_output()
19830 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19832 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); in rack_output()
19839 if ((rack->r_timer_override) || in rack_output()
19840 (rack->rc_ack_can_sendout_data) || in rack_output()
19843 rack->rc_ack_can_sendout_data = 0; in rack_output()
19844 if (tcp_in_hpts(rack->rc_tp)) in rack_output()
19845 tcp_hpts_remove(rack->rc_tp); in rack_output()
19846 } else if (tcp_in_hpts(rack->rc_tp)) { in rack_output()
19863 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19864 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_output()
19865 early = rack->r_ctl.rc_last_output_to - cts; in rack_output()
19868 if (delayed && (rack->rc_always_pace == 1)) { in rack_output()
19869 rack->r_ctl.rc_agg_delayed += delayed; in rack_output()
19870 rack->r_late = 1; in rack_output()
19871 } else if (early && (rack->rc_always_pace == 1)) { in rack_output()
19872 rack->r_ctl.rc_agg_early += early; in rack_output()
19873 rack->r_early = 1; in rack_output()
19874 } else if (rack->rc_always_pace == 0) { in rack_output()
19876 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; in rack_output()
19877 rack->r_early = rack->r_late = 0; in rack_output()
19880 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_output()
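
The block above is the pacer's slippage ledger: when rack_output() runs, the gap between now and the scheduled output time is classified as lateness or earliness and accumulated (only while pacing is on), so later pacing slots can be shrunk or stretched to compensate; with pacing off, both ledgers are cleared. A compact sketch — the kernel uses the wrap-safe TSTMP_GEQ/TSTMP_GT macros on 32-bit timestamps, plain compares stand in here:

#include <stdbool.h>
#include <stdint.h>

struct pace_slip {
	uint32_t agg_early;	/* usecs we have run ahead of schedule */
	uint32_t agg_delayed;	/* usecs we have run behind schedule */
	bool     early, late;
};

static void
note_output_time(struct pace_slip *ps, uint32_t now, uint32_t deadline,
    bool pacing)
{
	if (!pacing) {
		/* Not pacing: slippage is meaningless, forget it. */
		ps->agg_early = ps->agg_delayed = 0;
		ps->early = ps->late = false;
		return;
	}
	if (now > deadline) {		/* woke up past the deadline */
		ps->agg_delayed += now - deadline;
		ps->late = true;
	} else if (now < deadline) {	/* woke up ahead of it */
		ps->agg_early += deadline - now;
		ps->early = true;
	}
}
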
19881 rack->r_wanted_output = 0; in rack_output()
19882 rack->r_timer_override = 0; in rack_output()
19883 if ((tp->t_state != rack->r_state) && in rack_output()
19885 rack_set_state(tp, rack); in rack_output()
19887 if ((rack->r_fast_output) && in rack_output()
19893 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); in rack_output()
19897 inp = rack->rc_inp; in rack_output()
19903 inp = rack->rc_inp; in rack_output()
19914 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); in rack_output()
19929 rack_cc_after_idle(rack, tp); in rack_output()
19939 rack->r_ctl.rc_went_idle_time && in rack_output()
19940 (cts > rack->r_ctl.rc_went_idle_time)) { in rack_output()
19941 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); in rack_output()
19944 if (rack->in_probe_rtt == 0) { in rack_output()
19945 rack->r_ctl.rc_lower_rtt_us_cts = cts; in rack_output()
19946 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19947 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19948 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19950 rack_exit_probertt(rack, cts); in rack_output()
19956 (rack->r_ctl.fsb.tcp_ip_hdr) && in rack_output()
19957 (rack->r_fsb_inited == 0) && in rack_output()
19958 (rack->r_state != TCPS_CLOSED)) in rack_output()
19959 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); in rack_output()
19960 if (rack->rc_sendvars_notset == 1) { in rack_output()
19961 rack->rc_sendvars_notset = 0; in rack_output()
19967 if ((rack->rack_no_prr == 1) && in rack_output()
19968 (rack->rc_always_pace == 0)) { in rack_output()
19979 rack->rack_no_prr = 0; in rack_output()
19981 if ((rack->pcm_enabled == 1) && in rack_output()
19982 (rack->pcm_needed == 0) && in rack_output()
19994 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_output()
19995 rack->r_ctl.pcm_idle_rounds += rtts_idle; in rack_output()
19996 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_output()
19997 rack->pcm_needed = 1; in rack_output()
19998 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round); in rack_output()
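
The pcm_needed gate above schedules a path-capacity measurement (PCM) once enough rounds have passed since the last one, counting idle time converted into round equivalents. The test appears to reduce to a sketch like this, assuming monotone round counters:

#include <stdbool.h>
#include <stdint.h>

/*
 * Due when rounds elapsed since the last measurement, plus rounds
 * "spent" idle, reach the configured cadence (rack_pcm_every_n_rounds
 * in the listing).
 */
static bool
pcm_due(uint32_t current_round, uint32_t last_pcm_round,
    uint32_t idle_rounds, uint32_t every_n_rounds)
{
	return (((current_round - last_pcm_round) + idle_rounds) >=
	    every_n_rounds);
}
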
20007 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_output()
20009 if (rack->r_ctl.rc_pace_max_segs == 0) in rack_output()
20010 pace_max_seg = rack->rc_user_set_max_segs * segsiz; in rack_output()
20012 pace_max_seg = rack->r_ctl.rc_pace_max_segs; in rack_output()
20014 (rack->r_ctl.pcm_max_seg == 0)) { in rack_output()
20020 rack->r_ctl.pcm_max_seg = rc_init_window(rack); in rack_output()
20021 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { in rack_output()
20025 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; in rack_output()
20028 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { in rack_output()
20035 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) in rack_output()
20036 cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
20039 if ((cwa >= rack->r_ctl.pcm_max_seg) && in rack_output()
20040 (rw_avail > rack->r_ctl.pcm_max_seg)) { in rack_output()
20042 pace_max_seg = rack->r_ctl.pcm_max_seg; in rack_output()
20044 rack->r_fast_output = 0; in rack_output()
20047 rack_log_pcm(rack, 4, in rack_output()
20048 cwa, rack->r_ctl.pcm_max_seg, rw_avail); in rack_output()
20052 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_output()
20054 while (rack->rc_free_cnt < rack_free_cache) { in rack_output()
20055 rsm = rack_alloc(rack); in rack_output()
20064 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_output()
20065 rack->rc_free_cnt++; in rack_output()
20077 if (rack->r_ctl.rc_resend) { in rack_output()
20079 rsm = rack->r_ctl.rc_resend; in rack_output()
20080 rack->r_ctl.rc_resend = NULL; in rack_output()
20087 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20089 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); in rack_output()
20090 } else if (rack->r_collapse_point_valid && in rack_output()
20091 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { in rack_output()
20097 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); in rack_output()
20098 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_output()
20100 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_output()
20101 rack->r_ctl.high_collapse_point)) in rack_output()
20102 rack->r_collapse_point_valid = 0; in rack_output()
20109 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); in rack_output()
20110 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { in rack_output()
20121 tp, rack, rsm, rsm->r_start, tp->snd_una); in rack_output()
20128 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20131 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); in rack_output()
20138 } else if (rack->r_ctl.rc_tlpsend) { in rack_output()
20149 rsm = rack->r_ctl.rc_tlpsend; in rack_output()
20152 rack->r_ctl.rc_tlpsend = NULL; in rack_output()
20160 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20165 if (rack->r_must_retran && in rack_output()
20187 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); in rack_output()
20201 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_output()
20204 rack->r_must_retran = 0; in rack_output()
20205 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20215 rack->r_must_retran = 0; in rack_output()
20216 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20224 if ((rack->full_size_rxt == 0) && in rack_output()
20225 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20228 else if (rack->shape_rxt_to_pacing_min && in rack_output()
20229 rack->gp_ready) { in rack_output()
20233 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); in rack_output()
20250 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_output()
20252 if (!rack->alloc_limit_reported) { in rack_output()
20253 rack->alloc_limit_reported = 1; in rack_output()
20271 if (rsm && rack->r_fsb_inited && in rack_output()
20276 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); in rack_output()
20288 rack->rack_enable_scwnd) { in rack_output()
20290 if (rack->gp_ready && in rack_output()
20291 (rack->rack_attempted_scwnd == 0) && in rack_output()
20292 (rack->r_ctl.rc_scw == NULL) && in rack_output()
20296 rack->rack_attempted_scwnd = 1; in rack_output()
20297 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, in rack_output()
20298 &rack->r_ctl.rc_scw_index, in rack_output()
20301 if (rack->r_ctl.rc_scw && in rack_output()
20302 (rack->rack_scwnd_is_idle == 1) && in rack_output()
20305 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
20306 rack->rack_scwnd_is_idle = 0; in rack_output()
20308 if (rack->r_ctl.rc_scw) { in rack_output()
20310 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, in rack_output()
20311 rack->r_ctl.rc_scw_index, in rack_output()
20326 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_output()
20348 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { in rack_output()
20349 if (rack->r_ctl.rc_tlp_new_data) { in rack_output()
20351 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { in rack_output()
20352 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); in rack_output()
20354 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { in rack_output()
20360 len = rack->r_ctl.rc_tlp_new_data; in rack_output()
20362 rack->r_ctl.rc_tlp_new_data = 0; in rack_output()
20364 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); in rack_output()
20366 if ((rack->r_ctl.crte == NULL) && in rack_output()
20368 (rack->full_size_rxt == 0) && in rack_output()
20369 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20379 } else if (rack->shape_rxt_to_pacing_min && in rack_output()
20380 rack->gp_ready) { in rack_output()
20384 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); in rack_output()
20399 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { in rack_output()
20419 if (len > rack->r_ctl.rc_prr_sndcnt) { in rack_output()
20420 len = rack->r_ctl.rc_prr_sndcnt; in rack_output()
20434 if (rack->r_ctl.rc_prr_sendalot == 0) { in rack_output()
20535 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20545 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20563 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20566 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20581 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20582 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20594 } else if ((rack->r_ctl.crte != NULL) && in rack_output()
20597 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && in rack_output()
20627 rack_sndbuf_autoscale(rack); in rack_output()
20862 rack_send_ack_challange(rack); in rack_output()
20865 rack->r_ctl.fsb.recwin = recwin; in rack_output()
20866 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); in rack_output()
20871 rack->r_fsb_inited && in rack_output()
20874 (rack->r_must_retran == 0) && in rack_output()
20882 rack_setup_fast_output(tp, rack, sb, len, orig_len, in rack_output()
20885 rack->r_fast_output = 0; in rack_output()
20886 rack_log_fsb(rack, tp, so, flags, in rack_output()
20896 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
20905 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
20906 rack->r_early = 0; in rack_output()
20907 rack->r_late = 0; in rack_output()
20908 rack->r_ctl.rc_agg_early = 0; in rack_output()
20910 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), in rack_output()
20915 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20920 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20944 (rack->rack_no_prr == 0) && in rack_output()
20945 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { in rack_output()
20951 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); in rack_output()
21008 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_output()
21009 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_output()
21016 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
21018 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_output()
21019 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_output()
21026 if (rack->r_ctl.rc_end_appl) in rack_output()
21027 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_output()
21028 rack->r_ctl.rc_end_appl = rsm; in rack_output()
21031 rack->r_ctl.rc_app_limited_cnt++; in rack_output()
21034 rack_log_pacing_delay_calc(rack, in rack_output()
21035 rack->r_ctl.rc_app_limited_cnt, seq, in rack_output()
21044 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { in rack_output()
21046 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_output()
21048 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); in rack_output()
21049 …rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_u… in rack_output()
21053 rack->r_ctl.rc_scw) { in rack_output()
21054 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
21055 rack->rack_scwnd_is_idle = 1; in rack_output()
21078 if ((rack->r_ctl.crte != NULL) && in rack_output()
21080 ((rack->rc_hw_nobuf == 1) || in rack_output()
21088 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); in rack_output()
21090 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
21091 rack->r_ctl.rc_agg_early = 0; in rack_output()
21092 rack->r_early = 0; in rack_output()
21093 rack->r_late = 0; in rack_output()
21125 rack_send_ack_challange(rack); in rack_output()
21135 (rack->pcm_in_progress == 0) && in rack_output()
21136 (rack->r_ctl.pcm_max_seg > 0) && in rack_output()
21137 (len >= rack->r_ctl.pcm_max_seg)) { in rack_output()
21140 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21142 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21242 if ((rack->r_rcvpath_rtt_up == 1) && in rack_output()
21243 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { in rack_output()
21262 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && in rack_output()
21266 (rack->r_ctl.current_round != 0) && in rack_output()
21268 (rack->r_rcvpath_rtt_up == 0)) { in rack_output()
21269 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; in rack_output()
21270 rack->r_ctl.last_time_of_arm_rcv = cts; in rack_output()
21271 rack->r_rcvpath_rtt_up = 1; in rack_output()
21462 rack_send_ack_challange(rack); in rack_output()
21627 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21630 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21634 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21636 th = rack->r_ctl.fsb.th; in rack_output()
21637 udp = rack->r_ctl.fsb.udp; in rack_output()
21756 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21760 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_output()
21780 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21783 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21880 if ((rack->r_ctl.crte != NULL) && in rack_output()
21881 (rack->rc_hw_nobuf == 0) && in rack_output()
21883 rack_log_queue_level(tp, rack, len, &tv, cts); in rack_output()
21886 if (tcp_bblogging_on(rack->rc_tp)) { in rack_output()
21890 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_output()
21891 if (rack->rack_no_prr) in rack_output()
21894 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_output()
21895 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_output()
21896 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_output()
21899 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_output()
21900 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_output()
21901 log.u_bbr.bw_inuse = rack_get_bw(rack); in rack_output()
21902 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_output()
21906 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_output()
21918 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); in rack_output()
21924 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
21948 log.u_bbr.delRate |= rack->r_must_retran; in rack_output()
21952 log.u_bbr.delRate = rack->r_must_retran; in rack_output()
21978 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); in rack_output()
22017 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; in rack_output()
22072 rack->pcm_in_progress = 1; in rack_output()
22073 rack->pcm_needed = 0; in rack_output()
22074 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
22077 if (rack->lt_bw_up == 0) { in rack_output()
22078 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); in rack_output()
22079 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22080 rack->lt_bw_up = 1; in rack_output()
22081 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { in rack_output()
22088 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_output()
22089 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22091 if (tmark > rack->r_ctl.lt_timemark) { in rack_output()
22092 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_output()
22093 rack->r_ctl.lt_timemark = tmark; in rack_output()
22097 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_output()
22101 rack->rc_last_sent_tlp_past_cumack = 0; in rack_output()
22102 rack->rc_last_sent_tlp_seq_valid = 1; in rack_output()
22103 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_output()
22104 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_output()
22106 if (rack->rc_hw_nobuf) { in rack_output()
22107 rack->rc_hw_nobuf = 0; in rack_output()
22108 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22109 rack->r_early = 0; in rack_output()
22110 rack->r_late = 0; in rack_output()
22111 rack->r_ctl.rc_agg_early = 0; in rack_output()
22115 rack->rc_gp_saw_rec = 1; in rack_output()
22119 rack->rc_gp_saw_ca = 1; in rack_output()
22122 rack->rc_gp_saw_ss = 1; in rack_output()
22142 if ((rack->rack_no_prr == 0) && in rack_output()
22145 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_output()
22146 rack->r_ctl.rc_prr_sndcnt -= len; in rack_output()
22148 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
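
Two PRR touch points are visible in this listing: earlier, new-data length is clamped to rc_prr_sndcnt (the Proportional Rate Reduction send budget), and here the budget is debited by what was just sent, saturating at zero. As a pair of illustrative helpers:

#include <stdint.h>

/* During recovery, never send more than the PRR budget allows. */
static uint32_t
prr_clamp(uint32_t budget, uint32_t want)
{
	return (want > budget ? budget : want);
}

/* After a send, debit the budget by what actually went out. */
static uint32_t
prr_debit(uint32_t budget, uint32_t sent)
{
	return (budget >= sent ? budget - sent : 0);
}
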
22163 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_output()
22174 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; in rack_output()
22185 rack->rc_tlp_in_progress = 0; in rack_output()
22186 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_output()
22194 rack->rc_tlp_in_progress = 1; in rack_output()
22195 rack->r_ctl.rc_tlp_cnt_out++; in rack_output()
22208 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); in rack_output()
22227 if (rack->rc_new_rnd_needed) { in rack_output()
22228 rack_new_round_starts(tp, rack, tp->snd_max); in rack_output()
22243 rack_start_gp_measurement(tp, rack, startseq, sb_offset); in rack_output()
22253 if (rack->r_fast_output && len) { in rack_output()
22254 if (rack->r_ctl.fsb.left_to_send > len) in rack_output()
22255 rack->r_ctl.fsb.left_to_send -= len; in rack_output()
22257 rack->r_ctl.fsb.left_to_send = 0; in rack_output()
22258 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_output()
22259 rack->r_fast_output = 0; in rack_output()
22260 if (rack->r_fast_output) { in rack_output()
22261 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_output()
22262 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_output()
22263 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_output()
22310 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22311 rack->r_early = 0; in rack_output()
22312 rack->r_late = 0; in rack_output()
22313 rack->r_ctl.rc_agg_early = 0; in rack_output()
22343 if (rack->r_ctl.crte != NULL) { in rack_output()
22344 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_output()
22345 if (tcp_bblogging_on(rack->rc_tp)) in rack_output()
22346 rack_log_queue_level(tp, rack, len, &tv, cts); in rack_output()
22348 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_output()
22349 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_output()
22350 if (rack->rc_enobuf < 0x7f) in rack_output()
22351 rack->rc_enobuf++; in rack_output()
22354 if (rack->r_ctl.crte != NULL) { in rack_output()
22356 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_output()
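
The ENOBUFS path above implements a linear backoff: each consecutive out-of-buffers error from the driver adds another millisecond to the pacer sleep before the retry, with the counter capped at 0x7f so it fits its small field. A sketch, with USEC_PER_MSEC standing in for HPTS_USEC_IN_MSEC:

#include <stdint.h>

#define USEC_PER_MSEC 1000U

/* Returns the pacer sleep (usec) to apply before retrying the send. */
static uint32_t
enobuf_backoff(uint8_t *enobuf_cnt)
{
	uint32_t slot_usec = (1U + *enobuf_cnt) * USEC_PER_MSEC;

	if (*enobuf_cnt < 0x7f)	/* cap so the counter cannot overflow */
		(*enobuf_cnt)++;
	return (slot_usec);
}
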
22381 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); in rack_output()
22404 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); in rack_output()
22416 rack->rc_enobuf = 0; in rack_output()
22418 rack->r_ctl.retran_during_recovery += len; in rack_output()
22458 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); in rack_output()
22461 rack->r_ent_rec_ns = 0; in rack_output()
22462 if (rack->r_must_retran) { in rack_output()
22464 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_output()
22465 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22469 rack->r_must_retran = 0; in rack_output()
22470 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22472 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22477 rack->r_must_retran = 0; in rack_output()
22478 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22481 rack->r_ctl.fsb.recwin = recwin; in rack_output()
22483 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22497 rack->r_fsb_inited && in rack_output()
22500 (rack->r_must_retran == 0) && in rack_output()
22508 rack_setup_fast_output(tp, rack, sb, len, orig_len, in rack_output()
22511 rack->r_fast_output = 0; in rack_output()
22512 rack_log_fsb(rack, tp, so, flags, in rack_output()
22524 (rack->r_must_retran == 0) && in rack_output()
22525 rack->r_fsb_inited && in rack_output()
22535 rack_setup_fast_output(tp, rack, sb, len, orig_len, in rack_output()
22537 if (rack->r_fast_output) { in rack_output()
22539 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); in rack_output()
22553 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); in rack_output()
22576 rack_update_seg(struct tcp_rack *rack) in rack_update_seg() argument
22580 orig_val = rack->r_ctl.rc_pace_max_segs; in rack_update_seg()
22581 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_update_seg()
22582 if (orig_val != rack->r_ctl.rc_pace_max_segs) in rack_update_seg()
22583 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); in rack_update_seg()
22592 struct tcp_rack *rack; in rack_mtu_change() local
22595 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_mtu_change()
22596 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { in rack_mtu_change()
22602 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_mtu_change()
22605 rack->r_fast_output = 0; in rack_mtu_change()
22606 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, in rack_mtu_change()
22607 rack->r_ctl.rc_sacked); in rack_mtu_change()
22608 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_mtu_change()
22609 rack->r_must_retran = 1; in rack_mtu_change()
22611 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mtu_change()
22615 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_mtu_change()
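
rack_mtu_change() and the r_must_retran bookkeeping seen earlier form one mechanism: when the path MTU shrinks, the stack records how much is in flight (rc_out_at_rto) and up to where (rc_snd_max_at_rto), flags the outstanding map entries for retransmission, and clears the SACK filter; the output path then pays down that debt as spans are re-sent and drops the flag once the old right edge is re-covered. A sketch of just the state machine, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct must_retran_state {
	uint32_t out_at_rto;	 /* bytes still owed for re-send */
	uint32_t snd_max_at_rto; /* old right edge to re-cover */
	bool     must_retran;
};

/* MTU shrank: everything in flight goes out again at the new size. */
static void
mark_all_for_retran(struct must_retran_state *m, uint32_t flight,
    uint32_t snd_max)
{
	m->out_at_rto = flight;
	m->snd_max_at_rto = snd_max;
	m->must_retran = true;
}

/* Re-sent [start, end): pay down the debt; done once the old right
 * edge is covered (wrap-safe sequence compare, as SEQ_GEQ does). */
static void
note_retran(struct must_retran_state *m, uint32_t start, uint32_t end)
{
	uint32_t span = end - start;

	m->out_at_rto -= (span < m->out_at_rto) ? span : m->out_at_rto;
	if ((int32_t)(end - m->snd_max_at_rto) >= 0) {
		m->must_retran = false;
		m->out_at_rto = 0;
	}
}
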
22621 rack_set_dgp(struct tcp_rack *rack) in rack_set_dgp() argument
22623 if (rack->dgp_on == 1) in rack_set_dgp()
22625 if ((rack->use_fixed_rate == 1) && in rack_set_dgp()
22626 (rack->rc_always_pace == 1)) { in rack_set_dgp()
22633 if (rack->rc_always_pace == 1) { in rack_set_dgp()
22634 rack_remove_pacing(rack); in rack_set_dgp()
22638 rack->r_ctl.pacing_method |= RACK_DGP_PACING; in rack_set_dgp()
22639 rack->rc_fillcw_apply_discount = 0; in rack_set_dgp()
22640 rack->dgp_on = 1; in rack_set_dgp()
22641 rack->rc_always_pace = 1; in rack_set_dgp()
22642 rack->rc_pace_dnd = 1; in rack_set_dgp()
22643 rack->use_fixed_rate = 0; in rack_set_dgp()
22644 if (rack->gp_ready) in rack_set_dgp()
22645 rack_set_cc_pacing(rack); in rack_set_dgp()
22646 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_dgp()
22647 rack->rack_attempt_hdwr_pace = 0; in rack_set_dgp()
22649 rack->full_size_rxt = 1; in rack_set_dgp()
22650 rack->shape_rxt_to_pacing_min = 0; in rack_set_dgp()
22652 rack->r_use_cmp_ack = 1; in rack_set_dgp()
22653 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && in rack_set_dgp()
22654 rack->r_use_cmp_ack) in rack_set_dgp()
22655 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_dgp()
22657 rack->rack_enable_scwnd = 1; in rack_set_dgp()
22659 rack->rc_gp_dyn_mul = 1; in rack_set_dgp()
22661 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_set_dgp()
22663 rack->r_rr_config = 3; in rack_set_dgp()
22665 rack->r_ctl.rc_no_push_at_mrtt = 2; in rack_set_dgp()
22667 rack->rc_pace_to_cwnd = 1; in rack_set_dgp()
22668 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_dgp()
22669 rack->rtt_limit_mul = 0; in rack_set_dgp()
22671 rack->rack_no_prr = 1; in rack_set_dgp()
22673 rack->r_limit_scw = 1; in rack_set_dgp()
22675 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_set_dgp()
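
rack_set_dgp() is a profile switch rather than an algorithm: it appears to refuse to stack derived goodput pacing on top of a configured fixed-rate pacer, tears down whatever pacing is active, then turns on the DGP cluster of settings (always pace, dynamic multipliers, no PRR, pace-to-cwnd, and so on) in one place. The shape of that switch, with illustrative flags and a hypothetical teardown helper standing in for rack_remove_pacing():

#include <errno.h>

struct pacing_mode {
	int dgp_on;		/* derived goodput pacing active */
	int always_pace;	/* some pacer owns the connection */
	int use_fixed_rate;	/* a fixed-rate pacer is configured */
};

/* Hypothetical stand-in for the real pacing teardown. */
static void
remove_pacing(struct pacing_mode *pm)
{
	pm->always_pace = 0;
	pm->use_fixed_rate = 0;
}

static int
enable_dgp(struct pacing_mode *pm)
{
	if (pm->dgp_on)
		return (0);		/* already on: nothing to do */
	if (pm->use_fixed_rate && pm->always_pace)
		return (EBUSY);		/* fixed-rate pacing owns the pacer */
	if (pm->always_pace)
		remove_pacing(pm);	/* start from a clean slate */
	pm->dgp_on = 1;
	pm->always_pace = 1;
	pm->use_fixed_rate = 0;
	return (0);
}
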
22680 rack_set_profile(struct tcp_rack *rack, int prof) in rack_set_profile() argument
22688 err = rack_set_dgp(rack); in rack_set_profile()
22692 err = rack_set_dgp(rack); in rack_set_profile()
22700 rack->rc_fillcw_apply_discount = 1; in rack_set_profile()
22703 if (rack->rc_always_pace == 1) { in rack_set_profile()
22704 rack_remove_pacing(rack); in rack_set_profile()
22707 rack->dgp_on = 0; in rack_set_profile()
22708 rack->rc_hybrid_mode = 0; in rack_set_profile()
22709 rack->use_fixed_rate = 0; in rack_set_profile()
22713 rack->rc_pace_to_cwnd = 1; in rack_set_profile()
22715 rack->rc_pace_to_cwnd = 0; in rack_set_profile()
22718 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_set_profile()
22719 rack->rc_always_pace = 1; in rack_set_profile()
22720 if (rack->rack_hibeta) in rack_set_profile()
22721 rack_set_cc_pacing(rack); in rack_set_profile()
22723 rack->rc_always_pace = 0; in rack_set_profile()
22726 rack->rc_rack_tmr_std_based = 1; in rack_set_profile()
22730 rack->rc_rack_use_dsack = 1; in rack_set_profile()
22733 rack->r_use_cmp_ack = 1; in rack_set_profile()
22735 rack->r_use_cmp_ack = 0; in rack_set_profile()
22737 rack->rack_no_prr = 1; in rack_set_profile()
22739 rack->rack_no_prr = 0; in rack_set_profile()
22741 rack->rc_gp_no_rec_chg = 1; in rack_set_profile()
22743 rack->rc_gp_no_rec_chg = 0; in rack_set_profile()
22744 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { in rack_set_profile()
22745 rack->r_mbuf_queue = 1; in rack_set_profile()
22746 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) in rack_set_profile()
22747 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_profile()
22748 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22750 rack->r_mbuf_queue = 0; in rack_set_profile()
22751 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22754 rack->rack_enable_scwnd = 1; in rack_set_profile()
22756 rack->rack_enable_scwnd = 0; in rack_set_profile()
22759 rack->rc_gp_dyn_mul = 1; in rack_set_profile()
22761 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_set_profile()
22763 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_set_profile()
22764 rack->rc_gp_dyn_mul = 0; in rack_set_profile()
22766 rack->r_rr_config = 0; in rack_set_profile()
22767 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_set_profile()
22768 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_profile()
22769 rack->rtt_limit_mul = 0; in rack_set_profile()
22772 rack->rack_hdw_pace_ena = 1; in rack_set_profile()
22774 rack->rack_hdw_pace_ena = 0; in rack_set_profile()
22776 rack->rack_no_prr = 1; in rack_set_profile()
22778 rack->rack_no_prr = 0; in rack_set_profile()
22780 rack->r_limit_scw = 1; in rack_set_profile()
22782 rack->r_limit_scw = 0; in rack_set_profile()
22783 rack_init_retransmit_value(rack, rack_rxt_controls); in rack_set_profile()
22790 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) in rack_add_deferred_option() argument
22804 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); in rack_add_deferred_option()
22809 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) in process_hybrid_pacing() argument
22820 rack->use_fixed_rate = 0; in process_hybrid_pacing()
22821 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; in process_hybrid_pacing()
22822 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; in process_hybrid_pacing()
22823 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; in process_hybrid_pacing()
22825 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); in process_hybrid_pacing()
22827 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22829 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; in process_hybrid_pacing()
22830 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); in process_hybrid_pacing()
22839 if (rack->rc_hybrid_mode) { in process_hybrid_pacing()
22840 rack_set_profile(rack, 0); in process_hybrid_pacing()
22841 rack->rc_tp->tcp_hybrid_stop++; in process_hybrid_pacing()
22843 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); in process_hybrid_pacing()
22846 if (rack->dgp_on == 0) { in process_hybrid_pacing()
22852 if ((err = rack_set_profile(rack, 1)) != 0){ in process_hybrid_pacing()
22854 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22855 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); in process_hybrid_pacing()
22863 if (rack->rc_hybrid_mode == 0) { in process_hybrid_pacing()
22866 rack->r_ctl.pacing_method |= RACK_REG_PACING; in process_hybrid_pacing()
22867 rack->rc_hybrid_mode = 1; in process_hybrid_pacing()
22871 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { in process_hybrid_pacing()
22876 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in process_hybrid_pacing()
22889 rack->rc_tp->tcp_hybrid_start++; in process_hybrid_pacing()
22890 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__, 0); in process_hybrid_pacing()
22907 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, in rack_process_option() argument
22921 rack_init_retransmit_value(rack, optval); in rack_process_option()
22933 rack->rc_rack_tmr_std_based = 1; in rack_process_option()
22935 rack->rc_rack_tmr_std_based = 0; in rack_process_option()
22938 rack->rc_rack_use_dsack = 1; in rack_process_option()
22940 rack->rc_rack_use_dsack = 0; in rack_process_option()
22942 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); in rack_process_option()
22947 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_process_option()
22950 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; in rack_process_option()
22952 rack->r_ctl.pace_len_divisor = optval; in rack_process_option()
22958 rack->rack_hibeta = 1; in rack_process_option()
22964 rack->r_ctl.saved_hibeta = optval; in rack_process_option()
22965 if (rack->rc_pacing_cc_set) in rack_process_option()
22966 rack_undo_cc_pacing(rack); in rack_process_option()
22967 rack->r_ctl.rc_saved_beta.beta = optval; in rack_process_option()
22969 if (rack->rc_pacing_cc_set == 0) in rack_process_option()
22970 rack_set_cc_pacing(rack); in rack_process_option()
22972 rack->rack_hibeta = 0; in rack_process_option()
22973 if (rack->rc_pacing_cc_set) in rack_process_option()
22974 rack_undo_cc_pacing(rack); in rack_process_option()
22982 rack->r_ctl.timer_slop = optval; in rack_process_option()
22983 if (rack->rc_tp->t_srtt) { in rack_process_option()
22990 rack->r_ctl.timer_slop); in rack_process_option()
23000 if (rack->rc_pacing_cc_set) { in rack_process_option()
23018 rack->r_ctl.rc_saved_beta.beta_ecn = optval; in rack_process_option()
23019 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; in rack_process_option()
23025 if (rack->gp_ready) { in rack_process_option()
23030 rack->defer_options = 1; in rack_process_option()
23032 rack->defer_options = 0; in rack_process_option()
23037 rack->r_ctl.req_measurements = optval; in rack_process_option()
23044 rack->r_use_labc_for_rec = 1; in rack_process_option()
23046 rack->r_use_labc_for_rec = 0; in rack_process_option()
23051 rack->rc_labc = optval; in rack_process_option()
23058 rack->r_up_only = 1; in rack_process_option()
23060 rack->r_up_only = 0; in rack_process_option()
23064 rack->r_ctl.fillcw_cap = loptval; in rack_process_option()
23068 if ((rack->dgp_on == 1) && in rack_process_option()
23069 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23081 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23083 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23085 rack->r_ctl.bw_rate_cap = loptval; in rack_process_option()
23092 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { in rack_process_option()
23096 error = process_hybrid_pacing(rack, hybrid); in rack_process_option()
23100 rack->r_ctl.side_chan_dis_mask = optval; in rack_process_option()
23102 rack->r_ctl.side_chan_dis_mask = 0; in rack_process_option()
23106 error = rack_set_profile(rack, optval); in rack_process_option()
23113 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { in rack_process_option()
23114 rack->r_use_cmp_ack = 1; in rack_process_option()
23115 rack->r_mbuf_queue = 1; in rack_process_option()
23118 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_process_option()
23124 rack->r_limit_scw = 1; in rack_process_option()
23126 rack->r_limit_scw = 0; in rack_process_option()
23134 rack->rc_pace_to_cwnd = 0; in rack_process_option()
23136 rack->rc_pace_to_cwnd = 1; in rack_process_option()
23141 rack->rc_pace_fill_if_rttin_range = 1; in rack_process_option()
23142 rack->rtt_limit_mul = optval; in rack_process_option()
23144 rack->rc_pace_fill_if_rttin_range = 0; in rack_process_option()
23145 rack->rtt_limit_mul = 0; in rack_process_option()
23151 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_process_option()
23153 rack->r_ctl.rc_no_push_at_mrtt = optval; in rack_process_option()
23160 rack->rack_enable_scwnd = 0; in rack_process_option()
23162 rack->rack_enable_scwnd = 1; in rack_process_option()
23167 if (optval || rack->r_use_cmp_ack) in rack_process_option()
23168 rack->r_mbuf_queue = 1; in rack_process_option()
23170 rack->r_mbuf_queue = 0; in rack_process_option()
23171 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23179 rack->rack_rec_nonrxt_use_cr = 0; in rack_process_option()
23181 rack->rack_rec_nonrxt_use_cr = 1; in rack_process_option()
23186 rack->rack_no_prr = 0; in rack_process_option()
23188 rack->rack_no_prr = 1; in rack_process_option()
23190 rack->no_prr_addback = 1; in rack_process_option()
23196 rack->cspr_is_fcc = 1; in rack_process_option()
23198 rack->cspr_is_fcc = 0; in rack_process_option()
23203 rack->rc_gp_dyn_mul = 0; in rack_process_option()
23205 rack->rc_gp_dyn_mul = 1; in rack_process_option()
23211 rack->r_ctl.rack_per_of_gp_ca = optval; in rack_process_option()
23224 rack->rack_tlp_threshold_use = optval; in rack_process_option()
23229 rack->r_ctl.rc_tlp_cwnd_reduce = optval; in rack_process_option()
23238 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23243 if (rack->rc_always_pace) { in rack_process_option()
23247 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23248 rack->rc_always_pace = 1; in rack_process_option()
23249 if (rack->rack_hibeta) in rack_process_option()
23250 rack_set_cc_pacing(rack); in rack_process_option()
23257 if (rack->rc_always_pace == 1) { in rack_process_option()
23258 rack_remove_pacing(rack); in rack_process_option()
23261 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23266 rack_update_seg(rack); in rack_process_option()
23274 rack->r_ctl.init_rate = val; in rack_process_option()
23275 if (rack->rc_always_pace) in rack_process_option()
23276 rack_update_seg(rack); in rack_process_option()
23284 rack->rc_force_max_seg = 1; in rack_process_option()
23286 rack->rc_force_max_seg = 0; in rack_process_option()
23290 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); in rack_process_option()
23291 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_process_option()
23296 if ((rack->dgp_on == 1) && in rack_process_option()
23297 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23310 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23312 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23315 rack->rc_user_set_max_segs = optval; in rack_process_option()
23317 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; in rack_process_option()
23318 rack_set_pace_segments(tp, rack, __LINE__, NULL); in rack_process_option()
23323 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23327 if (rack->dgp_on) { in rack_process_option()
23335 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23336 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23337 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23338 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23339 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23340 rack->use_fixed_rate = 1; in rack_process_option()
23341 if (rack->rack_hibeta) in rack_process_option()
23342 rack_set_cc_pacing(rack); in rack_process_option()
23343 rack_log_pacing_delay_calc(rack, in rack_process_option()
23344 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23345 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23346 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23353 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23357 if (rack->dgp_on) { in rack_process_option()
23365 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23366 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23367 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23368 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23369 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23370 rack->use_fixed_rate = 1; in rack_process_option()
23371 if (rack->rack_hibeta) in rack_process_option()
23372 rack_set_cc_pacing(rack); in rack_process_option()
23373 rack_log_pacing_delay_calc(rack, in rack_process_option()
23374 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23375 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23376 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23383 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23387 if (rack->dgp_on) { in rack_process_option()
23395 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23396 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23397 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23398 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23399 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23400 rack->use_fixed_rate = 1; in rack_process_option()
23401 if (rack->rack_hibeta) in rack_process_option()
23402 rack_set_cc_pacing(rack); in rack_process_option()
23403 rack_log_pacing_delay_calc(rack, in rack_process_option()
23404 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23405 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23406 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23411 rack->r_ctl.rack_per_of_gp_rec = optval; in rack_process_option()
23412 rack_log_pacing_delay_calc(rack, in rack_process_option()
23413 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23414 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23415 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23429 rack->r_ctl.rack_per_of_gp_ca = ca; in rack_process_option()
23430 rack_log_pacing_delay_calc(rack, in rack_process_option()
23431 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23432 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23433 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23447 rack->r_ctl.rack_per_of_gp_ss = ss; in rack_process_option()
23448 rack_log_pacing_delay_calc(rack, in rack_process_option()
23449 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23450 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23451 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23457 rack->r_rr_config = optval; in rack_process_option()
23459 rack->r_rr_config = 0; in rack_process_option()
23463 rack->rc_pace_dnd = 1; in rack_process_option()
23465 rack->rc_pace_dnd = 0; in rack_process_option()
23470 if (rack->r_rack_hw_rate_caps == 0) in rack_process_option()
23471 rack->r_rack_hw_rate_caps = 1; in rack_process_option()
23475 rack->r_rack_hw_rate_caps = 0; in rack_process_option()
23482 rack->r_ctl.rack_per_upper_bound_ca = val; in rack_process_option()
23484 rack->r_ctl.rack_per_upper_bound_ss = val; in rack_process_option()
23489 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; in rack_process_option()
23491 rack->r_ctl.gate_to_fs = 1; in rack_process_option()
23493 rack->r_ctl.gate_to_fs = 0; in rack_process_option()
23496 rack->r_ctl.use_gp_not_last = 1; in rack_process_option()
23498 rack->r_ctl.use_gp_not_last = 0; in rack_process_option()
23505 rack->r_ctl.gp_gain_req = v; in rack_process_option()
23509 rack->rc_initial_ss_comp = 1; in rack_process_option()
23510 rack->r_ctl.gp_rnd_thresh = 0; in rack_process_option()
23515 rack->r_ctl.rc_split_limit = optval; in rack_process_option()
23520 if (rack->rack_hdrw_pacing == 0) { in rack_process_option()
23521 rack->rack_hdw_pace_ena = 1; in rack_process_option()
23522 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23526 rack->rack_hdw_pace_ena = 0; in rack_process_option()
23528 if (rack->r_ctl.crte != NULL) { in rack_process_option()
23529 rack->rack_hdrw_pacing = 0; in rack_process_option()
23530 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23531 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_process_option()
23532 rack->r_ctl.crte = NULL; in rack_process_option()
23541 rack->r_ctl.rc_prr_sendalot = optval; in rack_process_option()
23546 rack->r_ctl.rc_min_to = optval; in rack_process_option()
23551 rack->r_ctl.rc_early_recovery_segs = optval; in rack_process_option()
23570 rack->r_ctl.rc_reorder_shift = optval; in rack_process_option()
23577 rack->r_ctl.rc_reorder_fade = optval; in rack_process_option()
23583 rack->r_ctl.rc_tlp_threshold = optval; in rack_process_option()
23590 rack->use_rack_rr = 1; in rack_process_option()
23592 rack->use_rack_rr = 0; in rack_process_option()
23597 rack->r_ctl.rc_pkt_delay = optval; in rack_process_option()
23621 rack->r_ctl.rc_rate_sample_method = optval; in rack_process_option()
23626 rack->r_use_hpts_min = 1; in rack_process_option()
23632 rack->r_ctl.max_reduction = optval; in rack_process_option()
23635 rack->r_use_hpts_min = 0; in rack_process_option()
23640 rack->rc_gp_no_rec_chg = 1; in rack_process_option()
23642 rack->rc_gp_no_rec_chg = 0; in rack_process_option()
23647 rack->rc_skip_timely = 1; in rack_process_option()
23648 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_process_option()
23649 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_process_option()
23650 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_process_option()
23652 rack->rc_skip_timely = 0; in rack_process_option()
23657 rack->use_lesser_lt_bw = 0; in rack_process_option()
23658 rack->dis_lt_bw = 1; in rack_process_option()
23660 rack->use_lesser_lt_bw = 1; in rack_process_option()
23661 rack->dis_lt_bw = 0; in rack_process_option()
23663 rack->use_lesser_lt_bw = 0; in rack_process_option()
23664 rack->dis_lt_bw = 0; in rack_process_option()
23670 rack->rc_allow_data_af_clo = 1; in rack_process_option()
23672 rack->rc_allow_data_af_clo = 0; in rack_process_option()
24066 rack_apply_deferred_options(struct tcp_rack *rack) in rack_apply_deferred_options() argument
24071 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { in rack_apply_deferred_options()
24072 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_apply_deferred_options()
24075 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); in rack_apply_deferred_options()
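
rack_add_deferred_option() and rack_apply_deferred_options() are the two halves of a simple replay queue: options that arrive before the stack is ready (defer_options set while gp_ready is still 0, per the rack_set_sockopt() fragment below) are parked on r_ctl.opt_list and later pushed back through rack_process_option(). The same idiom with BSD <sys/queue.h>, illustrative types, and a callback standing in for the option processor:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct deferred_opt {
	TAILQ_ENTRY(deferred_opt) next;
	int      optname;
	uint64_t optval;
};
TAILQ_HEAD(opt_list, deferred_opt);

/* Park an option until the stack is ready to process it. */
static int
defer_option(struct opt_list *ol, int optname, uint64_t optval)
{
	struct deferred_opt *d = malloc(sizeof(*d));

	if (d == NULL)
		return (ENOMEM);
	d->optname = optname;
	d->optval = optval;
	TAILQ_INSERT_TAIL(ol, d, next);
	return (0);
}

/* Drain the queue, replaying each option through the processor. */
static void
apply_deferred(struct opt_list *ol, void (*process)(int, uint64_t))
{
	struct deferred_opt *d, *tmp;

	TAILQ_FOREACH_SAFE(d, ol, next, tmp) {
		TAILQ_REMOVE(ol, d, next);
		process(d->optname, d->optval);
		free(d);
	}
}
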
24084 struct tcp_rack *rack; in rack_hw_tls_change() local
24086 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_hw_tls_change()
24088 rack->r_ctl.fsb.hw_tls = 1; in rack_hw_tls_change()
24090 rack->r_ctl.fsb.hw_tls = 0; in rack_hw_tls_change()
24104 struct tcp_rack *rack; in rack_wake_check() local
24108 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_wake_check()
24109 if (rack->r_ctl.rc_hpts_flags) { in rack_wake_check()
24111 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ in rack_wake_check()
24115 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) in rack_wake_check()
24117 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { in rack_wake_check()
24121 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) in rack_wake_check()
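
rack_wake_check() reduces to two wrap-safe deadline tests: wake if a paced output is scheduled and its time has arrived, or if a timer is armed and has expired. A sketch, modeling TSTMP_GEQ with signed 32-bit subtraction the way the kernel's macro does:

#include <stdbool.h>
#include <stdint.h>

/* True if a is at or after b, tolerant of 32-bit timestamp wrap. */
static bool
tstmp_geq(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) >= 0);
}

static bool
needs_wake(uint32_t now, bool output_scheduled, uint32_t output_deadline,
    bool timer_armed, uint32_t timer_exp)
{
	if (output_scheduled && tstmp_geq(now, output_deadline))
		return (true);		/* pacer slot has arrived */
	if (timer_armed && tstmp_geq(now, timer_exp))
		return (true);		/* a RACK/TLP/RTO timer expired */
	return (false);
}
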
24166 struct tcp_rack *rack; in rack_set_sockopt() local
24171 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_set_sockopt()
24172 if (rack == NULL) { in rack_set_sockopt()
24177 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_set_sockopt()
24199 ip->ip_tos = rack->rc_inp->inp_ip_tos; in rack_set_sockopt()
24205 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; in rack_set_sockopt()
24217 rack->client_bufferlvl = inp->inp_socket->so_peerprio; in rack_set_sockopt()
24325 if (rack->defer_options && (rack->gp_ready == 0) && in rack_set_sockopt()
24332 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { in rack_set_sockopt()
24341 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); in rack_set_sockopt()
24406 struct tcp_rack *rack; in rack_get_sockopt() local
24417 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_get_sockopt()
24418 if (rack == NULL) { in rack_get_sockopt()
24439 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24440 optval = rack->r_ctl.rc_saved_beta.beta; in rack_get_sockopt()
24463 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24464 optval = rack->r_ctl.rc_saved_beta.beta_ecn; in rack_get_sockopt()
24479 if (rack->rc_rack_tmr_std_based) { in rack_get_sockopt()
24482 if (rack->rc_rack_use_dsack) { in rack_get_sockopt()
24503 optval = rack->rack_hibeta; in rack_get_sockopt()
24506 optval = rack->defer_options; in rack_get_sockopt()
24509 optval = rack->r_ctl.req_measurements; in rack_get_sockopt()
24512 optval = rack->r_use_labc_for_rec; in rack_get_sockopt()
24515 optval = rack->rc_labc; in rack_get_sockopt()
24518 optval = rack->r_up_only; in rack_get_sockopt()
24521 loptval = rack->r_ctl.fillcw_cap; in rack_get_sockopt()
24524 loptval = rack->r_ctl.bw_rate_cap; in rack_get_sockopt()
24531 optval = rack->r_ctl.side_chan_dis_mask; in rack_get_sockopt()
24538 optval = rack->r_use_cmp_ack; in rack_get_sockopt()
24541 optval = rack->rc_pace_to_cwnd; in rack_get_sockopt()
24544 optval = rack->r_ctl.rc_no_push_at_mrtt; in rack_get_sockopt()
24547 optval = rack->rack_enable_scwnd; in rack_get_sockopt()
24550 optval = rack->rack_rec_nonrxt_use_cr; in rack_get_sockopt()
24553 if (rack->rack_no_prr == 1) in rack_get_sockopt()
24555 else if (rack->no_prr_addback == 1) in rack_get_sockopt()
24561 if (rack->dis_lt_bw) { in rack_get_sockopt()
24564 } else if (rack->use_lesser_lt_bw) { in rack_get_sockopt()
24577 optval = rack->r_mbuf_queue; in rack_get_sockopt()
24580 optval = rack->cspr_is_fcc; in rack_get_sockopt()
24583 optval = rack->rc_gp_dyn_mul; in rack_get_sockopt()
24590 optval = rack->r_ctl.rc_tlp_cwnd_reduce; in rack_get_sockopt()
24593 val = rack->r_ctl.init_rate; in rack_get_sockopt()
24600 optval = rack->rc_force_max_seg; in rack_get_sockopt()
24603 optval = rack->r_ctl.rc_user_set_min_segs; in rack_get_sockopt()
24607 optval = rack->rc_user_set_max_segs; in rack_get_sockopt()
24611 optval = rack->rc_always_pace; in rack_get_sockopt()
24615 optval = rack->r_ctl.rc_prr_sendalot; in rack_get_sockopt()
24619 optval = rack->r_ctl.rc_min_to; in rack_get_sockopt()
24622 optval = rack->r_ctl.rc_split_limit; in rack_get_sockopt()
24626 optval = rack->r_ctl.rc_early_recovery_segs; in rack_get_sockopt()
24630 optval = rack->r_ctl.rc_reorder_shift; in rack_get_sockopt()
24633 if (rack->r_ctl.gp_rnd_thresh) { in rack_get_sockopt()
24636 v = rack->r_ctl.gp_gain_req; in rack_get_sockopt()
24638 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); in rack_get_sockopt()
24639 if (rack->r_ctl.gate_to_fs == 1) in rack_get_sockopt()
24646 optval = rack->r_ctl.rc_reorder_fade; in rack_get_sockopt()
24650 optval = rack->use_rack_rr; in rack_get_sockopt()
24653 optval = rack->r_rr_config; in rack_get_sockopt()
24656 optval = rack->r_rack_hw_rate_caps; in rack_get_sockopt()
24659 optval = rack->rack_hdw_pace_ena; in rack_get_sockopt()
24663 optval = rack->r_ctl.rc_tlp_threshold; in rack_get_sockopt()
24667 optval = rack->r_ctl.rc_pkt_delay; in rack_get_sockopt()
24670 optval = rack->rack_tlp_threshold_use; in rack_get_sockopt()
24673 optval = rack->rc_pace_dnd; in rack_get_sockopt()
24676 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; in rack_get_sockopt()
24679 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; in rack_get_sockopt()
24682 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; in rack_get_sockopt()
24685 optval = rack->r_ctl.rack_per_upper_bound_ss; in rack_get_sockopt()
24687 optval |= rack->r_ctl.rack_per_upper_bound_ca; in rack_get_sockopt()
24690 optval = rack->r_ctl.rack_per_of_gp_ca; in rack_get_sockopt()
24693 optval = rack->r_ctl.rack_per_of_gp_ss; in rack_get_sockopt()
24696 optval = rack->r_ctl.pace_len_divisor; in rack_get_sockopt()
24699 optval = rack->r_ctl.rc_rate_sample_method; in rack_get_sockopt()
24705 optval = rack->rc_allow_data_af_clo; in rack_get_sockopt()
24708 optval = rack->r_limit_scw; in rack_get_sockopt()
24711 if (rack->r_use_hpts_min) in rack_get_sockopt()
24712 optval = rack->r_ctl.max_reduction; in rack_get_sockopt()
24717 optval = rack->rc_gp_no_rec_chg; in rack_get_sockopt()
24720 optval = rack->rc_skip_timely; in rack_get_sockopt()
24723 optval = rack->r_ctl.timer_slop; in rack_get_sockopt()