xref: /freebsd/sys/netinet/tcp_stacks/rack.c (revision dd41de95a84d979615a2ef11df6850622bf6184e)
/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)


MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2
/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *    the congestion window so that the ack clock can
 *    be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *    will stop us using the number of dup acks and instead
 *    use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *    of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then ensure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overridden with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
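
/*
 * Concretely (a sketch of the shape of that dispatch; the actual
 * assignments live in rack_set_state() further down in this file),
 * each TCP state maps to one of the rack_do_* handlers declared below:
 *
 *	case TCPS_ESTABLISHED:
 *		rack->r_substate = rack_do_established;
 *		break;
 *	case TCPS_FIN_WAIT_1:
 *		rack->r_substate = rack_do_fin_wait_1;
 *		break;
 *
 * so the per-segment path simply calls (*rack->r_substate)() with no
 * switch on the connection state.
 */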
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs without new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1; /* 1; */
static int32_t rack_hw_rate_min = 0; /* 1500000;*/
static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum rack timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250 ms */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost the slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times, so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
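/*
 * The arithmetic behind that figure, assuming pure exponential doubling
 * with no cap on the backoff:
 *
 *	30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122,850 ms
 *	                                              = 122.850 seconds
 */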
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 130% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace 1 MSS at a time (instead of two) for rates below 1.4Meg */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide an hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* ie the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
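/*
 * With the defaults above (minmul = 1, mindiv = 4), a measured rtt only
 * counts as "lower" when it is under
 *
 *	minrtt + (minrtt * 1 / 4) = 1.25 * minrtt
 */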
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;
counter_u64_t rack_large_ackcmp;
counter_u64_t rack_small_ackcmp;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

#define MAX_NUM_OF_CNTS 13
counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_tlp_does_nada;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;
counter_u64_t rack_sbsndptr_right;
counter_u64_t rack_sbsndptr_wrong;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];


#define	RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define	RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;	 \
	if ((u_long)(tv) < (u_long)(tvmin)) \
		(tv) = (tvmin); \
	if ((u_long)(tv) > (u_long)(tvmax)) \
		(tv) = (tvmax); \
} while (0)
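
/*
 * RACK_REXMTVAL() is the classic RTO estimate (srtt + 4 * rttvar,
 * floored at rack_rto_min; all values are in microseconds here).
 * A sketch of how the two macros compose, modeled on how the base
 * stack clamps its retransmit timer:
 *
 *	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 *	    rack_rto_min, rack_rto_max, 0);
 *
 * leaves tp->t_rxtcur clamped into [rack_rto_min, rack_rto_max].
 */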

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
   uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
		 uint32_t type, uint32_t ack);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);
static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
   uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
			    tcp_seq th_ack, int line);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old, *ptr;
	struct tcpcb *tp;
	int error;

	if (rack->rc_pacing_cc_set)
		return;

	tp = rack->rc_tp;
	if (tp->cc_algo == NULL) {
		/* Tcb is leaving */
		printf("No cc algorithm?\n");
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno; we can't play games with beta! */
		printf("cc_algo:%s is not NEWRENO:%s\n",
		       tp->cc_algo->name, CCALGONAME_NEWRENO);
		goto out;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, why does new_reno no longer have a set function? */
		printf("no ctl_output for algo:%s\n", tp->cc_algo->name);
		goto out;
	}
	if (ptr == NULL) {
		/* Just the default values */
		old.beta = V_newreno_beta;
		old.beta_ecn = V_newreno_beta_ecn;
		old.newreno_flags = 0;
	} else {
		old.beta = ptr->beta;
		old.beta_ecn = ptr->beta_ecn;
		old.newreno_flags = ptr->newreno_flags;
	}
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		printf("Error returned by ctl_output %d\n", error);
		goto out;
	}
	/*
	 * Hack alert: we need to set our newreno_flags
	 * so that ABE behavior is also applied.
	 */
	((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN;
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		printf("Error returned by ctl_output %d\n", error);
		goto out;
	}
	/* Save off the original values for restoral */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		if (ptr) {
			log.u_bbr.flex1 = ptr->beta;
			log.u_bbr.flex2 = ptr->beta_ecn;
			log.u_bbr.flex3 = ptr->newreno_flags;
		}
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 3;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
			       0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	struct newreno old, *ptr;
	struct tcpcb *tp;

	if (rack->rc_pacing_cc_set == 0)
		return;
	tp = rack->rc_tp;
	rack->rc_pacing_cc_set = 0;
	if (tp->cc_algo == NULL)
		/* Tcb is leaving */
		return;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno; nothing to do! */
		return;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (ptr == NULL) {
		/*
		 * This happens at rack_fini() if the
		 * cc module gets freed on us. In that
		 * case we lose our "new" settings but
		 * that's ok, since the tcb is going away anyway.
		 */
		return;
	}
	/* Grab out our set values */
	memcpy(&old, ptr, sizeof(struct newreno));
	/* Copy back in the original values */
	memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
	/* Now save back the values we had set in (for when pacing is restored) */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 4;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
			       0, &log, false, NULL, NULL, 0, &tv);
	}
}

#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
	/* Keep in mind that t_maxpeakrate is in B/s. */
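	/*
	 * The threshold below works out to the number of bytes one
	 * smoothed RTT admits at the peak rate (B/s * usec srtt /
	 * HPTS_USEC_IN_SEC), floored at two full segments so a tiny
	 * srtt cannot starve the connection.
	 */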
	uint64_t peak;
	peak = uqmax((tp->t_maxseg * 2),
		     (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
	tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;
	int i;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_badfr);
		counter_u64_zero(rack_badfr_bytes);
		counter_u64_zero(rack_rtm_prr_retran);
		counter_u64_zero(rack_rtm_prr_newdata);
		counter_u64_zero(rack_timestamp_mismatch);
		counter_u64_zero(rack_reorder_seen);
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_tlp_retran_fail);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_to_arm_rack);
		counter_u64_zero(rack_to_arm_tlp);
		counter_u64_zero(rack_paced_segments);
		counter_u64_zero(rack_calc_zero);
		counter_u64_zero(rack_calc_nonzero);
		counter_u64_zero(rack_unpaced_segments);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enobuf_hw);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_per_timer_hole);
		counter_u64_zero(rack_large_ackcmp);
		counter_u64_zero(rack_small_ackcmp);
#ifdef INVARIANTS
		counter_u64_zero(rack_adjust_map_bw);
#endif
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_fto_send);
		counter_u64_zero(rack_fto_rsm_send);
		counter_u64_zero(rack_extended_rfo);
		counter_u64_zero(rack_hw_pace_init_fail);
		counter_u64_zero(rack_hw_pace_lost);
		counter_u64_zero(rack_sbsndptr_wrong);
		counter_u64_zero(rack_sbsndptr_right);
		counter_u64_zero(rack_non_fto_send);
		counter_u64_zero(rack_nfto_resend);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
			counter_u64_zero(rack_proc_comp_ack[i]);
		}
		counter_u64_zero(rack_multi_single_eq);
		counter_u64_zero(rack_proc_non_comp_ack);
		counter_u64_zero(rack_find_high);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_used_tlpmethod);
		counter_u64_zero(rack_used_tlpmethod2);
		counter_u64_zero(rack_enter_tlp_calc);
		counter_u64_zero(rack_progress_drops);
		counter_u64_zero(rack_tlp_does_nada);
		counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
	}
	rack_clear_counter = 0;
	return (0);
}
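
/*
 * Example usage, assuming this handler is attached under the rack
 * sysctl root (the exact OID path depends on where rack_sysctl_root
 * is rooted and which node the PROC entry is added to, e.g.
 * net.inet.tcp.rack.stats.clear):
 *
 *	# sysctl net.inet.tcp.rack.stats.clear=1
 *
 * Writing 1 zeroes every counter above; any other value is a no-op.
 */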

static void
rack_init_sysctls(void)
{
	int i;
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;
	struct sysctl_oid *rack_pacing;
	struct sysctl_oid *rack_timely;
	struct sysctl_oid *rack_timers;
	struct sysctl_oid *rack_tlp;
	struct sysctl_oid *rack_misc;
	struct sysctl_oid *rack_measure;
	struct sysctl_oid *rack_probertt;
	struct sysctl_oid *rack_hw_pacing;

	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Sack Attack Counters and Controls");
	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Counters");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	/* Probe rtt related controls */
	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "probertt",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "ProbeRTT related Controls");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
	    &rack_atexit_prtt_hbp, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a high-BDP path (130%)");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
	    &rack_atexit_prtt, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path (130%)");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
	    &rack_per_of_gp_probertt, 60,
	    "What percentage of goodput do we pace at in probertt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
	    &rack_per_of_gp_probertt_reduce, 10,
	    "What percentage of goodput do we reduce every gp_srtt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
	    &rack_per_of_gp_lowthresh, 40,
	    "What percentage of goodput do we allow the multiplier to fall to");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "time_between", CTLFLAG_RW,
	    &rack_time_between_probertt, 96000000,
	    "How many useconds must pass since the lowest rtt last fell before we enter probertt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "safety", CTLFLAG_RW,
	    &rack_probe_rtt_safety_val, 2000000,
	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
	    &rack_probe_rtt_sets_cwnd, 0,
	    "Do we set the cwnd too (if always_lower is on)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
	    &rack_max_drain_wait, 2,
	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
	    &rack_must_drain, 1,
	    "We must drain this many gp_srtt's waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_entry, 1,
	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_exit, 0,
	    "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_div", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_div, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_mul", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_mul, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
	    &rack_min_probertt_hold, 200000,
	    "What is the minimum time we hold probertt at target");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "filter_life", CTLFLAG_RW,
	    &rack_probertt_filter_life, 10000000,
	    "What is the time for the filter's life in useconds");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "lower_within", CTLFLAG_RW,
	    &rack_probertt_lower_within, 10,
	    "If the rtt goes lower within this percentage of the time, go into probe-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "must_move", CTLFLAG_RW,
	    &rack_min_rtt_movement, 250,
	    "How much is the minimum movement in rtt to count as a drop for probertt purposes");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
	    &rack_probertt_clear_is, 1,
	    "Do we clear I/S counts on exiting probe-rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
	    &rack_max_drain_hbp, 1,
	    "How many extra drain gpsrtt's do we get in highly buffered paths");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_threshold", CTLFLAG_RW,
	    &rack_hbp_thresh, 3,
	    "We are highly buffered if max_rtt_seen / min_rtt_seen > this-threshold");
	/* Pacing related sysctls */
	rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "max_pace_over", CTLFLAG_RW,
	    &rack_max_per_above, 30,
	    "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_to_one", CTLFLAG_RW,
	    &rack_pace_one_seg, 0,
	    "Do we allow low b/w pacing of 1MSS instead of two");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
	    &rack_limit_time_with_srtt, 0,
	    "Do we limit pacing time based on srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "init_win", CTLFLAG_RW,
	    &rack_default_init_window, 0,
	    "Do we have a rack initial window 0 = system default");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ss", CTLFLAG_RW,
	    &rack_per_of_gp_ss, 250,
	    "If non zero, what percentage of goodput to pace at in slow start");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ca", CTLFLAG_RW,
	    &rack_per_of_gp_ca, 150,
	    "If non zero, what percentage of goodput to pace at in congestion avoidance");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_rec", CTLFLAG_RW,
	    &rack_per_of_gp_rec, 200,
	    "If non zero, what percentage of goodput to pace at in recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_max_seg", CTLFLAG_RW,
	    &rack_hptsi_segments, 40,
	    "What size is the max for TSO segments in pacing and burst mitigation");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "burst_reduces", CTLFLAG_RW,
	    &rack_slot_reduction, 4,
	    "When doing only burst mitigation what is the reduce divisor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "use_pacing", CTLFLAG_RW,
	    &rack_pace_every_seg, 0,
	    "If set we use pacing, if clear we use only the original burst mitigation");
	SYSCTL_ADD_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_bw_rate_cap, 0,
	    "If set we apply this value to the absolute rate cap used by pacing");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
	    &rack_req_measurements, 1,
	    "If doing dynamic pacing, how many measurements must be in before we start pacing?");
	/* Hardware pacing */
	rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "hdwr_pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rwnd_factor", CTLFLAG_RW,
	    &rack_hw_rwnd_factor, 2,
	    "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
	    &rack_enobuf_hw_boost_mult, 2,
	    "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
	    &rack_enobuf_hw_max, 2,
	    "What is the max boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
	    &rack_enobuf_hw_min, 2,
	    "What is the min boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "enable", CTLFLAG_RW,
	    &rack_enable_hw_pacing, 0,
	    "Should RACK attempt to use hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_hw_rate_caps, 1,
	    "Does the highest hardware pacing rate cap the rate we will send at?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 1,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 2,
	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 20,
	    "Rack timely decrease percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow the GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow the GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
	    &rack_gp_no_rec_chg, 1,
	    "Rack timely do we prohibit the recovery multiplier from being lowered");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
	    &rack_timely_dec_clear, 6,
	    "Rack timely what threshold do we count to before another boost during b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding whether to back off on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
	    &rack_down_raise_thresh, 100,
	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
	    &rack_req_segs, 1,
	    "Bottom dragging if not these many segments outstanding and room");

	/* TLP and Rack related parameters */
	rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "tlp",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "TLP and Rack related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_rrr", CTLFLAG_RW,
	    &use_rack_rr, 1,
	    "Do we use Rack Rapid Recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "post_rec_labc", CTLFLAG_RW,
	    &rack_max_abc_post_recovery, 2,
	    "Since we do early recovery, do we override the l_abc to a value, if so what?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
	    &rack_non_rxt_use_cr, 0,
	    "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "limit", CTLFLAG_RW,
	    &rack_tlp_limit, 2,
	    "How many TLP's can be sent without sending new data");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_greater", CTLFLAG_RW,
	    &rack_tlp_use_greater, 1,
	    "Should we use the rack_rtt time if it's greater than srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10000,
	    "TLP minimum timeout per the specification (in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 0,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retran should we enter recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 60000000,
	    "Does reorder detection fade, if so how many microseconds (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1000,
	    "Extra RACK time (in microseconds) besides reordering thresh");

1284 	/* Timer related controls */
1285 	rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1286 	    SYSCTL_CHILDREN(rack_sysctl_root),
1287 	    OID_AUTO,
1288 	    "timers",
1289 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1290 	    "Timer related controls");
1291 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1292 	    SYSCTL_CHILDREN(rack_timers),
1293 	    OID_AUTO, "persmin", CTLFLAG_RW,
1294 	    &rack_persist_min, 250000,
1295 	    "What is the minimum time in microseconds between persists");
1296 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1297 	    SYSCTL_CHILDREN(rack_timers),
1298 	    OID_AUTO, "persmax", CTLFLAG_RW,
1299 	    &rack_persist_max, 2000000,
1300 	    "What is the largest delay in microseconds between persists");
1301 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1302 	    SYSCTL_CHILDREN(rack_timers),
1303 	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
1304 	    &rack_delayed_ack_time, 40000,
1305 	    "Delayed ack time (40ms in microseconds)");
1306 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1307 	    SYSCTL_CHILDREN(rack_timers),
1308 	    OID_AUTO, "minrto", CTLFLAG_RW,
1309 	    &rack_rto_min, 30000,
1310 	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1311 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1312 	    SYSCTL_CHILDREN(rack_timers),
1313 	    OID_AUTO, "maxrto", CTLFLAG_RW,
1314 	    &rack_rto_max, 4000000,
1315 	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
1316 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1317 	    SYSCTL_CHILDREN(rack_timers),
1318 	    OID_AUTO, "minto", CTLFLAG_RW,
1319 	    &rack_min_to, 1000,
1320 	    "Minimum rack timeout in microseconds");
1321 	/* Measure controls */
1322 	rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1323 	    SYSCTL_CHILDREN(rack_sysctl_root),
1324 	    OID_AUTO,
1325 	    "measure",
1326 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1327 	    "Measure related controls");
1328 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1329 	    SYSCTL_CHILDREN(rack_measure),
1330 	    OID_AUTO, "wma_divisor", CTLFLAG_RW,
1331 	    &rack_wma_divisor, 8,
1332 	    "When doing b/w calculation, what is the divisor for the WMA");
1333 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1334 	    SYSCTL_CHILDREN(rack_measure),
1335 	    OID_AUTO, "end_cwnd", CTLFLAG_RW,
1336 	    &rack_cwnd_block_ends_measure, 0,
1337 	    "Does a cwnd just-return end the measurement window (app limited)");
1338 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1339 	    SYSCTL_CHILDREN(rack_measure),
1340 	    OID_AUTO, "end_rwnd", CTLFLAG_RW,
1341 	    &rack_rwnd_block_ends_measure, 0,
1342 	    "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1343 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1344 	    SYSCTL_CHILDREN(rack_measure),
1345 	    OID_AUTO, "min_target", CTLFLAG_RW,
1346 	    &rack_def_data_window, 20,
1347 	    "What is the minimum target window (in mss) for a GP measurement");
1348 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1349 	    SYSCTL_CHILDREN(rack_measure),
1350 	    OID_AUTO, "goal_bdp", CTLFLAG_RW,
1351 	    &rack_goal_bdp, 2,
1352 	    "What is the goal BDP to measure");
1353 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1354 	    SYSCTL_CHILDREN(rack_measure),
1355 	    OID_AUTO, "min_srtts", CTLFLAG_RW,
1356 	    &rack_min_srtts, 1,
1357 	    "What is the minimum number of SRTTs a GP measurement must span");
1358 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1359 	    SYSCTL_CHILDREN(rack_measure),
1360 	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1361 	    &rack_min_measure_usec, 0,
1362 	    "What is the minimum time for a measurement (0 = off)");
1363 	/* Misc rack controls */
1364 	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1365 	    SYSCTL_CHILDREN(rack_sysctl_root),
1366 	    OID_AUTO,
1367 	    "misc",
1368 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1369 	    "Misc related controls");
1370 #ifdef TCP_ACCOUNTING
1371 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1372 	    SYSCTL_CHILDREN(rack_misc),
1373 	    OID_AUTO, "tcp_acct", CTLFLAG_RW,
1374 	    &rack_tcp_accounting, 0,
1375 	    "Should we turn on TCP accounting for all rack sessions?");
1376 #endif
1377 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1378 	    SYSCTL_CHILDREN(rack_misc),
1379 	    OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1380 	    &rack_prr_addbackmax, 2,
1381 	    "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1382 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1383 	    SYSCTL_CHILDREN(rack_misc),
1384 	    OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1385 	    &rack_stats_gets_ms_rtt, 1,
1386 	    "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1387 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1388 	    SYSCTL_CHILDREN(rack_misc),
1389 	    OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1390 	    &rack_client_low_buf, 0,
1391 	    "Client low buffer level (below this we are more aggressive in DGP exiting recovery; 0 = off)");
1392 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1393 	    SYSCTL_CHILDREN(rack_misc),
1394 	    OID_AUTO, "defprofile", CTLFLAG_RW,
1395 	    &rack_def_profile, 0,
1396 	    "Should RACK use a default profile (0=no, num == profile num)?");
1397 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1398 	    SYSCTL_CHILDREN(rack_misc),
1399 	    OID_AUTO, "cmpack", CTLFLAG_RW,
1400 	    &rack_use_cmp_acks, 1,
1401 	    "Should RACK have LRO send compressed acks");
1402 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1403 	    SYSCTL_CHILDREN(rack_misc),
1404 	    OID_AUTO, "fsb", CTLFLAG_RW,
1405 	    &rack_use_fsb, 1,
1406 	    "Should RACK use the fast send block?");
1407 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1408 	    SYSCTL_CHILDREN(rack_misc),
1409 	    OID_AUTO, "rfo", CTLFLAG_RW,
1410 	    &rack_use_rfo, 1,
1411 	    "Should RACK use rack_fast_output()?");
1412 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1413 	    SYSCTL_CHILDREN(rack_misc),
1414 	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
1415 	    &rack_use_rsm_rfo, 1,
1416 	    "Should RACK use rack_fast_rsm_output()?");
1417 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1418 	    SYSCTL_CHILDREN(rack_misc),
1419 	    OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1420 	    &rack_enable_shared_cwnd, 1,
1421 	    "Should RACK try to use the shared cwnd on connections where allowed");
1422 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1423 	    SYSCTL_CHILDREN(rack_misc),
1424 	    OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1425 	    &rack_limits_scwnd, 1,
1426 	    "Should RACK place low end time limits on the shared cwnd feature");
1427 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1428 	    SYSCTL_CHILDREN(rack_misc),
1429 	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1430 	    &rack_enable_mqueue_for_nonpaced, 0,
1431 	    "Should RACK use mbuf queuing for non-paced connections");
1432 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1433 	    SYSCTL_CHILDREN(rack_misc),
1434 	    OID_AUTO, "iMac_dack", CTLFLAG_RW,
1435 	    &rack_use_imac_dack, 0,
1436 	    "Should RACK try to emulate iMac delayed ack");
1437 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1438 	    SYSCTL_CHILDREN(rack_misc),
1439 	    OID_AUTO, "no_prr", CTLFLAG_RW,
1440 	    &rack_disable_prr, 0,
1441 	    "Should RACK not use prr and only pace (must have pacing on)");
1442 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1443 	    SYSCTL_CHILDREN(rack_misc),
1444 	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
1445 	    &rack_verbose_logging, 0,
1446 	    "Should RACK black box logging be verbose");
1447 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1448 	    SYSCTL_CHILDREN(rack_misc),
1449 	    OID_AUTO, "data_after_close", CTLFLAG_RW,
1450 	    &rack_ignore_data_after_close, 1,
1451 	    "Do we hold off sending a RST until all pending data is ack'd");
1452 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1453 	    SYSCTL_CHILDREN(rack_misc),
1454 	    OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1455 	    &rack_sack_not_required, 1,
1456 	    "Do we allow rack to run on connections not supporting SACK");
1457 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1458 	    SYSCTL_CHILDREN(rack_misc),
1459 	    OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1460 	    &rack_send_a_lot_in_prr, 1,
1461 	    "Send a lot in prr");
1462 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
1463 	    SYSCTL_CHILDREN(rack_misc),
1464 	    OID_AUTO, "autoscale", CTLFLAG_RW,
1465 	    &rack_autosndbuf_inc, 20,
1466 	    "What percentage should rack scale up its snd buffer by?");
1467 	/* Sack Attacker detection stuff */
1468 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1469 	    SYSCTL_CHILDREN(rack_attack),
1470 	    OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1471 	    &rack_highest_sack_thresh_seen, 0,
1472 	    "Highest sack to ack ratio seen");
1473 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
1474 	    SYSCTL_CHILDREN(rack_attack),
1475 	    OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1476 	    &rack_highest_move_thresh_seen, 0,
1477 	    "Highest move to non-move ratio seen");
1478 	rack_ack_total = counter_u64_alloc(M_WAITOK);
1479 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1480 	    SYSCTL_CHILDREN(rack_attack),
1481 	    OID_AUTO, "acktotal", CTLFLAG_RD,
1482 	    &rack_ack_total,
1483 	    "Total number of ACKs");
1484 	rack_express_sack = counter_u64_alloc(M_WAITOK);
1485 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1486 	    SYSCTL_CHILDREN(rack_attack),
1487 	    OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1488 	    &rack_express_sack,
1489 	    "Total number of express SACKs");
1490 	rack_sack_total = counter_u64_alloc(M_WAITOK);
1491 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1492 	    SYSCTL_CHILDREN(rack_attack),
1493 	    OID_AUTO, "sacktotal", CTLFLAG_RD,
1494 	    &rack_sack_total,
1495 	    "Total number of SACKs");
1496 	rack_move_none = counter_u64_alloc(M_WAITOK);
1497 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1498 	    SYSCTL_CHILDREN(rack_attack),
1499 	    OID_AUTO, "move_none", CTLFLAG_RD,
1500 	    &rack_move_none,
1501 	    "Total number of SACK index reuse of positions under threshold");
1502 	rack_move_some = counter_u64_alloc(M_WAITOK);
1503 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1504 	    SYSCTL_CHILDREN(rack_attack),
1505 	    OID_AUTO, "move_some", CTLFLAG_RD,
1506 	    &rack_move_some,
1507 	    "Total number of SACK index reuse of positions over threshold");
1508 	rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1509 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1510 	    SYSCTL_CHILDREN(rack_attack),
1511 	    OID_AUTO, "attacks", CTLFLAG_RD,
1512 	    &rack_sack_attacks_detected,
1513 	    "Total number of SACK attackers that had sack disabled");
1514 	rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1515 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1516 	    SYSCTL_CHILDREN(rack_attack),
1517 	    OID_AUTO, "reversed", CTLFLAG_RD,
1518 	    &rack_sack_attacks_reversed,
1519 	    "Total number of SACK attackers that were later determined to be false positives");
1520 	rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1521 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1522 	    SYSCTL_CHILDREN(rack_attack),
1523 	    OID_AUTO, "nextmerge", CTLFLAG_RD,
1524 	    &rack_sack_used_next_merge,
1525 	    "Total number of times we used the next merge");
1526 	rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1527 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1528 	    SYSCTL_CHILDREN(rack_attack),
1529 	    OID_AUTO, "prevmerge", CTLFLAG_RD,
1530 	    &rack_sack_used_prev_merge,
1531 	    "Total number of times we used the prev merge");
1532 	/* Counters */
1533 	rack_fto_send = counter_u64_alloc(M_WAITOK);
1534 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1535 	    SYSCTL_CHILDREN(rack_counters),
1536 	    OID_AUTO, "fto_send", CTLFLAG_RD,
1537 	    &rack_fto_send, "Total number of rack_fast_output sends");
1538 	rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1539 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1540 	    SYSCTL_CHILDREN(rack_counters),
1541 	    OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1542 	    &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1543 	rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1544 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1545 	    SYSCTL_CHILDREN(rack_counters),
1546 	    OID_AUTO, "nfto_resend", CTLFLAG_RD,
1547 	    &rack_nfto_resend, "Total number of rack_output retransmissions");
1548 	rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1549 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1550 	    SYSCTL_CHILDREN(rack_counters),
1551 	    OID_AUTO, "nfto_send", CTLFLAG_RD,
1552 	    &rack_non_fto_send, "Total number of rack_output first sends");
1553 	rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1554 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1555 	    SYSCTL_CHILDREN(rack_counters),
1556 	    OID_AUTO, "rfo_extended", CTLFLAG_RD,
1557 	    &rack_extended_rfo, "Total number of times we extended rfo");
1558 
1559 	rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1560 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1561 	    SYSCTL_CHILDREN(rack_counters),
1562 	    OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1563 	    &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1564 	rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1566 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1567 	    SYSCTL_CHILDREN(rack_counters),
1568 	    OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1569 	    &rack_hw_pace_lost, "Total number of times we lost the use of hw pacing");
1570 
1573 	rack_badfr = counter_u64_alloc(M_WAITOK);
1574 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1575 	    SYSCTL_CHILDREN(rack_counters),
1576 	    OID_AUTO, "badfr", CTLFLAG_RD,
1577 	    &rack_badfr, "Total number of bad FRs");
1578 	rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
1579 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1580 	    SYSCTL_CHILDREN(rack_counters),
1581 	    OID_AUTO, "badfr_bytes", CTLFLAG_RD,
1582 	    &rack_badfr_bytes, "Total bytes of bad FRs");
1583 	rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
1584 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1585 	    SYSCTL_CHILDREN(rack_counters),
1586 	    OID_AUTO, "prrsndret", CTLFLAG_RD,
1587 	    &rack_rtm_prr_retran,
1588 	    "Total number of prr based retransmits");
1589 	rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
1590 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1591 	    SYSCTL_CHILDREN(rack_counters),
1592 	    OID_AUTO, "prrsndnew", CTLFLAG_RD,
1593 	    &rack_rtm_prr_newdata,
1594 	    "Total number of prr based new transmits");
1595 	rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
1596 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1597 	    SYSCTL_CHILDREN(rack_counters),
1598 	    OID_AUTO, "tsnf", CTLFLAG_RD,
1599 	    &rack_timestamp_mismatch,
1600 	    "Total number of times we could not find the reported timestamp");
1601 	rack_find_high = counter_u64_alloc(M_WAITOK);
1602 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1603 	    SYSCTL_CHILDREN(rack_counters),
1604 	    OID_AUTO, "findhigh", CTLFLAG_RD,
1605 	    &rack_find_high,
1606 	    "Total number of FINs causing a find-high");
1607 	rack_reorder_seen = counter_u64_alloc(M_WAITOK);
1608 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1609 	    SYSCTL_CHILDREN(rack_counters),
1610 	    OID_AUTO, "reordering", CTLFLAG_RD,
1611 	    &rack_reorder_seen,
1612 	    "Total number of times we added delay due to reordering");
1613 	rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1614 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1615 	    SYSCTL_CHILDREN(rack_counters),
1616 	    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1617 	    &rack_tlp_tot,
1618 	    "Total number of tail loss probe expirations");
1619 	rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1620 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1621 	    SYSCTL_CHILDREN(rack_counters),
1622 	    OID_AUTO, "tlp_new", CTLFLAG_RD,
1623 	    &rack_tlp_newdata,
1624 	    "Total number of tail loss probes sending new data");
1625 	rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1626 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1627 	    SYSCTL_CHILDREN(rack_counters),
1628 	    OID_AUTO, "tlp_retran", CTLFLAG_RD,
1629 	    &rack_tlp_retran,
1630 	    "Total number of tail loss probes sending retransmitted data");
1631 	rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1632 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1633 	    SYSCTL_CHILDREN(rack_counters),
1634 	    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1635 	    &rack_tlp_retran_bytes,
1636 	    "Total bytes of tail loss probe sending retransmitted data");
1637 	rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
1638 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1639 	    SYSCTL_CHILDREN(rack_counters),
1640 	    OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
1641 	    &rack_tlp_retran_fail,
1642 	    "Total number of tail loss probes sending retransmitted data that failed (wait for t3)");
1643 	rack_to_tot = counter_u64_alloc(M_WAITOK);
1644 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1645 	    SYSCTL_CHILDREN(rack_counters),
1646 	    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1647 	    &rack_to_tot,
1648 	    "Total number of times the rack timeout expired");
1649 	rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
1650 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1651 	    SYSCTL_CHILDREN(rack_counters),
1652 	    OID_AUTO, "arm_rack", CTLFLAG_RD,
1653 	    &rack_to_arm_rack,
1654 	    "Total number of times the rack timer armed");
1655 	rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
1656 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1657 	    SYSCTL_CHILDREN(rack_counters),
1658 	    OID_AUTO, "arm_tlp", CTLFLAG_RD,
1659 	    &rack_to_arm_tlp,
1660 	    "Total number of times the tlp timer armed");
1661 	rack_calc_zero = counter_u64_alloc(M_WAITOK);
1662 	rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
1663 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1664 	    SYSCTL_CHILDREN(rack_counters),
1665 	    OID_AUTO, "calc_zero", CTLFLAG_RD,
1666 	    &rack_calc_zero,
1667 	    "Total number of times pacing time worked out to zero");
1668 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1669 	    SYSCTL_CHILDREN(rack_counters),
1670 	    OID_AUTO, "calc_nonzero", CTLFLAG_RD,
1671 	    &rack_calc_nonzero,
1672 	    "Total number of times pacing time worked out to non-zero");
1673 	rack_paced_segments = counter_u64_alloc(M_WAITOK);
1674 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1675 	    SYSCTL_CHILDREN(rack_counters),
1676 	    OID_AUTO, "paced", CTLFLAG_RD,
1677 	    &rack_paced_segments,
1678 	    "Total number of times a segment send caused hptsi");
1679 	rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
1680 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1681 	    SYSCTL_CHILDREN(rack_counters),
1682 	    OID_AUTO, "unpaced", CTLFLAG_RD,
1683 	    &rack_unpaced_segments,
1684 	    "Total number of times a segment did not cause hptsi");
1685 	rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1686 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1687 	    SYSCTL_CHILDREN(rack_counters),
1688 	    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1689 	    &rack_saw_enobuf,
1690 	    "Total number of times a send returned enobuf for non-hdwr paced connections");
1691 	rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1692 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1693 	    SYSCTL_CHILDREN(rack_counters),
1694 	    OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1695 	    &rack_saw_enobuf_hw,
1696 	    "Total number of times a send returned enobuf for hdwr paced connections");
1697 	rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1698 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1699 	    SYSCTL_CHILDREN(rack_counters),
1700 	    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1701 	    &rack_saw_enetunreach,
1702 	    "Total number of times a send received an ENETUNREACH");
1703 	rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1704 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1705 	    SYSCTL_CHILDREN(rack_counters),
1706 	    OID_AUTO, "alloc_hot", CTLFLAG_RD,
1707 	    &rack_hot_alloc,
1708 	    "Total allocations from the top of our list");
1709 	rack_to_alloc = counter_u64_alloc(M_WAITOK);
1710 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1711 	    SYSCTL_CHILDREN(rack_counters),
1712 	    OID_AUTO, "allocs", CTLFLAG_RD,
1713 	    &rack_to_alloc,
1714 	    "Total allocations of tracking structures");
1715 	rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1716 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1717 	    SYSCTL_CHILDREN(rack_counters),
1718 	    OID_AUTO, "allochard", CTLFLAG_RD,
1719 	    &rack_to_alloc_hard,
1720 	    "Total allocations done with sleeping the hard way");
1721 	rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1722 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1723 	    SYSCTL_CHILDREN(rack_counters),
1724 	    OID_AUTO, "allocemerg", CTLFLAG_RD,
1725 	    &rack_to_alloc_emerg,
1726 	    "Total allocations done from emergency cache");
1727 	rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1728 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1729 	    SYSCTL_CHILDREN(rack_counters),
1730 	    OID_AUTO, "alloc_limited", CTLFLAG_RD,
1731 	    &rack_to_alloc_limited,
1732 	    "Total allocations dropped due to limit");
1733 	rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1734 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1735 	    SYSCTL_CHILDREN(rack_counters),
1736 	    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1737 	    &rack_alloc_limited_conns,
1738 	    "Connections with allocations dropped due to limit");
1739 	rack_split_limited = counter_u64_alloc(M_WAITOK);
1740 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1741 	    SYSCTL_CHILDREN(rack_counters),
1742 	    OID_AUTO, "split_limited", CTLFLAG_RD,
1743 	    &rack_split_limited,
1744 	    "Split allocations dropped due to limit");
1745 
1746 	for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
1747 		char name[32];
1748 		snprintf(name, sizeof(name), "cmp_ack_cnt_%d", i);
1749 		rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK);
1750 		SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1751 				       SYSCTL_CHILDREN(rack_counters),
1752 				       OID_AUTO, name, CTLFLAG_RD,
1753 				       &rack_proc_comp_ack[i],
1754 				       "Number of compressed acks we processed");
1755 	}
1756 	rack_large_ackcmp = counter_u64_alloc(M_WAITOK);
1757 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1758 	    SYSCTL_CHILDREN(rack_counters),
1759 	    OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD,
1760 	    &rack_large_ackcmp,
1761 	    "Number of TCP connections with large mbufs for compressed acks");
1762 	rack_small_ackcmp = counter_u64_alloc(M_WAITOK);
1763 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1764 	    SYSCTL_CHILDREN(rack_counters),
1765 	    OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD,
1766 	    &rack_small_ackcmp,
1767 	    "Number of TCP connections with small mbufs for compressed acks");
1768 #ifdef INVARIANTS
1769 	rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1770 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1771 	    SYSCTL_CHILDREN(rack_counters),
1772 	    OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1773 	    &rack_adjust_map_bw,
1774 	    "Number of times we hit the case where the sb went up and down on a sendmap entry");
1775 #endif
1776 	rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1777 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1778 	    SYSCTL_CHILDREN(rack_counters),
1779 	    OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1780 	    &rack_multi_single_eq,
1781 	    "Total number of acks represented by compressed acks");
1782 	rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1783 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1784 	    SYSCTL_CHILDREN(rack_counters),
1785 	    OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1786 	    &rack_proc_non_comp_ack,
1787 	    "Number of non-compressed acks that we processed");
1788 
1790 	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1791 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1792 	    SYSCTL_CHILDREN(rack_counters),
1793 	    OID_AUTO, "sack_long", CTLFLAG_RD,
1794 	    &rack_sack_proc_all,
1795 	    "Total times we had to walk the whole list for sack processing");
1796 	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1797 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1798 	    SYSCTL_CHILDREN(rack_counters),
1799 	    OID_AUTO, "sack_restart", CTLFLAG_RD,
1800 	    &rack_sack_proc_restart,
1801 	    "Total times we had to walk the whole list due to a restart");
1802 	rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1803 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1804 	    SYSCTL_CHILDREN(rack_counters),
1805 	    OID_AUTO, "sack_short", CTLFLAG_RD,
1806 	    &rack_sack_proc_short,
1807 	    "Total times we took the shortcut for sack processing");
1808 	rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
1809 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1810 	    SYSCTL_CHILDREN(rack_counters),
1811 	    OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
1812 	    &rack_enter_tlp_calc,
1813 	    "Total times we called calc-tlp");
1814 	rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
1815 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1816 	    SYSCTL_CHILDREN(rack_counters),
1817 	    OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
1818 	    &rack_used_tlpmethod,
1819 	    "Total number of times we hit TLP method 1");
1820 	rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
1821 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1822 	    SYSCTL_CHILDREN(rack_counters),
1823 	    OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
1824 	    &rack_used_tlpmethod2,
1825 	    "Total number of times we hit TLP method 2");
1826 	rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1827 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1828 	    SYSCTL_CHILDREN(rack_attack),
1829 	    OID_AUTO, "skipacked", CTLFLAG_RD,
1830 	    &rack_sack_skipped_acked,
1831 	    "Total number of times we skipped previously sacked data");
1832 	rack_sack_splits = counter_u64_alloc(M_WAITOK);
1833 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1834 	    SYSCTL_CHILDREN(rack_attack),
1835 	    OID_AUTO, "ofsplit", CTLFLAG_RD,
1836 	    &rack_sack_splits,
1837 	    "Total number of times we did the old-fashioned tree split");
1838 	rack_progress_drops = counter_u64_alloc(M_WAITOK);
1839 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1840 	    SYSCTL_CHILDREN(rack_counters),
1841 	    OID_AUTO, "prog_drops", CTLFLAG_RD,
1842 	    &rack_progress_drops,
1843 	    "Total number of progress drops");
1844 	rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1845 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1846 	    SYSCTL_CHILDREN(rack_counters),
1847 	    OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1848 	    &rack_input_idle_reduces,
1849 	    "Total number of idle reductions on input");
1850 	rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1851 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1852 	    SYSCTL_CHILDREN(rack_counters),
1853 	    OID_AUTO, "collapsed_win", CTLFLAG_RD,
1854 	    &rack_collapsed_win,
1855 	    "Total number of collapsed windows");
1856 	rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
1857 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1858 	    SYSCTL_CHILDREN(rack_counters),
1859 	    OID_AUTO, "tlp_nada", CTLFLAG_RD,
1860 	    &rack_tlp_does_nada,
1861 	    "Total number of tlp calls that did nothing (nada)");
1862 	rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1863 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1864 	    SYSCTL_CHILDREN(rack_counters),
1865 	    OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1866 	    &rack_try_scwnd,
1867 	    "Total number of scwnd attempts");
1868 
1869 	rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
1870 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1871 	    SYSCTL_CHILDREN(rack_counters),
1872 	    OID_AUTO, "timer_hole", CTLFLAG_RD,
1873 	    &rack_per_timer_hole,
1874 	    "Total number of persists started in a timer hole");
1875 
1876 	rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK);
1877 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1878 	    SYSCTL_CHILDREN(rack_counters),
1879 	    OID_AUTO, "sndptr_wrong", CTLFLAG_RD,
1880 	    &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect");
1881 	rack_sbsndptr_right = counter_u64_alloc(M_WAITOK);
1882 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1883 	    SYSCTL_CHILDREN(rack_counters),
1884 	    OID_AUTO, "sndptr_right", CTLFLAG_RD,
1885 	    &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct");
1886 
1887 	COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1888 	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1889 	    OID_AUTO, "outsize", CTLFLAG_RD,
1890 	    rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1891 	COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1892 	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1893 	    OID_AUTO, "opts", CTLFLAG_RD,
1894 	    rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1895 	SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1896 	    SYSCTL_CHILDREN(rack_sysctl_root),
1897 	    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1898 	    &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1899 }
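
/*
 * Everything registered above can be read or tuned at run-time with
 * sysctl(8).  A short sketch, assuming this stack's root node shows up
 * as net.inet.tcp.rack (the "tlp", "timers", "measure" and "misc"
 * sub-nodes come from the registrations above):
 *
 *   sysctl net.inet.tcp.rack.timers.minrto          # read the min RTO
 *   sysctl net.inet.tcp.rack.timers.minrto=30000    # set it (usec)
 *   sysctl net.inet.tcp.rack.clear=1                # zero the counters
 */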
1900 
1901 static __inline int
1902 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1903 {
1904 	if (SEQ_GEQ(b->r_start, a->r_start) &&
1905 	    SEQ_LT(b->r_start, a->r_end)) {
1906 		/*
1907 		 * The entry b is within the
1908 		 * block a. i.e.:
1909 		 * a --   |-------------|
1910 		 * b --   |----|
1911 		 * <or>
1912 		 * b --       |------|
1913 		 * <or>
1914 		 * b --       |-----------|
1915 		 */
1916 		return (0);
1917 	} else if (SEQ_GEQ(b->r_start, a->r_end)) {
1918 		/*
1919 		 * b starts at or beyond the
1920 		 * end of a, so a is said to
1921 		 * be smaller than b.
1922 		 * i.e.:
1923 		 * a --   |------|
1924 		 * b --          |--------|
1925 		 * or
1926 		 * b --              |-----|
1927 		 */
1928 		return (1);
1929 	}
1930 	/*
1931 	 * What's left is where a is
1932 	 * larger than b, i.e.:
1933 	 * a --         |-------|
1934 	 * b --  |---|
1935 	 * or even possibly
1936 	 * b --   |--------------|
1937 	 */
1938 	return (-1);
1939 }
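
/*
 * A worked example of the ordering above, with hypothetical sequence
 * numbers and a = [100, 200):
 *
 *   b->r_start = 150  ->  b overlaps a,          returns  0
 *   b->r_start = 200  ->  b begins at a->r_end,  returns  1
 *   b->r_start =  50  ->  a is larger than b,    returns -1
 *
 * Overlap compares as equality, so a tree lookup with a single-byte
 * "key" block finds the sendmap entry containing that sequence number.
 */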
1940 
1941 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1942 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1943 
1944 static uint32_t
1945 rc_init_window(struct tcp_rack *rack)
1946 {
1947 	uint32_t win;
1948 
1949 	if (rack->rc_init_win == 0) {
1950 		/*
1951 		 * Nothing set by the user, use the system stack
1952 		 * default.
1953 		 */
1954 		return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1955 	}
1956 	win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
1957 	return (win);
1958 }
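
/*
 * A worked example with hypothetical values: with rc_init_win = 10 and
 * a fixed maximum segment of 1448 bytes this returns 10 * 1448 = 14480
 * bytes; with rc_init_win = 0 it falls back to the system stack's
 * tcp_compute_initwnd() default instead.
 */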
1959 
1960 static uint64_t
1961 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1962 {
1963 	if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1964 		return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1965 	else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1966 		return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1967 	else
1968 		return (rack->r_ctl.rc_fixed_pacing_rate_ca);
1969 }
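
/*
 * So fixed-rate pacing keys off connection state: the recovery rate
 * while in fast recovery, the slow-start rate while cwnd is below
 * ssthresh, and the congestion-avoidance rate otherwise.
 */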
1970 
1971 static uint64_t
1972 rack_get_bw(struct tcp_rack *rack)
1973 {
1974 	if (rack->use_fixed_rate) {
1975 		/* Return the fixed pacing rate */
1976 		return (rack_get_fixed_pacing_bw(rack));
1977 	}
1978 	if (rack->r_ctl.gp_bw == 0) {
1979 		/*
1980 		 * We have no b/w measurement yet; if
1981 		 * the user set an initial bw, return it.
1982 		 * If we don't have that and we do have
1983 		 * an srtt, use the TCP IW (10) to
1984 		 * calculate a fictional b/w over the SRTT,
1985 		 * which is more or less a guess. Note
1986 		 * we don't use our IW from rack on purpose,
1987 		 * so if we have something like IW=30 we are
1988 		 * not calculating a "huge" b/w.
1989 		 */
1990 		uint64_t bw, srtt;
1991 		if (rack->r_ctl.init_rate)
1992 			return (rack->r_ctl.init_rate);
1993 
1994 		/* Has the user set a max peak rate? */
1995 #ifdef NETFLIX_PEAKRATE
1996 		if (rack->rc_tp->t_maxpeakrate)
1997 			return (rack->rc_tp->t_maxpeakrate);
1998 #endif
1999 		/* Ok, let's come up with the IW guess, if we have an srtt */
2000 		if (rack->rc_tp->t_srtt == 0) {
2001 			/*
2002 			 * Go with old pacing method
2003 			 * i.e. burst mitigation only.
2004 			 */
2005 			return (0);
2006 		}
2007 		/* Ok, let's get the initial TCP win (not rack's) */
2008 		bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
2009 		srtt = (uint64_t)rack->rc_tp->t_srtt;
2010 		bw *= (uint64_t)USECS_IN_SECOND;
2011 		bw /= srtt;
2012 		if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2013 			bw = rack->r_ctl.bw_rate_cap;
2014 		return (bw);
2015 	} else {
2016 		uint64_t bw;
2017 
2018 		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
2019 			/* Averaging is done, we can return the value */
2020 			bw = rack->r_ctl.gp_bw;
2021 		} else {
2022 			/* Still doing initial average, must calculate */
2023 			bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements;
2024 		}
2025 #ifdef NETFLIX_PEAKRATE
2026 		if ((rack->rc_tp->t_maxpeakrate) &&
2027 		    (bw > rack->rc_tp->t_maxpeakrate)) {
2028 			/* The user has set a peak rate to pace at;
2029 			 * don't allow us to pace faster than that.
2030 			 */
2031 			return (rack->rc_tp->t_maxpeakrate);
2032 		}
2033 #endif
2034 		if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2035 			bw = rack->r_ctl.bw_rate_cap;
2036 		return (bw);
2037 	}
2038 }
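
/*
 * A worked example of the IW guess above, with hypothetical values:
 * a 1448-byte MSS gives an IW of 10 * 1448 = 14480 bytes, so with
 * t_srtt = 50000 usec:
 *
 *   bw = 14480 * USECS_IN_SECOND / 50000
 *      = 14480 * 1000000 / 50000 = 289600 bytes/sec (~2.3 Mbps)
 *
 * which is then clamped to bw_rate_cap if one is set.
 */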
2039 
2040 static uint16_t
2041 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2042 {
2043 	if (rack->use_fixed_rate) {
2044 		return (100);
2045 	} else if (rack->in_probe_rtt && (rsm == NULL))
2046 		return (rack->r_ctl.rack_per_of_gp_probertt);
2047 	else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
2048 		  rack->r_ctl.rack_per_of_gp_rec)) {
2049 		if (rsm) {
2050 			/* A retransmission always uses the recovery rate */
2051 			return (rack->r_ctl.rack_per_of_gp_rec);
2052 		} else if (rack->rack_rec_nonrxt_use_cr) {
2053 			/* Directed to use the configured rate */
2054 			goto configured_rate;
2055 		} else if (rack->rack_no_prr &&
2056 			   (rack->r_ctl.rack_per_of_gp_rec > 100)) {
2057 			/* No PRR, lets just use the b/w estimate only */
2058 			return (100);
2059 		} else {
2060 			/*
2061 			 * Here we may have a non-retransmit but we
2062 			 * have no overrides, so just use the recovery
2063 			 * rate (prr is in effect).
2064 			 */
2065 			return (rack->r_ctl.rack_per_of_gp_rec);
2066 		}
2067 	}
2068 configured_rate:
2069 	/* For the configured rate we look at our cwnd vs the ssthresh */
2070 	if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2071 		return (rack->r_ctl.rack_per_of_gp_ss);
2072 	else
2073 		return (rack->r_ctl.rack_per_of_gp_ca);
2074 }
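
/*
 * A sketch of the selection above, with hypothetical percentages:
 * given rack_per_of_gp_rec = 150, rack_per_of_gp_ss = 250 and
 * rack_per_of_gp_ca = 200, a retransmission during fast recovery is
 * paced at 150% of the b/w estimate, new data in slow start at 250%
 * and new data in congestion avoidance at 200%, while a fixed-rate
 * connection always gets 100 (no gain).
 */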
2075 
2076 static void
2077 rack_log_hdwr_pacing(struct tcp_rack *rack,
2078 		     uint64_t rate, uint64_t hw_rate, int line,
2079 		     int error, uint16_t mod)
2080 {
2081 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2082 		union tcp_log_stackspecific log;
2083 		struct timeval tv;
2084 		const struct ifnet *ifp;
2085 
2086 		memset(&log, 0, sizeof(log));
2087 		log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2088 		log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2089 		if (rack->r_ctl.crte) {
2090 			ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2091 		} else if (rack->rc_inp->inp_route.ro_nh &&
2092 			   rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2093 			ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2094 		} else
2095 			ifp = NULL;
2096 		if (ifp) {
2097 			log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
2098 			log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
2099 		}
2100 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2101 		log.u_bbr.bw_inuse = rate;
2102 		log.u_bbr.flex5 = line;
2103 		log.u_bbr.flex6 = error;
2104 		log.u_bbr.flex7 = mod;
2105 		log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2106 		log.u_bbr.flex8 = rack->use_fixed_rate;
2107 		log.u_bbr.flex8 <<= 1;
2108 		log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2109 		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2110 		log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2111 		if (rack->r_ctl.crte)
2112 			log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2113 		else
2114 			log.u_bbr.cur_del_rate = 0;
2115 		log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2116 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2117 		    &rack->rc_inp->inp_socket->so_rcv,
2118 		    &rack->rc_inp->inp_socket->so_snd,
2119 		    BBR_LOG_HDWR_PACE, 0,
2120 		    0, &log, false, &tv);
2121 	}
2122 }
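
/*
 * Note the packing above: flex3/flex4 carry the upper and lower 32
 * bits of the ifp pointer, so a log reader can reassemble it as
 * ((uint64_t)flex3 << 32) | flex4, and flex8 holds two flags,
 * bit 1 = use_fixed_rate and bit 0 = rack_hdrw_pacing.
 */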
2123 
2124 static uint64_t
2125 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2126 {
2127 	/*
2128 	 * We allow rack_per_of_gp_xx to dictate the bw rate we want.
2129 	 */
2130 	uint64_t bw_est, high_rate;
2131 	uint64_t gain;
2132 
2133 	gain = (uint64_t)rack_get_output_gain(rack, rsm);
2134 	bw_est = bw * gain;
2135 	bw_est /= (uint64_t)100;
2136 	/* Never fall below the minimum (def 64kbps) */
2137 	if (bw_est < RACK_MIN_BW)
2138 		bw_est = RACK_MIN_BW;
2139 	if (rack->r_rack_hw_rate_caps) {
2140 		/* Rate caps are in place */
2141 		if (rack->r_ctl.crte != NULL) {
2142 			/* We have a hdwr rate already */
2143 			high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2144 			if (bw_est >= high_rate) {
2145 				/* We are capping bw at the highest rate table entry */
2146 				rack_log_hdwr_pacing(rack,
2147 						     bw_est, high_rate, __LINE__,
2148 						     0, 3);
2149 				bw_est = high_rate;
2150 				if (capped)
2151 					*capped = 1;
2152 			}
2153 		} else if ((rack->rack_hdrw_pacing == 0) &&
2154 			   (rack->rack_hdw_pace_ena) &&
2155 			   (rack->rack_attempt_hdwr_pace == 0) &&
2156 			   (rack->rc_inp->inp_route.ro_nh != NULL) &&
2157 			   (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2158 			/*
2159 			 * Special case, we have not yet attempted hardware
2160 			 * pacing, and yet we may, when we do, find out if we are
2161 			 * above the highest rate. We need to know the maxbw for the interface
2162 			 * in question (if it supports ratelimiting). We get back
2163 			 * a 0, if the interface is not found in the RL lists.
2164 			 */
2165 			high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2166 			if (high_rate) {
2167 			/* Yep, we have a rate; is our estimate above it? */
2168 				if (bw_est > high_rate) {
2169 					bw_est = high_rate;
2170 					if (capped)
2171 						*capped = 1;
2172 				}
2173 			}
2174 		}
2175 	}
2176 	return (bw_est);
2177 }
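
/*
 * A worked example of the scaling above, with hypothetical values:
 * bw = 1000000 bytes/sec and a gain of 150 from rack_get_output_gain()
 * yield bw_est = 1000000 * 150 / 100 = 1500000 bytes/sec, floored at
 * RACK_MIN_BW and, when hardware rate caps are in force, clipped to
 * the highest rate in the interface's rate table.
 */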
2178 
2179 static void
2180 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2181 {
2182 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2183 		union tcp_log_stackspecific log;
2184 		struct timeval tv;
2185 
2186 		if ((mod != 1) && (rack_verbose_logging == 0)) {
2187 			/*
2188 			 * We get 3 values currently for mod
2189 			 * 1 - We are retransmitting and this tells the reason.
2190 			 * 2 - We are clearing a dup-ack count.
2191 			 * 3 - We are incrementing a dup-ack count.
2192 			 *
2193 			 * The clear/increment are only logged
2194 			 * if you have BBverbose on.
2195 			 */
2196 			return;
2197 		}
2198 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2199 		log.u_bbr.flex1 = tsused;
2200 		log.u_bbr.flex2 = thresh;
2201 		log.u_bbr.flex3 = rsm->r_flags;
2202 		log.u_bbr.flex4 = rsm->r_dupack;
2203 		log.u_bbr.flex5 = rsm->r_start;
2204 		log.u_bbr.flex6 = rsm->r_end;
2205 		log.u_bbr.flex8 = mod;
2206 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2207 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2208 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2209 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2210 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2211 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2212 		log.u_bbr.pacing_gain = rack->r_must_retran;
2213 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2214 		    &rack->rc_inp->inp_socket->so_rcv,
2215 		    &rack->rc_inp->inp_socket->so_snd,
2216 		    BBR_LOG_SETTINGS_CHG, 0,
2217 		    0, &log, false, &tv);
2218 	}
2219 }
2220 
2221 static void
2222 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2223 {
2224 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2225 		union tcp_log_stackspecific log;
2226 		struct timeval tv;
2227 
2228 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2229 		log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2230 		log.u_bbr.flex2 = to;
2231 		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2232 		log.u_bbr.flex4 = slot;
2233 		log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2234 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2235 		log.u_bbr.flex7 = rack->rc_in_persist;
2236 		log.u_bbr.flex8 = which;
2237 		if (rack->rack_no_prr)
2238 			log.u_bbr.pkts_out = 0;
2239 		else
2240 			log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2241 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2242 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2243 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2244 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2245 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2246 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2247 		log.u_bbr.pacing_gain = rack->r_must_retran;
2248 		log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2249 		log.u_bbr.lost = rack_rto_min;
2250 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2251 		    &rack->rc_inp->inp_socket->so_rcv,
2252 		    &rack->rc_inp->inp_socket->so_snd,
2253 		    BBR_LOG_TIMERSTAR, 0,
2254 		    0, &log, false, &tv);
2255 	}
2256 }
2257 
2258 static void
2259 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2260 {
2261 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2262 		union tcp_log_stackspecific log;
2263 		struct timeval tv;
2264 
2265 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2266 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2267 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2268 		log.u_bbr.flex8 = to_num;
2269 		log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2270 		log.u_bbr.flex2 = rack->rc_rack_rtt;
2271 		if (rsm == NULL)
2272 			log.u_bbr.flex3 = 0;
2273 		else
2274 			log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2275 		if (rack->rack_no_prr)
2276 			log.u_bbr.flex5 = 0;
2277 		else
2278 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2279 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2280 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2281 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2282 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2283 		log.u_bbr.pacing_gain = rack->r_must_retran;
2284 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2285 		    &rack->rc_inp->inp_socket->so_rcv,
2286 		    &rack->rc_inp->inp_socket->so_snd,
2287 		    BBR_LOG_RTO, 0,
2288 		    0, &log, false, &tv);
2289 	}
2290 }
2291 
2292 static void
2293 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2294 		 struct rack_sendmap *prev,
2295 		 struct rack_sendmap *rsm,
2296 		 struct rack_sendmap *next,
2297 		 int flag, uint32_t th_ack, int line)
2298 {
2299 	if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2300 		union tcp_log_stackspecific log;
2301 		struct timeval tv;
2302 
2303 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2304 		log.u_bbr.flex8 = flag;
2305 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2306 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2307 		log.u_bbr.cur_del_rate = (uint64_t)prev;
2308 		log.u_bbr.delRate = (uint64_t)rsm;
2309 		log.u_bbr.rttProp = (uint64_t)next;
2310 		log.u_bbr.flex7 = 0;
2311 		if (prev) {
2312 			log.u_bbr.flex1 = prev->r_start;
2313 			log.u_bbr.flex2 = prev->r_end;
2314 			log.u_bbr.flex7 |= 0x4;
2315 		}
2316 		if (rsm) {
2317 			log.u_bbr.flex3 = rsm->r_start;
2318 			log.u_bbr.flex4 = rsm->r_end;
2319 			log.u_bbr.flex7 |= 0x2;
2320 		}
2321 		if (next) {
2322 			log.u_bbr.flex5 = next->r_start;
2323 			log.u_bbr.flex6 = next->r_end;
2324 			log.u_bbr.flex7 |= 0x1;
2325 		}
2326 		log.u_bbr.applimited = line;
2327 		log.u_bbr.pkts_out = th_ack;
2328 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2329 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2330 		if (rack->rack_no_prr)
2331 			log.u_bbr.lost = 0;
2332 		else
2333 			log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2334 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2335 		    &rack->rc_inp->inp_socket->so_rcv,
2336 		    &rack->rc_inp->inp_socket->so_snd,
2337 		    TCP_LOG_MAPCHG, 0,
2338 		    0, &log, false, &tv);
2339 	}
2340 }
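
/*
 * In the map-change record above flex7 is a presence mask for the
 * three sendmap pointers logged: 0x4 means prev is valid (its range is
 * in flex1/flex2), 0x2 means rsm is valid (flex3/flex4) and 0x1 means
 * next is valid (flex5/flex6).
 */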
2341 
2342 static void
2343 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2344 		 struct rack_sendmap *rsm, int conf)
2345 {
2346 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2347 		union tcp_log_stackspecific log;
2348 		struct timeval tv;
2349 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2350 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2351 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2352 		log.u_bbr.flex1 = t;
2353 		log.u_bbr.flex2 = len;
2354 		log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2355 		log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2356 		log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2357 		log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2358 		log.u_bbr.flex7 = conf;
2359 		log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2360 		log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2361 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2362 		log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2363 		log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2364 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2365 		if (rsm) {
2366 			log.u_bbr.pkt_epoch = rsm->r_start;
2367 			log.u_bbr.lost = rsm->r_end;
2368 			log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2369 			log.u_bbr.pacing_gain = rsm->r_flags;
2370 		} else {
2371 			/* It's a SYN */
2372 			log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2373 			log.u_bbr.lost = 0;
2374 			log.u_bbr.cwnd_gain = 0;
2375 			log.u_bbr.pacing_gain = 0;
2376 		}
2377 		/* Write out general bits of interest rrs here */
2378 		log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2379 		log.u_bbr.use_lt_bw <<= 1;
2380 		log.u_bbr.use_lt_bw |= rack->forced_ack;
2381 		log.u_bbr.use_lt_bw <<= 1;
2382 		log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2383 		log.u_bbr.use_lt_bw <<= 1;
2384 		log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2385 		log.u_bbr.use_lt_bw <<= 1;
2386 		log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2387 		log.u_bbr.use_lt_bw <<= 1;
2388 		log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2389 		log.u_bbr.use_lt_bw <<= 1;
2390 		log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2391 		log.u_bbr.use_lt_bw <<= 1;
2392 		log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2393 		log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2394 		log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2395 		log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2396 		log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2397 		log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2398 		log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2399 		log.u_bbr.bw_inuse <<= 32;
2400 		if (rsm)
2401 			log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2402 		TCP_LOG_EVENTP(tp, NULL,
2403 		    &rack->rc_inp->inp_socket->so_rcv,
2404 		    &rack->rc_inp->inp_socket->so_snd,
2405 		    BBR_LOG_BBRRTT, 0,
2406 		    0, &log, false, &tv);
2409 	}
2410 }
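
/*
 * The eight shift-and-or steps above pack one flag per bit into
 * use_lt_bw, most significant first.  A sketch of how a reader of the
 * BBR_LOG_BBRRTT record could unpack them:
 *
 *   rc_highly_buffered    = (use_lt_bw >> 7) & 1;
 *   forced_ack            = (use_lt_bw >> 6) & 1;
 *   rc_gp_dyn_mul         = (use_lt_bw >> 5) & 1;
 *   in_probe_rtt          = (use_lt_bw >> 4) & 1;
 *   measure_saw_probe_rtt = (use_lt_bw >> 3) & 1;
 *   app_limited_needs_set = (use_lt_bw >> 2) & 1;
 *   rc_gp_filled          = (use_lt_bw >> 1) & 1;
 *   rc_dragged_bottom     = use_lt_bw & 1;
 */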
2411 
2412 static void
2413 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2414 {
2415 	/*
2416 	 * Log the rtt sample we are
2417 	 * applying to the srtt algorithm in
2418 	 * useconds.
2419 	 */
2420 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2421 		union tcp_log_stackspecific log;
2422 		struct timeval tv;
2423 
2425 		memset(&log, 0, sizeof(log));
2426 		log.u_bbr.flex1 = rtt;
2427 		log.u_bbr.flex2 = rack->r_ctl.ack_count;
2428 		log.u_bbr.flex3 = rack->r_ctl.sack_count;
2429 		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2430 		log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2431 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2432 		log.u_bbr.flex7 = 1;
2433 		log.u_bbr.flex8 = rack->sack_attack_disable;
2434 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2435 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2436 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2437 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2438 		log.u_bbr.pacing_gain = rack->r_must_retran;
2439 		/*
2440 		 * We capture in delRate the upper 32 bits as
2441 		 * the confidence level we had declared, and the
2442 		 * lower 32 bits as the actual RTT using the arrival
2443 		 * timestamp.
2444 		 */
2445 		log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2446 		log.u_bbr.delRate <<= 32;
2447 		log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2448 		/* Let's capture all the things that make up t_rxtcur */
2449 		log.u_bbr.applimited = rack_rto_min;
2450 		log.u_bbr.epoch = rack_rto_max;
2451 		log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2452 		log.u_bbr.lost = rack_rto_min;
2453 		log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2454 		log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2455 		log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2456 		log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2457 		log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2458 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2459 		    &rack->rc_inp->inp_socket->so_rcv,
2460 		    &rack->rc_inp->inp_socket->so_snd,
2461 		    TCP_LOG_RTT, 0,
2462 		    0, &log, false, &tv);
2463 	}
2464 }
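
/*
 * A sketch of decoding the 64-bit fields packed above, as a reader of
 * the TCP_LOG_RTT record might do:
 *
 *   uint32_t confidence = (uint32_t)(log.u_bbr.delRate >> 32);
 *   uint32_t us_rtt     = (uint32_t)(log.u_bbr.delRate & 0xffffffff);
 *
 * bw_inuse likewise carries the arrival time as
 * tv_sec * HPTS_USEC_IN_SEC + tv_usec, i.e. a microsecond timestamp.
 */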
2465 
2466 static void
2467 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2468 {
2469 	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
2470 		union tcp_log_stackspecific log;
2471 		struct timeval tv;
2472 
2474 		memset(&log, 0, sizeof(log));
2475 		log.u_bbr.flex1 = rtt;
2476 		log.u_bbr.flex2 = send_time;
2477 		log.u_bbr.flex3 = ack_time;
2478 		log.u_bbr.flex4 = where;
2479 		log.u_bbr.flex7 = 2;
2480 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2481 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2482 		    &rack->rc_inp->inp_socket->so_rcv,
2483 		    &rack->rc_inp->inp_socket->so_snd,
2484 		    TCP_LOG_RTT, 0,
2485 		    0, &log, false, &tv);
2486 	}
2487 }
2488 
2490 
2491 static inline void
2492 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,  int event, int line)
2493 {
2494 	if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2495 		union tcp_log_stackspecific log;
2496 		struct timeval tv;
2497 
2498 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2499 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2500 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2501 		log.u_bbr.flex1 = line;
2502 		log.u_bbr.flex2 = tick;
2503 		log.u_bbr.flex3 = tp->t_maxunacktime;
2504 		log.u_bbr.flex4 = tp->t_acktime;
2505 		log.u_bbr.flex8 = event;
2506 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2507 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2508 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2509 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2510 		log.u_bbr.pacing_gain = rack->r_must_retran;
2511 		TCP_LOG_EVENTP(tp, NULL,
2512 		    &rack->rc_inp->inp_socket->so_rcv,
2513 		    &rack->rc_inp->inp_socket->so_snd,
2514 		    BBR_LOG_PROGRESS, 0,
2515 		    0, &log, false, &tv);
2516 	}
2517 }
2518 
2519 static void
2520 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
2521 {
2522 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2523 		union tcp_log_stackspecific log;
2524 
2525 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2526 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2527 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2528 		log.u_bbr.flex1 = slot;
2529 		if (rack->rack_no_prr)
2530 			log.u_bbr.flex2 = 0;
2531 		else
2532 			log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2533 		log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2534 		log.u_bbr.flex8 = rack->rc_in_persist;
2535 		log.u_bbr.timeStamp = cts;
2536 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2537 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2538 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2539 		log.u_bbr.pacing_gain = rack->r_must_retran;
2540 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2541 		    &rack->rc_inp->inp_socket->so_rcv,
2542 		    &rack->rc_inp->inp_socket->so_snd,
2543 		    BBR_LOG_BBRSND, 0,
2544 		    0, &log, false, tv);
2545 	}
2546 }
2547 
2548 static void
2549 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2550 {
2551 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2552 		union tcp_log_stackspecific log;
2553 		struct timeval tv;
2554 
2555 		memset(&log, 0, sizeof(log));
2556 		log.u_bbr.flex1 = did_out;
2557 		log.u_bbr.flex2 = nxt_pkt;
2558 		log.u_bbr.flex3 = way_out;
2559 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2560 		if (rack->rack_no_prr)
2561 			log.u_bbr.flex5 = 0;
2562 		else
2563 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2564 		log.u_bbr.flex6 = nsegs;
2565 		log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2566 		log.u_bbr.flex7 = rack->rc_ack_can_sendout_data;	/* Do we have ack-can-send set */
2567 		log.u_bbr.flex7 <<= 1;
2568 		log.u_bbr.flex7 |= rack->r_fast_output;	/* is fast output primed */
2569 		log.u_bbr.flex7 <<= 1;
2570 		log.u_bbr.flex7 |= rack->r_wanted_output;	/* Do we want output */
2571 		log.u_bbr.flex8 = rack->rc_in_persist;
2572 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2573 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2574 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2575 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2576 		log.u_bbr.use_lt_bw <<= 1;
2577 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
2578 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2579 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2580 		log.u_bbr.pacing_gain = rack->r_must_retran;
2581 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2582 		    &rack->rc_inp->inp_socket->so_rcv,
2583 		    &rack->rc_inp->inp_socket->so_snd,
2584 		    BBR_LOG_DOSEG_DONE, 0,
2585 		    0, &log, false, &tv);
2586 	}
2587 }
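
/*
 * flex7 above is another small bit-pack, this time of three output
 * state flags: bit 2 = rc_ack_can_sendout_data, bit 1 = r_fast_output
 * and bit 0 = r_wanted_output; use_lt_bw carries r_ent_rec_ns in
 * bit 1 and r_might_revert in bit 0.
 */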
2588 
2589 static void
2590 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2591 {
2592 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2593 		union tcp_log_stackspecific log;
2594 		struct timeval tv;
2595 		uint32_t cts;
2596 
2597 		memset(&log, 0, sizeof(log));
2598 		cts = tcp_get_usecs(&tv);
2599 		log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2600 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2601 		log.u_bbr.flex4 = arg1;
2602 		log.u_bbr.flex5 = arg2;
2603 		log.u_bbr.flex6 = arg3;
2604 		log.u_bbr.flex8 = frm;
2605 		log.u_bbr.timeStamp = cts;
2606 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2607 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2608 		log.u_bbr.applimited = rack->r_ctl.rc_sacked;
2609 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2610 		log.u_bbr.pacing_gain = rack->r_must_retran;
2611 		TCP_LOG_EVENTP(tp, NULL,
2612 		    &tp->t_inpcb->inp_socket->so_rcv,
2613 		    &tp->t_inpcb->inp_socket->so_snd,
2614 		    TCP_HDWR_PACE_SIZE, 0,
2615 		    0, &log, false, &tv);
2616 	}
2617 }
2618 
2619 static void
2620 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2621 			  uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2622 {
2623 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2624 		union tcp_log_stackspecific log;
2625 		struct timeval tv;
2626 
2627 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2628 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2629 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2630 		log.u_bbr.flex1 = slot;
2631 		log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2632 		log.u_bbr.flex4 = reason;
2633 		if (rack->rack_no_prr)
2634 			log.u_bbr.flex5 = 0;
2635 		else
2636 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2637 		log.u_bbr.flex7 = hpts_calling;
2638 		log.u_bbr.flex8 = rack->rc_in_persist;
2639 		log.u_bbr.lt_epoch = cwnd_to_use;
2640 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2641 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2642 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2643 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2644 		log.u_bbr.pacing_gain = rack->r_must_retran;
2645 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2646 		    &rack->rc_inp->inp_socket->so_rcv,
2647 		    &rack->rc_inp->inp_socket->so_snd,
2648 		    BBR_LOG_JUSTRET, 0,
2649 		    tlen, &log, false, &tv);
2650 	}
2651 }
2652 
2653 static void
2654 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2655 		   struct timeval *tv, uint32_t flags_on_entry)
2656 {
2657 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2658 		union tcp_log_stackspecific log;
2659 
2660 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2661 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2662 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2663 		log.u_bbr.flex1 = line;
2664 		log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2665 		log.u_bbr.flex3 = flags_on_entry;
2666 		log.u_bbr.flex4 = us_cts;
2667 		if (rack->rack_no_prr)
2668 			log.u_bbr.flex5 = 0;
2669 		else
2670 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2671 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2672 		log.u_bbr.flex7 = hpts_removed;
2673 		log.u_bbr.flex8 = 1;
2674 		log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2675 		log.u_bbr.timeStamp = us_cts;
2676 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2677 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2678 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2679 		log.u_bbr.pacing_gain = rack->r_must_retran;
2680 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2681 		    &rack->rc_inp->inp_socket->so_rcv,
2682 		    &rack->rc_inp->inp_socket->so_snd,
2683 		    BBR_LOG_TIMERCANC, 0,
2684 		    0, &log, false, tv);
2685 	}
2686 }
2687 
2688 static void
2689 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2690 			  uint32_t flex1, uint32_t flex2,
2691 			  uint32_t flex3, uint32_t flex4,
2692 			  uint32_t flex5, uint32_t flex6,
2693 			  uint16_t flex7, uint8_t mod)
2694 {
2695 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2696 		union tcp_log_stackspecific log;
2697 		struct timeval tv;
2698 
2699 		if (mod == 1) {
2700 			/* No, you can't use 1; it's reserved for the real timer cancel */
2701 			return;
2702 		}
2703 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2704 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2705 		log.u_bbr.flex1 = flex1;
2706 		log.u_bbr.flex2 = flex2;
2707 		log.u_bbr.flex3 = flex3;
2708 		log.u_bbr.flex4 = flex4;
2709 		log.u_bbr.flex5 = flex5;
2710 		log.u_bbr.flex6 = flex6;
2711 		log.u_bbr.flex7 = flex7;
2712 		log.u_bbr.flex8 = mod;
2713 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2714 		    &rack->rc_inp->inp_socket->so_rcv,
2715 		    &rack->rc_inp->inp_socket->so_snd,
2716 		    BBR_LOG_TIMERCANC, 0,
2717 		    0, &log, false, &tv);
2718 	}
2719 }
2720 
2721 static void
2722 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2723 {
2724 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2725 		union tcp_log_stackspecific log;
2726 		struct timeval tv;
2727 
2728 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2729 		log.u_bbr.flex1 = timers;
2730 		log.u_bbr.flex2 = ret;
2731 		log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2732 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2733 		log.u_bbr.flex5 = cts;
2734 		if (rack->rack_no_prr)
2735 			log.u_bbr.flex6 = 0;
2736 		else
2737 			log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2738 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2739 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2740 		log.u_bbr.pacing_gain = rack->r_must_retran;
2741 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2742 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2743 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2744 		    &rack->rc_inp->inp_socket->so_rcv,
2745 		    &rack->rc_inp->inp_socket->so_snd,
2746 		    BBR_LOG_TO_PROCESS, 0,
2747 		    0, &log, false, &tv);
2748 	}
2749 }
2750 
2751 static void
2752 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd)
2753 {
2754 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2755 		union tcp_log_stackspecific log;
2756 		struct timeval tv;
2757 
2758 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2759 		log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2760 		log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2761 		if (rack->rack_no_prr)
2762 			log.u_bbr.flex3 = 0;
2763 		else
2764 			log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2765 		log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2766 		log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2767 		log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2768 		log.u_bbr.flex8 = frm;
2769 		log.u_bbr.pkts_out = orig_cwnd;
2770 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2771 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2772 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2773 		log.u_bbr.use_lt_bw <<= 1;
2774 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
2775 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2776 		    &rack->rc_inp->inp_socket->so_rcv,
2777 		    &rack->rc_inp->inp_socket->so_snd,
2778 		    BBR_LOG_BBRUPD, 0,
2779 		    0, &log, false, &tv);
2780 	}
2781 }
2782 
2783 #ifdef NETFLIX_EXP_DETECTION
2784 static void
2785 rack_log_sad(struct tcp_rack *rack, int event)
2786 {
2787 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2788 		union tcp_log_stackspecific log;
2789 		struct timeval tv;
2790 
2791 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2792 		log.u_bbr.flex1 = rack->r_ctl.sack_count;
2793 		log.u_bbr.flex2 = rack->r_ctl.ack_count;
2794 		log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2795 		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2796 		log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2797 		log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2798 		log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2799 		log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2800 		log.u_bbr.lt_epoch |= rack->do_detection;
2801 		log.u_bbr.applimited = tcp_map_minimum;
2802 		log.u_bbr.flex7 = rack->sack_attack_disable;
2803 		log.u_bbr.flex8 = event;
2804 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2805 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2806 		log.u_bbr.delivered = tcp_sad_decay_val;
2807 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
2808 		    &rack->rc_inp->inp_socket->so_rcv,
2809 		    &rack->rc_inp->inp_socket->so_snd,
2810 		    TCP_SAD_DETECTION, 0,
2811 		    0, &log, false, &tv);
2812 	}
2813 }
2814 #endif
2815 
2816 static void
2817 rack_counter_destroy(void)
2818 {
2819 	int i;
2820 
2821 	counter_u64_free(rack_fto_send);
2822 	counter_u64_free(rack_fto_rsm_send);
2823 	counter_u64_free(rack_nfto_resend);
2824 	counter_u64_free(rack_hw_pace_init_fail);
2825 	counter_u64_free(rack_hw_pace_lost);
2826 	counter_u64_free(rack_non_fto_send);
2827 	counter_u64_free(rack_extended_rfo);
2828 	counter_u64_free(rack_ack_total);
2829 	counter_u64_free(rack_express_sack);
2830 	counter_u64_free(rack_sack_total);
2831 	counter_u64_free(rack_move_none);
2832 	counter_u64_free(rack_move_some);
2833 	counter_u64_free(rack_sack_attacks_detected);
2834 	counter_u64_free(rack_sack_attacks_reversed);
2835 	counter_u64_free(rack_sack_used_next_merge);
2836 	counter_u64_free(rack_sack_used_prev_merge);
2837 	counter_u64_free(rack_badfr);
2838 	counter_u64_free(rack_badfr_bytes);
2839 	counter_u64_free(rack_rtm_prr_retran);
2840 	counter_u64_free(rack_rtm_prr_newdata);
2841 	counter_u64_free(rack_timestamp_mismatch);
2842 	counter_u64_free(rack_find_high);
2843 	counter_u64_free(rack_reorder_seen);
2844 	counter_u64_free(rack_tlp_tot);
2845 	counter_u64_free(rack_tlp_newdata);
2846 	counter_u64_free(rack_tlp_retran);
2847 	counter_u64_free(rack_tlp_retran_bytes);
2848 	counter_u64_free(rack_tlp_retran_fail);
2849 	counter_u64_free(rack_to_tot);
2850 	counter_u64_free(rack_to_arm_rack);
2851 	counter_u64_free(rack_to_arm_tlp);
2852 	counter_u64_free(rack_calc_zero);
2853 	counter_u64_free(rack_calc_nonzero);
2854 	counter_u64_free(rack_paced_segments);
2855 	counter_u64_free(rack_unpaced_segments);
2856 	counter_u64_free(rack_saw_enobuf);
2857 	counter_u64_free(rack_saw_enobuf_hw);
2858 	counter_u64_free(rack_saw_enetunreach);
2859 	counter_u64_free(rack_hot_alloc);
2860 	counter_u64_free(rack_to_alloc);
2861 	counter_u64_free(rack_to_alloc_hard);
2862 	counter_u64_free(rack_to_alloc_emerg);
2863 	counter_u64_free(rack_to_alloc_limited);
2864 	counter_u64_free(rack_alloc_limited_conns);
2865 	counter_u64_free(rack_split_limited);
2866 	for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
2867 		counter_u64_free(rack_proc_comp_ack[i]);
2868 	}
2869 	counter_u64_free(rack_multi_single_eq);
2870 	counter_u64_free(rack_proc_non_comp_ack);
2871 	counter_u64_free(rack_sack_proc_all);
2872 	counter_u64_free(rack_sack_proc_restart);
2873 	counter_u64_free(rack_sack_proc_short);
2874 	counter_u64_free(rack_enter_tlp_calc);
2875 	counter_u64_free(rack_used_tlpmethod);
2876 	counter_u64_free(rack_used_tlpmethod2);
2877 	counter_u64_free(rack_sack_skipped_acked);
2878 	counter_u64_free(rack_sack_splits);
2879 	counter_u64_free(rack_progress_drops);
2880 	counter_u64_free(rack_input_idle_reduces);
2881 	counter_u64_free(rack_collapsed_win);
2882 	counter_u64_free(rack_tlp_does_nada);
2883 	counter_u64_free(rack_try_scwnd);
2884 	counter_u64_free(rack_per_timer_hole);
2885 	counter_u64_free(rack_large_ackcmp);
2886 	counter_u64_free(rack_small_ackcmp);
2887 #ifdef INVARIANTS
2888 	counter_u64_free(rack_adjust_map_bw);
2889 #endif
2890 	COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2891 	COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
2892 }
2893 
2894 static struct rack_sendmap *
2895 rack_alloc(struct tcp_rack *rack)
2896 {
2897 	struct rack_sendmap *rsm;
2898 
2899 	/*
2900 	 * First get the top of the list; in theory
2901 	 * it is the "hottest" rsm we have,
2902 	 * possibly just freed by ack processing.
2903 	 */
2904 	if (rack->rc_free_cnt > rack_free_cache) {
2905 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2906 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2907 		counter_u64_add(rack_hot_alloc, 1);
2908 		rack->rc_free_cnt--;
2909 		return (rsm);
2910 	}
2911 	/*
2912 	 * Once we get under our free cache we probably
2913 	 * no longer have a "hot" one available. Lets
2914 	 * get one from UMA.
2915 	 */
2916 	rsm = uma_zalloc(rack_zone, M_NOWAIT);
2917 	if (rsm) {
2918 		rack->r_ctl.rc_num_maps_alloced++;
2919 		counter_u64_add(rack_to_alloc, 1);
2920 		return (rsm);
2921 	}
2922 	/*
2923 	 * Dig in to our aux rsm's (the last two) since
2924 	 * UMA failed to get us one.
2925 	 */
2926 	if (rack->rc_free_cnt) {
2927 		counter_u64_add(rack_to_alloc_emerg, 1);
2928 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2929 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2930 		rack->rc_free_cnt--;
2931 		return (rsm);
2932 	}
2933 	return (NULL);
2934 }
2935 
2936 static struct rack_sendmap *
2937 rack_alloc_full_limit(struct tcp_rack *rack)
2938 {
2939 	if ((V_tcp_map_entries_limit > 0) &&
2940 	    (rack->do_detection == 0) &&
2941 	    (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2942 		counter_u64_add(rack_to_alloc_limited, 1);
2943 		if (!rack->alloc_limit_reported) {
2944 			rack->alloc_limit_reported = 1;
2945 			counter_u64_add(rack_alloc_limited_conns, 1);
2946 		}
2947 		return (NULL);
2948 	}
2949 	return (rack_alloc(rack));
2950 }
2951 
2952 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2953 static struct rack_sendmap *
2954 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2955 {
2956 	struct rack_sendmap *rsm;
2957 
2958 	if (limit_type) {
2959 		/* currently there is only one limit type */
2960 		if (V_tcp_map_split_limit > 0 &&
2961 		    (rack->do_detection == 0) &&
2962 		    rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2963 			counter_u64_add(rack_split_limited, 1);
2964 			if (!rack->alloc_limit_reported) {
2965 				rack->alloc_limit_reported = 1;
2966 				counter_u64_add(rack_alloc_limited_conns, 1);
2967 			}
2968 			return (NULL);
2969 		}
2970 	}
2971 
2972 	/* allocate and mark in the limit type, if set */
2973 	rsm = rack_alloc(rack);
2974 	if (rsm != NULL && limit_type) {
2975 		rsm->r_limit_type = limit_type;
2976 		rack->r_ctl.rc_num_split_allocs++;
2977 	}
2978 	return (rsm);
2979 }
2980 
2981 static void
2982 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2983 {
2984 	if (rsm->r_flags & RACK_APP_LIMITED) {
2985 		if (rack->r_ctl.rc_app_limited_cnt > 0) {
2986 			rack->r_ctl.rc_app_limited_cnt--;
2987 		}
2988 	}
2989 	if (rsm->r_limit_type) {
2990 		/* currently there is only one limit type */
2991 		rack->r_ctl.rc_num_split_allocs--;
2992 	}
2993 	if (rsm == rack->r_ctl.rc_first_appl) {
2994 		if (rack->r_ctl.rc_app_limited_cnt == 0)
2995 			rack->r_ctl.rc_first_appl = NULL;
2996 		else {
2997 			/* Follow the next one out */
2998 			struct rack_sendmap fe;
2999 
3000 			fe.r_start = rsm->r_nseq_appl;
3001 			rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3002 		}
3003 	}
3004 	if (rsm == rack->r_ctl.rc_resend)
3005 		rack->r_ctl.rc_resend = NULL;
3006 	if (rsm == rack->r_ctl.rc_rsm_at_retran)
3007 		rack->r_ctl.rc_rsm_at_retran = NULL;
3008 	if (rsm == rack->r_ctl.rc_end_appl)
3009 		rack->r_ctl.rc_end_appl = NULL;
3010 	if (rack->r_ctl.rc_tlpsend == rsm)
3011 		rack->r_ctl.rc_tlpsend = NULL;
3012 	if (rack->r_ctl.rc_sacklast == rsm)
3013 		rack->r_ctl.rc_sacklast = NULL;
3014 	memset(rsm, 0, sizeof(struct rack_sendmap));
3015 	TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
3016 	rack->rc_free_cnt++;
3017 }
3018 
3019 static void
3020 rack_free_trim(struct tcp_rack *rack)
3021 {
3022 	struct rack_sendmap *rsm;
3023 
3024 	/*
3025 	 * Free up all the tail entries until
3026 	 * we get our list down to the limit.
3027 	 */
3028 	while (rack->rc_free_cnt > rack_free_cache) {
3029 		rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3030 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3031 		rack->rc_free_cnt--;
3032 		uma_zfree(rack_zone, rsm);
3033 	}
3034 }
3035 
3036 
3037 static uint32_t
3038 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
3039 {
3040 	uint64_t srtt, bw, len, tim;
3041 	uint32_t segsiz, def_len, minl;
3042 
3043 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3044 	def_len = rack_def_data_window * segsiz;
3045 	if (rack->rc_gp_filled == 0) {
3046 		/*
3047 		 * We have no measurement (IW is in flight?) so
3048 		 * we can only guess using our data_window sysctl
3049 		 * value (usually 100MSS).
3050 		 */
3051 		return (def_len);
3052 	}
3053 	/*
3054 	 * Now we have a number of factors to consider.
3055 	 *
3056 	 * 1) We have a desired BDP multiple which is usually
3057 	 *    at least 2.
3058 	 * 2) We have a minimum number of rtt's, usually 1 SRTT,
3059 	 *    but we allow it to be more.
3060 	 * 3) We want to make sure a measurement lasts N useconds (if
3061 	 *    we have set rack_min_measure_usec).
3062 	 *
3063 	 * We handle the first concern here by trying to create a data
3064 	 * window of max(rack_def_data_window, DesiredBDP). The
3065 	 * second concern we handle in not letting the measurement
3066 	 * window end normally until at least the required SRTT's
3067 	 * have gone by which is done further below in
3068 	 * rack_enough_for_measurement(). Finally the third concern
3069 	 * we also handle here by calculating how long that time
3070 	 * would take at the current BW and then return the
3071 	 * max of our first calculation and that length. Note
3072 	 * that if rack_min_measure_usec is 0, we don't deal
3073 	 * with concern 3. Also for both Concern 1 and 3 an
3074 	 * application limited period could end the measurement
3075 	 * earlier.
3076 	 *
3077 	 * So let's calculate the BDP with the "known" b/w using
3078 	 * the SRTT as our rtt and then multiply it by the
3079 	 * goal.
3080 	 */
3081 	bw = rack_get_bw(rack);
3082 	srtt = (uint64_t)tp->t_srtt;
3083 	len = bw * srtt;
3084 	len /= (uint64_t)HPTS_USEC_IN_SEC;
3085 	len *= max(1, rack_goal_bdp);
3086 	/* Now we need to round up to the nearest MSS */
3087 	len = roundup(len, segsiz);
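	/*
	 * Illustrative arithmetic for the above (hypothetical numbers,
	 * not from this file): bw = 12,500,000 bytes/sec (100 Mbps)
	 * and srtt = 40,000 usec give one BDP of 500,000 bytes; with
	 * rack_goal_bdp = 2 that is 1,000,000 bytes, and
	 * roundup(1000000, 1448) = 1,000,568.
	 */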
3088 	if (rack_min_measure_usec) {
3089 		/* Now calculate our min length for this b/w */
3090 		tim = rack_min_measure_usec;
3091 		minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3092 		if (minl == 0)
3093 			minl = 1;
3094 		minl = roundup(minl, segsiz);
3095 		if (len < minl)
3096 			len = minl;
3097 	}
3098 	/*
3099 	 * Now if we have a very small window we want
3100 	 * to make the measurement window as
3101 	 * small as possible. This happens on
3102 	 * low b/w connections and we don't want to
3103 	 * span huge numbers of rtt's between measurements.
3104 	 *
3105 	 * We basically include 2 over our "MIN window" so
3106 	 * that the measurement can be shortened (possibly) by
3107 	 * an ack'ed packet.
3108 	 */
3109 	if (len < def_len)
3110 		return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3111 	else
3112 		return (max((uint32_t)len, def_len));
3113 
3114 }
3115 
3116 static int
3117 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack)
3118 {
3119 	uint32_t tim, srtts, segsiz;
3120 
3121 	/*
3122 	 * Has enough time passed for the GP measurement to be valid?
3123 	 */
3124 	if ((tp->snd_max == tp->snd_una) ||
3125 	    (th_ack == tp->snd_max)){
3126 		/* All is acked */
3127 		return (1);
3128 	}
3129 	if (SEQ_LT(th_ack, tp->gput_seq)) {
3130 		/* Not enough bytes yet */
3131 		return (0);
3132 	}
3133 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3134 	if (SEQ_LT(th_ack, tp->gput_ack) &&
3135 	    ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3136 		/* Not enough bytes yet */
3137 		return (0);
3138 	}
3139 	if (rack->r_ctl.rc_first_appl &&
3140 	    (rack->r_ctl.rc_first_appl->r_start == th_ack)) {
3141 		/*
3142 		 * We are up to the app limited point;
3143 		 * we have to measure irrespective of the time.
3144 		 */
3145 		return (1);
3146 	}
3147 	/* Now what about time? */
3148 	srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3149 	tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3150 	if (tim >= srtts) {
3151 		return (1);
3152 	}
3153 	/* Nope not even a full SRTT has passed */
3154 	return (0);
3155 }
3156 
3157 static void
3158 rack_log_timely(struct tcp_rack *rack,
3159 		uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3160 		uint64_t up_bnd, int line, uint8_t method)
3161 {
3162 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3163 		union tcp_log_stackspecific log;
3164 		struct timeval tv;
3165 
3166 		memset(&log, 0, sizeof(log));
3167 		log.u_bbr.flex1 = logged;
3168 		log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3169 		log.u_bbr.flex2 <<= 4;
3170 		log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3171 		log.u_bbr.flex2 <<= 4;
3172 		log.u_bbr.flex2 |= rack->rc_gp_incr;
3173 		log.u_bbr.flex2 <<= 4;
3174 		log.u_bbr.flex2 |= rack->rc_gp_bwred;
3175 		log.u_bbr.flex3 = rack->rc_gp_incr;
3176 		log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3177 		log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3178 		log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3179 		log.u_bbr.flex7 = rack->rc_gp_bwred;
3180 		log.u_bbr.flex8 = method;
3181 		log.u_bbr.cur_del_rate = cur_bw;
3182 		log.u_bbr.delRate = low_bnd;
3183 		log.u_bbr.bw_inuse = up_bnd;
3184 		log.u_bbr.rttProp = rack_get_bw(rack);
3185 		log.u_bbr.pkt_epoch = line;
3186 		log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3187 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3188 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3189 		log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3190 		log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3191 		log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3192 		log.u_bbr.cwnd_gain <<= 1;
3193 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3194 		log.u_bbr.cwnd_gain <<= 1;
3195 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3196 		log.u_bbr.cwnd_gain <<= 1;
3197 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3198 		log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3199 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
3200 		    &rack->rc_inp->inp_socket->so_rcv,
3201 		    &rack->rc_inp->inp_socket->so_snd,
3202 		    TCP_TIMELY_WORK, 0,
3203 		    0, &log, false, &tv);
3204 	}
3205 }
3206 
3207 static int
3208 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3209 {
3210 	/*
3211 	 * Before we increase we need to know if
3212 	 * the estimate just made was less than
3213 	 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3214 	 *
3215 	 * If we already are pacing at a fast enough
3216 	 * rate to push us faster there is no sense in
3217 	 * increasing.
3218 	 *
3219 	 * We first calculate our actual pacing rate (ss or ca multiplier
3220 	 * times our cur_bw).
3221 	 *
3222 	 * Then we take the last measured rate and multiply by our
3223 	 * maximum pacing overage to give us a max allowable rate.
3224 	 *
3225 	 * If our act_rate is smaller than our max_allowable rate
3226 	 * then we should increase. Else we should hold steady.
3227 	 *
3228 	 */
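	/*
	 * Worked example (illustrative values): cur_bw = 1,000,000
	 * bytes/sec and mult = 120 give act_rate = 1,200,000. With
	 * last_bw_est = 1,150,000 and rack_max_per_above = 10,
	 * max_allow_rate = 1,265,000, so act_rate < max_allow_rate
	 * and we may raise. Had last_bw_est been 1,000,000,
	 * max_allow_rate would be 1,100,000 and we would hold steady.
	 */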
3229 	uint64_t act_rate, max_allow_rate;
3230 
3231 	if (rack_timely_no_stopping)
3232 		return (1);
3233 
3234 	if ((cur_bw == 0) || (last_bw_est == 0)) {
3235 		/*
3236 		 * Initial startup case or
3237 		 * everything is acked case.
3238 		 */
3239 		rack_log_timely(rack,  mult, cur_bw, 0, 0,
3240 				__LINE__, 9);
3241 		return (1);
3242 	}
3243 	if (mult <= 100) {
3244 		/*
3245 		 * We can always pace at or slightly above our rate.
3246 		 */
3247 		rack_log_timely(rack,  mult, cur_bw, 0, 0,
3248 				__LINE__, 9);
3249 		return (1);
3250 	}
3251 	act_rate = cur_bw * (uint64_t)mult;
3252 	act_rate /= 100;
3253 	max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3254 	max_allow_rate /= 100;
3255 	if (act_rate < max_allow_rate) {
3256 		/*
3257 		 * Here the rate we are actually pacing at
3258 		 * is smaller than 10% above our last measurement.
3259 		 * This means we are pacing below what we would
3260 		 * like to try to achieve (plus some wiggle room).
3261 		 */
3262 		rack_log_timely(rack,  mult, cur_bw, act_rate, max_allow_rate,
3263 				__LINE__, 9);
3264 		return (1);
3265 	} else {
3266 		/*
3267 		 * Here we are already pacing at least rack_max_per_above(10%)
3268 		 * what we are getting back. This indicates most likely
3269 		 * that we are being limited (cwnd/rwnd/app) and can't
3270 		 * get any more b/w. There is no sense in trying to
3271 		 * raise the pacing rate; it's not speeding us up,
3272 		 * and we already are pacing faster than we are getting.
3273 		 */
3274 		rack_log_timely(rack,  mult, cur_bw, act_rate, max_allow_rate,
3275 				__LINE__, 8);
3276 		return (0);
3277 	}
3278 }
3279 
3280 static void
3281 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3282 {
3283 	/*
3284 	 * When we drag bottom, we want to assure
3285 	 * that no multiplier is below 1.0; if one is,
3286 	 * we want to restore it to at least that.
3287 	 */
3288 	if (rack->r_ctl.rack_per_of_gp_rec  < 100) {
3289 		/* This is unlikely; we usually do not touch recovery */
3290 		rack->r_ctl.rack_per_of_gp_rec = 100;
3291 	}
3292 	if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3293 		rack->r_ctl.rack_per_of_gp_ca = 100;
3294 	}
3295 	if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3296 		rack->r_ctl.rack_per_of_gp_ss = 100;
3297 	}
3298 }
3299 
3300 static void
3301 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3302 {
3303 	if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3304 		rack->r_ctl.rack_per_of_gp_ca = 100;
3305 	}
3306 	if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3307 		rack->r_ctl.rack_per_of_gp_ss = 100;
3308 	}
3309 }
3310 
3311 static void
3312 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3313 {
3314 	int32_t  calc, logged, plus;
3315 
3316 	logged = 0;
3317 
3318 	if (override) {
3319 		/*
3320 		 * override is passed when we are
3321 		 * losing b/w and making one last
3322 		 * gasp at trying not to lose out
3323 		 * to a new-reno flow.
3324 		 */
3325 		goto extra_boost;
3326 	}
3327 	/* In classic timely we boost by 5x if we have 5 increases in a row, let's not */
3328 	if (rack->rc_gp_incr &&
3329 	    ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3330 		/*
3331 		 * Reset and get 5 strokes more before the boost. Note
3332 		 * that the count is 0 based so we have to add one.
3333 		 */
3334 extra_boost:
3335 		plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3336 		rack->rc_gp_timely_inc_cnt = 0;
3337 	} else
3338 		plus = (uint32_t)rack_gp_increase_per;
3339 	/* Must be at least 1% increase for true timely increases */
3340 	if ((plus < 1) &&
3341 	    ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3342 		plus = 1;
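	/*
	 * Example (assuming rack_gp_increase_per is at a typical
	 * small value, e.g. 2): a normal pass adds 2 percentage
	 * points to the active multiplier, while the boost pass
	 * (override, or RACK_TIMELY_CNT_BOOST consecutive increases)
	 * adds 2 * RACK_TIMELY_CNT_BOOST = 10 points in one step.
	 */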
3343 	if (rack->rc_gp_saw_rec &&
3344 	    (rack->rc_gp_no_rec_chg == 0) &&
3345 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3346 				  rack->r_ctl.rack_per_of_gp_rec)) {
3347 		/* We have been in recovery ding it too */
3348 		calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3349 		if (calc > 0xffff)
3350 			calc = 0xffff;
3351 		logged |= 1;
3352 		rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3353 		if (rack_per_upper_bound_ss &&
3354 		    (rack->rc_dragged_bottom == 0) &&
3355 		    (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
3356 			rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
3357 	}
3358 	if (rack->rc_gp_saw_ca &&
3359 	    (rack->rc_gp_saw_ss == 0) &&
3360 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3361 				  rack->r_ctl.rack_per_of_gp_ca)) {
3362 		/* In CA */
3363 		calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3364 		if (calc > 0xffff)
3365 			calc = 0xffff;
3366 		logged |= 2;
3367 		rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3368 		if (rack_per_upper_bound_ca &&
3369 		    (rack->rc_dragged_bottom == 0) &&
3370 		    (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
3371 			rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
3372 	}
3373 	if (rack->rc_gp_saw_ss &&
3374 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3375 				  rack->r_ctl.rack_per_of_gp_ss)) {
3376 		/* In SS */
3377 		calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3378 		if (calc > 0xffff)
3379 			calc = 0xffff;
3380 		rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3381 		if (rack_per_upper_bound_ss &&
3382 		    (rack->rc_dragged_bottom == 0) &&
3383 		    (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
3384 			rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
3385 		logged |= 4;
3386 	}
3387 	if (logged &&
3388 	    (rack->rc_gp_incr == 0)){
3389 		/* Go into increment mode */
3390 		rack->rc_gp_incr = 1;
3391 		rack->rc_gp_timely_inc_cnt = 0;
3392 	}
3393 	if (rack->rc_gp_incr &&
3394 	    logged &&
3395 	    (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3396 		rack->rc_gp_timely_inc_cnt++;
3397 	}
3398 	rack_log_timely(rack,  logged, plus, 0, 0,
3399 			__LINE__, 1);
3400 }
3401 
3402 static uint32_t
3403 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3404 {
3405 	/*
3406 	 * norm_grad = rtt_diff / minrtt;
3407 	 * new_per = curper * (1 - B * norm_grad)
3408 	 *
3409 	 * B = rack_gp_decrease_per (default 10%)
3410 	 * rtt_diff = input var current rtt-diff
3411 	 * curper = input var current percentage
3412 	 * minrtt = from rack filter
3413 	 *
3414 	 */
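	/*
	 * Worked example (hypothetical numbers): curper = 200,
	 * rtt_diff = 5,000 usec, minrtt = 50,000 usec and B = 10%
	 * give norm_grad = 0.1, so new_per = 200 * (1 - 0.10 * 0.1)
	 * = 198. The fixed-point math below computes the same thing
	 * scaled by 1,000,000 to avoid floating point in the kernel.
	 */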
3415 	uint64_t perf;
3416 
3417 	perf = (((uint64_t)curper * ((uint64_t)1000000 -
3418 		    ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3419 		     (((uint64_t)rtt_diff * (uint64_t)1000000)/
3420 		      (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3421 		     (uint64_t)1000000)) /
3422 		(uint64_t)1000000);
3423 	if (perf > curper) {
3424 		/* TSNH */
3425 		perf = curper - 1;
3426 	}
3427 	return ((uint32_t)perf);
3428 }
3429 
3430 static uint32_t
3431 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3432 {
3433 	/*
3434 	 *                                     highrttthresh
3435 	 * result = curper * (1 - (B * (1 - ---------------)))
3436 	 *                                       gp_srtt
3437 	 *
3438 	 * B = rack_gp_decrease_per (default 10%)
3439 	 * highrttthresh = filter_min * rack_gp_rtt_maxmul
3440 	 */
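	/*
	 * Worked example (hypothetical numbers): filter_min = 10,000
	 * usec and rack_gp_rtt_maxmul = 3 give highrttthresh =
	 * 30,000. With rtt = 60,000 usec and B = 10%, result =
	 * curper * (1 - 0.10 * (1 - 0.5)) = curper * 0.95, so a
	 * curper of 200 drops to 190.
	 */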
3441 	uint64_t perf;
3442 	uint32_t highrttthresh;
3443 
3444 	highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3445 
3446 	perf = (((uint64_t)curper * ((uint64_t)1000000 -
3447 				     ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3448 					((uint64_t)highrttthresh * (uint64_t)1000000) /
3449 						    (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
3450 	return (perf);
3451 }
3452 
3453 static void
3454 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3455 {
3456 	uint64_t logvar, logvar2, logvar3;
3457 	uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3458 
3459 	if (rack->rc_gp_incr) {
3460 		/* Turn off increment counting */
3461 		rack->rc_gp_incr = 0;
3462 		rack->rc_gp_timely_inc_cnt = 0;
3463 	}
3464 	ss_red = ca_red = rec_red = 0;
3465 	logged = 0;
3466 	/* Calculate the reduction value */
3467 	if (rtt_diff < 0) {
3468 		rtt_diff *= -1;
3469 	}
3470 	/* Must be at least 1% reduction */
3471 	if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3472 		/* We have been in recovery ding it too */
3473 		if (timely_says == 2) {
3474 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3475 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3476 			if (alt < new_per)
3477 				val = alt;
3478 			else
3479 				val = new_per;
3480 		} else
3481 			val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3482 		if (rack->r_ctl.rack_per_of_gp_rec > val) {
3483 			rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3484 			rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3485 		} else {
3486 			rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3487 			rec_red = 0;
3488 		}
3489 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3490 			rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3491 		logged |= 1;
3492 	}
3493 	if (rack->rc_gp_saw_ss) {
3494 		/* Sent in SS */
3495 		if (timely_says == 2) {
3496 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3497 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3498 			if (alt < new_per)
3499 				val = alt;
3500 			else
3501 				val = new_per;
3502 		} else
3503 			val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3504 		if (rack->r_ctl.rack_per_of_gp_ss > val) {
3505 			ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3506 			rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3507 		} else {
3508 			ss_red = new_per;
3509 			rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3510 			logvar = new_per;
3511 			logvar <<= 32;
3512 			logvar |= alt;
3513 			logvar2 = (uint32_t)rtt;
3514 			logvar2 <<= 32;
3515 			logvar2 |= (uint32_t)rtt_diff;
3516 			logvar3 = rack_gp_rtt_maxmul;
3517 			logvar3 <<= 32;
3518 			logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3519 			rack_log_timely(rack, timely_says,
3520 					logvar2, logvar3,
3521 					logvar, __LINE__, 10);
3522 		}
3523 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3524 			rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3525 		logged |= 4;
3526 	} else if (rack->rc_gp_saw_ca) {
3527 		/* Sent in CA */
3528 		if (timely_says == 2) {
3529 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3530 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3531 			if (alt < new_per)
3532 				val = alt;
3533 			else
3534 				val = new_per;
3535 		} else
3536 			val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3537 		if (rack->r_ctl.rack_per_of_gp_ca > val) {
3538 			ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3539 			rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3540 		} else {
3541 			rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3542 			ca_red = 0;
3543 			logvar = new_per;
3544 			logvar <<= 32;
3545 			logvar |= alt;
3546 			logvar2 = (uint32_t)rtt;
3547 			logvar2 <<= 32;
3548 			logvar2 |= (uint32_t)rtt_diff;
3549 			logvar3 = rack_gp_rtt_maxmul;
3550 			logvar3 <<= 32;
3551 			logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3552 			rack_log_timely(rack, timely_says,
3553 					logvar2, logvar3,
3554 					logvar, __LINE__, 10);
3555 		}
3556 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3557 			rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3558 		logged |= 2;
3559 	}
3560 	if (rack->rc_gp_timely_dec_cnt < 0x7) {
3561 		rack->rc_gp_timely_dec_cnt++;
3562 		if (rack_timely_dec_clear &&
3563 		    (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3564 			rack->rc_gp_timely_dec_cnt = 0;
3565 	}
3566 	logvar = ss_red;
3567 	logvar <<= 32;
3568 	logvar |= ca_red;
3569 	rack_log_timely(rack,  logged, rec_red, rack_per_lower_bound, logvar,
3570 			__LINE__, 2);
3571 }
3572 
3573 static void
3574 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3575 		     uint32_t rtt, uint32_t line, uint8_t reas)
3576 {
3577 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3578 		union tcp_log_stackspecific log;
3579 		struct timeval tv;
3580 
3581 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3582 		log.u_bbr.flex1 = line;
3583 		log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
3584 		log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
3585 		log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3586 		log.u_bbr.flex5 = rtt;
3587 		log.u_bbr.flex6 = rack->rc_highly_buffered;
3588 		log.u_bbr.flex6 <<= 1;
3589 		log.u_bbr.flex6 |= rack->forced_ack;
3590 		log.u_bbr.flex6 <<= 1;
3591 		log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
3592 		log.u_bbr.flex6 <<= 1;
3593 		log.u_bbr.flex6 |= rack->in_probe_rtt;
3594 		log.u_bbr.flex6 <<= 1;
3595 		log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
3596 		log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
3597 		log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
3598 		log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
3599 		log.u_bbr.flex8 = reas;
3600 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3601 		log.u_bbr.delRate = rack_get_bw(rack);
3602 		log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
3603 		log.u_bbr.cur_del_rate <<= 32;
3604 		log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
3605 		log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
3606 		log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3607 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3608 		log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3609 		log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3610 		log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
3611 		log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
3612 		log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3613 		log.u_bbr.rttProp = us_cts;
3614 		log.u_bbr.rttProp <<= 32;
3615 		log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
3616 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
3617 		    &rack->rc_inp->inp_socket->so_rcv,
3618 		    &rack->rc_inp->inp_socket->so_snd,
3619 		    BBR_LOG_RTT_SHRINKS, 0,
3620 		    0, &log, false, &rack->r_ctl.act_rcv_time);
3621 	}
3622 }
3623 
3624 static void
3625 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
3626 {
3627 	uint64_t bwdp;
3628 
3629 	bwdp = rack_get_bw(rack);
3630 	bwdp *= (uint64_t)rtt;
3631 	bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
3632 	rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
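	/*
	 * e.g. (illustrative numbers): bw = 2,500,000 bytes/sec and
	 * rtt = 20,000 usec give a BDP of 50,000 bytes, which rounds
	 * up to 50,680 (35 segments of 1448).
	 */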
3633 	if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
3634 		/*
3635 		 * A window protocol must be able to have 4 packets
3636 		 * outstanding as the floor in order to function
3637 		 * (especially considering delayed ack :D).
3638 		 */
3639 		rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
3640 	}
3641 }
3642 
3643 static void
3644 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
3645 {
3646 	/**
3647 	 * ProbeRTT is a bit different in rack_pacing than in
3648 	 * BBR. It is like BBR in that it uses the lowering of
3649 	 * the RTT as a signal that we saw something new and
3650 	 * counts from there for how long between. But it is
3651 	 * different in that it's quite simple. It does not
3652 	 * play with the cwnd and wait until we get down
3653 	 * to N segments outstanding, holding that for
3654 	 * 200ms. Instead it just sets the pacing reduction
3655 	 * rate to a set percentage (70 by default) and holds
3656 	 * that for a number of recent GP Srtt's.
3657 	 */
3658 	uint32_t segsiz;
3659 
3660 	if (rack->rc_gp_dyn_mul == 0)
3661 		return;
3662 
3663 	if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3664 		/* We are idle */
3665 		return;
3666 	}
3667 	if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3668 	    SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3669 		/*
3670 		 * Stop the goodput now; the idea here is
3671 		 * that future measurements with in_probe_rtt
3672 		 * won't register if they are not greater, so
3673 		 * we want to get what info (if any) is available
3674 		 * now.
3675 		 */
3676 		rack_do_goodput_measurement(rack->rc_tp, rack,
3677 					    rack->rc_tp->snd_una, __LINE__);
3678 	}
3679 	rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3680 	rack->r_ctl.rc_time_probertt_entered = us_cts;
3681 	segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3682 		     rack->r_ctl.rc_pace_min_segs);
3683 	rack->in_probe_rtt = 1;
3684 	rack->measure_saw_probe_rtt = 1;
3685 	rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3686 	rack->r_ctl.rc_time_probertt_starts = 0;
3687 	rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3688 	if (rack_probertt_use_min_rtt_entry)
3689 		rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3690 	else
3691 		rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3692 	rack_log_rtt_shrinks(rack,  us_cts,  get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3693 			     __LINE__, RACK_RTTS_ENTERPROBE);
3694 }
3695 
3696 static void
3697 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3698 {
3699 	struct rack_sendmap *rsm;
3700 	uint32_t segsiz;
3701 
3702 	segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3703 		     rack->r_ctl.rc_pace_min_segs);
3704 	rack->in_probe_rtt = 0;
3705 	if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3706 	    SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3707 		/*
3708 		 * Stop the goodput now; the idea here is
3709 		 * that future measurements with in_probe_rtt
3710 		 * won't register if they are not greater, so
3711 		 * we want to get what info (if any) is available
3712 		 * now.
3713 		 */
3714 		rack_do_goodput_measurement(rack->rc_tp, rack,
3715 					    rack->rc_tp->snd_una, __LINE__);
3716 	} else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3717 		/*
3718 		 * We don't have enough data to make a measurement.
3719 		 * So lets just stop and start here after exiting
3720 		 * probe-rtt. We probably are not interested in
3721 		 * the results anyway.
3722 		 */
3723 		rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3724 	}
3725 	/*
3726 	 * Measurements through the current snd_max are going
3727 	 * to be limited by the slower pacing rate.
3728 	 *
3729 	 * We need to mark these as app-limited so we
3730 	 * don't collapse the b/w.
3731 	 */
3732 	rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3733 	if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3734 		if (rack->r_ctl.rc_app_limited_cnt == 0)
3735 			rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3736 		else {
3737 			/*
3738 			 * Go out to the end app limited and mark
3739 			 * this new one as next and move the end_appl up
3740 			 * to this guy.
3741 			 */
3742 			if (rack->r_ctl.rc_end_appl)
3743 				rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3744 			rack->r_ctl.rc_end_appl = rsm;
3745 		}
3746 		rsm->r_flags |= RACK_APP_LIMITED;
3747 		rack->r_ctl.rc_app_limited_cnt++;
3748 	}
3749 	/*
3750 	 * Now, we need to examine our pacing rate multipliers.
3751 	 * If it's under 100%, we need to kick it back up to
3752 	 * 100%. We also don't let it be over our "max" above
3753 	 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3754 	 * Note setting clamp_atexit_prtt to 0 has the effect
3755 	 * of setting CA/SS to 100% always at exit (which is
3756 	 * the default behavior).
3757 	 */
3758 	if (rack_probertt_clear_is) {
3759 		rack->rc_gp_incr = 0;
3760 		rack->rc_gp_bwred = 0;
3761 		rack->rc_gp_timely_inc_cnt = 0;
3762 		rack->rc_gp_timely_dec_cnt = 0;
3763 	}
3764 	/* Do we do any clamping at exit? */
3765 	if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3766 		rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3767 		rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3768 	}
3769 	if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3770 		rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3771 		rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3772 	}
3773 	/*
3774 	 * Lets set rtt_diff to 0, so that we will get a "boost"
3775 	 * after exiting.
3776 	 */
3777 	rack->r_ctl.rc_rtt_diff = 0;
3778 
3779 	/* Clear all flags so we start fresh */
3780 	rack->rc_tp->t_bytes_acked = 0;
3781 	rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3782 	/*
3783 	 * If configured to, set the cwnd and ssthresh to
3784 	 * our targets.
3785 	 */
3786 	if (rack_probe_rtt_sets_cwnd) {
3787 		uint64_t ebdp;
3788 		uint32_t setto;
3789 
3790 		/* Set ssthresh so we get into CA once we hit our target */
3791 		if (rack_probertt_use_min_rtt_exit == 1) {
3792 			/* Set to min rtt */
3793 			rack_set_prtt_target(rack, segsiz,
3794 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3795 		} else if (rack_probertt_use_min_rtt_exit == 2) {
3796 			/* Set to current gp rtt */
3797 			rack_set_prtt_target(rack, segsiz,
3798 					     rack->r_ctl.rc_gp_srtt);
3799 		} else if (rack_probertt_use_min_rtt_exit == 3) {
3800 			/* Set to entry gp rtt */
3801 			rack_set_prtt_target(rack, segsiz,
3802 					     rack->r_ctl.rc_entry_gp_rtt);
3803 		} else {
3804 			uint64_t sum;
3805 			uint32_t setval;
3806 
3807 			sum = rack->r_ctl.rc_entry_gp_rtt;
3808 			sum *= 10;
3809 			sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
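			/*
			 * sum is now 10x the ratio of the rtt at
			 * probertt entry to the current gp srtt, e.g.
			 * an entry rtt of 100ms against a current
			 * 40ms gives sum = 25: a highly buffered
			 * path, so size to the entry rtt below.
			 */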
3810 			if (sum >= 20) {
3811 				/*
3812 				 * A highly buffered path needs
3813 				 * cwnd space for timely to work.
3814 				 * Lets set things up as if
3815 				 * we are heading back here again.
3816 				 */
3817 				setval = rack->r_ctl.rc_entry_gp_rtt;
3818 			} else if (sum >= 15) {
3819 				/*
3820 				 * Lets take the smaller of the
3821 				 * two since we are just somewhat
3822 				 * buffered.
3823 				 */
3824 				setval = rack->r_ctl.rc_gp_srtt;
3825 				if (setval > rack->r_ctl.rc_entry_gp_rtt)
3826 					setval = rack->r_ctl.rc_entry_gp_rtt;
3827 			} else {
3828 				/*
3829 				 * Here we are not highly buffered
3830 				 * and should pick the min we can to
3831 				 * keep from causing loss.
3832 				 */
3833 				setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3834 			}
3835 			rack_set_prtt_target(rack, segsiz,
3836 					     setval);
3837 		}
3838 		if (rack_probe_rtt_sets_cwnd > 1) {
3839 			/* There is a percentage here to boost */
3840 			ebdp = rack->r_ctl.rc_target_probertt_flight;
3841 			ebdp *= rack_probe_rtt_sets_cwnd;
3842 			ebdp /= 100;
3843 			setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3844 		} else
3845 			setto = rack->r_ctl.rc_target_probertt_flight;
3846 		rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3847 		if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3848 			/* Enforce a min */
3849 			rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3850 		}
3851 		/* If we set in the cwnd also set the ssthresh point so we are in CA */
3852 		rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3853 	}
3854 	rack_log_rtt_shrinks(rack,  us_cts,
3855 			     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3856 			     __LINE__, RACK_RTTS_EXITPROBE);
3857 	/* Clear times last so log has all the info */
3858 	rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3859 	rack->r_ctl.rc_time_probertt_entered = us_cts;
3860 	rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3861 	rack->r_ctl.rc_time_of_last_probertt = us_cts;
3862 }
3863 
3864 static void
3865 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3866 {
3867 	/* Check in on probe-rtt */
3868 	if (rack->rc_gp_filled == 0) {
3869 		/* We do not do p-rtt unless we have gp measurements */
3870 		return;
3871 	}
3872 	if (rack->in_probe_rtt) {
3873 		uint64_t no_overflow;
3874 		uint32_t endtime, must_stay;
3875 
3876 		if (rack->r_ctl.rc_went_idle_time &&
3877 		    ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3878 			/*
3879 			 * We went idle during prtt, just exit now.
3880 			 */
3881 			rack_exit_probertt(rack, us_cts);
3882 		} else if (rack_probe_rtt_safety_val &&
3883 		    TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3884 		    ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3885 			/*
3886 			 * Probe RTT safety value triggered!
3887 			 */
3888 			rack_log_rtt_shrinks(rack,  us_cts,
3889 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3890 					     __LINE__, RACK_RTTS_SAFETY);
3891 			rack_exit_probertt(rack, us_cts);
3892 		}
3893 		/* Calculate the max we will wait */
3894 		endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3895 		if (rack->rc_highly_buffered)
3896 			endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3897 		/* Calculate the min we must wait */
3898 		must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3899 		if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3900 		    TSTMP_LT(us_cts, endtime)) {
3901 			uint32_t calc;
3902 			/* Do we lower more? */
3903 no_exit:
3904 			if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3905 				calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3906 			else
3907 				calc = 0;
3908 			calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3909 			if (calc) {
3910 				/* Maybe */
3911 				calc *= rack_per_of_gp_probertt_reduce;
3912 				rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3913 				/* Limit it too */
3914 				if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3915 					rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
3916 			}
3917 			/* We must reach target or the time set */
3918 			return;
3919 		}
3920 		if (rack->r_ctl.rc_time_probertt_starts == 0) {
3921 			if ((TSTMP_LT(us_cts, must_stay) &&
3922 			     rack->rc_highly_buffered) ||
3923 			     (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3924 			      rack->r_ctl.rc_target_probertt_flight)) {
3925 				/* We are not past the must_stay time */
3926 				goto no_exit;
3927 			}
3928 			rack_log_rtt_shrinks(rack,  us_cts,
3929 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3930 					     __LINE__, RACK_RTTS_REACHTARGET);
3931 			rack->r_ctl.rc_time_probertt_starts = us_cts;
3932 			if (rack->r_ctl.rc_time_probertt_starts == 0)
3933 				rack->r_ctl.rc_time_probertt_starts = 1;
3934 			/* Restore back to our rate we want to pace at in prtt */
3935 			rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3936 		}
3937 		/*
3938 		 * Setup our end time, some number of gp_srtts plus 200ms.
3939 		 */
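		/*
		 * e.g. (illustrative, with hypothetical cnt_mul = 4,
		 * cnt_div = 1 and a 200,000 usec minimum hold): a
		 * 30,000 usec gp_srtt keeps us 4 * 30,000 + 200,000
		 * = 320,000 usec past rc_time_probertt_starts.
		 */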
3940 		no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3941 			       (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3942 		if (rack_probertt_gpsrtt_cnt_div)
3943 			endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3944 		else
3945 			endtime = 0;
3946 		endtime += rack_min_probertt_hold;
3947 		endtime += rack->r_ctl.rc_time_probertt_starts;
3948 		if (TSTMP_GEQ(us_cts,  endtime)) {
3949 			/* yes, exit probertt */
3950 			rack_exit_probertt(rack, us_cts);
3951 		}
3952 
3953 	} else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3954 		/* Go into probertt, its been too long since we went lower */
3955 		rack_enter_probertt(rack, us_cts);
3956 	}
3957 }
3958 
3959 static void
3960 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3961 		       uint32_t rtt, int32_t rtt_diff)
3962 {
3963 	uint64_t cur_bw, up_bnd, low_bnd, subfr;
3964 	uint32_t losses;
3965 
3966 	if ((rack->rc_gp_dyn_mul == 0) ||
3967 	    (rack->use_fixed_rate) ||
3968 	    (rack->in_probe_rtt) ||
3969 	    (rack->rc_always_pace == 0)) {
3970 		/* No dynamic GP multiplier in play */
3971 		return;
3972 	}
3973 	losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3974 	cur_bw = rack_get_bw(rack);
3975 	/* Calculate our up and down range */
3976 	up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3977 	up_bnd /= 100;
3978 	up_bnd += rack->r_ctl.last_gp_comp_bw;
3979 
3980 	subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3981 	subfr /= 100;
3982 	low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
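	/*
	 * Example with assumed small defaults (mul_up = 2, mul_down
	 * = 4): a last_gp_comp_bw of 10,000,000 bytes/sec yields
	 * up_bnd = 10,200,000 and low_bnd = 9,600,000; estimates
	 * inside that band defer to the timely decision below.
	 */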
3983 	if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3984 		/*
3985 		 * This is the case where our RTT is above
3986 		 * the max target and we have been configured
3987 		 * to just do timely with no bonus-up stuff in that case.
3988 		 *
3989 		 * There are two configurations: set to 1, we
3990 		 * just do timely if we are over our max. If it's
3991 		 * set above 1 then we slam the multipliers down
3992 		 * to 100 and then decrement per timely.
3993 		 */
3994 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
3995 				__LINE__, 3);
3996 		if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3997 			rack_validate_multipliers_at_or_below_100(rack);
3998 		rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3999 	} else if ((last_bw_est < low_bnd) && !losses) {
4000 		/*
4001 		 * We are decreasing; this is a bit complicated. It
4002 		 * means we are losing ground. This could be
4003 		 * because another flow entered and we are competing
4004 		 * for b/w with it. This will push the RTT up which
4005 		 * makes timely unusable unless we want to get shoved
4006 		 * into a corner and just be backed off (the age
4007 		 * old problem with delay based CC).
4008 		 *
4009 		 * On the other hand if it was a route change we
4010 		 * would like to stay somewhat contained and not
4011 		 * blow out the buffers.
4012 		 */
4013 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
4014 				__LINE__, 3);
4015 		rack->r_ctl.last_gp_comp_bw = cur_bw;
4016 		if (rack->rc_gp_bwred == 0) {
4017 			/* Go into reduction counting */
4018 			rack->rc_gp_bwred = 1;
4019 			rack->rc_gp_timely_dec_cnt = 0;
4020 		}
4021 		if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
4022 		    (timely_says == 0)) {
4023 			/*
4024 			 * Push another time with a faster pacing
4025 			 * to try to gain back (we include override to
4026 			 * get a full raise factor).
4027 			 */
4028 			if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4029 			    (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4030 			    (timely_says == 0) ||
4031 			    (rack_down_raise_thresh == 0)) {
4032 				/*
4033 				 * Do an override up in b/w if we were
4034 				 * below the threshold; if the threshold
4035 				 * is zero we always do the raise.
4036 				 */
4037 				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4038 			} else {
4039 				/* Log it stays the same */
4040 				rack_log_timely(rack,  0, last_bw_est, low_bnd, 0,
4041 						__LINE__, 11);
4042 			}
4043 			rack->rc_gp_timely_dec_cnt++;
4044 			/* We are not incrementing really no-count */
4045 			rack->rc_gp_incr = 0;
4046 			rack->rc_gp_timely_inc_cnt = 0;
4047 		} else {
4048 			/*
4049 			 * Lets just use the RTT
4050 			 * information and give up
4051 			 * pushing.
4052 			 */
4053 			goto use_timely;
4054 		}
4055 	} else if ((timely_says != 2) &&
4056 		    !losses &&
4057 		    (last_bw_est > up_bnd)) {
4058 		/*
4059 		 * We are increasing b/w; let's keep going, updating
4060 		 * our b/w and ignoring any timely input, unless
4061 		 * of course we are at our max raise (if there is one).
4062 		 */
4063 
4064 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
4065 				__LINE__, 3);
4066 		rack->r_ctl.last_gp_comp_bw = cur_bw;
4067 		if (rack->rc_gp_saw_ss &&
4068 		    rack_per_upper_bound_ss &&
4069 		     (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
4070 			    /*
4071 			     * In cases where we can't go higher
4072 			     * we should just use timely.
4073 			     */
4074 			    goto use_timely;
4075 		}
4076 		if (rack->rc_gp_saw_ca &&
4077 		    rack_per_upper_bound_ca &&
4078 		    (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
4079 			    /*
4080 			     * In cases where we can't go higher
4081 			     * we should just use timely.
4082 			     */
4083 			    goto use_timely;
4084 		}
4085 		rack->rc_gp_bwred = 0;
4086 		rack->rc_gp_timely_dec_cnt = 0;
4087 		/* You get a set number of pushes if timely is trying to reduce */
4088 		if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4089 			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4090 		} else {
4091 			/* Log it stays the same */
4092 			rack_log_timely(rack,  0, last_bw_est, up_bnd, 0,
4093 			    __LINE__, 12);
4094 		}
4095 		return;
4096 	} else {
4097 		/*
4098 		 * We are staying between the lower and upper range bounds
4099 		 * so use timely to decide.
4100 		 */
4101 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
4102 				__LINE__, 3);
4103 use_timely:
4104 		if (timely_says) {
4105 			rack->rc_gp_incr = 0;
4106 			rack->rc_gp_timely_inc_cnt = 0;
4107 			if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4108 			    !losses &&
4109 			    (last_bw_est < low_bnd)) {
4110 				/* We are losing ground */
4111 				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4112 				rack->rc_gp_timely_dec_cnt++;
4113 				/* We are not incrementing really no-count */
4114 				rack->rc_gp_incr = 0;
4115 				rack->rc_gp_timely_inc_cnt = 0;
4116 			} else
4117 				rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4118 		} else {
4119 			rack->rc_gp_bwred = 0;
4120 			rack->rc_gp_timely_dec_cnt = 0;
4121 			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4122 		}
4123 	}
4124 }
4125 
4126 static int32_t
4127 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4128 {
4129 	int32_t timely_says;
4130 	uint64_t log_mult, log_rtt_a_diff;
4131 
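	/*
	 * Illustration of the thresholds below (hypothetical numbers,
	 * assuming default-style settings rack_gp_rtt_maxmul = 3,
	 * rack_gp_rtt_minmul = 1, rack_gp_rtt_mindiv = 4): with a
	 * filtered min rtt of 20,000 usec we return 2 (hard reduce)
	 * once rtt >= 60,000 usec, 0 (increase) while rtt <= 25,000
	 * usec, and otherwise fall through to the gradient test.
	 */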
4132 	log_rtt_a_diff = rtt;
4133 	log_rtt_a_diff <<= 32;
4134 	log_rtt_a_diff |= (uint32_t)rtt_diff;
4135 	if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4136 		    rack_gp_rtt_maxmul)) {
4137 		/* Reduce the b/w multiplier */
4138 		timely_says = 2;
4139 		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4140 		log_mult <<= 32;
4141 		log_mult |= prev_rtt;
4142 		rack_log_timely(rack,  timely_says, log_mult,
4143 				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4144 				log_rtt_a_diff, __LINE__, 4);
4145 	} else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4146 			   ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4147 			    max(rack_gp_rtt_mindiv , 1)))) {
4148 		/* Increase the b/w multiplier */
4149 		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4150 			((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4151 			 max(rack_gp_rtt_mindiv , 1));
4152 		log_mult <<= 32;
4153 		log_mult |= prev_rtt;
4154 		timely_says = 0;
4155 		rack_log_timely(rack,  timely_says, log_mult,
4156 				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4157 				log_rtt_a_diff, __LINE__, 5);
4158 	} else {
4159 		/*
4160 		 * Use a gradient to find it; the timely gradient
4161 		 * is:
4162 		 * grad = rc_rtt_diff / min_rtt;
4163 		 *
4164 		 * anything below or equal to 0 will be
4165 		 * an increase indication. Anything above
4166 		 * zero is a decrease. Note we take care
4167 		 * of the actual gradient calculation
4168 		 * in the reduction (it's not needed for
4169 		 * an increase).
4170 		 */
4171 		log_mult = prev_rtt;
4172 		if (rtt_diff <= 0) {
4173 			/*
4174 			 * Rttdiff is less than or equal to zero; increase
4175 			 * the b/w multiplier (it's 0 or negative).
4176 			 */
4177 			timely_says = 0;
4178 			rack_log_timely(rack,  timely_says, log_mult,
4179 					get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4180 		} else {
4181 			/* Reduce the b/w multiplier */
4182 			timely_says = 1;
4183 			rack_log_timely(rack,  timely_says, log_mult,
4184 					get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4185 		}
4186 	}
4187 	return (timely_says);
4188 }
4189 
4190 static void
4191 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4192 			    tcp_seq th_ack, int line)
4193 {
4194 	uint64_t tim, bytes_ps, ltim, stim, utim;
4195 	uint32_t segsiz, bytes, reqbytes, us_cts;
4196 	int32_t gput, new_rtt_diff, timely_says;
4197 	uint64_t  resid_bw, subpart = 0, addpart = 0, srtt;
4198 	int did_add = 0;
4199 
4200 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4201 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4202 	if (TSTMP_GEQ(us_cts, tp->gput_ts))
4203 		tim = us_cts - tp->gput_ts;
4204 	else
4205 		tim = 0;
4206 
4207 	if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4208 		stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4209 	else
4210 		stim = 0;
4211 	/*
4212 	 * Use the larger of the send time or ack time. This prevents us
4213 	 * from being influenced by ack artifacts to come up with too
4214 	 * high of a measurement. Note that since we are spanning over many more
4215 	 * bytes in most of our measurements hopefully that is less likely to
4216 	 * occur.
4217 	 */
4218 	if (tim > stim)
4219 		utim = max(tim, 1);
4220 	else
4221 		utim = max(stim, 1);
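	/*
	 * For example (illustrative numbers): if 100,000 bytes were
	 * covered with tim = 1,000 usecs but stim = 2,000 usecs, we
	 * use 2,000 usecs and later compute 50MB/s rather than an
	 * ack-artifact inflated 100MB/s.
	 */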
4222 	/* Let's get a msec time ltim too for the old stuff */
4223 	ltim = max(1, (utim / HPTS_USEC_IN_MSEC));
4224 	gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
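	/* Note: gput is therefore in bits per msec, i.e. kbits/sec. */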
4225 	reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4226 	if ((tim == 0) && (stim == 0)) {
4227 		/*
4228 		 * Invalid measurement time, maybe
4229 		 * all on one ack/one send?
4230 		 */
4231 		bytes = 0;
4232 		bytes_ps = 0;
4233 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4234 					   0, 0, 0, 10, __LINE__, NULL);
4235 		goto skip_measurement;
4236 	}
4237 	if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4238 		/* We never made a us_rtt measurement? */
4239 		bytes = 0;
4240 		bytes_ps = 0;
4241 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4242 					   0, 0, 0, 10, __LINE__, NULL);
4243 		goto skip_measurement;
4244 	}
4245 	/*
4246 	 * Calculate the maximum possible b/w this connection
4247 	 * could have. We base our calculation on the lowest
4248 	 * rtt we have seen during the measurement and the
4249 	 * largest rwnd the client has given us in that time. This
4250 	 * forms a BDP that is the maximum that we could ever
4251 	 * get to the client. Anything larger is not valid.
4252 	 *
4253 	 * I originally had code here that rejected measurements
4254 	 * where the time was less than 1/2 the latest us_rtt.
4255 	 * But after thinking on that I realized it is wrong since,
4256 	 * say, you had a 150Mbps or even 1Gbps link and you
4257 	 * were a long way away. For example: I am in Europe (100ms rtt)
4258 	 * talking to my 1Gbps link in S.C. Now measuring, say, 150,000
4259 	 * bytes my time would be 1.2ms, and yet my rtt would say
4260 	 * the measurement was invalid since the time was < 50ms. The
4261 	 * same thing is true for 150Mb (8ms of time).
4262 	 *
4263 	 * A better way I realized is to look at what the maximum
4264 	 * the connection could possibly do. This is gated on
4265 	 * the lowest RTT we have seen and the highest rwnd.
4266 	 * We should in theory never exceed that, if we are
4267 	 * then something on the path is storing up packets
4268 	 * and then feeding them all at once to our endpoint
4269 	 * messing up our measurement.
4270 	 */
4271 	rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4272 	rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4273 	rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
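	/*
	 * For example (illustrative numbers): with a largest rwnd of
	 * 1,000,000 bytes and a lowest rtt of 10,000 usecs in the
	 * window, last_max_bw = 1,000,000 * 1,000,000 / 10,000 =
	 * 100,000,000 bytes/sec (800Mbps), the most this path could
	 * possibly have delivered to us.
	 */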
4274 	if (SEQ_LT(th_ack, tp->gput_seq)) {
4275 		/* No measurement can be made */
4276 		bytes = 0;
4277 		bytes_ps = 0;
4278 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4279 					   0, 0, 0, 10, __LINE__, NULL);
4280 		goto skip_measurement;
4281 	} else
4282 		bytes = (th_ack - tp->gput_seq);
4283 	bytes_ps = (uint64_t)bytes;
4284 	/*
4285 	 * Don't measure a b/w for pacing unless we have gotten at least
4286 	 * an initial window's worth of data in this measurement interval.
4287 	 *
4288 	 * Small numbers of bytes get badly influenced by delayed ack and
4289 	 * other artifacts. Note we take the initial window or our
4290 	 * defined minimum GP (defaulting to 10 which hopefully is the
4291 	 * IW).
4292 	 */
4293 	if (rack->rc_gp_filled == 0) {
4294 		/*
4295 		 * The initial estimate is special. We
4296 		 * have blasted out an IW worth of packets
4297 		 * without real valid ack ts results. We
4298 		 * then set up the app_limited_needs_set flag;
4299 		 * this should get the first ack in (probably 2
4300 		 * MSS worth) to be recorded as the timestamp.
4301 		 * We thus allow a smaller number of bytes, i.e.
4302 		 * IW - 2MSS.
4303 		 */
4304 		reqbytes -= (2 * segsiz);
4305 		/* Also let's fill previous so our first measurement is neutral */
4306 		rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4307 	}
4308 	if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4309 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4310 					   rack->r_ctl.rc_app_limited_cnt,
4311 					   0, 0, 10, __LINE__, NULL);
4312 		goto skip_measurement;
4313 	}
4314 	/*
4315 	 * We now need to calculate the Timely like status so
4316 	 * We now need to calculate the Timely-like status so
4317 	 */
4318 	new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4319 	if (rack->rc_gp_filled == 0) {
4320 		/* No previous reading */
4321 		rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4322 	} else {
4323 		if (rack->measure_saw_probe_rtt == 0) {
4324 			/*
4325 			 * We don't want a probertt to be counted
4326 			 * since it will incorrectly be negative. We
4327 			 * expect to be reducing the RTT when we
4328 			 * pace at a slower rate.
4329 			 */
4330 			rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4331 			rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4332 		}
4333 	}
4334 	timely_says = rack_make_timely_judgement(rack,
4335 		rack->r_ctl.rc_gp_srtt,
4336 		rack->r_ctl.rc_rtt_diff,
4337 	        rack->r_ctl.rc_prev_gp_srtt
4338 		);
4339 	bytes_ps *= HPTS_USEC_IN_SEC;
4340 	bytes_ps /= utim;
4341 	if (bytes_ps > rack->r_ctl.last_max_bw) {
4342 		/*
4343 		 * Something on the path is playing games,
4344 		 * since this b/w is not possible based
4345 		 * on our BDP (highest rwnd and lowest rtt
4346 		 * we saw in the measurement window).
4347 		 *
4348 		 * Another option here would be to
4349 		 * instead skip the measurement.
4350 		 */
4351 		rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4352 					   bytes_ps, rack->r_ctl.last_max_bw, 0,
4353 					   11, __LINE__, NULL);
4354 		bytes_ps = rack->r_ctl.last_max_bw;
4355 	}
4356 	/* We store gp for b/w in bytes per second */
4357 	if (rack->rc_gp_filled == 0) {
4358 		/* Initial measurement */
4359 		if (bytes_ps) {
4360 			rack->r_ctl.gp_bw = bytes_ps;
4361 			rack->rc_gp_filled = 1;
4362 			rack->r_ctl.num_measurements = 1;
4363 			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4364 		} else {
4365 			rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4366 						   rack->r_ctl.rc_app_limited_cnt,
4367 						   0, 0, 10, __LINE__, NULL);
4368 		}
4369 		if (rack->rc_inp->inp_in_hpts &&
4370 		    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4371 			/*
4372 			 * Ok we can't trust the pacer in this case
4373 			 * where we transition from un-paced to paced.
4374 			 * Or for that matter when the burst mitigation
4375 			 * was making a wild guess and got it wrong.
4376 			 * Stop the pacer and clear up all the aggregate
4377 			 * delays etc.
4378 			 */
4379 			tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
4380 			rack->r_ctl.rc_hpts_flags = 0;
4381 			rack->r_ctl.rc_last_output_to = 0;
4382 		}
4383 		did_add = 2;
4384 	} else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4385 		/* Still a small number, run an average */
4386 		rack->r_ctl.gp_bw += bytes_ps;
4387 		addpart = rack->r_ctl.num_measurements;
4388 		rack->r_ctl.num_measurements++;
4389 		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4390 			/* We have collected enough to move forward */
4391 			rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4392 		}
4393 		did_add = 3;
4394 	} else {
4395 		/*
4396 		 * We want to take 1/wma of the goodput and add it in to 7/8ths
4397 		 * of the old value weighted by the srtt. So if your measurement
4398 		 * period is, say, 2 SRTTs long you would get 1/4 as the
4399 		 * value, if it was like 1/2 an SRTT then you would get 1/16th.
4400 		 *
4401 		 * But we must be careful not to take too much, i.e. if the
4402 		 * srtt is, say, 20ms and the measurement is taken over
4403 		 * 400ms our weight would be 400/20, i.e. 20. On the
4404 		 * other hand if we get a measurement over 1ms with a
4405 		 * 10ms rtt we only want to take a much smaller portion.
4406 		 */
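		/*
		 * A worked example of that weighting (illustrative
		 * numbers): with gp_bw = 1,000,000 B/s and a new
		 * measurement of bytes_ps = 1,200,000 B/s taken over
		 * utim = 2 * srtt, the weight utim / (srtt * 8) is
		 * 1/4, so gp_bw becomes
		 * 1,000,000 - 250,000 + 300,000 = 1,050,000 B/s.
		 */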
4407 		if (rack->r_ctl.num_measurements < 0xff) {
4408 			rack->r_ctl.num_measurements++;
4409 		}
4410 		srtt = (uint64_t)tp->t_srtt;
4411 		if (srtt == 0) {
4412 			/*
4413 			 * Strange, why did t_srtt go back to zero?
4414 			 */
4415 			if (rack->r_ctl.rc_rack_min_rtt)
4416 				srtt = rack->r_ctl.rc_rack_min_rtt;
4417 			else
4418 				srtt = HPTS_USEC_IN_MSEC;
4419 		}
4420 		/*
4421 		 * XXXrrs: Note for reviewers, in playing with
4422 		 * dynamic pacing I discovered this GP calculation
4423 		 * as done originally leads to some undesired results.
4424 		 * Basically you can get longer measurements contributing
4425 		 * too much to the WMA. Thus I changed it so that if you are
4426 		 * doing dynamic adjustments we only do the apportioned
4427 		 * adjustment if we have a very small (time wise) measurement.
4428 		 * Longer measurements just get their weight (defaulting to
4429 		 * 1/8) added to the WMA. We may want to think about changing
4430 		 * this to always do that for both sides i.e. dynamic
4431 		 * and non-dynamic... but considering lots of folks
4432 		 * were playing with this I did not want to change the
4433 		 * calculation per se without your thoughts... Lawrence?
4434 		 * Peter??
4435 		 */
4436 		if (rack->rc_gp_dyn_mul == 0) {
4437 			subpart = rack->r_ctl.gp_bw * utim;
4438 			subpart /= (srtt * 8);
4439 			if (subpart < (rack->r_ctl.gp_bw / 2)) {
4440 				/*
4441 				 * The b/w update takes no more
4442 				 * away than 1/2 of our running total,
4443 				 * so factor it in.
4444 				 */
4445 				addpart = bytes_ps * utim;
4446 				addpart /= (srtt * 8);
4447 			} else {
4448 				/*
4449 				 * Don't allow a single measurement
4450 				 * to account for more than 1/2 of the
4451 				 * WMA. This could happen on a retransmission
4452 				 * where utim becomes huge compared to
4453 				 * srtt (multiple retransmissions when using
4454 				 * the sending rate which factors in all the
4455 				 * transmissions from the first one).
4456 				 */
4457 				subpart = rack->r_ctl.gp_bw / 2;
4458 				addpart = bytes_ps / 2;
4459 			}
4460 			resid_bw = rack->r_ctl.gp_bw - subpart;
4461 			rack->r_ctl.gp_bw = resid_bw + addpart;
4462 			did_add = 1;
4463 		} else {
4464 			if ((utim / srtt) <= 1) {
4465 				/*
4466 				 * The b/w update was over a small period
4467 				 * of time. The idea here is to prevent a small
4468 				 * measurement time period from counting
4469 				 * too much. So we scale it based on the
4470 				 * time so it attributes less than 1/rack_wma_divisor
4471 				 * of its measurement.
4472 				 */
4473 				subpart = rack->r_ctl.gp_bw * utim;
4474 				subpart /= (srtt * rack_wma_divisor);
4475 				addpart = bytes_ps * utim;
4476 				addpart /= (srtt * rack_wma_divisor);
4477 			} else {
4478 				/*
4479 			 * enough, so let's just add in the
4480 			 * portion of the measurement, i.e. 1/rack_wma_divisor.
4481 				 * portion of the measurment i.e. 1/rack_wma_divisor
4482 				 */
4483 				subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
4484 				addpart = bytes_ps / rack_wma_divisor;
4485 			}
4486 			if ((rack->measure_saw_probe_rtt == 0) ||
4487 		            (bytes_ps > rack->r_ctl.gp_bw)) {
4488 				/*
4489 				 * For probe-rtt we only add it in
4490 				 * if it is larger; all others we just
4491 				 * add in.
4492 				 */
4493 				did_add = 1;
4494 				resid_bw = rack->r_ctl.gp_bw - subpart;
4495 				rack->r_ctl.gp_bw = resid_bw + addpart;
4496 			}
4497 		}
4498 	}
4499 	if ((rack->gp_ready == 0) &&
4500 	    (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
4501 		/* We have enough measurements now */
4502 		rack->gp_ready = 1;
4503 		rack_set_cc_pacing(rack);
4504 		if (rack->defer_options)
4505 			rack_apply_deferred_options(rack);
4506 	}
4507 	rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
4508 				   rack_get_bw(rack), 22, did_add, NULL);
4509 	/* We do not update any multipliers if we are in or have seen a probe-rtt */
4510 	if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
4511 		rack_update_multiplier(rack, timely_says, bytes_ps,
4512 				       rack->r_ctl.rc_gp_srtt,
4513 				       rack->r_ctl.rc_rtt_diff);
4514 	rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
4515 				   rack_get_bw(rack), 3, line, NULL);
4516 	/* reset the gp srtt and setup the new prev */
4517 	rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4518 	/* Record the lost count for the next measurement */
4519 	rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
4520 	/*
4521 	 * We restart our diffs based on the gpsrtt in the
4522 	 * measurement window.
4523 	 */
4524 	rack->rc_gp_rtt_set = 0;
4525 	rack->rc_gp_saw_rec = 0;
4526 	rack->rc_gp_saw_ca = 0;
4527 	rack->rc_gp_saw_ss = 0;
4528 	rack->rc_dragged_bottom = 0;
4529 skip_measurement:
4530 
4531 #ifdef STATS
4532 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
4533 				 gput);
4534 	/*
4535 	 * XXXLAS: This is a temporary hack, and should be
4536 	 * chained off VOI_TCP_GPUT when stats(9) grows an
4537 	 * API to deal with chained VOIs.
4538 	 */
4539 	if (tp->t_stats_gput_prev > 0)
4540 		stats_voi_update_abs_s32(tp->t_stats,
4541 					 VOI_TCP_GPUT_ND,
4542 					 ((gput - tp->t_stats_gput_prev) * 100) /
4543 					 tp->t_stats_gput_prev);
4544 #endif
4545 	tp->t_flags &= ~TF_GPUTINPROG;
4546 	tp->t_stats_gput_prev = gput;
4547 	/*
4548 	 * Now, are we app limited and is there space from where we
4549 	 * were to where we want to go?
4550 	 *
4551 	 * We don't do the other case, i.e. non-applimited, here since
4552 	 * the next send will trigger us picking up the missing data.
4553 	 */
4554 	if (rack->r_ctl.rc_first_appl &&
4555 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
4556 	    rack->r_ctl.rc_app_limited_cnt &&
4557 	    (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
4558 	    ((rack->r_ctl.rc_first_appl->r_start - th_ack) >
4559 	     max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
4560 		/*
4561 		 * Yep there is enough outstanding to make a measurement here.
4562 		 */
4563 		struct rack_sendmap *rsm, fe;
4564 
4565 		tp->t_flags |= TF_GPUTINPROG;
4566 		rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
4567 		rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
4568 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4569 		rack->app_limited_needs_set = 0;
4570 		tp->gput_seq = th_ack;
4571 		if (rack->in_probe_rtt)
4572 			rack->measure_saw_probe_rtt = 1;
4573 		else if ((rack->measure_saw_probe_rtt) &&
4574 			 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
4575 			rack->measure_saw_probe_rtt = 0;
4576 		if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) {
4577 			/* There is a full window to gain info from */
4578 			tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
4579 		} else {
4580 			/* We can only measure up to the applimited point */
4581 			tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack);
4582 		}
4583 		/*
4584 		 * Now we need to find the timestamp of the send at tp->gput_seq
4585 		 * for the send-based measurement.
4586 		 */
4587 		fe.r_start = tp->gput_seq;
4588 		rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4589 		if (rsm) {
4590 			/* Ok send-based limit is set */
4591 			if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
4592 				/*
4593 				 * Move back to include the earlier part
4594 				 * so our ack time lines up right (this may
4595 				 * make an overlapping measurement but that's
4596 				 * ok).
4597 				 */
4598 				tp->gput_seq = rsm->r_start;
4599 			}
4600 			if (rsm->r_flags & RACK_ACKED)
4601 				tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
4602 			else
4603 				rack->app_limited_needs_set = 1;
4604 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
4605 		} else {
4606 			/*
4607 			 * If we don't find the rsm due to some
4608 			 * send-limit, set the current time, which
4609 			 * basically disables the send-limit.
4610 			 */
4611 			struct timeval tv;
4612 
4613 			microuptime(&tv);
4614 			rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
4615 		}
4616 		rack_log_pacing_delay_calc(rack,
4617 					   tp->gput_seq,
4618 					   tp->gput_ack,
4619 					   (uint64_t)rsm,
4620 					   tp->gput_ts,
4621 					   rack->r_ctl.rc_app_limited_cnt,
4622 					   9,
4623 					   __LINE__, NULL);
4624 	}
4625 }
4626 
4627 /*
4628  * CC wrapper hook functions
4629  */
4630 static void
4631 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
4632     uint16_t type, int32_t recovery)
4633 {
4634 	uint32_t prior_cwnd, acked;
4635 	struct tcp_log_buffer *lgb = NULL;
4636 	uint8_t labc_to_use;
4637 
4638 	INP_WLOCK_ASSERT(tp->t_inpcb);
4639 	tp->ccv->nsegs = nsegs;
4640 	acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una);
4641 	if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
4642 		uint32_t max;
4643 
4644 		max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
4645 		if (tp->ccv->bytes_this_ack > max) {
4646 			tp->ccv->bytes_this_ack = max;
4647 		}
4648 	}
4649 #ifdef STATS
4650 	stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
4651 	    ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
4652 #endif
4653 	if ((tp->t_flags & TF_GPUTINPROG) &&
4654 	    rack_enough_for_measurement(tp, rack, th_ack)) {
4655 		/* Measure the Goodput */
4656 		rack_do_goodput_measurement(tp, rack, th_ack, __LINE__);
4657 #ifdef NETFLIX_PEAKRATE
4658 		if ((type == CC_ACK) &&
4659 		    (tp->t_maxpeakrate)) {
4660 			/*
4661 			 * We update t_peakrate_thr. This gives us roughly
4662 			 * one update per round trip time. Note
4663 			 * it will only be used if pace_always is off, i.e.
4664 			 * we don't do this for paced flows.
4665 			 */
4666 			rack_update_peakrate_thr(tp);
4667 		}
4668 #endif
4669 	}
4670 	/* Which way are we limited? If not cwnd limited, no advance in CA */
4671 	if (tp->snd_cwnd <= tp->snd_wnd)
4672 		tp->ccv->flags |= CCF_CWND_LIMITED;
4673 	else
4674 		tp->ccv->flags &= ~CCF_CWND_LIMITED;
4675 	if (tp->snd_cwnd > tp->snd_ssthresh) {
4676 		tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
4677 			 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
4678 		/* For the setting of a window past, use the actual scwnd we are using */
4679 		if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
4680 			tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
4681 			tp->ccv->flags |= CCF_ABC_SENTAWND;
4682 		}
4683 	} else {
4684 		tp->ccv->flags &= ~CCF_ABC_SENTAWND;
4685 		tp->t_bytes_acked = 0;
4686 	}
4687 	prior_cwnd = tp->snd_cwnd;
4688 	if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
4689 	    (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf)))
4690 		labc_to_use = rack->rc_labc;
4691 	else
4692 		labc_to_use = rack_max_abc_post_recovery;
4693 	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4694 		union tcp_log_stackspecific log;
4695 		struct timeval tv;
4696 
4697 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4698 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4699 		log.u_bbr.flex1 = th_ack;
4700 		log.u_bbr.flex2 = tp->ccv->flags;
4701 		log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4702 		log.u_bbr.flex4 = tp->ccv->nsegs;
4703 		log.u_bbr.flex5 = labc_to_use;
4704 		log.u_bbr.flex6 = prior_cwnd;
4705 		log.u_bbr.flex7 = V_tcp_do_newsack;
4706 		log.u_bbr.flex8 = 1;
4707 		lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4708 				     0, &log, false, NULL, NULL, 0, &tv);
4709 	}
4710 	if (CC_ALGO(tp)->ack_received != NULL) {
4711 		/* XXXLAS: Find a way to live without this */
4712 		tp->ccv->curack = th_ack;
4713 		tp->ccv->labc = labc_to_use;
4714 		tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4715 		CC_ALGO(tp)->ack_received(tp->ccv, type);
4716 	}
4717 	if (lgb) {
4718 		lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4719 	}
4720 	if (rack->r_must_retran) {
4721 		if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4722 			/*
4723 			 * We are now beyond the rxt point so let's disable
4724 			 * the flag.
4725 			 */
4726 			rack->r_ctl.rc_out_at_rto = 0;
4727 			rack->r_must_retran = 0;
4728 		} else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4729 			/*
4730 			 * Only decrement the rc_out_at_rto if the cwnd advances
4731 			 * at least a whole segment. Otherwise next time the peer
4732 			 * acks, we won't be able to send; this generally happens
4733 			 * when we are in Congestion Avoidance.
4734 			 */
4735 			if (acked <= rack->r_ctl.rc_out_at_rto) {
4736 				rack->r_ctl.rc_out_at_rto -= acked;
4737 			} else {
4738 				rack->r_ctl.rc_out_at_rto = 0;
4739 			}
4740 		}
4741 	}
4742 #ifdef STATS
4743 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4744 #endif
4745 	if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4746 		rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4747 	}
4748 #ifdef NETFLIX_PEAKRATE
4749 	/* we enforce max peak rate if it is set and we are not pacing */
4750 	if ((rack->rc_always_pace == 0) &&
4751 	    tp->t_peakrate_thr &&
4752 	    (tp->snd_cwnd > tp->t_peakrate_thr)) {
4753 		tp->snd_cwnd = tp->t_peakrate_thr;
4754 	}
4755 #endif
4756 }
4757 
4758 static void
4759 tcp_rack_partialack(struct tcpcb *tp)
4760 {
4761 	struct tcp_rack *rack;
4762 
4763 	rack = (struct tcp_rack *)tp->t_fb_ptr;
4764 	INP_WLOCK_ASSERT(tp->t_inpcb);
4765 	/*
4766 	 * If we are doing PRR and have enough
4767 	 * room to send <or> we are pacing and prr
4768 	 * is disabled, we will want to see if we
4769 	 * can send data (by setting r_wanted_output to
4770 	 * true).
4771 	 */
4772 	if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4773 	    rack->rack_no_prr)
4774 		rack->r_wanted_output = 1;
4775 }
4776 
4777 static void
4778 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4779 {
4780 	struct tcp_rack *rack;
4781 	uint32_t orig_cwnd;
4782 
4783 	orig_cwnd = tp->snd_cwnd;
4784 	INP_WLOCK_ASSERT(tp->t_inpcb);
4785 	rack = (struct tcp_rack *)tp->t_fb_ptr;
4786 	/* only alert CC if we alerted when we entered */
4787 	if (CC_ALGO(tp)->post_recovery != NULL) {
4788 		tp->ccv->curack = th_ack;
4789 		CC_ALGO(tp)->post_recovery(tp->ccv);
4790 		if (tp->snd_cwnd < tp->snd_ssthresh) {
4791 			/*
4792 			 * Rack has burst control and pacing
4793 			 * so let's not set this any lower than
4794 			 * snd_ssthresh per RFC-6582 (option 2).
4795 			 */
4796 			tp->snd_cwnd = tp->snd_ssthresh;
4797 		}
4798 	}
4799 	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4800 		union tcp_log_stackspecific log;
4801 		struct timeval tv;
4802 
4803 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4804 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4805 		log.u_bbr.flex1 = th_ack;
4806 		log.u_bbr.flex2 = tp->ccv->flags;
4807 		log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4808 		log.u_bbr.flex4 = tp->ccv->nsegs;
4809 		log.u_bbr.flex5 = V_tcp_abc_l_var;
4810 		log.u_bbr.flex6 = orig_cwnd;
4811 		log.u_bbr.flex7 = V_tcp_do_newsack;
4812 		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
4813 		log.u_bbr.flex8 = 2;
4814 		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4815 			       0, &log, false, NULL, NULL, 0, &tv);
4816 	}
4817 	if ((rack->rack_no_prr == 0) &&
4818 	    (rack->no_prr_addback == 0) &&
4819 	    (rack->r_ctl.rc_prr_sndcnt > 0)) {
4820 		/*
4821 		 * Suck the next prr cnt back into cwnd, but
4822 		 * only do that if we are not application limited.
4823 		 */
4824 		if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4825 			/*
4826 			 * We are allowed to add back to the cwnd the amount we did
4827 			 * not get out if:
4828 			 * a) no_prr_addback is off.
4829 			 * b) we are not app limited
4830 			 * c) we are doing prr
4831 			 * <and>
4832 			 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
4833 			 */
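			/*
			 * For example, with an assumed rack_prr_addbackmax of
			 * 2, a 1448 byte maxseg and 5,000 bytes of
			 * rc_prr_sndcnt left over, we add back
			 * min(2 * 1448, 5000) = 2,896 bytes to the cwnd.
			 */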
4834 			tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
4835 					    rack->r_ctl.rc_prr_sndcnt);
4836 		}
4837 		rack->r_ctl.rc_prr_sndcnt = 0;
4838 		rack_log_to_prr(rack, 1, 0);
4839 	}
4840 	rack_log_to_prr(rack, 14, orig_cwnd);
4841 	tp->snd_recover = tp->snd_una;
4842 	EXIT_RECOVERY(tp->t_flags);
4843 }
4844 
4845 static void
4846 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack)
4847 {
4848 	struct tcp_rack *rack;
4849 	uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
4850 
4851 	INP_WLOCK_ASSERT(tp->t_inpcb);
4852 #ifdef STATS
4853 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
4854 #endif
4855 	if (IN_RECOVERY(tp->t_flags) == 0) {
4856 		in_rec_at_entry = 0;
4857 		ssthresh_enter = tp->snd_ssthresh;
4858 		cwnd_enter = tp->snd_cwnd;
4859 	} else
4860 		in_rec_at_entry = 1;
4861 	rack = (struct tcp_rack *)tp->t_fb_ptr;
4862 	switch (type) {
4863 	case CC_NDUPACK:
4864 		tp->t_flags &= ~TF_WASFRECOVERY;
4865 		tp->t_flags &= ~TF_WASCRECOVERY;
4866 		if (!IN_FASTRECOVERY(tp->t_flags)) {
4867 			rack->r_ctl.rc_prr_delivered = 0;
4868 			rack->r_ctl.rc_prr_out = 0;
4869 			if (rack->rack_no_prr == 0) {
4870 				rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4871 				rack_log_to_prr(rack, 2, in_rec_at_entry);
4872 			}
4873 			rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4874 			tp->snd_recover = tp->snd_max;
4875 			if (tp->t_flags2 & TF2_ECN_PERMIT)
4876 				tp->t_flags2 |= TF2_ECN_SND_CWR;
4877 		}
4878 		break;
4879 	case CC_ECN:
4880 		if (!IN_CONGRECOVERY(tp->t_flags) ||
4881 		    /*
4882 		     * Allow ECN reaction on ACK to CWR, if
4883 		     * that data segment was also CE marked.
4884 		     */
4885 		    SEQ_GEQ(ack, tp->snd_recover)) {
4886 			EXIT_CONGRECOVERY(tp->t_flags);
4887 			KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4888 			tp->snd_recover = tp->snd_max + 1;
4889 			if (tp->t_flags2 & TF2_ECN_PERMIT)
4890 				tp->t_flags2 |= TF2_ECN_SND_CWR;
4891 		}
4892 		break;
4893 	case CC_RTO:
4894 		tp->t_dupacks = 0;
4895 		tp->t_bytes_acked = 0;
4896 		EXIT_RECOVERY(tp->t_flags);
4897 		tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4898 		    ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
4899 		orig_cwnd = tp->snd_cwnd;
4900 		tp->snd_cwnd = ctf_fixed_maxseg(tp);
4901 		rack_log_to_prr(rack, 16, orig_cwnd);
4902 		if (tp->t_flags2 & TF2_ECN_PERMIT)
4903 			tp->t_flags2 |= TF2_ECN_SND_CWR;
4904 		break;
4905 	case CC_RTO_ERR:
4906 		KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4907 		/* RTO was unnecessary, so reset everything. */
4908 		tp->snd_cwnd = tp->snd_cwnd_prev;
4909 		tp->snd_ssthresh = tp->snd_ssthresh_prev;
4910 		tp->snd_recover = tp->snd_recover_prev;
4911 		if (tp->t_flags & TF_WASFRECOVERY) {
4912 			ENTER_FASTRECOVERY(tp->t_flags);
4913 			tp->t_flags &= ~TF_WASFRECOVERY;
4914 		}
4915 		if (tp->t_flags & TF_WASCRECOVERY) {
4916 			ENTER_CONGRECOVERY(tp->t_flags);
4917 			tp->t_flags &= ~TF_WASCRECOVERY;
4918 		}
4919 		tp->snd_nxt = tp->snd_max;
4920 		tp->t_badrxtwin = 0;
4921 		break;
4922 	}
4923 	if ((CC_ALGO(tp)->cong_signal != NULL)  &&
4924 	    (type != CC_RTO)){
4925 		tp->ccv->curack = ack;
4926 		CC_ALGO(tp)->cong_signal(tp->ccv, type);
4927 	}
4928 	if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
4929 		rack_log_to_prr(rack, 15, cwnd_enter);
4930 		rack->r_ctl.dsack_byte_cnt = 0;
4931 		rack->r_ctl.retran_during_recovery = 0;
4932 		rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
4933 		rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
4934 		rack->r_ent_rec_ns = 1;
4935 	}
4936 }
4937 
4938 static inline void
4939 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4940 {
4941 	uint32_t i_cwnd;
4942 
4943 	INP_WLOCK_ASSERT(tp->t_inpcb);
4944 
4945 #ifdef NETFLIX_STATS
4946 	KMOD_TCPSTAT_INC(tcps_idle_restarts);
4947 	if (tp->t_state == TCPS_ESTABLISHED)
4948 		KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4949 #endif
4950 	if (CC_ALGO(tp)->after_idle != NULL)
4951 		CC_ALGO(tp)->after_idle(tp->ccv);
4952 
4953 	if (tp->snd_cwnd == 1)
4954 		i_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
4955 	else
4956 		i_cwnd = rc_init_window(rack);
4957 
4958 	/*
4959 	 * Being idle is no different than the initial window. If the cc
4960 	 * clamps it down below the initial window, raise it to the initial
4961 	 * window.
4962 	 */
4963 	if (tp->snd_cwnd < i_cwnd) {
4964 		tp->snd_cwnd = i_cwnd;
4965 	}
4966 }
4967 
4968 /*
4969  * Indicate whether this ack should be delayed.  We can delay the ack if
4970  * the following conditions are met:
4971  *	- There is no delayed ack timer in progress.
4972  *	- Our last ack wasn't a 0-sized window. We never want to delay
4973  *	  the ack that opens up a 0-sized window.
4974  *	- LRO wasn't used for this segment. We make sure by checking that the
4975  *	  segment size is not larger than the MSS.
4976  *	- Delayed acks are enabled or this is a half-synchronized T/TCP
4977  *	  connection.
4978  */
4979 #define DELAY_ACK(tp, tlen)			 \
4980 	(((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4981 	((tp->t_flags & TF_DELACK) == 0) &&	 \
4982 	(tlen <= tp->t_maxseg) &&		 \
4983 	(tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
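/*
 * For example (illustrative values): a single in-order 1448 byte
 * segment arriving on an established connection with delayed acks
 * enabled, no delack timer pending and the last window not 0-sized
 * satisfies DELAY_ACK(); an 8k LRO-aggregated chain (tlen > t_maxseg)
 * does not, and is acked right away.
 */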
4984 
4985 static struct rack_sendmap *
4986 rack_find_lowest_rsm(struct tcp_rack *rack)
4987 {
4988 	struct rack_sendmap *rsm;
4989 
4990 	/*
4991 	 * Walk the time-ordered transmitted list looking for an rsm that is
4992 	 * not acked. This will be the one that was sent the longest time
4993 	 * ago that is still outstanding.
4994 	 */
4995 	TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4996 		if (rsm->r_flags & RACK_ACKED) {
4997 			continue;
4998 		}
4999 		goto finish;
5000 	}
5001 finish:
5002 	return (rsm);
5003 }
5004 
5005 static struct rack_sendmap *
5006 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5007 {
5008 	struct rack_sendmap *prsm;
5009 
5010 	/*
5011 	 * Walk the sequence-ordered list backward until we arrive at
5012 	 * the highest seq not acked. In theory when this is called it
5013 	 * should be the last segment (which it was not).
5014 	 */
5015 	counter_u64_add(rack_find_high, 1);
5016 	prsm = rsm;
5017 	RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
5018 		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5019 			continue;
5020 		}
5021 		return (prsm);
5022 	}
5023 	return (NULL);
5024 }
5025 
5026 static uint32_t
5027 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5028 {
5029 	int32_t lro;
5030 	uint32_t thresh;
5031 
5032 	/*
5033 	 * lro is the flag we use to determine if we have seen reordering.
5034 	 * If it gets set we have seen reordering. The reorder logic
5035 	 * works in one of two ways:
5036 	 *
5037 	 * If reorder-fade is configured, then we track the last time we saw
5038 	 * re-ordering occur. If we reach the point where enough time has
5039 	 * passed we no longer consider reordering to be occurring.
5040 	 *
5041 	 * Or if reorder-fade is 0, then once we see reordering we consider
5042 	 * the connection to always be subject to reordering and just set lro
5043 	 * to 1.
5044 	 *
5045 	 * In the end if lro is non-zero we add the extra time for
5046 	 * reordering in.
5047 	 */
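	/*
	 * Example (illustrative numbers): with srtt = 40,000 usecs,
	 * rc_pkt_delay = 1,000 and reordering seen with an assumed
	 * reorder shift of 2, thresh = 40,000 + 1,000 + 10,000 =
	 * 51,000 usecs; with no reordering seen it is 41,001 usecs.
	 * Either way it is then clamped by t_rxtcur and rack_rto_max
	 * below.
	 */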
5048 	if (srtt == 0)
5049 		srtt = 1;
5050 	if (rack->r_ctl.rc_reorder_ts) {
5051 		if (rack->r_ctl.rc_reorder_fade) {
5052 			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5053 				lro = cts - rack->r_ctl.rc_reorder_ts;
5054 				if (lro == 0) {
5055 					/*
5056 					 * No time has passed since the last
5057 					 * reorder, mark it as reordering.
5058 					 */
5059 					lro = 1;
5060 				}
5061 			} else {
5062 				/* Negative time? */
5063 				lro = 0;
5064 			}
5065 			if (lro > rack->r_ctl.rc_reorder_fade) {
5066 				/* Turn off reordering seen too */
5067 				rack->r_ctl.rc_reorder_ts = 0;
5068 				lro = 0;
5069 			}
5070 		} else {
5071 			/* Reordering does not fade */
5072 			lro = 1;
5073 		}
5074 	} else {
5075 		lro = 0;
5076 	}
5077 	thresh = srtt + rack->r_ctl.rc_pkt_delay;
5078 	if (lro) {
5079 		/* It must be set; if not, you get 1/4 rtt */
5080 		if (rack->r_ctl.rc_reorder_shift)
5081 			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5082 		else
5083 			thresh += (srtt >> 2);
5084 	} else {
5085 		thresh += 1;
5086 	}
5087 	/* We don't let the rack timeout be above a RTO */
5088 	if (thresh > rack->rc_tp->t_rxtcur) {
5089 		thresh = rack->rc_tp->t_rxtcur;
5090 	}
5091 	/* And we don't want it above the RTO max either */
5092 	if (thresh > rack_rto_max) {
5093 		thresh = rack_rto_max;
5094 	}
5095 	return (thresh);
5096 }
5097 
5098 static uint32_t
5099 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
5100 		     struct rack_sendmap *rsm, uint32_t srtt)
5101 {
5102 	struct rack_sendmap *prsm;
5103 	uint32_t thresh, len;
5104 	int segsiz;
5105 
5106 	if (srtt == 0)
5107 		srtt = 1;
5108 	if (rack->r_ctl.rc_tlp_threshold)
5109 		thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
5110 	else
5111 		thresh = (srtt * 2);
5112 
5113 	/* Get the previous sent packet, if any */
5114 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5115 	counter_u64_add(rack_enter_tlp_calc, 1);
5116 	len = rsm->r_end - rsm->r_start;
5117 	if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
5118 		/* Exactly like the ID */
5119 		if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
5120 			uint32_t alt_thresh;
5121 			/*
5122 			 * Compensate for delayed-ack with the d-ack time.
5123 			 */
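			/*
			 * e.g. (assumed values) with srtt = 20,000 usecs
			 * and a d-ack time of 40,000 usecs, alt_thresh =
			 * 20,000 + 10,000 + 40,000 = 70,000 usecs.
			 */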
5124 			counter_u64_add(rack_used_tlpmethod, 1);
5125 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5126 			if (alt_thresh > thresh)
5127 				thresh = alt_thresh;
5128 		}
5129 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
5130 		/* 2.1 behavior */
5131 		prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
5132 		if (prsm && (len <= segsiz)) {
5133 			/*
5134 			 * Two packets outstanding, thresh should be (2*srtt) +
5135 			 * possible inter-packet delay (if any).
5136 			 */
5137 			uint32_t inter_gap = 0;
5138 			int idx, nidx;
5139 
5140 			counter_u64_add(rack_used_tlpmethod, 1);
5141 			idx = rsm->r_rtr_cnt - 1;
5142 			nidx = prsm->r_rtr_cnt - 1;
5143 			if (rsm->r_tim_lastsent[idx] >= prsm->r_tim_lastsent[nidx]) {
5144 				/* Yes it was sent later (or at the same time) */
5145 				inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
5146 			}
5147 			thresh += inter_gap;
5148 		} else if (len <= segsiz) {
5149 			/*
5150 			 * Possibly compensate for delayed-ack.
5151 			 */
5152 			uint32_t alt_thresh;
5153 
5154 			counter_u64_add(rack_used_tlpmethod2, 1);
5155 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5156 			if (alt_thresh > thresh)
5157 				thresh = alt_thresh;
5158 		}
5159 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
5160 		/* 2.2 behavior */
5161 		if (len <= segsiz) {
5162 			uint32_t alt_thresh;
5163 			/*
5164 			 * Compensate for delayed-ack with the d-ack time.
5165 			 */
5166 			counter_u64_add(rack_used_tlpmethod, 1);
5167 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5168 			if (alt_thresh > thresh)
5169 				thresh = alt_thresh;
5170 		}
5171 	}
5172 	/* Not above an RTO */
5173 	if (thresh > tp->t_rxtcur) {
5174 		thresh = tp->t_rxtcur;
5175 	}
5176 	/* Not above a RTO max */
5177 	if (thresh > rack_rto_max) {
5178 		thresh = rack_rto_max;
5179 	}
5180 	/* Apply user supplied min TLP */
5181 	if (thresh < rack_tlp_min) {
5182 		thresh = rack_tlp_min;
5183 	}
5184 	return (thresh);
5185 }
5186 
5187 static uint32_t
5188 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
5189 {
5190 	/*
5191 	 * We want the rack_rtt which is the
5192 	 * last rtt we measured. However if that
5193 	 * does not exist we fall back to the srtt (which
5194 	 * we probably will never do) and then as a last
5195 	 * resort we use RACK_INITIAL_RTO if no srtt is
5196 	 * yet set.
5197 	 */
5198 	if (rack->rc_rack_rtt)
5199 		return (rack->rc_rack_rtt);
5200 	else if (tp->t_srtt == 0)
5201 		return (RACK_INITIAL_RTO);
5202 	return (tp->t_srtt);
5203 }
5204 
5205 static struct rack_sendmap *
5206 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
5207 {
5208 	/*
5209 	 * Check to see that we don't need to fall into recovery. We will
5210 	 * need to do so if our oldest transmit is past the time we should
5211 	 * have had an ack.
5212 	 */
5213 	struct tcp_rack *rack;
5214 	struct rack_sendmap *rsm;
5215 	int32_t idx;
5216 	uint32_t srtt, thresh;
5217 
5218 	rack = (struct tcp_rack *)tp->t_fb_ptr;
5219 	if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
5220 		return (NULL);
5221 	}
5222 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5223 	if (rsm == NULL)
5224 		return (NULL);
5225 
5226 	if (rsm->r_flags & RACK_ACKED) {
5227 		rsm = rack_find_lowest_rsm(rack);
5228 		if (rsm == NULL)
5229 			return (NULL);
5230 	}
5231 	idx = rsm->r_rtr_cnt - 1;
5232 	srtt = rack_grab_rtt(tp, rack);
5233 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
5234 	if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
5235 		return (NULL);
5236 	}
5237 	if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
5238 		return (NULL);
5239 	}
5240 	/* Ok if we reach here we are overdue and this guy can be sent */
5241 	if (IN_RECOVERY(tp->t_flags) == 0) {
5242 		/*
5243 		 * For the one that enters us into recovery, record undo
5244 		 * info.
5245 		 */
5246 		rack->r_ctl.rc_rsm_start = rsm->r_start;
5247 		rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
5248 		rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
5249 	}
5250 	rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
5251 	return (rsm);
5252 }
5253 
5254 static uint32_t
5255 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
5256 {
5257 	int32_t t;
5258 	int32_t tt;
5259 	uint32_t ret_val;
5260 
5261 	t = (tp->t_srtt + (tp->t_rttvar << 2));
5262 	RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
5263  	    rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
5264 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
5265 		tp->t_rxtshift++;
5266 	rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
5267 	ret_val = (uint32_t)tt;
5268 	return (ret_val);
5269 }
5270 
5271 static uint32_t
5272 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
5273 {
5274 	/*
5275 	 * Start the FR timer, we do this based on getting the first one in
5276 	 * the rc_tmap. Note that if it is NULL we must stop the timer. In all
5277 	 * events we need to stop the running timer (if it is running) before
5278 	 * starting the new one.
5279 	 */
5280 	uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
5281 	uint32_t srtt_cur;
5282 	int32_t idx;
5283 	int32_t is_tlp_timer = 0;
5284 	struct rack_sendmap *rsm;
5285 
5286 	if (rack->t_timers_stopped) {
5287 		/* All timers have been stopped, none are to run */
5288 		return (0);
5289 	}
5290 	if (rack->rc_in_persist) {
5291 		/* We can't start any timer in persists */
5292 		return (rack_get_persists_timer_val(tp, rack));
5293 	}
5294 	rack->rc_on_min_to = 0;
5295 	if ((tp->t_state < TCPS_ESTABLISHED) ||
5296 	    ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
5297 		goto activate_rxt;
5298 	}
5299 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5300 	if ((rsm == NULL) || sup_rack) {
5301 		/* Nothing on the send map or no rack */
5302 activate_rxt:
5303 		time_since_sent = 0;
5304 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5305 		if (rsm) {
5306 			/*
5307 			 * Should we discount the RTX timer any?
5308 			 *
5309 			 * We want to discount it the smallest amount.
5310 			 * If a timer (Rack/TLP or RXT) has gone off more
5311 			 * recently, that's the discount we want to use (now - timer time).
5312 			 * If the retransmit of the oldest packet was more recent, then
5313 			 * we want to use that (now - oldest-packet-last_transmit_time).
5314 			 *
5315 			 */
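			/*
			 * e.g. (illustrative) if the last timer fired
			 * 40,000 usecs ago but the oldest outstanding
			 * packet was (re)sent only 25,000 usecs ago, we
			 * use the more recent send time and discount the
			 * RXT timer by 25,000 usecs.
			 */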
5316 			idx = rsm->r_rtr_cnt - 1;
5317 			if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
5318 				tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5319 			else
5320 				tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5321 			if (TSTMP_GT(cts, tstmp_touse))
5322 			    time_since_sent = cts - tstmp_touse;
5323 		}
5324 		if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
5325 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
5326 			to = tp->t_rxtcur;
5327 			if (to > time_since_sent)
5328 				to -= time_since_sent;
5329 			else
5330 				to = rack->r_ctl.rc_min_to;
5331 			if (to == 0)
5332 				to = 1;
5333 			/* Special case for KEEPINIT */
5334 			if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
5335 			    (TP_KEEPINIT(tp) != 0) &&
5336 			    rsm) {
5337 				/*
5338 				 * We have to put a ceiling on the rxt timer
5339 				 * of the keep-init timeout.
5340 				 */
5341 				uint32_t max_time, red;
5342 
5343 				max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
5344 				if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
5345 					red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
5346 					if (red < max_time)
5347 						max_time -= red;
5348 					else
5349 						max_time = 1;
5350 				}
5351 				/* Reduce timeout to the keep value if needed */
5352 				if (max_time < to)
5353 					to = max_time;
5354 			}
5355 			return (to);
5356 		}
5357 		return (0);
5358 	}
5359 	if (rsm->r_flags & RACK_ACKED) {
5360 		rsm = rack_find_lowest_rsm(rack);
5361 		if (rsm == NULL) {
5362 			/* No lowest? */
5363 			goto activate_rxt;
5364 		}
5365 	}
5366 	if (rack->sack_attack_disable) {
5367 		/*
5368 		 * We don't want to do
5369 		 * any TLPs if you are an attacker.
5370 		 * Though if you are doing what
5371 		 * is expected you may still have
5372 		 * SACK-PASSED marks.
5373 		 */
5374 		goto activate_rxt;
5375 	}
5376 	/* Convert from ms to usecs */
5377 	if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
5378 		if ((tp->t_flags & TF_SENTFIN) &&
5379 		    ((tp->snd_max - tp->snd_una) == 1) &&
5380 		    (rsm->r_flags & RACK_HAS_FIN)) {
5381 			/*
5382 			 * We don't start a rack timer if all we have is a
5383 			 * FIN outstanding.
5384 			 */
5385 			goto activate_rxt;
5386 		}
5387 		if ((rack->use_rack_rr == 0) &&
5388 		    (IN_FASTRECOVERY(tp->t_flags)) &&
5389 		    (rack->rack_no_prr == 0) &&
5390 		     (rack->r_ctl.rc_prr_sndcnt  < ctf_fixed_maxseg(tp))) {
5391 			/*
5392 			 * We are not cheating, in recovery and
5393 			 * not enough acks yet to get our next
5394 			 * retransmission out.
5395 			 *
5396 			 * Note that classified attackers do not
5397 			 * get to use the rack-cheat.
5398 			 */
5399 			goto activate_tlp;
5400 		}
5401 		srtt = rack_grab_rtt(tp, rack);
5402 		thresh = rack_calc_thresh_rack(rack, srtt, cts);
5403 		idx = rsm->r_rtr_cnt - 1;
5404 		exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
5405 		if (SEQ_GEQ(exp, cts)) {
5406 			to = exp - cts;
5407 			if (to < rack->r_ctl.rc_min_to) {
5408 				to = rack->r_ctl.rc_min_to;
5409 				if (rack->r_rr_config == 3)
5410 					rack->rc_on_min_to = 1;
5411 			}
5412 		} else {
5413 			to = rack->r_ctl.rc_min_to;
5414 			if (rack->r_rr_config == 3)
5415 				rack->rc_on_min_to = 1;
5416 		}
5417 	} else {
5418 		/* Ok we need to do a TLP not RACK */
5419 activate_tlp:
5420 		if ((rack->rc_tlp_in_progress != 0) &&
5421 		    (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
5422 			/*
5423 			 * The previous send was a TLP and we have sent
5424 			 * N TLPs without sending new data.
5425 			 */
5426 			goto activate_rxt;
5427 		}
5428 		rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
5429 		if (rsm == NULL) {
5430 			/* We found no rsm to TLP with. */
5431 			goto activate_rxt;
5432 		}
5433 		if (rsm->r_flags & RACK_HAS_FIN) {
5434 			/* If it's a FIN we don't do TLP */
5435 			rsm = NULL;
5436 			goto activate_rxt;
5437 		}
5438 		idx = rsm->r_rtr_cnt - 1;
5439 		time_since_sent = 0;
5440 		if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
5441 			tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5442 		else
5443 			tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5444 		if (TSTMP_GT(cts, tstmp_touse))
5445 		    time_since_sent = cts - tstmp_touse;
5446 		is_tlp_timer = 1;
5447 		if (tp->t_srtt) {
5448 			if ((rack->rc_srtt_measure_made == 0) &&
5449 			    (tp->t_srtt == 1)) {
5450 				/*
5451 				 * If another stack has run and set srtt to 1,
5452 				 * then the srtt was 0, so let's use the initial.
5453 				 */
5454 				srtt = RACK_INITIAL_RTO;
5455 			} else {
5456 				srtt_cur = tp->t_srtt;
5457 				srtt = srtt_cur;
5458 			}
5459 		} else
5460 			srtt = RACK_INITIAL_RTO;
5461 		/*
5462 		 * If the SRTT is not keeping up and the
5463 		 * rack RTT has spiked, we want to use
5464 		 * the last RTT, not the smoothed one.
5465 		 */
5466 		if (rack_tlp_use_greater &&
5467 		    tp->t_srtt &&
5468 		    (srtt < rack_grab_rtt(tp, rack))) {
5469 			srtt = rack_grab_rtt(tp, rack);
5470 		}
5471 		thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
5472 		if (thresh > time_since_sent) {
5473 			to = thresh - time_since_sent;
5474 		} else {
5475 			to = rack->r_ctl.rc_min_to;
5476 			rack_log_alt_to_to_cancel(rack,
5477 						  thresh,		/* flex1 */
5478 						  time_since_sent,	/* flex2 */
5479 						  tstmp_touse,		/* flex3 */
5480 						  rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
5481 						  (uint32_t)rsm->r_tim_lastsent[idx],
5482 						  srtt,
5483 						  idx, 99);
5484 		}
5485 		if (to < rack_tlp_min) {
5486 			to = rack_tlp_min;
5487 		}
5488 		if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
5489 			/*
5490 			 * If the TLP time works out to larger than the max
5491 			 * RTO, let's not do TLP... just RTO.
5492 			 */
5493 			goto activate_rxt;
5494 		}
5495 	}
5496 	if (is_tlp_timer == 0) {
5497 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
5498 	} else {
5499 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
5500 	}
5501 	if (to == 0)
5502 		to = 1;
5503 	return (to);
5504 }
5505 
5506 static void
5507 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5508 {
5509 	if (rack->rc_in_persist == 0) {
5510 		if (tp->t_flags & TF_GPUTINPROG) {
5511 			/*
5512 			 * Stop the goodput now; the call to the
5513 			 * measurement function clears the flag.
5514 			 */
5515 			rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__);
5516 		}
5517 #ifdef NETFLIX_SHARED_CWND
5518 		if (rack->r_ctl.rc_scw) {
5519 			tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5520 			rack->rack_scwnd_is_idle = 1;
5521 		}
5522 #endif
5523 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
5524 		if (rack->r_ctl.rc_went_idle_time == 0)
5525 			rack->r_ctl.rc_went_idle_time = 1;
5526 		rack_timer_cancel(tp, rack, cts, __LINE__);
5527 		tp->t_rxtshift = 0;
5528 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5529 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5530 		rack->rc_in_persist = 1;
5531 	}
5532 }
5533 
5534 static void
5535 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5536 {
5537 	if (rack->rc_inp->inp_in_hpts) {
5538 		tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5539 		rack->r_ctl.rc_hpts_flags = 0;
5540 	}
5541 #ifdef NETFLIX_SHARED_CWND
5542 	if (rack->r_ctl.rc_scw) {
5543 		tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5544 		rack->rack_scwnd_is_idle = 0;
5545 	}
5546 #endif
5547 	if (rack->rc_gp_dyn_mul &&
5548 	    (rack->use_fixed_rate == 0) &&
5549 	    (rack->rc_always_pace)) {
5550 		/*
5551 		 * Do we count this as if a probe-rtt just
5552 		 * finished?
5553 		 */
5554 		uint32_t time_idle, idle_min;
5555 
5556 		time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
5557 		idle_min = rack_min_probertt_hold;
5558 		if (rack_probertt_gpsrtt_cnt_div) {
5559 			uint64_t extra;
5560 			extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
5561 				(uint64_t)rack_probertt_gpsrtt_cnt_mul;
5562 			extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
5563 			idle_min += (uint32_t)extra;
5564 		}
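		/*
		 * e.g. (assumed values) with a hold of 40,000 usecs,
		 * rc_gp_srtt = 20,000 usecs and a cnt_mul/cnt_div of
		 * 5/1, idle_min becomes 40,000 + 100,000 = 140,000
		 * usecs of idle time before we count it as a probe-rtt.
		 */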
5565 		if (time_idle >= idle_min) {
5566 			/* Yes, we count it as a probe-rtt. */
5567 			uint32_t us_cts;
5568 
5569 			us_cts = tcp_get_usecs(NULL);
5570 			if (rack->in_probe_rtt == 0) {
5571 				rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
5572 				rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
5573 				rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
5574 				rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
5575 			} else {
5576 				rack_exit_probertt(rack, us_cts);
5577 			}
5578 		}
5579 	}
5580 	rack->rc_in_persist = 0;
5581 	rack->r_ctl.rc_went_idle_time = 0;
5582 	tp->t_rxtshift = 0;
5583 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5584 	   rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
5585 	rack->r_ctl.rc_agg_delayed = 0;
5586 	rack->r_early = 0;
5587 	rack->r_late = 0;
5588 	rack->r_ctl.rc_agg_early = 0;
5589 }
5590 
5591 static void
5592 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
5593 		   struct hpts_diag *diag, struct timeval *tv)
5594 {
5595 	if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5596 		union tcp_log_stackspecific log;
5597 
5598 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5599 		log.u_bbr.flex1 = diag->p_nxt_slot;
5600 		log.u_bbr.flex2 = diag->p_cur_slot;
5601 		log.u_bbr.flex3 = diag->slot_req;
5602 		log.u_bbr.flex4 = diag->inp_hptsslot;
5603 		log.u_bbr.flex5 = diag->slot_remaining;
5604 		log.u_bbr.flex6 = diag->need_new_to;
5605 		log.u_bbr.flex7 = diag->p_hpts_active;
5606 		log.u_bbr.flex8 = diag->p_on_min_sleep;
5607 		/* Hijack other fields as needed */
5608 		log.u_bbr.epoch = diag->have_slept;
5609 		log.u_bbr.lt_epoch = diag->yet_to_sleep;
5610 		log.u_bbr.pkts_out = diag->co_ret;
5611 		log.u_bbr.applimited = diag->hpts_sleep_time;
5612 		log.u_bbr.delivered = diag->p_prev_slot;
5613 		log.u_bbr.inflight = diag->p_runningtick;
5614 		log.u_bbr.bw_inuse = diag->wheel_tick;
5615 		log.u_bbr.rttProp = diag->wheel_cts;
5616 		log.u_bbr.timeStamp = cts;
5617 		log.u_bbr.delRate = diag->maxticks;
5618 		log.u_bbr.cur_del_rate = diag->p_curtick;
5619 		log.u_bbr.cur_del_rate <<= 32;
5620 		log.u_bbr.cur_del_rate |= diag->p_lasttick;
5621 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
5622 		    &rack->rc_inp->inp_socket->so_rcv,
5623 		    &rack->rc_inp->inp_socket->so_snd,
5624 		    BBR_LOG_HPTSDIAG, 0,
5625 		    0, &log, false, tv);
5626 	}
5627 
5628 }
5629 
5630 static void
5631 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
5632 {
5633 	if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5634 		union tcp_log_stackspecific log;
5635 		struct timeval tv;
5636 
5637 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5638 		log.u_bbr.flex1 = sb->sb_flags;
5639 		log.u_bbr.flex2 = len;
5640 		log.u_bbr.flex3 = sb->sb_state;
5641 		log.u_bbr.flex8 = type;
5642 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5643 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
5644 		    &rack->rc_inp->inp_socket->so_rcv,
5645 		    &rack->rc_inp->inp_socket->so_snd,
5646 		    TCP_LOG_SB_WAKE, 0,
5647 		    len, &log, false, &tv);
5648 	}
5649 }
5650 
5651 static void
5652 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
5653       int32_t slot, uint32_t tot_len_this_send, int sup_rack)
5654 {
5655 	struct hpts_diag diag;
5656 	struct inpcb *inp;
5657 	struct timeval tv;
5658 	uint32_t delayed_ack = 0;
5659 	uint32_t hpts_timeout;
5660 	uint32_t entry_slot = slot;
5661 	uint8_t stopped;
5662 	uint32_t left = 0;
5663 	uint32_t us_cts;
5664 
5665 	inp = tp->t_inpcb;
5666 	if ((tp->t_state == TCPS_CLOSED) ||
5667 	    (tp->t_state == TCPS_LISTEN)) {
5668 		return;
5669 	}
5670 	if (inp->inp_in_hpts) {
5671 		/* Already on the pacer */
5672 		return;
5673 	}
5674 	stopped = rack->rc_tmr_stopped;
5675 	if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
5676 		left = rack->r_ctl.rc_timer_exp - cts;
5677 	}
5678 	rack->r_ctl.rc_timer_exp = 0;
5679 	rack->r_ctl.rc_hpts_flags = 0;
5680 	us_cts = tcp_get_usecs(&tv);
5681 	/* Now early/late accounting */
5682 	rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL);
5683 	if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
5684 		/*
5685 		 * We have an early carry over set,
5686 		 * we can always add more time so we
5687 		 * can always make this compensation.
5688 		 *
5689 		 * Note if acks are allowed to wake us, do not
5690 		 * penalize the next timer for being awoken
5691 		 * by an ack, aka the rc_agg_early (non-paced mode).
5692 		 */
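		/*
		 * e.g. (illustrative) if we ran 500 usecs early on the
		 * last pacing run, a requested slot of 3,000 usecs is
		 * stretched to 3,500 usecs here.
		 */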
5693 		slot += rack->r_ctl.rc_agg_early;
5694 		rack->r_early = 0;
5695 		rack->r_ctl.rc_agg_early = 0;
5696 	}
5697 	if (rack->r_late) {
5698 		/*
5699 		 * This is harder, we can
5700 		 * compensate some but it
5701 		 * really depends on what
5702 		 * the current pacing time is.
5703 		 */
5704 		if (rack->r_ctl.rc_agg_delayed >= slot) {
5705 			/*
5706 			 * We can't compensate for it all.
5707 			 * And we have to have some time
5708 			 * on the clock. We always have a min of
5709 			 * 10 slots (10 x 10, i.e. 100 usecs).
5710 			 */
5711 			if (slot <= HPTS_TICKS_PER_USEC) {
5712 				/* We gain delay */
5713 				rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot);
5714 				slot = HPTS_TICKS_PER_USEC;
5715 			} else {
5716 				/* We take off some */
5717 				rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC);
5718 				slot = HPTS_TICKS_PER_USEC;
5719 			}
5720 		} else {
5721 			slot -= rack->r_ctl.rc_agg_delayed;
5722 			rack->r_ctl.rc_agg_delayed = 0;
5723 			/* Make sure we have 100 useconds at minimum */
5724 			if (slot < HPTS_TICKS_PER_USEC) {
5725 				rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot;
5726 				slot = HPTS_TICKS_PER_USEC;
5727 			}
5728 			if (rack->r_ctl.rc_agg_delayed == 0)
5729 				rack->r_late = 0;
5730 		}
5731 	}
5732 	if (slot) {
5733 		/* We are pacing too */
5734 		rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
5735 	}
5736 	hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
5737 #ifdef NETFLIX_EXP_DETECTION
5738 	if (rack->sack_attack_disable &&
5739 	    (slot < tcp_sad_pacing_interval)) {
5740 		/*
5741 		 * We have a potential attacker on
5742 		 * the line. We have possibly some
5743 		 * (or no) pacing time set. We want to
5744 		 * slow down the processing of sacks by some
5745 		 * amount (if it is an attacker). Set the default
5746 		 * slot for attackers in place (unless the original
5747 		 * interval is longer). It is stored in
5748 		 * micro-seconds, so let's convert to msecs.
5749 		 */
5750 		slot = tcp_sad_pacing_interval;
5751 	}
5752 #endif
5753 	if (tp->t_flags & TF_DELACK) {
5754 		delayed_ack = TICKS_2_USEC(tcp_delacktime);
5755 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
5756 	}
5757 	if (delayed_ack && ((hpts_timeout == 0) ||
5758 			    (delayed_ack < hpts_timeout)))
5759 		hpts_timeout = delayed_ack;
5760 	else
5761 		rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5762 	/*
5763 	 * If no timers are going to run and we will fall off the hptsi
5764 	 * wheel, we resort to a keep-alive timer if it is configured.
5765 	 */
5766 	if ((hpts_timeout == 0) &&
5767 	    (slot == 0)) {
5768 		if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5769 		    (tp->t_state <= TCPS_CLOSING)) {
5770 			/*
5771 			 * Ok we have no timer (persists, rack, tlp, rxt  or
5772 			 * Ok we have no timer (persists, rack, tlp, rxt or
5773 			 * all that is left is the keepalive timer.
5774 			 */
5775 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
5776 				/* Get the established keep-alive time */
5777 				hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
5778 			} else {
5779 				/*
5780 				 * Get the initial setup keep-alive time,
5781 				 * note that this is probably not going to
5782 				 * happen, since rack will be running a rxt timer
5783 				 * if a SYN of some sort is outstanding. It is
5784 				 * actually handled in rack_timeout_rxt().
5785 				 */
5786 				hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
5787 			}
5788 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
5789 			if (rack->in_probe_rtt) {
5790 				/*
5791 				 * We want to instead not wake up a long time from
5792 				 * now but to wake up about the time we would
5793 				 * exit probe-rtt and initiate a keep-alive ack.
5794 				 * This will get us out of probe-rtt and update
5795 				 * our min-rtt.
5796 				 */
5797 				hpts_timeout = rack_min_probertt_hold;
5798 			}
5799 		}
5800 	}
5801 	if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
5802 	    (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
5803 		/*
5804 		 * RACK, TLP, persists and RXT timers are all restartable
5805 		 * based on input actions, i.e. we received a packet (ack
5806 		 * or sack) and that changes things (rwnd, snd_una, etc).
5807 		 * Thus we can restart them with a new value. For
5808 		 * keep-alive and delayed_ack we keep track of what was left
5809 		 * and restart the timer with the smaller value.
5810 		 */
5811 		if (left < hpts_timeout)
5812 			hpts_timeout = left;
5813 	}
5814 	if (hpts_timeout) {
5815 		/*
5816 		 * Hack alert: for now we can't time-out over 2,147,483,646
5817 		 * micro-seconds (a bit less than 36 minutes), which is
5818 		 * probably ok :).
5819 		 */
5820 		if (hpts_timeout > 0x7ffffffe)
5821 			hpts_timeout = 0x7ffffffe;
5822 		rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
5823 	}
5824 	rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL);
5825 	if ((rack->gp_ready == 0) &&
5826 	    (rack->use_fixed_rate == 0) &&
5827 	    (hpts_timeout < slot) &&
5828 	    (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
5829 		/*
5830 		 * We have no good estimate yet for the
5831 		 * old clunky burst mitigation or the
5832 		 * real pacing. And the tlp or rxt is smaller
5833 		 * than the pacing calculation. Lets not
5834 		 * pace that long since we know the calculation
5835 		 * so far is not accurate.
5836 		 */
5837 		slot = hpts_timeout;
5838 	}
5839 	rack->r_ctl.last_pacing_time = slot;
5840 	/*
5841 	 * Turn off all the flags for queuing by default. The
5842 	 * flags have important meanings to what happens when
5843 	 * LRO interacts with the transport. Most likely (by default now)
5844 	 * mbuf_queueing and ack compression are on. So the transport
5845 	 * has a couple of flags that control what happens (if those
5846 	 * are not on then these flags won't have any effect since it
5847 	 * won't go through the queuing LRO path).
5848 	 *
5849 	 * INP_MBUF_QUEUE_READY - This flag says that I am busy
5850 	 *                        pacing output, so don't disturb. But
5851 	 *                        it also means LRO can wake me if there
5852 	 *                        is a SACK arrival.
5853 	 *
5854 	 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
5855 	 *                       with the above flag (QUEUE_READY) and
5856 	 *                       when present it says don't even wake me
5857 	 *                       if a SACK arrives.
5858 	 *
5859 	 * The idea behind these flags is that if we are pacing we
5860 	 * set the MBUF_QUEUE_READY and only get woken up if
5861 	 * a SACK arrives (which could change things) or if
5862 	 * our pacing timer expires. If, however, we have a rack
5863 	 * timer running, then we don't even want a sack to wake
5864 	 * us since the rack timer has to expire before we can send.
5865 	 *
5866 	 * Other cases should usually have none of the flags set
5867 	 * so LRO can call into us.
5868 	 */
5869 	inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5870 	if (slot) {
5871 		rack->r_ctl.rc_last_output_to = us_cts + slot;
5872 		/*
5873 		 * A pacing timer (slot) is being set, in
5874 		 * such a case we cannot send (we are blocked by
5875 		 * the timer). So lets tell LRO that it should not
5876 		 * wake us unless there is a SACK. Note this only
5877 		 * will be effective if mbuf queueing is on or
5878 		 * compressed acks are being processed.
5879 		 */
5880 		inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5881 		/*
5882 		 * But wait if we have a Rack timer running
5883 		 * even a SACK should not disturb us (with
5884 		 * the exception of r_rr_config 3).
5885 		 */
5886 		if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5887 		    (rack->r_rr_config != 3))
5888 			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5889 		if (rack->rc_ack_can_sendout_data) {
5890 			/*
5891 			 * Ahh but wait, this is the special case
5892 			 * where the pacing timer can be disturbed;
5893 			 * back out the changes (used for non-paced
5894 			 * burst limiting).
5895 			 */
5896 			inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5897 		}
5898 		if ((rack->use_rack_rr) &&
5899 		    (rack->r_rr_config < 2) &&
5900 		    ((hpts_timeout) && (hpts_timeout < slot))) {
5901 			/*
5902 			 * Arrange for the hpts to kick back in after the
5903 			 * t-o if the t-o does not cause a send.
5904 			 */
5905 			(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5906 						   __LINE__, &diag);
5907 			rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5908 			rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5909 		} else {
5910 			(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
5911 						   __LINE__, &diag);
5912 			rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5913 			rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5914 		}
5915 	} else if (hpts_timeout) {
5916 		/*
5917 		 * With respect to inp_flags2 here, let any new acks wake
5918 		 * us up. Since we are not pacing (no pacing timer), output
5919 		 * can happen so we should let it. If it's a Rack timer, then any inbound
5920 		 * packet probably won't change the sending (we will be blocked)
5921 		 * but it may change the prr stats, so letting it in (the defaults set
5922 		 * at the start of this block) is good enough.
5923 		 */
5924 		(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5925 					   __LINE__, &diag);
5926 		rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5927 		rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5928 	} else {
5929 		/* No timer starting */
5930 #ifdef INVARIANTS
5931 		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5932 			panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5933 			    tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5934 		}
5935 #endif
5936 	}
5937 	rack->rc_tmr_stopped = 0;
5938 	if (slot)
5939 		rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5940 }
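
/*
 * Illustrative sketch (not part of the original source): the inp_flags2
 * policy applied above can be summarized as a small pure function. The
 * helper name, the boolean arguments and the bit values are hypothetical;
 * it only models the decision logic rack_start_hpts_timer() applies to
 * the real inpcb flags.
 */
static inline int
sketch_lro_wake_flags(int pacing_slot_set, int rack_tmr_armed,
    int rr_config, int ack_can_send_data)
{
	int flags = 0;	/* default: LRO may wake us for anything */

	if (pacing_slot_set) {
		flags |= 0x1;		/* stand-in for INP_MBUF_QUEUE_READY */
		if (rack_tmr_armed && (rr_config != 3))
			flags |= 0x2;	/* stand-in for INP_DONT_SACK_QUEUE */
		if (ack_can_send_data)
			flags = 0;	/* special case: back out both flags */
	}
	return (flags);
}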
5941 
5942 /*
5943  * RACK Timer, here we simply do logging and housekeeping.
5944  * The normal rack_output() function will call the
5945  * appropriate thing to check if we need to do a RACK retransmit.
5946  * We return 1, saying don't proceed with rack_output, only
5947  * when all timers have been stopped (destroyed PCB?).
5948  */
5949 static int
5950 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5951 {
5952 	/*
5953 	 * This timer simply provides an internal trigger to send out data.
5954 	 * The check_recovery_mode call will see if there are needed
5955 	 * retransmissions, if so we will enter fast-recovery. The output
5956 	 * call may or may not do the same thing depending on sysctl
5957 	 * settings.
5958 	 */
5959 	struct rack_sendmap *rsm;
5960 
5961 	if (tp->t_timers->tt_flags & TT_STOPPED) {
5962 		return (1);
5963 	}
5964 	counter_u64_add(rack_to_tot, 1);
5965 	if (rack->r_state && (rack->r_state != tp->t_state))
5966 		rack_set_state(tp, rack);
5967 	rack->rc_on_min_to = 0;
5968 	rsm = rack_check_recovery_mode(tp, cts);
5969 	rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5970 	if (rsm) {
5971 		rack->r_ctl.rc_resend = rsm;
5972 		rack->r_timer_override = 1;
5973 		if (rack->use_rack_rr) {
5974 			/*
5975 			 * Don't accumulate extra pacing delay
5976 			 * we are allowing the rack timer to
5977 			 * over-ride pacing i.e. rrr takes precedence
5978 			 * if the pacing interval is longer than the rrr
5979 			 * time (in other words we get the min pacing
5980 			 * time versus rrr pacing time).
5981 			 */
5982 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5983 		}
5984 	}
5985 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5986 	if (rsm == NULL) {
5987 		/* restart a timer and return 1 */
5988 		rack_start_hpts_timer(rack, tp, cts,
5989 				      0, 0, 0);
5990 		return (1);
5991 	}
5992 	return (0);
5993 }
5994 
5995 static void
5996 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
5997 {
5998 	if (rsm->m->m_len > rsm->orig_m_len) {
5999 		/*
6000 		 * Mbuf grew, caused by sbcompress, our offset does
6001 		 * not change.
6002 		 */
6003 		rsm->orig_m_len = rsm->m->m_len;
6004 	} else if (rsm->m->m_len < rsm->orig_m_len) {
6005 		/*
6006 		 * Mbuf shrank, trimmed off the top by an ack, our
6007 		 * offset changes.
6008 		 */
6009 		rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
6010 		rsm->orig_m_len = rsm->m->m_len;
6011 	}
6012 }
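
/*
 * Illustrative sketch (not part of the original source): the offset
 * bookkeeping above with concrete numbers. If soff is 300 into a
 * 1448-byte mbuf and an ack trims 100 bytes off the head (m_len now
 * 1348), soff must drop to 200 so that (mtod(m, char *) + soff) still
 * names the same payload byte. The helper below is hypothetical and
 * works on plain integers rather than a real mbuf.
 */
static inline uint32_t
sketch_adjust_soff(uint32_t soff, uint32_t orig_m_len, uint32_t new_m_len)
{
	if (new_m_len < orig_m_len)	/* head was trimmed by an ack */
		soff -= (orig_m_len - new_m_len);
	/* growth (sbcompress appended data) leaves soff unchanged */
	return (soff);	/* sketch_adjust_soff(300, 1448, 1348) == 200 */
}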
6013 
6014 static void
6015 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
6016 {
6017 	struct mbuf *m;
6018 	uint32_t soff;
6019 
6020 	if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) {
6021 		/* Fix up the orig_m_len and possibly the mbuf offset */
6022 		rack_adjust_orig_mlen(src_rsm);
6023 	}
6024 	m = src_rsm->m;
6025 	soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
6026 	while (soff >= m->m_len) {
6027 		/* Move out past this mbuf */
6028 		soff -= m->m_len;
6029 		m = m->m_next;
6030 		KASSERT((m != NULL),
6031 			("rsm:%p nrsm:%p hit at soff:%u null m",
6032 			 src_rsm, rsm, soff));
6033 	}
6034 	rsm->m = m;
6035 	rsm->soff = soff;
6036 	rsm->orig_m_len = m->m_len;
6037 }
6038 
6039 static __inline void
6040 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
6041 	       struct rack_sendmap *rsm, uint32_t start)
6042 {
6043 	int idx;
6044 
6045 	nrsm->r_start = start;
6046 	nrsm->r_end = rsm->r_end;
6047 	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
6048 	nrsm->r_flags = rsm->r_flags;
6049 	nrsm->r_dupack = rsm->r_dupack;
6050 	nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
6051 	nrsm->r_rtr_bytes = 0;
6052 	rsm->r_end = nrsm->r_start;
6053 	nrsm->r_just_ret = rsm->r_just_ret;
6054 	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
6055 		nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
6056 	}
6057 	/* Now if we have SYN flag we keep it on the left edge */
6058 	if (nrsm->r_flags & RACK_HAS_SYN)
6059 		nrsm->r_flags &= ~RACK_HAS_SYN;
6060 	/* Now if we have a FIN flag we keep it on the right edge */
6061 	if (rsm->r_flags & RACK_HAS_FIN)
6062 		rsm->r_flags &= ~RACK_HAS_FIN;
6063 	/* Push bit must go to the right edge as well */
6064 	if (rsm->r_flags & RACK_HAD_PUSH)
6065 		rsm->r_flags &= ~RACK_HAD_PUSH;
6066 
6067 	/*
6068 	 * Now we need to find nrsm's new location in the mbuf chain.
6069 	 * We basically calculate a new offset, which is soff +
6070 	 * how much is left in the original rsm. Then we walk out the mbuf
6071 	 * chain to find the right position; it may be the same mbuf
6072 	 * or maybe not.
6073 	 */
6074 	KASSERT(((rsm->m != NULL) ||
6075 		 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
6076 		("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
6077 	if (rsm->m)
6078 		rack_setup_offset_for_rsm(rsm, nrsm);
6079 }
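
/*
 * Illustrative sketch (not part of the original source): the sequence
 * arithmetic of the clone above in isolation. Splitting the half-open
 * range [1, 11) at 6 leaves the original map as [1, 6) and the new map
 * as [6, 11); no byte is covered twice and none is lost. The struct and
 * function names are hypothetical.
 */
struct sketch_range {
	uint32_t r_start;	/* first sequence covered */
	uint32_t r_end;		/* one past the last sequence covered */
};

static inline void
sketch_clone_range(struct sketch_range *nrsm, struct sketch_range *rsm,
    uint32_t start)
{
	nrsm->r_start = start;		/* new map takes the tail */
	nrsm->r_end = rsm->r_end;
	rsm->r_end = start;		/* original keeps the head */
}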
6080 
6081 static struct rack_sendmap *
6082 rack_merge_rsm(struct tcp_rack *rack,
6083 	       struct rack_sendmap *l_rsm,
6084 	       struct rack_sendmap *r_rsm)
6085 {
6086 	/*
6087 	 * We are merging two ack'd RSM's,
6088 	 * the l_rsm is on the left (lower seq
6089 	 * values) and the r_rsm is on the right
6090 	 * (higher seq value). The simplest way
6091 	 * to merge these is to move the right
6092 	 * one into the left. I don't think there
6093 	 * is any reason we need to try to find
6094 	 * the oldest (or last oldest retransmitted).
6095 	 */
6096 	struct rack_sendmap *rm;
6097 
6098 	rack_log_map_chg(rack->rc_tp, rack, NULL,
6099 			 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
6100 	l_rsm->r_end = r_rsm->r_end;
6101 	if (l_rsm->r_dupack < r_rsm->r_dupack)
6102 		l_rsm->r_dupack = r_rsm->r_dupack;
6103 	if (r_rsm->r_rtr_bytes)
6104 		l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
6105 	if (r_rsm->r_in_tmap) {
6106 		/* This really should not happen */
6107 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
6108 		r_rsm->r_in_tmap = 0;
6109 	}
6110 
6111 	/* Now the flags */
6112 	if (r_rsm->r_flags & RACK_HAS_FIN)
6113 		l_rsm->r_flags |= RACK_HAS_FIN;
6114 	if (r_rsm->r_flags & RACK_TLP)
6115 		l_rsm->r_flags |= RACK_TLP;
6116 	if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
6117 		l_rsm->r_flags |= RACK_RWND_COLLAPSED;
6118 	if ((r_rsm->r_flags & RACK_APP_LIMITED)  &&
6119 	    ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
6120 		/*
6121 		 * If both are app-limited then let the
6122 		 * free lower the count. If right is app
6123 		 * limited and left is not, transfer.
6124 		 */
6125 		l_rsm->r_flags |= RACK_APP_LIMITED;
6126 		r_rsm->r_flags &= ~RACK_APP_LIMITED;
6127 		if (r_rsm == rack->r_ctl.rc_first_appl)
6128 			rack->r_ctl.rc_first_appl = l_rsm;
6129 	}
6130 	rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6131 #ifdef INVARIANTS
6132 	if (rm != r_rsm) {
6133 		panic("removing head in rack:%p rsm:%p rm:%p",
6134 		      rack, r_rsm, rm);
6135 	}
6136 #endif
6137 	if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
6138 		/* Transfer the split limit to the map we free */
6139 		r_rsm->r_limit_type = l_rsm->r_limit_type;
6140 		l_rsm->r_limit_type = 0;
6141 	}
6142 	rack_free(rack, r_rsm);
6143 	return (l_rsm);
6144 }
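
/*
 * Illustrative sketch (not part of the original source): merging is the
 * inverse of the split sketched after rack_clone_rsm() above. With two
 * adjacent maps [100, 200) and [200, 300), the left one absorbs the
 * right and becomes [100, 300); the right map can then be freed. This
 * reuses the hypothetical sketch_range type from that earlier sketch.
 */
static inline void
sketch_merge_range(struct sketch_range *l_rsm, const struct sketch_range *r_rsm)
{
	/* caller guarantees l_rsm->r_end == r_rsm->r_start */
	l_rsm->r_end = r_rsm->r_end;
}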
6145 
6146 /*
6147  * TLP Timer, here we simply setup what segment we want to
6148  * have the TLP expire on, the normal rack_output() will then
6149  * send it out.
6150  *
6151  * We return 1, saying don't proceed with rack_output only
6152  * when all timers have been stopped (destroyed PCB?).
6153  */
6154 static int
6155 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6156 {
6157 	/*
6158 	 * Tail Loss Probe.
6159 	 */
6160 	struct rack_sendmap *rsm = NULL;
6161 	struct rack_sendmap *insret;
6162 	struct socket *so;
6163 	uint32_t amm;
6164 	uint32_t out, avail;
6165 	int collapsed_win = 0;
6166 
6167 	if (tp->t_timers->tt_flags & TT_STOPPED) {
6168 		return (1);
6169 	}
6170 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6171 		/* It's not time yet */
6172 		return (0);
6173 	}
6174 	if (ctf_progress_timeout_check(tp, true)) {
6175 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6176 		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6177 		return (1);
6178 	}
6179 	/*
6180 	 * A TLP timer has expired. We have been idle for 2 rtts. So we now
6181 	 * need to figure out how to force a full MSS segment out.
6182 	 */
6183 	rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
6184 	rack->r_ctl.retran_during_recovery = 0;
6185 	rack->r_ctl.dsack_byte_cnt = 0;
6186 	counter_u64_add(rack_tlp_tot, 1);
6187 	if (rack->r_state && (rack->r_state != tp->t_state))
6188 		rack_set_state(tp, rack);
6189 	so = tp->t_inpcb->inp_socket;
6190 	avail = sbavail(&so->so_snd);
6191 	out = tp->snd_max - tp->snd_una;
6192 	if (out > tp->snd_wnd) {
6193 		/* special case, we need a retransmission */
6194 		collapsed_win = 1;
6195 		goto need_retran;
6196 	}
6197 	/*
6198 	 * Check our send-oldest-always setting, and if
6199 	 * there is an oldest segment to send, jump to need_retran.
6200 	 */
6201 	if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
6202 		goto need_retran;
6203 
6204 	if (avail > out) {
6205 		/* New data is available */
6206 		amm = avail - out;
6207 		if (amm > ctf_fixed_maxseg(tp)) {
6208 			amm = ctf_fixed_maxseg(tp);
6209 			if ((amm + out) > tp->snd_wnd) {
6210 				/* We are rwnd limited */
6211 				goto need_retran;
6212 			}
6213 		} else if (amm < ctf_fixed_maxseg(tp)) {
6214 			/* not enough to fill a MTU */
6215 			/* not enough to fill an MTU */
6216 		}
6217 		if (IN_FASTRECOVERY(tp->t_flags)) {
6218 			/* Unlikely */
6219 			if (rack->rack_no_prr == 0) {
6220 				if (out + amm <= tp->snd_wnd) {
6221 					rack->r_ctl.rc_prr_sndcnt = amm;
6222 					rack_log_to_prr(rack, 4, 0);
6223 				}
6224 			} else
6225 				goto need_retran;
6226 		} else {
6227 			/* Set the send-new override */
6228 			if (out + amm <= tp->snd_wnd)
6229 				rack->r_ctl.rc_tlp_new_data = amm;
6230 			else
6231 				goto need_retran;
6232 		}
6233 		rack->r_ctl.rc_tlpsend = NULL;
6234 		counter_u64_add(rack_tlp_newdata, 1);
6235 		goto send;
6236 	}
6237 need_retran:
6238 	/*
6239 	 * Ok we need to arrange the last un-acked segment to be re-sent, or
6240 	 * optionally the first un-acked segment.
6241 	 */
6242 	if (collapsed_win == 0) {
6243 		if (rack_always_send_oldest)
6244 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6245 		else {
6246 			rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6247 			if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
6248 				rsm = rack_find_high_nonack(rack, rsm);
6249 			}
6250 		}
6251 		if (rsm == NULL) {
6252 			counter_u64_add(rack_tlp_does_nada, 1);
6253 #ifdef TCP_BLACKBOX
6254 			tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6255 #endif
6256 			goto out;
6257 		}
6258 	} else {
6259 		/*
6260 		 * We must find the last segment
6261 		 * that was acceptable to the client.
6262 		 */
6263 		RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6264 			if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
6265 				/* Found one */
6266 				break;
6267 			}
6268 		}
6269 		if (rsm == NULL) {
6270 			/* None? If so, send the first */
6271 			rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6272 			if (rsm == NULL) {
6273 				counter_u64_add(rack_tlp_does_nada, 1);
6274 #ifdef TCP_BLACKBOX
6275 				tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6276 #endif
6277 				goto out;
6278 			}
6279 		}
6280 	}
6281 	if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
6282 		/*
6283 		 * We need to split this last segment in two.
6284 		 */
6285 		struct rack_sendmap *nrsm;
6286 
6287 		nrsm = rack_alloc_full_limit(rack);
6288 		if (nrsm == NULL) {
6289 			/*
6290 			 * No memory to split, we will just exit and punt
6291 			 * off to the RXT timer.
6292 			 */
6293 			counter_u64_add(rack_tlp_does_nada, 1);
6294 			goto out;
6295 		}
6296 		rack_clone_rsm(rack, nrsm, rsm,
6297 			       (rsm->r_end - ctf_fixed_maxseg(tp)));
6298 		rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
6299 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6300 #ifdef INVARIANTS
6301 		if (insret != NULL) {
6302 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6303 			      nrsm, insret, rack, rsm);
6304 		}
6305 #endif
6306 		if (rsm->r_in_tmap) {
6307 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6308 			nrsm->r_in_tmap = 1;
6309 		}
6310 		rsm->r_flags &= (~RACK_HAS_FIN);
6311 		rsm = nrsm;
6312 	}
6313 	rack->r_ctl.rc_tlpsend = rsm;
6314 send:
6315 	rack->r_timer_override = 1;
6316 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6317 	return (0);
6318 out:
6319 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6320 	return (0);
6321 }
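
/*
 * Illustrative sketch (not part of the original source): the "new data"
 * branch of the TLP decision above, reduced to its amount calculation.
 * Given the bytes available in the socket buffer, the bytes already
 * outstanding, the fixed segment size and the peer's window, it returns
 * how much new data the probe may carry, or 0 to signal that the
 * need_retran path (retransmit instead) applies. Names are hypothetical
 * and the PRR/fast-recovery special cases are omitted.
 */
static inline uint32_t
sketch_tlp_new_data(uint32_t avail, uint32_t out, uint32_t maxseg,
    uint32_t snd_wnd)
{
	uint32_t amm;

	if (avail <= out)
		return (0);	/* nothing new to send: retransmit */
	amm = avail - out;
	if (amm > maxseg)
		amm = maxseg;	/* a probe is at most one segment */
	else if (amm < maxseg)
		return (0);	/* can't fill an MTU: retransmit */
	if ((out + amm) > snd_wnd)
		return (0);	/* rwnd limited: retransmit */
	return (amm);
}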
6322 
6323 /*
6324  * Delayed ack Timer, here we simply need to setup the
6325  * ACK_NOW flag and remove the DELACK flag. From there
6326  * the output routine will send the ack out.
6327  *
6328  * We only return 1, saying don't proceed, if all timers
6329  * are stopped (destroyed PCB?).
6330  */
6331 static int
6332 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6333 {
6334 	if (tp->t_timers->tt_flags & TT_STOPPED) {
6335 		return (1);
6336 	}
6337 	rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
6338 	tp->t_flags &= ~TF_DELACK;
6339 	tp->t_flags |= TF_ACKNOW;
6340 	KMOD_TCPSTAT_INC(tcps_delack);
6341 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6342 	return (0);
6343 }
6344 
6345 /*
6346  * Persists timer, here we simply send the
6347  * same thing as a keepalive will:
6348  * the one byte send.
6349  *
6350  * We only return 1, saying don't proceed, if all timers
6351  * are stopped (destroyed PCB?).
6352  */
6353 static int
6354 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6355 {
6356 	struct tcptemp *t_template;
6357 	struct inpcb *inp;
6358 	int32_t retval = 1;
6359 
6360 	inp = tp->t_inpcb;
6361 
6362 	if (tp->t_timers->tt_flags & TT_STOPPED) {
6363 		return (1);
6364 	}
6365 	if (rack->rc_in_persist == 0)
6366 		return (0);
6367 	if (ctf_progress_timeout_check(tp, false)) {
6368 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6369 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6370 		tcp_set_inp_to_drop(inp, ETIMEDOUT);
6371 		return (1);
6372 	}
6373 	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
6374 	/*
6375 	 * Persistence timer into zero window. Force a byte to be output, if
6376 	 * possible.
6377 	 */
6378 	KMOD_TCPSTAT_INC(tcps_persisttimeo);
6379 	/*
6380 	 * Hack: if the peer is dead/unreachable, we do not time out if the
6381 	 * window is closed.  After a full backoff, drop the connection if
6382 	 * the idle time (no responses to probes) reaches the maximum
6383 	 * backoff that we would use if retransmitting.
6384 	 */
6385 	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
6386 	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
6387 	     TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
6388 		KMOD_TCPSTAT_INC(tcps_persistdrop);
6389 		retval = 1;
6390 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6391 		tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6392 		goto out;
6393 	}
6394 	if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
6395 	    tp->snd_una == tp->snd_max)
6396 		rack_exit_persist(tp, rack, cts);
6397 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
6398 	/*
6399 	 * If the user has closed the socket then drop a persisting
6400 	 * connection after a much reduced timeout.
6401 	 */
6402 	if (tp->t_state > TCPS_CLOSE_WAIT &&
6403 	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
6404 		retval = 1;
6405 		KMOD_TCPSTAT_INC(tcps_persistdrop);
6406 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6407 		tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6408 		goto out;
6409 	}
6410 	t_template = tcpip_maketemplate(rack->rc_inp);
6411 	if (t_template) {
6412 		/* only set it if we were answered */
6413 		if (rack->forced_ack == 0) {
6414 			rack->forced_ack = 1;
6415 			rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6416 		}
6417 		tcp_respond(tp, t_template->tt_ipgen,
6418 			    &t_template->tt_t, (struct mbuf *)NULL,
6419 			    tp->rcv_nxt, tp->snd_una - 1, 0);
6420 		/* This sends an ack */
6421 		if (tp->t_flags & TF_DELACK)
6422 			tp->t_flags &= ~TF_DELACK;
6423 		free(t_template, M_TEMP);
6424 	}
6425 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
6426 		tp->t_rxtshift++;
6427 out:
6428 	rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
6429 	rack_start_hpts_timer(rack, tp, cts,
6430 			      0, 0, 0);
6431 	return (retval);
6432 }
6433 
6434 /*
6435  * If a keepalive goes off, we had no other timers
6436  * happening. We always return 1 here since this
6437  * routine either drops the connection or sends
6438  * out a segment via tcp_respond().
6439  */
6440 static int
6441 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6442 {
6443 	struct tcptemp *t_template;
6444 	struct inpcb *inp;
6445 
6446 	if (tp->t_timers->tt_flags & TT_STOPPED) {
6447 		return (1);
6448 	}
6449 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
6450 	inp = tp->t_inpcb;
6451 	rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
6452 	/*
6453 	 * Keep-alive timer went off; send something or drop connection if
6454 	 * idle for too long.
6455 	 */
6456 	KMOD_TCPSTAT_INC(tcps_keeptimeo);
6457 	if (tp->t_state < TCPS_ESTABLISHED)
6458 		goto dropit;
6459 	if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6460 	    tp->t_state <= TCPS_CLOSING) {
6461 		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
6462 			goto dropit;
6463 		/*
6464 		 * Send a packet designed to force a response if the peer is
6465 		 * up and reachable: either an ACK if the connection is
6466 		 * still alive, or an RST if the peer has closed the
6467 		 * connection due to timeout or reboot. Using sequence
6468 		 * number tp->snd_una-1 causes the transmitted zero-length
6469 		 * segment to lie outside the receive window; by the
6470 		 * protocol spec, this requires the correspondent TCP to
6471 		 * respond.
6472 		 */
6473 		KMOD_TCPSTAT_INC(tcps_keepprobe);
6474 		t_template = tcpip_maketemplate(inp);
6475 		if (t_template) {
6476 			if (rack->forced_ack == 0) {
6477 				rack->forced_ack = 1;
6478 				rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6479 			}
6480 			tcp_respond(tp, t_template->tt_ipgen,
6481 			    &t_template->tt_t, (struct mbuf *)NULL,
6482 			    tp->rcv_nxt, tp->snd_una - 1, 0);
6483 			free(t_template, M_TEMP);
6484 		}
6485 	}
6486 	rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
6487 	return (1);
6488 dropit:
6489 	KMOD_TCPSTAT_INC(tcps_keepdrops);
6490 	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6491 	tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6492 	return (1);
6493 }
6494 
6495 /*
6496  * Retransmit helper function, clear up all the ack
6497  * flags and take care of important book keeping.
6498  * flags and take care of important bookkeeping.
6499 static void
6500 rack_remxt_tmr(struct tcpcb *tp)
6501 {
6502 	/*
6503 	 * The retransmit timer went off, all sack'd blocks must be
6504 	 * un-acked.
6505 	 */
6506 	struct rack_sendmap *rsm, *trsm = NULL;
6507 	struct tcp_rack *rack;
6508 
6509 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6510 	rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
6511 	rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
6512 	if (rack->r_state && (rack->r_state != tp->t_state))
6513 		rack_set_state(tp, rack);
6514 	/*
6515 	 * Ideally we would like to be able to
6516 	 * mark SACK-PASS on anything not acked here.
6517 	 *
6518 	 * However, if we do that we would burst out
6519 	 * all that data 1ms apart. This would be unwise,
6520 	 * so for now we will just let the normal rxt timer
6521 	 * and tlp timer take care of it.
6522 	 *
6523 	 * Also we really need to stick them back in sequence
6524 	 * order. This way we send in the proper order and any
6525 	 * sacks that come floating in will "re-ack" the data.
6526 	 * To do this we zap the tmap with an INIT and then
6527 	 * walk through and place every rsm in the RB tree
6528 	 * back in its seq ordered place.
6529 	 */
6530 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
6531 	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6532 		rsm->r_dupack = 0;
6533 		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6534 		/* We must re-add it back to the tlist */
6535 		if (trsm == NULL) {
6536 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6537 		} else {
6538 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
6539 		}
6540 		rsm->r_in_tmap = 1;
6541 		trsm = rsm;
6542 		if (rsm->r_flags & RACK_ACKED)
6543 			rsm->r_flags |= RACK_WAS_ACKED;
6544 		rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
6545 	}
6546 	/* Clear the count (we just un-acked them) */
6547 	rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
6548 	rack->r_ctl.rc_sacked = 0;
6549 	rack->r_ctl.rc_sacklast = NULL;
6550 	rack->r_ctl.rc_agg_delayed = 0;
6551 	rack->r_early = 0;
6552 	rack->r_ctl.rc_agg_early = 0;
6553 	rack->r_late = 0;
6554 	/* Clear the tlp rtx mark */
6555 	rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6556 	if (rack->r_ctl.rc_resend != NULL)
6557 		rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
6558 	rack->r_ctl.rc_prr_sndcnt = 0;
6559 	rack_log_to_prr(rack, 6, 0);
6560 	rack->r_timer_override = 1;
6561 	if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
6562 #ifdef NETFLIX_EXP_DETECTION
6563 	    || (rack->sack_attack_disable != 0)
6564 #endif
6565 		    ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
6566 		/*
6567 		 * For non-sack customers new data
6568 		 * needs to go out as retransmits until
6569 		 * we retransmit up to snd_max.
6570 		 */
6571 		rack->r_must_retran = 1;
6572 		rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
6573 						rack->r_ctl.rc_sacked);
6574 	}
6575 	rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
6576 }
6577 
6578 static void
6579 rack_convert_rtts(struct tcpcb *tp)
6580 {
6581 	if (tp->t_srtt > 1) {
6582 		uint32_t val, frac;
6583 
6584 		val = tp->t_srtt >> TCP_RTT_SHIFT;
6585 		frac = tp->t_srtt & 0x1f;
6586 		tp->t_srtt = TICKS_2_USEC(val);
6587 		/*
6588 		 * frac is the fractional part of the srtt (if any)
6589 		 * but it's in ticks and every bit represents
6590 		 * 1/32nd of a tick.
6591 		 */
6592 		if (frac) {
6593 			if (hz == 1000) {
6594 				frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6595 			} else {
6596 				frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6597 			}
6598 			tp->t_srtt += frac;
6599 		}
6600 	}
6601 	if (tp->t_rttvar) {
6602 		uint32_t val, frac;
6603 
6604 		val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
6605 		frac = tp->t_rttvar & 0x1f;
6606 		tp->t_rttvar = TICKS_2_USEC(val);
6607 		/*
6608 		 * frac is the fractional part of the rttvar (if any)
6609 		 * but it's in ticks and every bit represents
6610 		 * 1/32nd of a tick.
6611 		 */
6612 		if (frac) {
6613 			if (hz == 1000) {
6614 				frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6615 			} else {
6616 				frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6617 			}
6618 			tp->t_rttvar += frac;
6619 		}
6620 	}
6621 	tp->t_rxtcur = RACK_REXMTVAL(tp);
6622 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6623 		tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
6624 	}
6625 	if (tp->t_rxtcur > rack_rto_max) {
6626 		tp->t_rxtcur = rack_rto_max;
6627 	}
6628 }
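
/*
 * Illustrative sketch (not part of the original source): the tick to
 * micro-second conversion above with concrete numbers. With hz = 1000
 * and TCP_RTT_SHIFT = 5, a t_srtt of 165 is 5 whole ticks plus 5/32nds
 * of a tick, i.e. 5000 + 156 = 5156 usec. The helper is hypothetical
 * and assumes hz divides 1000000 evenly.
 */
static inline uint32_t
sketch_srtt_ticks_to_usec(uint32_t t_srtt, uint32_t hz_val)
{
	uint32_t val, frac, usec;

	val = t_srtt >> 5;	/* whole ticks (TCP_RTT_SHIFT bits) */
	frac = t_srtt & 0x1f;	/* fractional 1/32nds of a tick */
	usec = val * (1000000 / hz_val);
	usec += (uint32_t)(((uint64_t)frac * 1000000) /
	    ((uint64_t)hz_val * 32));
	return (usec);	/* sketch_srtt_ticks_to_usec(165, 1000) == 5156 */
}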
6629 
6630 static void
6631 rack_cc_conn_init(struct tcpcb *tp)
6632 {
6633 	struct tcp_rack *rack;
6634 	uint32_t srtt;
6635 
6636 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6637 	srtt = tp->t_srtt;
6638 	cc_conn_init(tp);
6639 	/*
6640 	 * Now convert to rack's internal format,
6641 	 * if required.
6642 	 */
6643 	if ((srtt == 0) && (tp->t_srtt != 0))
6644 		rack_convert_rtts(tp);
6645 	/*
6646 	 * We want a chance to stay in slowstart as
6647 	 * we create a connection. TCP spec says that
6648 	 * initially ssthresh is infinite. For our
6649 	 * purposes that is the snd_wnd.
6650 	 */
6651 	if (tp->snd_ssthresh < tp->snd_wnd) {
6652 		tp->snd_ssthresh = tp->snd_wnd;
6653 	}
6654 	/*
6655 	 * We also want to assure an IW worth of
6656 	 * data can get in flight.
6657 	 */
6658 	if (rc_init_window(rack) < tp->snd_cwnd)
6659 		tp->snd_cwnd = rc_init_window(rack);
6660 }
6661 
6662 /*
6663  * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
6664  * we will setup to retransmit the lowest seq number outstanding.
6665  */
6666 static int
6667 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6668 {
6669 	int32_t rexmt;
6670 	struct inpcb *inp;
6671 	int32_t retval = 0;
6672 	bool isipv6;
6673 
6674 	inp = tp->t_inpcb;
6675 	if (tp->t_timers->tt_flags & TT_STOPPED) {
6676 		return (1);
6677 	}
6678 	if (ctf_progress_timeout_check(tp, false)) {
6679 		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6680 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6681 		tcp_set_inp_to_drop(inp, ETIMEDOUT);
6682 		return (1);
6683 	}
6684 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
6685 	rack->r_ctl.retran_during_recovery = 0;
6686 	rack->r_ctl.dsack_byte_cnt = 0;
6687 	if (IN_FASTRECOVERY(tp->t_flags))
6688 		tp->t_flags |= TF_WASFRECOVERY;
6689 	else
6690 		tp->t_flags &= ~TF_WASFRECOVERY;
6691 	if (IN_CONGRECOVERY(tp->t_flags))
6692 		tp->t_flags |= TF_WASCRECOVERY;
6693 	else
6694 		tp->t_flags &= ~TF_WASCRECOVERY;
6695 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
6696 	    (tp->snd_una == tp->snd_max)) {
6697 		/* Nothing outstanding .. nothing to do */
6698 		return (0);
6699 	}
6700 	/*
6701 	 * Rack can only run one timer at a time, so we cannot
6702 	 * run a KEEPINIT (gating SYN sending) and a retransmit
6703 	 * timer for the SYN. So if we are in a front state and
6704 	 * have a KEEPINIT timer we need to check the first transmit
6705 	 * against now to see if we have exceeded the KEEPINIT time
6706 	 * (if one is set).
6707 	 */
6708 	if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6709 	    (TP_KEEPINIT(tp) != 0)) {
6710 		struct rack_sendmap *rsm;
6711 
6712 		rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6713 		if (rsm) {
6714 			/* Ok we have something outstanding to test keepinit with */
6715 			if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
6716 			    ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
6717 				/* We have exceeded the KEEPINIT time */
6718 				tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6719 				goto drop_it;
6720 			}
6721 		}
6722 	}
6723 	/*
6724 	 * Retransmission timer went off.  Message has not been acked within
6725 	 * retransmit interval.  Back off to a longer retransmit interval
6726 	 * and retransmit one segment.
6727 	 */
6728 	rack_remxt_tmr(tp);
6729 	if ((rack->r_ctl.rc_resend == NULL) ||
6730 	    ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
6731 		/*
6732 		 * If the rwnd collapsed on
6733 		 * the one we are retransmitting
6734 		 * it does not count against the
6735 		 * rxt count.
6736 		 */
6737 		tp->t_rxtshift++;
6738 	}
6739 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
6740 		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6741 drop_it:
6742 		tp->t_rxtshift = TCP_MAXRXTSHIFT;
6743 		KMOD_TCPSTAT_INC(tcps_timeoutdrop);
6744 		retval = 1;
6745 		tcp_set_inp_to_drop(rack->rc_inp,
6746 		    (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
6747 		goto out;
6748 	}
6749 	if (tp->t_state == TCPS_SYN_SENT) {
6750 		/*
6751 		 * If the SYN was retransmitted, indicate CWND to be limited
6752 		 * to 1 segment in cc_conn_init().
6753 		 */
6754 		tp->snd_cwnd = 1;
6755 	} else if (tp->t_rxtshift == 1) {
6756 		/*
6757 		 * first retransmit; record ssthresh and cwnd so they can be
6758 		 * recovered if this turns out to be a "bad" retransmit. A
6759 		 * retransmit is considered "bad" if an ACK for this segment
6760 		 * is received within RTT/2 interval; the assumption here is
6761 		 * that the ACK was already in flight.  See "On Estimating
6762 		 * End-to-End Network Path Properties" by Allman and Paxson
6763 		 * for more details.
6764 		 */
6765 		tp->snd_cwnd_prev = tp->snd_cwnd;
6766 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
6767 		tp->snd_recover_prev = tp->snd_recover;
6768 		tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
6769 		tp->t_flags |= TF_PREVVALID;
6770 	} else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
6771 		tp->t_flags &= ~TF_PREVVALID;
6772 	KMOD_TCPSTAT_INC(tcps_rexmttimeo);
6773 	if ((tp->t_state == TCPS_SYN_SENT) ||
6774 	    (tp->t_state == TCPS_SYN_RECEIVED))
6775 		rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
6776 	else
6777 		rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
6778 
6779 	RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
6780 	   max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
6781 	/*
6782 	 * We enter the path for PLMTUD if the connection is established or
6783 	 * in FIN_WAIT_1 state; the reason for the latter is that if the
6784 	 * amount of data we send is very small, we could send it in a couple
6785 	 * of packets and proceed straight to FIN. In that case we won't
6786 	 * catch the ESTABLISHED state.
6787 	 */
6788 #ifdef INET6
6789 	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
6790 #else
6791 	isipv6 = false;
6792 #endif
6793 	if (((V_tcp_pmtud_blackhole_detect == 1) ||
6794 	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
6795 	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
6796 	    ((tp->t_state == TCPS_ESTABLISHED) ||
6797 	    (tp->t_state == TCPS_FIN_WAIT_1))) {
6798 		/*
6799 		 * Idea here is that each stage of the mtu probe (usually
6800 		 * 1448 -> 1188 -> 524) should be given 2 chances to recover
6801 		 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
6802 		 * should take care of that.
6803 		 */
6804 		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
6805 		    (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
6806 		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
6807 		    tp->t_rxtshift % 2 == 0)) {
6808 			/*
6809 			 * Enter Path MTU Black-hole Detection mechanism: -
6810 			 * Disable Path MTU Discovery (IP "DF" bit). -
6811 			 * Reduce MTU to lower value than what we negotiated
6812 			 * with peer.
6813 			 */
6814 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
6815 				/* Record that we may have found a black hole. */
6816 				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
6817 				/* Keep track of previous MSS. */
6818 				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
6819 			}
6820 
6821 			/*
6822 			 * Reduce the MSS to blackhole value or to the
6823 			 * default in an attempt to retransmit.
6824 			 */
6825 #ifdef INET6
6826 			if (isipv6 &&
6827 			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
6828 				/* Use the sysctl tuneable blackhole MSS. */
6829 				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
6830 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6831 			} else if (isipv6) {
6832 				/* Use the default MSS. */
6833 				tp->t_maxseg = V_tcp_v6mssdflt;
6834 				/*
6835 				 * Disable Path MTU Discovery when we switch
6836 				 * to minmss.
6837 				 */
6838 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6839 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6840 			}
6841 #endif
6842 #if defined(INET6) && defined(INET)
6843 			else
6844 #endif
6845 #ifdef INET
6846 			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
6847 				/* Use the sysctl tuneable blackhole MSS. */
6848 				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
6849 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6850 			} else {
6851 				/* Use the default MSS. */
6852 				tp->t_maxseg = V_tcp_mssdflt;
6853 				/*
6854 				 * Disable Path MTU Discovery when we switch
6855 				 * to minmss.
6856 				 */
6857 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6858 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6859 			}
6860 #endif
6861 		} else {
6862 			/*
6863 			 * If further retransmissions are still unsuccessful
6864 			 * with a lowered MTU, maybe this isn't a blackhole
6865 			 * and we restore the previous MSS and blackhole
6866 			 * detection flags. The limit '6' is determined by
6867 			 * giving each probe stage (1448, 1188, 524) 2
6868 			 * chances to recover.
6869 			 */
6870 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
6871 			    (tp->t_rxtshift >= 6)) {
6872 				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
6873 				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
6874 				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
6875 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
6876 			}
6877 		}
6878 	}
6879 	/*
6880 	 * Disable RFC1323 and SACK if we haven't got any response to
6881 	 * our third SYN to work-around some broken terminal servers
6882 	 * (most of which have hopefully been retired) that have bad VJ
6883 	 * header compression code which trashes TCP segments containing
6884 	 * unknown-to-them TCP options.
6885 	 */
6886 	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
6887 	    (tp->t_rxtshift == 3))
6888 		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
6889 	/*
6890 	 * If we backed off this far, our srtt estimate is probably bogus.
6891 	 * Clobber it so we'll take the next rtt measurement as our srtt;
6892 	 * move the current srtt into rttvar to keep the current retransmit
6893 	 * times until then.
6894 	 */
6895 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
6896 #ifdef INET6
6897 		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
6898 			in6_losing(tp->t_inpcb);
6899 		else
6900 #endif
6901 			in_losing(tp->t_inpcb);
6902 		tp->t_rttvar += tp->t_srtt;
6903 		tp->t_srtt = 0;
6904 	}
6905 	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6906 	tp->snd_recover = tp->snd_max;
6907 	tp->t_flags |= TF_ACKNOW;
6908 	tp->t_rtttime = 0;
6909 	rack_cong_signal(tp, CC_RTO, tp->snd_una);
6910 out:
6911 	return (retval);
6912 }
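
/*
 * Illustrative sketch (not part of the original source): the backoff
 * computation above is the classic srtt + 4 * rttvar RTO (note the
 * rttvar << 2), floored at the minimum, scaled by the shift's backoff
 * factor and capped at the maximum. The table below is hypothetical;
 * the real tcp_backoff[] array is defined elsewhere in the stack.
 */
static const int sketch_backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64, 64 };

static inline uint32_t
sketch_rxt_interval(uint32_t srtt_us, uint32_t rttvar_us, int rxtshift,
    uint32_t rto_min_us, uint32_t rto_max_us)
{
	uint64_t rexmt;

	if (rxtshift > 8)
		rxtshift = 8;	/* stay within the sketch table */
	rexmt = (uint64_t)srtt_us + ((uint64_t)rttvar_us << 2);
	if (rexmt < rto_min_us)
		rexmt = rto_min_us;
	rexmt *= sketch_backoff[rxtshift];
	if (rexmt > rto_max_us)
		rexmt = rto_max_us;
	return ((uint32_t)rexmt);
}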
6913 
6914 static int
6915 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
6916 {
6917 	int32_t ret = 0;
6918 	int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
6919 
6920 	if (timers == 0) {
6921 		return (0);
6922 	}
6923 	if (tp->t_state == TCPS_LISTEN) {
6924 		/* no timers on listen sockets */
6925 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
6926 			return (0);
6927 		return (1);
6928 	}
6929 	if ((timers & PACE_TMR_RACK) &&
6930 	    rack->rc_on_min_to) {
6931 		/*
6932 		 * For the rack timer when we
6933 		 * are on a min-timeout (which means rrr_conf = 3)
6934 		 * we don't want to check the timer. It may
6935 		 * be going off for a pace and that's ok; we
6936 		 * want to send the retransmit (if it's ready).
6937 		 *
6938 		 * If it's on a normal rack timer (non-min) then
6939 		 * we will check if it has expired.
6940 		 */
6941 		goto skip_time_check;
6942 	}
6943 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6944 		uint32_t left;
6945 
6946 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
6947 			ret = -1;
6948 			rack_log_to_processing(rack, cts, ret, 0);
6949 			return (0);
6950 		}
6951 		if (hpts_calling == 0) {
6952 			/*
6953 			 * A user send or queued mbuf (sack) has called us? We
6954 			 * return 0 and let the pacing guards
6955 			 * decide whether or not they
6956 			 * should cause a send.
6957 			 */
6958 			ret = -2;
6959 			rack_log_to_processing(rack, cts, ret, 0);
6960 			return (0);
6961 		}
6962 		/*
6963 		 * Ok our timer went off early and we are not paced; false
6964 		 * alarm, go back to sleep.
6965 		 */
6966 		ret = -3;
6967 		left = rack->r_ctl.rc_timer_exp - cts;
6968 		tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
6969 		rack_log_to_processing(rack, cts, ret, left);
6970 		return (1);
6971 	}
6972 skip_time_check:
6973 	rack->rc_tmr_stopped = 0;
6974 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
6975 	if (timers & PACE_TMR_DELACK) {
6976 		ret = rack_timeout_delack(tp, rack, cts);
6977 	} else if (timers & PACE_TMR_RACK) {
6978 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
6979 		rack->r_fast_output = 0;
6980 		ret = rack_timeout_rack(tp, rack, cts);
6981 	} else if (timers & PACE_TMR_TLP) {
6982 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
6983 		ret = rack_timeout_tlp(tp, rack, cts);
6984 	} else if (timers & PACE_TMR_RXT) {
6985 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
6986 		rack->r_fast_output = 0;
6987 		ret = rack_timeout_rxt(tp, rack, cts);
6988 	} else if (timers & PACE_TMR_PERSIT) {
6989 		ret = rack_timeout_persist(tp, rack, cts);
6990 	} else if (timers & PACE_TMR_KEEP) {
6991 		ret = rack_timeout_keepalive(tp, rack, cts);
6992 	}
6993 	rack_log_to_processing(rack, cts, ret, timers);
6994 	return (ret);
6995 }
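
/*
 * Illustrative sketch (not part of the original source): the if/else
 * chain above gives the pending timers a fixed dispatch priority, so
 * only one fires per call even when several flags are set at once.
 * Hypothetical bit values stand in for the PACE_TMR_* flags.
 */
enum sketch_tmr { S_NONE, S_DELACK, S_RACK, S_TLP, S_RXT, S_PERSIST, S_KEEP };

static inline enum sketch_tmr
sketch_timer_priority(unsigned int timers)
{
	if (timers & 0x01)
		return (S_DELACK);
	if (timers & 0x02)
		return (S_RACK);
	if (timers & 0x04)
		return (S_TLP);
	if (timers & 0x08)
		return (S_RXT);
	if (timers & 0x10)
		return (S_PERSIST);
	if (timers & 0x20)
		return (S_KEEP);
	return (S_NONE);
}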
6996 
6997 static void
6998 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
6999 {
7000 	struct timeval tv;
7001 	uint32_t us_cts, flags_on_entry;
7002 	uint8_t hpts_removed = 0;
7003 
7004 	flags_on_entry = rack->r_ctl.rc_hpts_flags;
7005 	us_cts = tcp_get_usecs(&tv);
7006 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
7007 	    ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
7008 	     ((tp->snd_max - tp->snd_una) == 0))) {
7009 		tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7010 		hpts_removed = 1;
7011 		/* If we were not delayed cancel out the flag. */
7012 		if ((tp->snd_max - tp->snd_una) == 0)
7013 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
7014 		rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7015 	}
7016 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7017 		rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
7018 		if (rack->rc_inp->inp_in_hpts &&
7019 		    ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
7020 			/*
7021 			 * Canceling timers when we have no output being
7022 			 * paced. We also must remove ourselves from the
7023 			 * hpts.
7024 			 */
7025 			tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7026 			hpts_removed = 1;
7027 		}
7028 		rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
7029 	}
7030 	if (hpts_removed == 0)
7031 		rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7032 }
7033 
7034 static void
7035 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
7036 {
7037 	return;
7038 }
7039 
7040 static int
7041 rack_stopall(struct tcpcb *tp)
7042 {
7043 	struct tcp_rack *rack;
7044 	rack = (struct tcp_rack *)tp->t_fb_ptr;
7045 	rack->t_timers_stopped = 1;
7046 	return (0);
7047 }
7048 
7049 static void
7050 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
7051 {
7052 	return;
7053 }
7054 
7055 static int
7056 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
7057 {
7058 	return (0);
7059 }
7060 
7061 static void
7062 rack_stop_all_timers(struct tcpcb *tp)
7063 {
7064 	struct tcp_rack *rack;
7065 
7066 	/*
7067 	 * Assure no timers are running.
7068 	 */
7069 	if (tcp_timer_active(tp, TT_PERSIST)) {
7070 		/* We enter in persists, set the flag appropriately */
7071 		rack = (struct tcp_rack *)tp->t_fb_ptr;
7072 		rack->rc_in_persist = 1;
7073 	}
7074 	tcp_timer_suspend(tp, TT_PERSIST);
7075 	tcp_timer_suspend(tp, TT_REXMT);
7076 	tcp_timer_suspend(tp, TT_KEEP);
7077 	tcp_timer_suspend(tp, TT_DELACK);
7078 }
7079 
7080 static void
7081 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
7082     struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag)
7083 {
7084 	int32_t idx;
7085 	uint16_t stripped_flags;
7086 
7087 	rsm->r_rtr_cnt++;
7088 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7089 	rsm->r_dupack = 0;
7090 	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
7091 		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
7092 		rsm->r_flags |= RACK_OVERMAX;
7093 	}
7094 	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
7095 		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
7096 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
7097 	}
7098 	idx = rsm->r_rtr_cnt - 1;
7099 	rsm->r_tim_lastsent[idx] = ts;
7100 	stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP);
7101 	if (rsm->r_flags & RACK_ACKED) {
7102 		/* Probably MTU discovery messing with us */
7103 		rsm->r_flags &= ~RACK_ACKED;
7104 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7105 	}
7106 	if (rsm->r_in_tmap) {
7107 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7108 		rsm->r_in_tmap = 0;
7109 	}
7110 	TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7111 	rsm->r_in_tmap = 1;
7112 	if (rsm->r_flags & RACK_SACK_PASSED) {
7113 		/* We have retransmitted due to the SACK pass */
7114 		rsm->r_flags &= ~RACK_SACK_PASSED;
7115 		rsm->r_flags |= RACK_WAS_SACKPASS;
7116 	}
7117 }
7118 
7119 static uint32_t
7120 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
7121     struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag)
7122 {
7123 	/*
7124 	 * We (re-)transmitted starting at rsm->r_start for some length
7125 	 * (possibly ending before r_end).
7126 	 */
7127 	struct rack_sendmap *nrsm, *insret;
7128 	uint32_t c_end;
7129 	int32_t len;
7130 
7131 	len = *lenp;
7132 	c_end = rsm->r_start + len;
7133 	if (SEQ_GEQ(c_end, rsm->r_end)) {
7134 		/*
7135 		 * We retransmitted the whole piece, or more than the whole,
7136 		 * slopping over into the next rsm.
7137 		 */
7138 		rack_update_rsm(tp, rack, rsm, ts, add_flag);
7139 		if (c_end == rsm->r_end) {
7140 			*lenp = 0;
7141 			return (0);
7142 		} else {
7143 			int32_t act_len;
7144 
7145 			/* Hangs over the end, return what's left */
7146 			act_len = rsm->r_end - rsm->r_start;
7147 			*lenp = (len - act_len);
7148 			return (rsm->r_end);
7149 		}
7150 		/* We don't get out of this block. */
7151 	}
7152 	/*
7153 	 * Here we retransmitted less than the whole thing which means we
7154 	 * have to split this into what was transmitted and what was not.
7155 	 */
7156 	nrsm = rack_alloc_full_limit(rack);
7157 	if (nrsm == NULL) {
7158 		/*
7159 		 * We can't get memory, so let's not proceed.
7160 		 */
7161 		*lenp = 0;
7162 		return (0);
7163 	}
7164 	/*
7165 	 * So here we are going to take the original rsm and make it what we
7166 	 * retransmitted. nrsm will be the tail portion we did not
7167 	 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
7168 	 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
7169 	 * 1, 6 and the new piece will be 6, 11.
7170 	 */
7171 	rack_clone_rsm(rack, nrsm, rsm, c_end);
7172 	nrsm->r_dupack = 0;
7173 	rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7174 	insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7175 #ifdef INVARIANTS
7176 	if (insret != NULL) {
7177 		panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7178 		      nrsm, insret, rack, rsm);
7179 	}
7180 #endif
7181 	if (rsm->r_in_tmap) {
7182 		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7183 		nrsm->r_in_tmap = 1;
7184 	}
7185 	rsm->r_flags &= (~RACK_HAS_FIN);
7186 	rack_update_rsm(tp, rack, rsm, ts, add_flag);
7187 	/* Log a split of rsm into rsm and nrsm */
7188 	rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7189 	*lenp = 0;
7190 	return (0);
7191 }
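
/*
 * Illustrative sketch (not part of the original source): the "hangs over
 * the end" arithmetic above in isolation. For a map covering
 * [r_start, r_end) and 'len' bytes (re)transmitted from r_start, it
 * returns where the next map lookup should continue and leaves the
 * spill-over in *lenp; a 0/0 result means the send fit within this map
 * (possibly after the split the real code performs). Names are
 * hypothetical.
 */
static inline uint32_t
sketch_consume_map(uint32_t r_start, uint32_t r_end, int32_t *lenp)
{
	uint32_t c_end = r_start + *lenp;

	if (c_end <= r_end) {
		*lenp = 0;		/* fully consumed by this map */
		return (0);
	}
	*lenp = c_end - r_end;		/* remainder spills past r_end */
	return (r_end);			/* continue the lookup here */
}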
7192 
7193 static void
7194 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
7195 		uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts,
7196 		struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff)
7197 {
7198 	struct tcp_rack *rack;
7199 	struct rack_sendmap *rsm, *nrsm, *insret, fe;
7200 	register uint32_t snd_max, snd_una;
7201 
7202 	/*
7203 	 * Add to the RACK log of packets in flight or retransmitted. If
7204 	 * there is a TS option we will use the TS echoed, if not we will
7205 	 * grab a TS.
7206 	 *
7207 	 * Retransmissions will increment the count and move the ts to its
7208 	 * proper place. Note that if options do not include TS's then we
7209 	 * won't be able to effectively use the ACK for an RTT on a retran.
7210 	 *
7211 	 * Notes about r_start and r_end. Let's consider a send starting at
7212 	 * sequence 1 for 10 bytes. In such an example the r_start would be
7213 	 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
7214 	 * This means that r_end is actually the first sequence for the next
7215 	 * slot (11).
7216 	 *
7217 	 */
7218 	/*
7219 	 * If err is set what do we do XXXrrs? should we not add the thing?
7220 	 * -- i.e. return if err != 0 or should we pretend we sent it? --
7221 	 * i.e. proceed with add ** do this for now.
7222 	 */
7223 	INP_WLOCK_ASSERT(tp->t_inpcb);
7224 	if (err)
7225 		/*
7226 		 * We don't log errors -- we could but snd_max does not
7227 		 * advance in this case either.
7228 		 */
7229 		return;
7230 
7231 	if (th_flags & TH_RST) {
7232 		/*
7233 		 * We don't log resets and we return immediately from
7234 		 * sending
7235 		 */
7236 		return;
7237 	}
7238 	rack = (struct tcp_rack *)tp->t_fb_ptr;
7239 	snd_una = tp->snd_una;
7240 	snd_max = tp->snd_max;
7241 	if (th_flags & (TH_SYN | TH_FIN)) {
7242 		/*
7243 		 * The call to rack_log_output is made before bumping
7244 		 * snd_max. This means we can record one extra byte on a SYN
7245 		 * or FIN if seq_out is adding more on and a FIN is present
7246 		 * (and we are not resending).
7247 		 */
7248 		if ((th_flags & TH_SYN) && (seq_out == tp->iss))
7249 			len++;
7250 		if (th_flags & TH_FIN)
7251 			len++;
7252 		if (SEQ_LT(snd_max, tp->snd_nxt)) {
7253 			/*
7254 			 * The add/update has not been done for the FIN/SYN
7255 			 * yet.
7256 			 */
7257 			snd_max = tp->snd_nxt;
7258 		}
7259 	}
7260 	if (SEQ_LEQ((seq_out + len), snd_una)) {
7261 		/* Are we sending an old segment to induce an ack (keep-alive)? */
7262 		return;
7263 	}
7264 	if (SEQ_LT(seq_out, snd_una)) {
7265 		/* huh? should we panic? */
7266 		uint32_t end;
7267 
7268 		end = seq_out + len;
7269 		seq_out = snd_una;
7270 		if (SEQ_GEQ(end, seq_out))
7271 			len = end - seq_out;
7272 		else
7273 			len = 0;
7274 	}
7275 	if (len == 0) {
7276 		/* We don't log zero window probes */
7277 		return;
7278 	}
7279 	rack->r_ctl.rc_time_last_sent = cts;
7280 	if (IN_FASTRECOVERY(tp->t_flags)) {
7281 		rack->r_ctl.rc_prr_out += len;
7282 	}
7283 	/* First question: is it a retransmission or new? */
7284 	if (seq_out == snd_max) {
7285 		/* Its new */
7286 again:
7287 		rsm = rack_alloc(rack);
7288 		if (rsm == NULL) {
7289 			/*
7290 			 * Hmm out of memory and the tcb got destroyed while
7291 			 * we tried to wait.
7292 			 */
7293 			return;
7294 		}
7295 		if (th_flags & TH_FIN) {
7296 			rsm->r_flags = RACK_HAS_FIN|add_flag;
7297 		} else {
7298 			rsm->r_flags = add_flag;
7299 		}
7300 		rsm->r_tim_lastsent[0] = cts;
7301 		rsm->r_rtr_cnt = 1;
7302 		rsm->r_rtr_bytes = 0;
7303 		if (th_flags & TH_SYN) {
7304 			/* The data space is one beyond snd_una */
7305 			rsm->r_flags |= RACK_HAS_SYN;
7306 		}
7307 		rsm->r_start = seq_out;
7308 		rsm->r_end = rsm->r_start + len;
7309 		rsm->r_dupack = 0;
7310 		/*
7311 		 * save off the mbuf location that
7312 		 * sndmbuf_noadv returned (which is
7313 		 * where we started copying from).
7314 		 */
7315 		rsm->m = s_mb;
7316 		rsm->soff = s_moff;
7317 		/* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
7318 		if (rsm->m) {
7319 			if (rsm->m->m_len <= rsm->soff) {
7320 				/*
7321 				 * XXXrrs Question, will this happen?
7322 				 *
7323 				 * If sbsndptr is set at the correct place
7324 				 * then s_moff should always be somewhere
7325 				 * within rsm->m. But if the sbsndptr was
7326 				 * off then that won't be true. If it occurs
7327 				 * we need to walk out to the correct location.
7328 				 */
7329 				struct mbuf *lm;
7330 
7331 				lm = rsm->m;
7332 				while (lm->m_len <= rsm->soff) {
7333 					rsm->soff -= lm->m_len;
7334 					lm = lm->m_next;
7335 					KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
7336 							     __func__, rack, s_moff, s_mb, rsm->soff));
7337 				}
7338 				rsm->m = lm;
7339 				counter_u64_add(rack_sbsndptr_wrong, 1);
7340 			} else
7341 				counter_u64_add(rack_sbsndptr_right, 1);
7342 			rsm->orig_m_len = rsm->m->m_len;
7343 		} else
7344 			rsm->orig_m_len = 0;
7345 		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7346 		/* Log a new rsm */
7347 		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
7348 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7349 #ifdef INVARIANTS
7350 		if (insret != NULL) {
7351 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7352 			      nrsm, insret, rack, rsm);
7353 		}
7354 #endif
7355 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7356 		rsm->r_in_tmap = 1;
7357 		/*
7358 		 * Special case detection, is there just a single
7359 		 * packet outstanding when we are not in recovery?
7360 		 *
7361 		 * If this is true mark it so.
7362 		 */
7363 		if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
7364 		    (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
7365 			struct rack_sendmap *prsm;
7366 
7367 			prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7368 			if (prsm)
7369 				prsm->r_one_out_nr = 1;
7370 		}
7371 		return;
7372 	}
7373 	/*
7374 	 * If we reach here its a retransmission and we need to find it.
7375 	 */
7376 	memset(&fe, 0, sizeof(fe));
7377 more:
7378 	if (hintrsm && (hintrsm->r_start == seq_out)) {
7379 		rsm = hintrsm;
7380 		hintrsm = NULL;
7381 	} else {
7382 		/* No hints sorry */
7383 		rsm = NULL;
7384 	}
7385 	if ((rsm) && (rsm->r_start == seq_out)) {
7386 		seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7387 		if (len == 0) {
7388 			return;
7389 		} else {
7390 			goto more;
7391 		}
7392 	}
7393 	/* Ok it was not the last pointer, go through it the hard way. */
7394 refind:
7395 	fe.r_start = seq_out;
7396 	rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7397 	if (rsm) {
7398 		if (rsm->r_start == seq_out) {
7399 			seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7400 			if (len == 0) {
7401 				return;
7402 			} else {
7403 				goto refind;
7404 			}
7405 		}
7406 		if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
7407 			/* Transmitted within this piece */
7408 			/*
7409 			 * Ok we must split off the front and then let the
7410 			 * update do the rest
7411 			 */
7412 			nrsm = rack_alloc_full_limit(rack);
7413 			if (nrsm == NULL) {
7414 				rack_update_rsm(tp, rack, rsm, cts, add_flag);
7415 				return;
7416 			}
7417 			/*
7418 			 * copy rsm to nrsm and then trim the front of rsm
7419 			 * to not include this part.
7420 			 */
7421 			rack_clone_rsm(rack, nrsm, rsm, seq_out);
7422 			insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7423 			rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7424 #ifdef INVARIANTS
7425 			if (insret != NULL) {
7426 				panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7427 				      nrsm, insret, rack, rsm);
7428 			}
7429 #endif
7430 			if (rsm->r_in_tmap) {
7431 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7432 				nrsm->r_in_tmap = 1;
7433 			}
7434 			rsm->r_flags &= (~RACK_HAS_FIN);
7435 			seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag);
7436 			if (len == 0) {
7437 				return;
7438 			} else if (len > 0)
7439 				goto refind;
7440 		}
7441 	}
7442 	/*
7443 	 * Hmm, not found in the map. Did they retransmit both old data and on
7444 	 * into the new?
7445 	 */
7446 	if (seq_out == tp->snd_max) {
7447 		goto again;
7448 	} else if (SEQ_LT(seq_out, tp->snd_max)) {
7449 #ifdef INVARIANTS
7450 		printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
7451 		       seq_out, len, tp->snd_una, tp->snd_max);
7452 		printf("Starting Dump of all rack entries\n");
7453 		RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
7454 			printf("rsm:%p start:%u end:%u\n",
7455 			       rsm, rsm->r_start, rsm->r_end);
7456 		}
7457 		printf("Dump complete\n");
7458 		panic("seq_out not found rack:%p tp:%p",
7459 		      rack, tp);
7460 #endif
7461 	} else {
7462 #ifdef INVARIANTS
7463 		/*
7464 		 * Hmm beyond sndmax? (only if we are using the new rtt-pack
7465 		 * flag)
7466 		 */
7467 		panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
7468 		      seq_out, len, tp->snd_max, tp);
7469 #endif
7470 	}
7471 }
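
/*
 * Aside (illustrative sketch, not compiled): both the sbsndptr fixup
 * above and the cum-ack advance in rack_process_to_cumack() keep an
 * (mbuf, offset) pair normalized so that the offset always lands
 * inside a single mbuf. The walk is simply:
 *
 *	while (m->m_len <= off) {
 *		off -= m->m_len;	(consume this mbuf's bytes)
 *		m = m->m_next;		(step to the next mbuf)
 *	}
 *
 * after which 'off' is a valid index into 'm'.
 */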
7472 
7473 /*
7474  * Record one of the RTT updates from an ack into
7475  * our sample structure.
7476  */
7477 
7478 static void
7479 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7480 		    int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
7481 {
7482 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7483 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7484 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7485 	}
7486 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7487 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7488 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
7489 	}
7490 	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
7491 	    if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7492 		rack->r_ctl.rc_gp_lowrtt = us_rtt;
7493 	    if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7494 		    rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7495 	}
7496 	if ((confidence == 1) &&
7497 	    ((rsm == NULL) ||
7498 	     (rsm->r_just_ret) ||
7499 	     (rsm->r_one_out_nr &&
7500 	      len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7501 		/*
7502 		 * If the rsm had a just-return
7503 		 * hit on it then we can't trust the
7504 		 * rtt measurement for buffer determination.
7505 		 * Note that a confidence of 2 indicates
7506 		 * SACK'd, which overrides the r_just_ret or
7507 		 * the r_one_out_nr. If it was a CUM-ACK and
7508 		 * we had only two outstanding but got an
7509 		 * ack for only 1, then that also lowers our
7510 		 * confidence.
7511 		 */
7512 		confidence = 0;
7513 	}
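	/*
	 * To recap the convention used in this function: confidence 2 means
	 * the sample was SACK timed (and overrides r_just_ret and
	 * r_one_out_nr), 1 means a cum-ack we believe, and 0 means the
	 * sample is suspect (a just-return hit, a lone packet outstanding,
	 * or a partial cum-ack).
	 */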
7514 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7515 	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7516 		if (rack->r_ctl.rack_rs.confidence == 0) {
7517 			/*
7518 			 * We take anything with no current confidence
7519 			 * saved.
7520 			 */
7521 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7522 			rack->r_ctl.rack_rs.confidence = confidence;
7523 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7524 		} else if (confidence || rack->r_ctl.rack_rs.confidence) {
7525 			/*
7526 			 * Once we have a confident number,
7527 			 * we can update it with a smaller
7528 			 * value since this confident number
7529 			 * may include the DSACK time until
7530 			 * the next segment (the second one) arrived.
7531 			 */
7532 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7533 			rack->r_ctl.rack_rs.confidence = confidence;
7534 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7535 		}
7536 	}
7537 	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7538 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7539 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7540 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
7541 }
7542 
7543 /*
7544  * Collect new round-trip time estimate
7545  * and update averages and current timeout.
7546  */
7547 static void
7548 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
7549 {
7550 	int32_t delta;
7551 	uint32_t o_srtt, o_var;
7552 	int32_t hrtt_up = 0;
7553 	int32_t rtt;
7554 
7555 	if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
7556 		/* No valid sample */
7557 		return;
7558 	if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
7559 		/* We are to use the lowest RTT seen in a single ack */
7560 		rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
7561 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
7562 		/* We are to use the highest RTT seen in a single ack */
7563 		rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
7564 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
7565 		/* We are to use the average RTT seen in a single ack */
7566 		rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
7567 				(uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
7568 	} else {
7569 #ifdef INVARIANTS
7570 		panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
7571 #endif
7572 		return;
7573 	}
7574 	if (rtt == 0)
7575 		rtt = 1;
7576 	if (rack->rc_gp_rtt_set == 0) {
7577 		/*
7578 		 * With no RTT we have to accept
7579 		 * even one we are not confident of.
7580 		 */
7581 		rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
7582 		rack->rc_gp_rtt_set = 1;
7583 	} else if (rack->r_ctl.rack_rs.confidence) {
7584 		/* update the running gp srtt */
7585 		rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
7586 		rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
7587 	}
7588 	if (rack->r_ctl.rack_rs.confidence) {
7589 		 * Record the low and high for highly-buffered-path computation;
7590 		 * we only do this if we are confident (not a retransmission).
7591 		 * we only do this if we are confident (not a retransmission).
7592 		 */
7593 		if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
7594 			rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7595 			hrtt_up = 1;
7596 		}
7597 		if (rack->rc_highly_buffered == 0) {
7598 			/*
7599 			 * Currently, once we declare a path
7600 			 * highly buffered there is no going
7601 			 * back, which may be a problem...
7602 			 */
7603 			if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
7604 				rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
7605 						     rack->r_ctl.rc_highest_us_rtt,
7606 						     rack->r_ctl.rc_lowest_us_rtt,
7607 						     RACK_RTTS_SEEHBP);
7608 				rack->rc_highly_buffered = 1;
7609 			}
7610 		}
7611 	}
7612 	if ((rack->r_ctl.rack_rs.confidence) ||
7613 	    (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
7614 		/*
7615 		 * If we are highly confident of it <or> it was
7616 		 * never retransmitted we accept it as the last us_rtt.
7617 		 */
7618 		rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7619 		/* The lowest rtt can be set if it was not retransmitted */
7620 		if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
7621 			rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7622 			if (rack->r_ctl.rc_lowest_us_rtt == 0)
7623 				rack->r_ctl.rc_lowest_us_rtt = 1;
7624 		}
7625 	}
7626 	o_srtt = tp->t_srtt;
7627 	o_var = tp->t_rttvar;
7628 	rack = (struct tcp_rack *)tp->t_fb_ptr;
7629 	if (tp->t_srtt != 0) {
7630 		/*
7631 		 * We keep a simple srtt in microseconds, like our rtt
7632 		 * measurement. We don't need to do any tricks with shifting
7633 		 * etc. Instead we just add in 1/8th of the new measurement
7634 		 * and subtract out 1/8 of the old srtt. We do the same with
7635 		 * the variance after finding the absolute value of the
7636 		 * difference between this sample and the current srtt.
7637 		 */
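		/*
		 * Worked example (illustrative numbers): with t_srtt at
		 * 8000us and a new rtt sample of 9000us, delta is -1000;
		 * t_srtt becomes 8000 - 1000 + 1125 = 8125us and t_rttvar
		 * gains |delta|/8 = 125us on top of its decayed value.
		 */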
7638 		delta = tp->t_srtt - rtt;
7639 		/* Take off 1/8th of the current sRTT */
7640 		tp->t_srtt -= (tp->t_srtt >> 3);
7641 		/* Add in 1/8th of the new RTT just measured */
7642 		tp->t_srtt += (rtt >> 3);
7643 		if (tp->t_srtt <= 0)
7644 			tp->t_srtt = 1;
7645 		/* Now lets make the absolute value of the variance */
7646 		if (delta < 0)
7647 			delta = -delta;
7648 		/* Subtract out 1/8th */
7649 		tp->t_rttvar -= (tp->t_rttvar >> 3);
7650 		/* Add in 1/8th of the new variance we just saw */
7651 		tp->t_rttvar += (delta >> 3);
7652 		if (tp->t_rttvar <= 0)
7653 			tp->t_rttvar = 1;
7654 		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
7655 			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7656 	} else {
7657 		/*
7658 		 * No rtt measurement yet - use the unsmoothed rtt. Set the
7659 		 * variance to half the rtt (so our first retransmit happens
7660 		 * at 3*rtt).
7661 		 */
7662 		tp->t_srtt = rtt;
7663 		tp->t_rttvar = rtt >> 1;
7664 		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7665 	}
7666 	rack->rc_srtt_measure_made = 1;
7667 	KMOD_TCPSTAT_INC(tcps_rttupdated);
7668 	tp->t_rttupdated++;
7669 #ifdef STATS
7670 	if (rack_stats_gets_ms_rtt == 0) {
7671 		/* Send in the microsecond rtt used for rxt timeout purposes */
7672 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
7673 	} else if (rack_stats_gets_ms_rtt == 1) {
7674 		/* Send in the millisecond rtt used for rxt timeout purposes */
7675 		int32_t ms_rtt;
7676 
7677 		/* Round up */
7678 		ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7679 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7680 	} else if (rack_stats_gets_ms_rtt == 2) {
7681 		/* Send in the millisecond rtt as close to the path RTT as we can get */
7682 		int32_t ms_rtt;
7683 
7684 		/* Round up */
7685 		ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7686 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7687 	}  else {
7688 		/* Send in the microsecond rtt as close to the path RTT as we can get */
7689 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
7690 	}
7691 
7692 #endif
7693 	/*
7694 	 * the retransmit should happen at rtt + 4 * rttvar. Because of the
7695 	 * way we do the smoothing, srtt and rttvar will each average +1/2
7696 	 * tick of bias.  When we compute the retransmit timer, we want 1/2
7697 	 * tick of rounding and 1 extra tick because of +-1/2 tick
7698 	 * uncertainty in the firing of the timer.  The bias will give us
7699 	 * exactly the 1.5 tick we need.  But, because the bias is
7700 	 * statistical, we have to test that we don't drop below the minimum
7701 	 * feasible timer (which is 2 ticks).
7702 	 */
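	/*
	 * Roughly, then, a path with srtt = 8ms and rttvar = 1ms re-arms
	 * its RXT timer near 12ms (illustrative numbers), clamped between
	 * max(rack_rto_min, rtt + 2) and rack_rto_max by
	 * RACK_TCPT_RANGESET(), which also folds in the timer_slop.
	 */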
7703 	tp->t_rxtshift = 0;
7704 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7705 		      max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
7706 	rack_log_rtt_sample(rack, rtt);
7707 	tp->t_softerror = 0;
7708 }
7709 
7710 
7711 static void
7712 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
7713 {
7714 	/*
7715 	 * Apply to filter the inbound us-rtt at us_cts.
7716 	 */
7717 	uint32_t old_rtt;
7718 
7719 	old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
7720 	apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
7721 			       us_rtt, us_cts);
7722 	if (rack->r_ctl.last_pacing_time &&
7723 	    rack->rc_gp_dyn_mul &&
7724 	    (rack->r_ctl.last_pacing_time > us_rtt))
7725 		rack->pacing_longer_than_rtt = 1;
7726 	else
7727 		rack->pacing_longer_than_rtt = 0;
7728 	if (old_rtt > us_rtt) {
7729 		/* We just hit a new lower rtt time */
7730 		rack_log_rtt_shrinks(rack,  us_cts,  old_rtt,
7731 				     __LINE__, RACK_RTTS_NEWRTT);
7732 		/*
7733 		 * Only count it if it's lower than what we saw within our
7734 		 * calculated range.
7735 		 */
7736 		if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
7737 			if (rack_probertt_lower_within &&
7738 			    rack->rc_gp_dyn_mul &&
7739 			    (rack->use_fixed_rate == 0) &&
7740 			    (rack->rc_always_pace)) {
7741 				/*
7742 				 * We are seeing a new lower rtt very close
7743 				 * to the time that we would have entered probe-rtt.
7744 				 * This is probably due to the fact that a peer flow
7745 				 * has entered probe-rtt. Let's go in now too.
7746 				 */
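				/*
				 * Concretely, val below is
				 * rack_probertt_lower_within percent of
				 * rack_time_between_probertt, so we agree
				 * to the early entry only once we are
				 * within that final percentage of the
				 * normal probe-rtt interval.
				 */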
7747 				uint32_t val;
7748 
7749 				val = rack_probertt_lower_within * rack_time_between_probertt;
7750 				val /= 100;
7751 				if ((rack->in_probe_rtt == 0)  &&
7752 				    ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val)))	{
7753 					rack_enter_probertt(rack, us_cts);
7754 				}
7755 			}
7756 			rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
7757 		}
7758 	}
7759 }
7760 
7761 static int
7762 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
7763     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
7764 {
7765 	int32_t i, all;
7766 	uint32_t t, len_acked;
7767 
7768 	if ((rsm->r_flags & RACK_ACKED) ||
7769 	    (rsm->r_flags & RACK_WAS_ACKED))
7770 		/* Already done */
7771 		return (0);
7772 	if (rsm->r_no_rtt_allowed) {
7773 		/* Not allowed */
7774 		return (0);
7775 	}
7776 	if (ack_type == CUM_ACKED) {
7777 		if (SEQ_GT(th_ack, rsm->r_end)) {
7778 			len_acked = rsm->r_end - rsm->r_start;
7779 			all = 1;
7780 		} else {
7781 			len_acked = th_ack - rsm->r_start;
7782 			all = 0;
7783 		}
7784 	} else {
7785 		len_acked = rsm->r_end - rsm->r_start;
7786 		all = 0;
7787 	}
7788 	if (rsm->r_rtr_cnt == 1) {
7789 		uint32_t us_rtt;
7790 
7791 		t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7792 		if ((int)t <= 0)
7793 			t = 1;
7794 		if (!tp->t_rttlow || tp->t_rttlow > t)
7795 			tp->t_rttlow = t;
7796 		if (!rack->r_ctl.rc_rack_min_rtt ||
7797 		    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7798 			rack->r_ctl.rc_rack_min_rtt = t;
7799 			if (rack->r_ctl.rc_rack_min_rtt == 0) {
7800 				rack->r_ctl.rc_rack_min_rtt = 1;
7801 			}
7802 		}
7803 		if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
7804 			us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7805 		else
7806 			us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7807 		if (us_rtt == 0)
7808 			us_rtt = 1;
7809 		rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
7810 		if (ack_type == SACKED) {
7811 			rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
7812 			tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
7813 		} else {
7814 			/*
7815 			 * We need to set up what our confidence
7816 			 * is in this ack.
7817 			 *
7818 			 * If the rsm was app limited and it is
7819 			 * less than an MSS in length (the end
7820 			 * of the send) then we have a gap. If we
7821 			 * were app limited but say we were sending
7822 			 * multiple MSS's then we are more confident
7823 			 * in it.
7824 			 *
7825 			 * When we are not app-limited we then see if
7826 			 * the rsm is being included in the current
7827 			 * measurement; we tell this by the app_limited_needs_set
7828 			 * flag.
7829 			 *
7830 			 * Note that being cwnd blocked is not app-limited,
7831 			 * and that the pacing delay between packets when we
7832 			 * are sending only 1 or 2 MSS's will also show up
7833 			 * in the RTT. We probably need to examine this algorithm
7834 			 * a bit more and enhance it to account for the delay
7835 			 * between rsm's. We could do that by saving off the
7836 			 * pacing delay of each rsm (in an rsm) and then
7837 			 * factoring that in somehow, though for now I am
7838 			 * not sure how :)
7839 			 */
7840 			int calc_conf = 0;
7841 
7842 			if (rsm->r_flags & RACK_APP_LIMITED) {
7843 				if (all && (len_acked <= ctf_fixed_maxseg(tp)))
7844 					calc_conf = 0;
7845 				else
7846 					calc_conf = 1;
7847 			} else if (rack->app_limited_needs_set == 0) {
7848 				calc_conf = 1;
7849 			} else {
7850 				calc_conf = 0;
7851 			}
7852 			rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
7853 			tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
7854 					    calc_conf, rsm, rsm->r_rtr_cnt);
7855 		}
7856 		if ((rsm->r_flags & RACK_TLP) &&
7857 		    (!IN_FASTRECOVERY(tp->t_flags))) {
7858 			/* Segment was a TLP and our retrans matched */
7859 			if (rack->r_ctl.rc_tlp_cwnd_reduce) {
7860 				rack->r_ctl.rc_rsm_start = tp->snd_max;
7861 				rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7862 				rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7863 				rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
7864 			}
7865 		}
7866 		if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7867 			/* New more recent rack_tmit_time */
7868 			rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7869 			rack->rc_rack_rtt = t;
7870 		}
7871 		return (1);
7872 	}
7873 	/*
7874 	 * We clear the soft/rxtshift since we got an ack.
7875 	 * There is no assurance we will call the commit() function
7876 	 * so we need to clear these to avoid incorrect handling.
7877 	 */
7878 	tp->t_rxtshift = 0;
7879 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7880 		      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
7881 	tp->t_softerror = 0;
7882 	if (to && (to->to_flags & TOF_TS) &&
7883 	    (ack_type == CUM_ACKED) &&
7884 	    (to->to_tsecr) &&
7885 	    ((rsm->r_flags & RACK_OVERMAX) == 0)) {
7886 		/*
7887 		 * Now which timestamp does it match? In this block the ACK
7888 		 * must be coming from a previous transmission.
7889 		 */
7890 		for (i = 0; i < rsm->r_rtr_cnt; i++) {
7891 			if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
7892 				t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7893 				if ((int)t <= 0)
7894 					t = 1;
7895 				if ((i + 1) < rsm->r_rtr_cnt) {
7896 					/*
7897 					 * The peer ack'd from our previous
7898 					 * transmission. We have a spurious
7899 					 * retransmission and thus we don't
7900 					 * want to update our rack_rtt.
7901 					 */
7902 					return (0);
7903 				}
7904 				if (!tp->t_rttlow || tp->t_rttlow > t)
7905 					tp->t_rttlow = t;
7906 				if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7907 					rack->r_ctl.rc_rack_min_rtt = t;
7908 					if (rack->r_ctl.rc_rack_min_rtt == 0) {
7909 						rack->r_ctl.rc_rack_min_rtt = 1;
7910 					}
7911 				}
7912 				if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
7913 					   (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7914 					/* New more recent rack_tmit_time */
7915 					rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7916 					rack->rc_rack_rtt = t;
7917 				}
7918 				rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
7919 				tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
7920 						    rsm->r_rtr_cnt);
7921 				return (1);
7922 			}
7923 		}
7924 		goto ts_not_found;
7925 	} else {
7926 		/*
7927 		 * Ok, it's a SACK block that we retransmitted, or a Windows
7928 		 * machine without timestamps. We can tell nothing from the
7929 		 * time-stamp since it's not there, or from the time the peer last
7930 		 * received a segment that moved forward its cum-ack point.
7931 		 */
7932 ts_not_found:
7933 		i = rsm->r_rtr_cnt - 1;
7934 		t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7935 		if ((int)t <= 0)
7936 			t = 1;
7937 		if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7938 			/*
7939 			 * We retransmitted and the ack came back in less
7940 			 * than the smallest rtt we have observed. We most
7941 			 * likely did an improper retransmit as outlined in
7942 			 * 6.2 Step 2 point 2 in the rack-draft so we
7943 			 * don't want to update our rack_rtt. In theory
7944 			 * (in the future) we might want to think about reverting our
7945 			 * cwnd state, but we won't for now.
7946 			 */
7947 			return (0);
7948 		} else if (rack->r_ctl.rc_rack_min_rtt) {
7949 			/*
7950 			 * We retransmitted it and the retransmit did the
7951 			 * job.
7952 			 */
7953 			if (!rack->r_ctl.rc_rack_min_rtt ||
7954 			    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7955 				rack->r_ctl.rc_rack_min_rtt = t;
7956 				if (rack->r_ctl.rc_rack_min_rtt == 0) {
7957 					rack->r_ctl.rc_rack_min_rtt = 1;
7958 				}
7959 			}
7960 			if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
7961 				/* New more recent rack_tmit_time */
7962 				rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
7963 				rack->rc_rack_rtt = t;
7964 			}
7965 			return (1);
7966 		}
7967 	}
7968 	return (0);
7969 }
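
/*
 * A compact restatement of the policy above (informal): a
 * once-transmitted rsm is always timed; a retransmitted one is timed
 * from the echoed timestamp when that pins the ack to the last
 * transmission, and otherwise its sample is accepted only when it is
 * not implausibly below the smallest rtt ever observed (which would
 * suggest the ack matches an earlier transmission).
 */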
7970 
7971 /*
7972  * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
7973  */
7974 static void
7975 rack_log_sack_passed(struct tcpcb *tp,
7976     struct tcp_rack *rack, struct rack_sendmap *rsm)
7977 {
7978 	struct rack_sendmap *nrsm;
7979 
7980 	nrsm = rsm;
7981 	TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
7982 	    rack_head, r_tnext) {
7983 		if (nrsm == rsm) {
7984 			/* Skip the original segment; it is acked */
7985 			continue;
7986 		}
7987 		if (nrsm->r_flags & RACK_ACKED) {
7988 			/*
7989 			 * Skip ack'd segments, though we
7990 			 * should not see these, since tmap
7991 			 * should not have ack'd segments.
7992 			 */
7993 			continue;
7994 		}
7995 		if (nrsm->r_flags & RACK_SACK_PASSED) {
7996 			/*
7997 			 * We found one that is already marked
7998 			 * passed, we have been here before and
7999 			 * so all others below this are marked.
8000 			 */
8001 			break;
8002 		}
8003 		nrsm->r_flags |= RACK_SACK_PASSED;
8004 		nrsm->r_flags &= ~RACK_WAS_SACKPASS;
8005 	}
8006 }
8007 
8008 static void
8009 rack_need_set_test(struct tcpcb *tp,
8010 		   struct tcp_rack *rack,
8011 		   struct rack_sendmap *rsm,
8012 		   tcp_seq th_ack,
8013 		   int line,
8014 		   int use_which)
8015 {
8016 
8017 	if ((tp->t_flags & TF_GPUTINPROG) &&
8018 	    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8019 		/*
8020 		 * We were app limited, and this ack
8021 		 * butts up or goes beyond the point where we want
8022 		 * to start our next measurement. We need
8023 		 * to record the new gput_ts here and
8024 		 * possibly update the start sequence.
8025 		 */
8026 		uint32_t seq, ts;
8027 
8028 		if (rsm->r_rtr_cnt > 1) {
8029 			/*
8030 			 * This is a retransmit, can we
8031 			 * really make any assessment at this
8032 			 * point?  We are not really sure of
8033 			 * the timestamp, is it this or the
8034 			 * previous transmission?
8035 			 *
8036 			 * Let's wait for something better that
8037 			 * is not retransmitted.
8038 			 */
8039 			return;
8040 		}
8041 		seq = tp->gput_seq;
8042 		ts = tp->gput_ts;
8043 		rack->app_limited_needs_set = 0;
8044 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8045 		/* Do we start at a new end? */
8046 		if ((use_which == RACK_USE_BEG) &&
8047 		    SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8048 			/*
8049 			 * When we get an ACK that just eats
8050 			 * up some of the rsm, we set RACK_USE_BEG
8051 			 * since what's at r_start (i.e. th_ack)
8052 			 * is left unacked and that's where the
8053 			 * measurement now starts.
8054 			 */
8055 			tp->gput_seq = rsm->r_start;
8056 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8057 		}
8058 		if ((use_which == RACK_USE_END) &&
8059 		    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8060 			    /*
8061 			     * We use the end when the cumack
8062 			     * is moving forward and completely
8063 			     * deleting the rsm passed so basically
8064 			     * r_end holds th_ack.
8065 			     *
8066 			     * For SACK's we also want to use the end
8067 			     * since this piece just got sacked and
8068 			     * we want to target anything after that
8069 			     * in our measurement.
8070 			     */
8071 			    tp->gput_seq = rsm->r_end;
8072 			    rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8073 		}
8074 		if (use_which == RACK_USE_END_OR_THACK) {
8075 			/*
8076 			 * special case for ack moving forward,
8077 			 * not a sack, we need to move all the
8078 			 * way up to where this ack cum-ack moves
8079 			 * to.
8080 			 */
8081 			if (SEQ_GT(th_ack, rsm->r_end))
8082 				tp->gput_seq = th_ack;
8083 			else
8084 				tp->gput_seq = rsm->r_end;
8085 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8086 		}
8087 		if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8088 			/*
8089 			 * We moved beyond this guy's range, re-calculate
8090 			 * the new end point.
8091 			 */
8092 			if (rack->rc_gp_filled == 0) {
8093 				tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8094 			} else {
8095 				tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8096 			}
8097 		}
8098 		/*
8099 		 * We are moving the goal post; we may be able to clear the
8100 		 * measure_saw_probe_rtt flag.
8101 		 */
8102 		if ((rack->in_probe_rtt == 0) &&
8103 		    (rack->measure_saw_probe_rtt) &&
8104 		    (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
8105 			rack->measure_saw_probe_rtt = 0;
8106 		rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
8107 					   seq, tp->gput_seq, 0, 5, line, NULL);
8108 		if (rack->rc_gp_filled &&
8109 		    ((tp->gput_ack - tp->gput_seq) <
8110 		     max(rc_init_window(rack), (MIN_GP_WIN *
8111 						ctf_fixed_maxseg(tp))))) {
8112 			uint32_t ideal_amount;
8113 
8114 			ideal_amount = rack_get_measure_window(tp, rack);
8115 			if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
8116 				/*
8117 				 * There is no sense in continuing this measurement
8118 				 * because it's too small to gain us anything we
8119 				 * trust. Skip it and that way we can start a new
8120 				 * measurement sooner.
8121 				 */
8122 				tp->t_flags &= ~TF_GPUTINPROG;
8123 				rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
8124 							   0, 0, 0, 6, __LINE__, NULL);
8125 			} else {
8126 				/*
8127 				 * Reset the window further out.
8128 				 */
8129 				tp->gput_ack = tp->gput_seq + ideal_amount;
8130 			}
8131 		}
8132 	}
8133 }
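
/*
 * In short (restating the cases above): RACK_USE_BEG pins gput_seq to
 * the start of the unacked remainder, RACK_USE_END to just past the
 * sacked or consumed piece, and RACK_USE_END_OR_THACK to whichever of
 * r_end or th_ack is further along.
 */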
8134 
8135 static uint32_t
8136 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
8137 		   struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
8138 {
8139 	uint32_t start, end, changed = 0;
8140 	struct rack_sendmap stack_map;
8141 	struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
8142 	int32_t used_ref = 1;
8143 	int moved = 0;
8144 
8145 	start = sack->start;
8146 	end = sack->end;
8147 	rsm = *prsm;
8148 	memset(&fe, 0, sizeof(fe));
8149 do_rest_ofb:
8150 	if ((rsm == NULL) ||
8151 	    (SEQ_LT(end, rsm->r_start)) ||
8152 	    (SEQ_GEQ(start, rsm->r_end)) ||
8153 	    (SEQ_LT(start, rsm->r_start))) {
8154 		/*
8155 		 * We are not in the right spot,
8156 		 * find the correct spot in the tree.
8157 		 */
8158 		used_ref = 0;
8159 		fe.r_start = start;
8160 		rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8161 		moved++;
8162 	}
8163 	if (rsm == NULL) {
8164 		/* TSNH */
8165 		goto out;
8166 	}
8167 	/* Ok we have an ACK for some piece of this rsm */
8168 	if (rsm->r_start != start) {
8169 		if ((rsm->r_flags & RACK_ACKED) == 0) {
8170 			/**
8171 			 * Need to split this in two pieces the before and after,
8172 			 * the before remains in the map, the after must be
8173 			 * added. In other words we have:
8174 			 * rsm        |--------------|
8175 			 * sackblk        |------->
8176 			 * rsm will become
8177 			 *     rsm    |---|
8178 			 * and nrsm will be  the sacked piece
8179 			 *     nrsm       |----------|
8180 			 *
8181 			 * But before we start down that path lets
8182 			 * see if the sack spans over on top of
8183 			 * the next guy and it is already sacked.
8184 			 */
8185 			next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8186 			if (next && (next->r_flags & RACK_ACKED) &&
8187 			    SEQ_GEQ(end, next->r_start)) {
8188 				/**
8189 				 * So the next one is already acked, and
8190 				 * we can thus by hookery use our stack_map
8191 				 * to reflect the piece being sacked and
8192 				 * then adjust the two tree entries moving
8193 				 * the start and ends around. So we start like:
8194 				 *  rsm     |------------|             (not-acked)
8195 				 *  next                 |-----------| (acked)
8196 				 *  sackblk        |-------->
8197 				 *  We want to end like so:
8198 				 *  rsm     |------|                   (not-acked)
8199 				 *  next           |-----------------| (acked)
8200 				 *  nrsm           |-----|
8201 				 * Where nrsm is a temporary stack piece we
8202 				 * use to update all the gizmos.
8203 				 */
8204 				/* Copy up our fudge block */
8205 				nrsm = &stack_map;
8206 				memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8207 				/* Now adjust our tree blocks */
8208 				rsm->r_end = start;
8209 				next->r_start = start;
8210 				/* Now we must adjust back where next->m is */
8211 				rack_setup_offset_for_rsm(rsm, next);
8212 
8213 				/* We don't need to adjust rsm, it did not change */
8214 				/* Clear out the dup ack count of the remainder */
8215 				rsm->r_dupack = 0;
8216 				rsm->r_just_ret = 0;
8217 				rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8218 				/* Now lets make sure our fudge block is right */
8219 				nrsm->r_start = start;
8220 				/* Now lets update all the stats and such */
8221 				rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8222 				if (rack->app_limited_needs_set)
8223 					rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8224 				changed += (nrsm->r_end - nrsm->r_start);
8225 				rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8226 				if (nrsm->r_flags & RACK_SACK_PASSED) {
8227 					counter_u64_add(rack_reorder_seen, 1);
8228 					rack->r_ctl.rc_reorder_ts = cts;
8229 				}
8230 				/*
8231 				 * Now we want to go up from rsm (the
8232 				 * one left un-acked) to the next one
8233 				 * in the tmap. We do this so when
8234 				 * we walk backwards we include marking
8235 				 * sack-passed on rsm (The one passed in
8236 				 * is skipped since it is generally called
8237 				 * on something sacked before removing it
8238 				 * from the tmap).
8239 				 */
8240 				if (rsm->r_in_tmap) {
8241 					nrsm = TAILQ_NEXT(rsm, r_tnext);
8242 					/*
8243 					 * Now that we have the next
8244 					 * one walk backwards from there.
8245 					 */
8246 					if (nrsm && nrsm->r_in_tmap)
8247 						rack_log_sack_passed(tp, rack, nrsm);
8248 				}
8249 				/* Now are we done? */
8250 				if (SEQ_LT(end, next->r_end) ||
8251 				    (end == next->r_end)) {
8252 					/* Done with block */
8253 					goto out;
8254 				}
8255 				rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
8256 				counter_u64_add(rack_sack_used_next_merge, 1);
8257 				/* Position for the next block */
8258 				start = next->r_end;
8259 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
8260 				if (rsm == NULL)
8261 					goto out;
8262 			} else {
8263 				/**
8264 				 * We can't use any hookery here, so we
8265 				 * need to split the map. We enter like
8266 				 * so:
8267 				 *  rsm      |--------|
8268 				 *  sackblk       |----->
8269 				 * We will add the new block nrsm and
8270 				 * that will be the new portion, and then
8271 				 * fall through after reseting rsm. So we
8272 				 * fall through after resetting rsm. So we
8273 				 *  rsm      |----|
8274 				 *  sackblk       |----->
8275 				 *  nrsm          |---|
8276 				 * We then fall through resetting
8277 				 * rsm to nrsm, so the next block
8278 				 * picks it up.
8279 				 */
8280 				nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8281 				if (nrsm == NULL) {
8282 					/*
8283 					 * failed XXXrrs what can we do but lose the sack
8284 					 * info?
8285 					 */
8286 					goto out;
8287 				}
8288 				counter_u64_add(rack_sack_splits, 1);
8289 				rack_clone_rsm(rack, nrsm, rsm, start);
8290 				rsm->r_just_ret = 0;
8291 				insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8292 #ifdef INVARIANTS
8293 				if (insret != NULL) {
8294 					panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8295 					      nrsm, insret, rack, rsm);
8296 				}
8297 #endif
8298 				if (rsm->r_in_tmap) {
8299 					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8300 					nrsm->r_in_tmap = 1;
8301 				}
8302 				rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
8303 				rsm->r_flags &= (~RACK_HAS_FIN);
8304 				/* Position us to point to the new nrsm that starts the sack blk */
8305 				rsm = nrsm;
8306 			}
8307 		} else {
8308 			/* Already sacked this piece */
8309 			counter_u64_add(rack_sack_skipped_acked, 1);
8310 			moved++;
8311 			if (end == rsm->r_end) {
8312 				/* Done with block */
8313 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8314 				goto out;
8315 			} else if (SEQ_LT(end, rsm->r_end)) {
8316 				/* A partial sack to an already sacked block */
8317 				moved++;
8318 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8319 				goto out;
8320 			} else {
8321 				/*
8322 				 * The end goes beyond this guy;
8323 				 * reposition the start to the
8324 				 * next block.
8325 				 */
8326 				start = rsm->r_end;
8327 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8328 				if (rsm == NULL)
8329 					goto out;
8330 			}
8331 		}
8332 	}
8333 	if (SEQ_GEQ(end, rsm->r_end)) {
8334 		/**
8335 		 * The end of this block is either beyond this guy or right
8336 		 * at this guy. I.e.:
8337 		 *  rsm ---                 |-----|
8338 		 *  end                     |-----|
8339 		 *  <or>
8340 		 *  end                     |---------|
8341 		 */
8342 		if ((rsm->r_flags & RACK_ACKED) == 0) {
8343 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8344 			changed += (rsm->r_end - rsm->r_start);
8345 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8346 			if (rsm->r_in_tmap) /* should be true */
8347 				rack_log_sack_passed(tp, rack, rsm);
8348 			/* Is reordering occurring? */
8349 			if (rsm->r_flags & RACK_SACK_PASSED) {
8350 				rsm->r_flags &= ~RACK_SACK_PASSED;
8351 				counter_u64_add(rack_reorder_seen, 1);
8352 				rack->r_ctl.rc_reorder_ts = cts;
8353 			}
8354 			if (rack->app_limited_needs_set)
8355 				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8356 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8357 			rsm->r_flags |= RACK_ACKED;
8358 			rsm->r_flags &= ~RACK_TLP;
8359 			if (rsm->r_in_tmap) {
8360 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8361 				rsm->r_in_tmap = 0;
8362 			}
8363 			rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
8364 		} else {
8365 			counter_u64_add(rack_sack_skipped_acked, 1);
8366 			moved++;
8367 		}
8368 		if (end == rsm->r_end) {
8369 			/* This block only - done, setup for next */
8370 			goto out;
8371 		}
8372 		/*
8373 		 * There is more not covered by this rsm; move on
8374 		 * to the next block in the RB tree.
8375 		 */
8376 		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8377 		start = rsm->r_end;
8378 		rsm = nrsm;
8379 		if (rsm == NULL)
8380 			goto out;
8381 		goto do_rest_ofb;
8382 	}
8383 	/**
8384 	 * The end of this sack block is smaller than
8385 	 * our rsm i.e.:
8386 	 *  rsm ---                 |-----|
8387 	 *  end                     |--|
8388 	 */
8389 	if ((rsm->r_flags & RACK_ACKED) == 0) {
8390 		prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8391 		if (prev && (prev->r_flags & RACK_ACKED)) {
8392 			/**
8393 			 * Goal, we want the right remainder of rsm to shrink
8394 			 * in place and span from (rsm->r_start = end) to rsm->r_end.
8395 			 * We want to expand prev to go all the way
8396 			 * to prev->r_end <- end.
8397 			 * so in the tree we have before:
8398 			 *   prev     |--------|         (acked)
8399 			 *   rsm               |-------| (non-acked)
8400 			 *   sackblk           |-|
8401 			 * We churn it so we end up with
8402 			 *   prev     |----------|       (acked)
8403 			 *   rsm                 |-----| (non-acked)
8404 			 *   nrsm              |-| (temporary)
8405 			 */
8406 			nrsm = &stack_map;
8407 			memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8408 			prev->r_end = end;
8409 			rsm->r_start = end;
8410 			/* Now adjust nrsm (stack copy) to be
8411 			 * the one that is the small
8412 			 * piece that was "sacked".
8413 			 */
8414 			nrsm->r_end = end;
8415 			rsm->r_dupack = 0;
8416 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8417 			/*
8418 			 * Now that the rsm has had its start moved forward
8419 			 * lets go ahead and get its new place in the world.
8420 			 */
8421 			rack_setup_offset_for_rsm(prev, rsm);
8422 			/*
8423 			 * Now nrsm is our new little piece
8424 			 * that is acked (which was merged
8425 			 * to prev). Update the rtt and changed
8426 			 * based on that. Also check for reordering.
8427 			 */
8428 			rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8429 			if (rack->app_limited_needs_set)
8430 				rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8431 			changed += (nrsm->r_end - nrsm->r_start);
8432 			rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8433 			if (nrsm->r_flags & RACK_SACK_PASSED) {
8434 				counter_u64_add(rack_reorder_seen, 1);
8435 				rack->r_ctl.rc_reorder_ts = cts;
8436 			}
8437 			rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
8438 			rsm = prev;
8439 			counter_u64_add(rack_sack_used_prev_merge, 1);
8440 		} else {
8441 			/**
8442 			 * This is the case where our previous
8443 			 * block is not acked either, so we must
8444 			 * split the block in two.
8445 			 */
8446 			nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8447 			if (nrsm == NULL) {
8448 				/* failed rrs what can we do but lose the sack info? */
8449 				goto out;
8450 			}
8451 			/**
8452 			 * In this case nrsm becomes
8453 			 * nrsm->r_start = end;
8454 			 * nrsm->r_end = rsm->r_end;
8455 			 * which is un-acked.
8456 			 * <and>
8457 			 * rsm->r_end = nrsm->r_start;
8458 			 * i.e. the remaining un-acked
8459 			 * piece is left on the left
8460 			 * hand side.
8461 			 *
8462 			 * So we start like this
8463 			 * rsm      |----------| (not acked)
8464 			 * sackblk  |---|
8465 			 * build it so we have
8466 			 * rsm      |---|         (acked)
8467 			 * nrsm         |------|  (not acked)
8468 			 */
8469 			counter_u64_add(rack_sack_splits, 1);
8470 			rack_clone_rsm(rack, nrsm, rsm, end);
8471 			rsm->r_flags &= (~RACK_HAS_FIN);
8472 			rsm->r_just_ret = 0;
8473 			insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8474 #ifdef INVARIANTS
8475 			if (insret != NULL) {
8476 				panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8477 				      nrsm, insret, rack, rsm);
8478 			}
8479 #endif
8480 			if (rsm->r_in_tmap) {
8481 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8482 				nrsm->r_in_tmap = 1;
8483 			}
8484 			nrsm->r_dupack = 0;
8485 			rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8486 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8487 			changed += (rsm->r_end - rsm->r_start);
8488 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8489 			if (rsm->r_in_tmap) /* should be true */
8490 				rack_log_sack_passed(tp, rack, rsm);
8491 			/* Is reordering occurring? */
8492 			if (rsm->r_flags & RACK_SACK_PASSED) {
8493 				rsm->r_flags &= ~RACK_SACK_PASSED;
8494 				counter_u64_add(rack_reorder_seen, 1);
8495 				rack->r_ctl.rc_reorder_ts = cts;
8496 			}
8497 			if (rack->app_limited_needs_set)
8498 				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8499 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8500 			rsm->r_flags |= RACK_ACKED;
8501 			rsm->r_flags &= ~RACK_TLP;
8502 			rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
8503 			if (rsm->r_in_tmap) {
8504 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8505 				rsm->r_in_tmap = 0;
8506 			}
8507 		}
8508 	} else if (start != end){
8509 		/*
8510 		 * The block was already acked.
8511 		 */
8512 		counter_u64_add(rack_sack_skipped_acked, 1);
8513 		moved++;
8514 	}
8515 out:
8516 	if (rsm && (rsm->r_flags & RACK_ACKED)) {
8517 		/*
8518 		 * Now can we merge where we worked
8519 		 * with either the previous or
8520 		 * next block?
8521 		 */
8522 		next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8523 		while (next) {
8524 		    if (next->r_flags & RACK_ACKED) {
8525 			/* yep this and next can be merged */
8526 			rsm = rack_merge_rsm(rack, rsm, next);
8527 			next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8528 		    } else
8529 			    break;
8530 		}
8531 		/* Now what about the previous? */
8532 		prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8533 		while (prev) {
8534 		    if (prev->r_flags & RACK_ACKED) {
8535 			/* yep the previous and this can be merged */
8536 			rsm = rack_merge_rsm(rack, prev, rsm);
8537 			prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8538 		    } else
8539 			    break;
8540 		}
8541 	}
8542 	if (used_ref == 0) {
8543 		counter_u64_add(rack_sack_proc_all, 1);
8544 	} else {
8545 		counter_u64_add(rack_sack_proc_short, 1);
8546 	}
8547 	/* Save off the next one for quick reference. */
8548 	if (rsm)
8549 		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8550 	else
8551 		nrsm = NULL;
8552 	*prsm = rack->r_ctl.rc_sacklast = nrsm;
8553 	/* Pass back the moved. */
8554 	*moved_two = moved;
8555 	return (changed);
8556 }
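
/*
 * Recap of the map changes logged above: MAP_SACK_M1 merges the sacked
 * piece into an already-acked next block, M2 splits the front off a
 * larger block, M3 consumes a whole block, M4 merges into an
 * already-acked previous block, and M5 splits when the sack block ends
 * inside the rsm; afterwards, adjacent acked blocks are coalesced via
 * rack_merge_rsm().
 */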
8557 
8558 static void inline
8559 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
8560 {
8561 	struct rack_sendmap *tmap;
8562 
8563 	tmap = NULL;
8564 	while (rsm && (rsm->r_flags & RACK_ACKED)) {
8565 		/* It's no longer sacked; mark it so */
8566 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8567 #ifdef INVARIANTS
8568 		if (rsm->r_in_tmap) {
8569 			panic("rack:%p rsm:%p flags:0x%x in tmap?",
8570 			      rack, rsm, rsm->r_flags);
8571 		}
8572 #endif
8573 		rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
8574 		/* Rebuild it into our tmap */
8575 		if (tmap == NULL) {
8576 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8577 			tmap = rsm;
8578 		} else {
8579 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
8580 			tmap = rsm;
8581 		}
8582 		tmap->r_in_tmap = 1;
8583 		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8584 	}
8585 	/*
8586 	 * Now let's possibly clear the sack filter so we start
8587 	 * recognizing sacks that cover this area.
8588 	 */
8589 	sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
8590 
8591 }
8592 
8593 static void
8594 rack_do_decay(struct tcp_rack *rack)
8595 {
8596 	struct timeval res;
8597 
8598 #define	timersub(tvp, uvp, vvp)						\
8599 	do {								\
8600 		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
8601 		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
8602 		if ((vvp)->tv_usec < 0) {				\
8603 			(vvp)->tv_sec--;				\
8604 			(vvp)->tv_usec += 1000000;			\
8605 		}							\
8606 	} while (0)
8607 
8608 	timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
8609 #undef timersub
8610 
8611 	rack->r_ctl.input_pkt++;
8612 	if ((rack->rc_in_persist) ||
8613 	    (res.tv_sec >= 1) ||
8614 	    (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
8615 		/*
8616 		 * Check whether it is time to decay;
8617 		 * we want all SAD detection metrics to
8618 		 * decay by 1/4 once a second (or more) has passed.
8619 		 */
8620 		uint32_t pkt_delta;
8621 
8622 		pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
8623 		/* Update our saved tracking values */
8624 		rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
8625 		rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
8626 		/* Now do we escape without decay? */
8627 #ifdef NETFLIX_EXP_DETECTION
8628 		if (rack->rc_in_persist ||
8629 		    (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
8630 		    (pkt_delta < tcp_sad_low_pps)){
8631 			/*
8632 			 * We don't decay idle connections
8633 			 * or ones that have a low input pps.
8634 			 */
8635 			return;
8636 		}
8637 		/* Decay the counters */
8638 		rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
8639 							tcp_sad_decay_val);
8640 		rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
8641 							 tcp_sad_decay_val);
8642 		rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
8643 							       tcp_sad_decay_val);
8644 		rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
8645 								tcp_sad_decay_val);
8646 #endif
8647 	}
8648 }
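
/*
 * Assuming ctf_decay_count() applies the 1/4 decay noted above, a
 * sack_count of 1000 would fall toward 750 on each qualifying pass;
 * idle and low-pps connections are deliberately exempted so that a
 * quiet period does not wash out the detection state.
 */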
8649 
8650 static void
8651 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to)
8652 {
8653 	struct rack_sendmap *rsm, *rm;
8654 
8655 	/*
8656 	 * The ACK point is advancing to th_ack; we must drop off
8657 	 * the packets in the rack log and calculate any eligible
8658 	 * RTT's.
8659 	 */
8660 	rack->r_wanted_output = 1;
8661 more:
8662 	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8663 	if (rsm == NULL) {
8664 		if ((th_ack - 1) == tp->iss) {
8665 			/*
8666 			 * For the SYN incoming case we will not
8667 			 * have called tcp_output for the sending of
8668 			 * the SYN, so there will be no map. All
8669 			 * other cases should probably be a panic.
8670 			 */
8671 			return;
8672 		}
8673 		if (tp->t_flags & TF_SENTFIN) {
8674 			/* if we sent a FIN we often will not have map */
8675 			return;
8676 		}
8677 #ifdef INVARIANTS
8678 		panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
8679 		      tp,
8680 		      tp->t_state, th_ack, rack,
8681 		      tp->snd_una, tp->snd_max, tp->snd_nxt);
8682 #endif
8683 		return;
8684 	}
8685 	if (SEQ_LT(th_ack, rsm->r_start)) {
8686 		/* Huh map is missing this */
8687 #ifdef INVARIANTS
8688 		printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
8689 		       rsm->r_start,
8690 		       th_ack, tp->t_state, rack->r_state);
8691 #endif
8692 		return;
8693 	}
8694 	rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
8695 	/* Now do we consume the whole thing? */
8696 	if (SEQ_GEQ(th_ack, rsm->r_end)) {
8697 		/* Its all consumed. */
8698 		uint32_t left;
8699 		uint8_t newly_acked;
8700 
8701 		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
8702 		rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
8703 		rsm->r_rtr_bytes = 0;
8704 		/* Record the time of highest cumack sent */
8705 		rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8706 		rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8707 #ifdef INVARIANTS
8708 		if (rm != rsm) {
8709 			panic("removing head in rack:%p rsm:%p rm:%p",
8710 			      rack, rsm, rm);
8711 		}
8712 #endif
8713 		if (rsm->r_in_tmap) {
8714 			TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8715 			rsm->r_in_tmap = 0;
8716 		}
8717 		newly_acked = 1;
8718 		if (rsm->r_flags & RACK_ACKED) {
8719 			/*
8720 			 * It was acked on the scoreboard -- remove
8721 			 * it from total
8722 			 */
8723 			rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8724 			newly_acked = 0;
8725 		} else if (rsm->r_flags & RACK_SACK_PASSED) {
8726 			/*
8727 			 * There are segments ACKED on the
8728 			 * scoreboard further up. We are seeing
8729 			 * reordering.
8730 			 */
8731 			rsm->r_flags &= ~RACK_SACK_PASSED;
8732 			counter_u64_add(rack_reorder_seen, 1);
8733 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8734 			rsm->r_flags |= RACK_ACKED;
8735 			rack->r_ctl.rc_reorder_ts = cts;
8736 			if (rack->r_ent_rec_ns) {
8737 				/*
8738 				 * We have sent no more, and we saw a sack
8739 				 * and then an ack arrive.
8740 				 */
8741 				rack->r_might_revert = 1;
8742 			}
8743 		}
8744 		if ((rsm->r_flags & RACK_TO_REXT) &&
8745 		    (tp->t_flags & TF_RCVD_TSTMP) &&
8746 		    (to->to_flags & TOF_TS) &&
8747 		    (tp->t_flags & TF_PREVVALID)) {
8748 			/*
8749 			 * We can use the timestamp to see
8750 			 * if this retransmission was from the
8751 			 * first transmit. If so we made a mistake.
8752 			 */
8753 			tp->t_flags &= ~TF_PREVVALID;
8754 			if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
8755 				/* The first transmit is what this ack is for */
8756 				rack_cong_signal(tp, CC_RTO_ERR, th_ack);
8757 			}
8758 		}
8759 		left = th_ack - rsm->r_end;
8760 		if (rack->app_limited_needs_set && newly_acked)
8761 			rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
8762 		/* Free back to zone */
8763 		rack_free(rack, rsm);
8764 		if (left) {
8765 			goto more;
8766 		}
8767 		/* Check for reneging */
8768 		rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8769 		if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
8770 			/*
8771 			 * The peer has moved snd_una up to
8772 			 * the edge of this send, i.e. one
8773 			 * that it had previously acked. The only
8774 			 * way that can be true is if the peer threw
8775 			 * away data (space issues) that it had
8776 			 * previously sacked (else it would have
8777 			 * given us snd_una up to rsm->r_end).
8778 			 * We need to undo the acked markings here.
8779 			 *
8780 			 * Note we have to look to make sure th_ack is
8781 			 * our rsm->r_start in case we get an old ack
8782 			 * where th_ack is behind snd_una.
8783 			 */
8784 			rack_peer_reneges(rack, rsm, th_ack);
8785 		}
8786 		return;
8787 	}
8788 	if (rsm->r_flags & RACK_ACKED) {
8789 		/*
8790 		 * It was acked on the scoreboard -- remove it from
8791 		 * total for the part being cum-acked.
8792 		 */
8793 		rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
8794 	}
8795 	/*
8796 	 * Clear the dup ack count for
8797 	 * the piece that remains.
8798 	 */
8799 	rsm->r_dupack = 0;
8800 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8801 	if (rsm->r_rtr_bytes) {
8802 		/*
8803 		 * It was retransmitted adjust the
8804 		 * sack holes for what was acked.
8805 		 */
8806 		int ack_am;
8807 
8808 		ack_am = (th_ack - rsm->r_start);
8809 		if (ack_am >= rsm->r_rtr_bytes) {
8810 			rack->r_ctl.rc_holes_rxt -= ack_am;
8811 			rsm->r_rtr_bytes -= ack_am;
8812 		}
8813 	}
8814 	/*
8815 	 * Update where the piece starts and record
8816 	 * the time of send of highest cumack sent.
8817 	 */
8818 	rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8819 	rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
8820 	/* Now we need to move our offset forward too */
8821 	if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) {
8822 		/* Fix up the orig_m_len and possibly the mbuf offset */
8823 		rack_adjust_orig_mlen(rsm);
8824 	}
8825 	rsm->soff += (th_ack - rsm->r_start);
8826 	rsm->r_start = th_ack;
8827 	/* Now do we need to move the mbuf fwd too? */
8828 	if (rsm->m) {
8829 		while (rsm->soff >= rsm->m->m_len) {
8830 			rsm->soff -= rsm->m->m_len;
8831 			rsm->m = rsm->m->m_next;
8832 			KASSERT((rsm->m != NULL),
8833 				(" nrsm:%p hit at soff:%u null m",
8834 				 rsm, rsm->soff));
8835 		}
8836 		rsm->orig_m_len = rsm->m->m_len;
8837 	}
8838 	if (rack->app_limited_needs_set)
8839 		rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
8840 }
8841 
8842 static void
8843 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
8844 {
8845 	struct rack_sendmap *rsm;
8846 	int sack_pass_fnd = 0;
8847 
8848 	if (rack->r_might_revert) {
8849 		/*
8850 		 * Ok, we have reordering and have not sent anything; we
8851 		 * might want to revert the congestion state if nothing
8852 		 * further has SACK_PASSED on it. Let's check.
8853 		 *
8854 		 * We also get here when we have DSACKs come in for
8855 		 * all the data that we FR'd. Note that a rxt or tlp
8856 		 * timer clears this from happening.
8857 		 */
8858 
8859 		TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
8860 			if (rsm->r_flags & RACK_SACK_PASSED) {
8861 				sack_pass_fnd = 1;
8862 				break;
8863 			}
8864 		}
8865 		if (sack_pass_fnd == 0) {
8866 			/*
8867 			 * We went into recovery
8868 			 * incorrectly due to reordering!
8869 			 */
8870 			int orig_cwnd;
8871 
8872 			rack->r_ent_rec_ns = 0;
8873 			orig_cwnd = tp->snd_cwnd;
8874 			tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec;
8875 			tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
8876 			tp->snd_recover = tp->snd_una;
8877 			rack_log_to_prr(rack, 14, orig_cwnd);
8878 			EXIT_RECOVERY(tp->t_flags);
8879 		}
8880 		rack->r_might_revert = 0;
8881 	}
8882 }
8883 
8884 #ifdef NETFLIX_EXP_DETECTION
8885 static void
8886 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack,  uint32_t bytes_this_ack, uint32_t segsiz)
8887 {
8888 	if ((rack->do_detection || tcp_force_detection) &&
8889 	    tcp_sack_to_ack_thresh &&
8890 	    tcp_sack_to_move_thresh &&
8891 	    ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
8892 		/*
8893 		 * We have thresholds set to find
8894 		 * possible attackers and disable sack.
8895 		 * Check them.
8896 		 */
8897 		uint64_t ackratio, moveratio, movetotal;
8898 
8899 		/* Log detecting */
8900 		rack_log_sad(rack, 1);
8901 		ackratio = (uint64_t)(rack->r_ctl.sack_count);
8902 		ackratio *= (uint64_t)(1000);
8903 		if (rack->r_ctl.ack_count)
8904 			ackratio /= (uint64_t)(rack->r_ctl.ack_count);
8905 		else {
8906 			/* We really should not hit here */
8907 			ackratio = 1000;
8908 		}
8909 		if ((rack->sack_attack_disable == 0) &&
8910 		    (ackratio > rack_highest_sack_thresh_seen))
8911 			rack_highest_sack_thresh_seen = (uint32_t)ackratio;
8912 		movetotal = rack->r_ctl.sack_moved_extra;
8913 		movetotal += rack->r_ctl.sack_noextra_move;
8914 		moveratio = rack->r_ctl.sack_moved_extra;
8915 		moveratio *= (uint64_t)1000;
8916 		if (movetotal)
8917 			moveratio /= movetotal;
8918 		else {
8919 			/* No moves, that's pretty good */
8920 			moveratio = 0;
8921 		}
8922 		if ((rack->sack_attack_disable == 0) &&
8923 		    (moveratio > rack_highest_move_thresh_seen))
8924 			rack_highest_move_thresh_seen = (uint32_t)moveratio;
8925 		if (rack->sack_attack_disable == 0) {
8926 			if ((ackratio > tcp_sack_to_ack_thresh) &&
8927 			    (moveratio > tcp_sack_to_move_thresh)) {
8928 				/* Disable sack processing */
8929 				rack->sack_attack_disable = 1;
8930 				if (rack->r_rep_attack == 0) {
8931 					rack->r_rep_attack = 1;
8932 					counter_u64_add(rack_sack_attacks_detected, 1);
8933 				}
8934 				if (tcp_attack_on_turns_on_logging) {
8935 					/*
8936 					 * Turn on logging, used for debugging
8937 					 * false positives.
8938 					 */
8939 					rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
8940 				}
8941 				/* Clamp the cwnd at flight size */
8942 				rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
8943 				rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
8944 				rack_log_sad(rack, 2);
8945 			}
8946 		} else {
8947 			/* We are sack-disabled; check for false positives */
8948 			if ((ackratio <= tcp_restoral_thresh) ||
8949 			    (rack->r_ctl.rc_num_maps_alloced  < tcp_map_minimum)) {
8950 				rack->sack_attack_disable = 0;
8951 				rack_log_sad(rack, 3);
8952 				/* Restart counting */
8953 				rack->r_ctl.sack_count = 0;
8954 				rack->r_ctl.sack_moved_extra = 0;
8955 				rack->r_ctl.sack_noextra_move = 1;
8956 				rack->r_ctl.ack_count = max(1,
8957 				      (bytes_this_ack / segsiz));
8958 
8959 				if (rack->r_rep_reverse == 0) {
8960 					rack->r_rep_reverse = 1;
8961 					counter_u64_add(rack_sack_attacks_reversed, 1);
8962 				}
8963 				/* Restore the cwnd */
8964 				if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
8965 					rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
8966 			}
8967 		}
8968 	}
8969 }
8970 #endif
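
/*
 * Ratio sketch for the detection math above (illustrative numbers):
 * both ratios are scaled by 1000, so 2500 SACKs against 1000 acks
 * yields an ackratio of 2500; only when both the ack and move ratios
 * clear their sysctl thresholds is SACK processing disabled and cwnd
 * clamped to the current flight size.
 */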
8971 
8972 static void
8973 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
8974 {
8975 
8976 	uint32_t am;
8977 
8978 	if (SEQ_GT(end, start))
8979 		am = end - start;
8980 	else
8981 		am = 0;
8982 	/*
8983 	 * We keep track of how many DSACK blocks we get
8984 	 * after a recovery incident.
8985 	 */
8986 	rack->r_ctl.dsack_byte_cnt += am;
8987 	if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
8988 	    rack->r_ctl.retran_during_recovery &&
8989 	    (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
8990 		/*
8991 		 * False recovery; the most likely culprit is reordering. If
8992 		 * nothing else is missing we need to revert.
8993 		 */
8994 		rack->r_might_revert = 1;
8995 		rack_handle_might_revert(rack->rc_tp, rack);
8996 		rack->r_might_revert = 0;
8997 		rack->r_ctl.retran_during_recovery = 0;
8998 		rack->r_ctl.dsack_byte_cnt = 0;
8999 	}
9000 }
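
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The revert test above in isolation: once the DSACKed byte count
 * covers everything we retransmitted during recovery, and we are no
 * longer in fast recovery, the recovery episode was most likely a
 * false positive driven by reordering.  Hypothetical _sketch name:
 */
static __unused int
rack_dsack_says_revert_sketch(uint32_t dsack_bytes, uint32_t retran_bytes,
    int in_fastrecovery)
{
	return ((in_fastrecovery == 0) &&
	    (retran_bytes != 0) &&
	    (dsack_bytes >= retran_bytes));
}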
9001 
9002 static void
9003 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
9004 {
9005 	/* Deal with changed and PRR here (in recovery only) */
9006 	uint32_t pipe, snd_una;
9007 
9008 	rack->r_ctl.rc_prr_delivered += changed;
9009 
9010 	if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
9011 		/*
9012 		 * It is all outstanding, we are application limited
9013 		 * and thus we don't need more room to send anything.
9014 		 * Note we use tp->snd_una here and not th_ack because
9015 		 * the data has not yet been cut from the sb.
9016 		 */
9017 		rack->r_ctl.rc_prr_sndcnt = 0;
9018 		return;
9019 	}
9020 	/* Compute prr_sndcnt */
9021 	if (SEQ_GT(tp->snd_una, th_ack)) {
9022 		snd_una = tp->snd_una;
9023 	} else {
9024 		snd_una = th_ack;
9025 	}
9026 	pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
9027 	if (pipe > tp->snd_ssthresh) {
9028 		long sndcnt;
9029 
9030 		sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
9031 		if (rack->r_ctl.rc_prr_recovery_fs > 0)
9032 			sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
9033 		else {
9034 			rack->r_ctl.rc_prr_sndcnt = 0;
9035 			rack_log_to_prr(rack, 9, 0);
9036 			sndcnt = 0;
9037 		}
9038 		sndcnt++;
9039 		if (sndcnt > (long)rack->r_ctl.rc_prr_out)
9040 			sndcnt -= rack->r_ctl.rc_prr_out;
9041 		else
9042 			sndcnt = 0;
9043 		rack->r_ctl.rc_prr_sndcnt = sndcnt;
9044 		rack_log_to_prr(rack, 10, 0);
9045 	} else {
9046 		uint32_t limit;
9047 
9048 		if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
9049 			limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
9050 		else
9051 			limit = 0;
9052 		if (changed > limit)
9053 			limit = changed;
9054 		limit += ctf_fixed_maxseg(tp);
9055 		if (tp->snd_ssthresh > pipe) {
9056 			rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
9057 			rack_log_to_prr(rack, 11, 0);
9058 		} else {
9059 			rack->r_ctl.rc_prr_sndcnt = min(0, limit);
9060 			rack_log_to_prr(rack, 12, 0);
9061 		}
9062 	}
9063 }
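
/*
 * Editor's illustrative sketch -- not part of the original source.
 * rack_update_prr() above follows Proportional Rate Reduction
 * (RFC 6937): while pipe > ssthresh the proportional formula
 * ceil(prr_delivered * ssthresh / RecoverFS) - prr_out applies;
 * otherwise the slow-start reduction bound does.  A condensed
 * standalone restatement (hypothetical _sketch name):
 */
static __unused uint32_t
rack_prr_sndcnt_sketch(uint32_t pipe, uint32_t ssthresh,
    uint32_t prr_delivered, uint32_t prr_out, uint32_t recover_fs,
    uint32_t delivered_this_ack, uint32_t maxseg)
{
	if (pipe > ssthresh) {
		uint64_t sndcnt;

		if (recover_fs == 0)
			return (0);
		/* The "+ 1" stands in for the ceiling of the division. */
		sndcnt = ((uint64_t)prr_delivered * ssthresh) / recover_fs + 1;
		return ((sndcnt > prr_out) ? (uint32_t)(sndcnt - prr_out) : 0);
	} else {
		uint32_t limit;

		/* PRR-SSRB: what was delivered but not yet sent out ... */
		limit = (prr_delivered > prr_out) ?
		    (prr_delivered - prr_out) : 0;
		/* ... or at least this ack's delivery, plus one segment. */
		if (delivered_this_ack > limit)
			limit = delivered_this_ack;
		limit += maxseg;
		return ((ssthresh > pipe) ? min(ssthresh - pipe, limit) : 0);
	}
}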
9064 
9065 static void
9066 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck)
9067 {
9068 	uint32_t changed;
9069 	struct tcp_rack *rack;
9070 	struct rack_sendmap *rsm;
9071 	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
9072 	register uint32_t th_ack;
9073 	int32_t i, j, k, num_sack_blks = 0;
9074 	uint32_t cts, acked, ack_point, sack_changed = 0;
9075 	int loop_start = 0, moved_two = 0;
9076 	uint32_t tsused;
9077 
9078 
9079 	INP_WLOCK_ASSERT(tp->t_inpcb);
9080 	if (th->th_flags & TH_RST) {
9081 		/* We don't log resets */
9082 		return;
9083 	}
9084 	rack = (struct tcp_rack *)tp->t_fb_ptr;
9085 	cts = tcp_get_usecs(NULL);
9086 	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9087 	changed = 0;
9088 	th_ack = th->th_ack;
9089 	if (rack->sack_attack_disable == 0)
9090 		rack_do_decay(rack);
9091 	if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
9092 		/*
9093 		 * You only get credit for
9094 		 * MSS and greater (and you get extra
9095 		 * credit for larger cum-ack moves).
9096 		 */
9097 		int ac;
9098 
9099 		ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
9100 		rack->r_ctl.ack_count += ac;
9101 		counter_u64_add(rack_ack_total, ac);
9102 	}
9103 	if (rack->r_ctl.ack_count > 0xfff00000) {
9104 		/*
9105 		 * reduce the number to keep us under
9106 		 * a uint32_t.
9107 		 */
9108 		rack->r_ctl.ack_count /= 2;
9109 		rack->r_ctl.sack_count /= 2;
9110 	}
9111 	if (SEQ_GT(th_ack, tp->snd_una)) {
9112 		rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
9113 		tp->t_acktime = ticks;
9114 	}
9115 	if (rsm && SEQ_GT(th_ack, rsm->r_start))
9116 		changed = th_ack - rsm->r_start;
9117 	if (changed) {
9118 		rack_process_to_cumack(tp, rack, th_ack, cts, to);
9119 	}
9120 	if ((to->to_flags & TOF_SACK) == 0) {
9121 		/* We are done, nothing left and no SACK. */
9122 		rack_handle_might_revert(tp, rack);
9123 		/*
9124 		 * For cases where we struck a dup-ack
9125 		 * with no SACK, add to the changes so
9126 		 * PRR will work right.
9127 		 */
9128 		if (dup_ack_struck && (changed == 0)) {
9129 			changed += ctf_fixed_maxseg(rack->rc_tp);
9130 		}
9131 		goto out;
9132 	}
9133 	/* Sack block processing */
9134 	if (SEQ_GT(th_ack, tp->snd_una))
9135 		ack_point = th_ack;
9136 	else
9137 		ack_point = tp->snd_una;
9138 	for (i = 0; i < to->to_nsacks; i++) {
9139 		bcopy((to->to_sacks + i * TCPOLEN_SACK),
9140 		      &sack, sizeof(sack));
9141 		sack.start = ntohl(sack.start);
9142 		sack.end = ntohl(sack.end);
9143 		if (SEQ_GT(sack.end, sack.start) &&
9144 		    SEQ_GT(sack.start, ack_point) &&
9145 		    SEQ_LT(sack.start, tp->snd_max) &&
9146 		    SEQ_GT(sack.end, ack_point) &&
9147 		    SEQ_LEQ(sack.end, tp->snd_max)) {
9148 			sack_blocks[num_sack_blks] = sack;
9149 			num_sack_blks++;
9150 #ifdef NETFLIX_STATS
9151 		} else if (SEQ_LEQ(sack.start, th_ack) &&
9152 			   SEQ_LEQ(sack.end, th_ack)) {
9153 			/*
9154 			 * It's a D-SACK block.
9155 			 */
9156 			tcp_record_dsack(sack.start, sack.end);
9157 #endif
9158 			rack_note_dsack(rack, sack.start, sack.end);
9159 		}
9160 	}
9161 	/*
9162 	 * Sort the SACK blocks so we can update the rack scoreboard with
9163 	 * just one pass.
9164 	 */
9165 	num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
9166 					 num_sack_blks, th->th_ack);
9167 	ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
9168 	if (num_sack_blks == 0) {
9169 		/* Nothing to sack (DSACKs?) */
9170 		goto out_with_totals;
9171 	}
9172 	if (num_sack_blks < 2) {
9173 		/* Only one, we don't need to sort */
9174 		goto do_sack_work;
9175 	}
9176 	/* Sort the sacks */
9177 	for (i = 0; i < num_sack_blks; i++) {
9178 		for (j = i + 1; j < num_sack_blks; j++) {
9179 			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
9180 				sack = sack_blocks[i];
9181 				sack_blocks[i] = sack_blocks[j];
9182 				sack_blocks[j] = sack;
9183 			}
9184 		}
9185 	}
9186 	/*
9187 	 * Now are any of the sack block ends the same (yes some
9188 	 * implementations send these)?
9189 	 */
9190 again:
9191 	if (num_sack_blks == 0)
9192 		goto out_with_totals;
9193 	if (num_sack_blks > 1) {
9194 		for (i = 0; i < num_sack_blks; i++) {
9195 			for (j = i + 1; j < num_sack_blks; j++) {
9196 				if (sack_blocks[i].end == sack_blocks[j].end) {
9197 					/*
9198 					 * Ok these two have the same end we
9199 					 * want the smallest end and then
9200 					 * throw away the larger and start
9201 					 * again.
9202 					 */
9203 					if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
9204 						/*
9205 						 * The second block covers
9206 						 * more area use that
9207 						 */
9208 						sack_blocks[i].start = sack_blocks[j].start;
9209 					}
9210 					/*
9211 					 * Now collapse out the dup-sack and
9212 					 * lower the count
9213 					 */
9214 					for (k = (j + 1); k < num_sack_blks; k++) {
9215 						sack_blocks[j].start = sack_blocks[k].start;
9216 						sack_blocks[j].end = sack_blocks[k].end;
9217 						j++;
9218 					}
9219 					num_sack_blks--;
9220 					goto again;
9221 				}
9222 			}
9223 		}
9224 	}
9225 do_sack_work:
9226 	/*
9227 	 * First let's look to see if
9228 	 * we have retransmitted and
9229 	 * can use the next transmit.
9230 	 */
9231 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9232 	if (rsm &&
9233 	    SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
9234 	    SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
9235 		/*
9236 		 * We probably did the FR and the next
9237 		 * incoming SACK continues as we would expect.
9238 		 */
9239 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
9240 		if (acked) {
9241 			rack->r_wanted_output = 1;
9242 			changed += acked;
9243 			sack_changed += acked;
9244 		}
9245 		if (num_sack_blks == 1) {
9246 			/*
9247 			 * This is what we would expect from
9248 			 * a normal implementation to happen
9249 			 * after we have retransmitted the FR,
9250 			 * i.e. the sack-filter pushes down
9251 			 * to 1 block and the next to be retransmitted
9252 			 * is the sequence in the sack block (as more
9253 			 * are acked). Count this as ACK'd data to boost
9254 			 * up the chances of recovering any false positives.
9255 			 */
9256 			rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
9257 			counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
9258 			counter_u64_add(rack_express_sack, 1);
9259 			if (rack->r_ctl.ack_count > 0xfff00000) {
9260 				/*
9261 				 * reduce the number to keep us under
9262 				 * a uint32_t.
9263 				 */
9264 				rack->r_ctl.ack_count /= 2;
9265 				rack->r_ctl.sack_count /= 2;
9266 			}
9267 			goto out_with_totals;
9268 		} else {
9269 			/*
9270 			 * Start the loop through the
9271 			 * rest of blocks, past the first block.
9272 			 */
9273 			moved_two = 0;
9274 			loop_start = 1;
9275 		}
9276 	}
9277 	/* It's a SACK of some sort */
9278 	rack->r_ctl.sack_count++;
9279 	if (rack->r_ctl.sack_count > 0xfff00000) {
9280 		/*
9281 		 * reduce the number to keep us under
9282 		 * a uint32_t.
9283 		 */
9284 		rack->r_ctl.ack_count /= 2;
9285 		rack->r_ctl.sack_count /= 2;
9286 	}
9287 	counter_u64_add(rack_sack_total, 1);
9288 	if (rack->sack_attack_disable) {
9289 		/* An attacker disablement is in place */
9290 		if (num_sack_blks > 1) {
9291 			rack->r_ctl.sack_count += (num_sack_blks - 1);
9292 			rack->r_ctl.sack_moved_extra++;
9293 			counter_u64_add(rack_move_some, 1);
9294 			if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
9295 				rack->r_ctl.sack_moved_extra /= 2;
9296 				rack->r_ctl.sack_noextra_move /= 2;
9297 			}
9298 		}
9299 		goto out;
9300 	}
9301 	rsm = rack->r_ctl.rc_sacklast;
9302 	for (i = loop_start; i < num_sack_blks; i++) {
9303 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
9304 		if (acked) {
9305 			rack->r_wanted_output = 1;
9306 			changed += acked;
9307 			sack_changed += acked;
9308 		}
9309 		if (moved_two) {
9310 			/*
9311 			 * If we did not get a SACK for at least a MSS and
9312 			 * had to move at all, or if we moved more than our
9313 			 * threshold, it counts against the "extra" move.
9314 			 */
9315 			rack->r_ctl.sack_moved_extra += moved_two;
9316 			counter_u64_add(rack_move_some, 1);
9317 		} else {
9318 			/*
9319 			 * else we did not have to move
9320 			 * any more than we would expect.
9321 			 */
9322 			rack->r_ctl.sack_noextra_move++;
9323 			counter_u64_add(rack_move_none, 1);
9324 		}
9325 		if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
9326 			/*
9327 			 * If the SACK was not a full MSS then
9328 			 * we add to sack_count the number of
9329 			 * MSS's (or possibly more than
9330 			 * a MSS if its a TSO send) we had to skip by.
9331 			 */
9332 			rack->r_ctl.sack_count += moved_two;
9333 			counter_u64_add(rack_sack_total, moved_two);
9334 		}
9335 		/*
9336 		 * Now we need to setup for the next
9337 		 * round. First we make sure we won't
9338 		 * exceed the size of our uint32_t on
9339 		 * the various counts, and then clear out
9340 		 * moved_two.
9341 		 */
9342 		if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
9343 		    (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
9344 			rack->r_ctl.sack_moved_extra /= 2;
9345 			rack->r_ctl.sack_noextra_move /= 2;
9346 		}
9347 		if (rack->r_ctl.sack_count > 0xfff00000) {
9348 			rack->r_ctl.ack_count /= 2;
9349 			rack->r_ctl.sack_count /= 2;
9350 		}
9351 		moved_two = 0;
9352 	}
9353 out_with_totals:
9354 	if (num_sack_blks > 1) {
9355 		/*
9356 		 * You get an extra stroke if
9357 		 * you have more than one sack-blk; this
9358 		 * could be where we are skipping forward
9359 		 * and the sack-filter is still working, or
9360 		 * it could be an attacker constantly
9361 		 * moving us.
9362 		 */
9363 		rack->r_ctl.sack_moved_extra++;
9364 		counter_u64_add(rack_move_some, 1);
9365 	}
9366 out:
9367 #ifdef NETFLIX_EXP_DETECTION
9368 	rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
9369 #endif
9370 	if (changed) {
9371 		/* Something changed cancel the rack timer */
9372 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9373 	}
9374 	tsused = tcp_get_usecs(NULL);
9375 	rsm = tcp_rack_output(tp, rack, tsused);
9376 	if ((!IN_FASTRECOVERY(tp->t_flags)) &&
9377 	    rsm) {
9378 		/* Enter recovery */
9379 		rack->r_ctl.rc_rsm_start = rsm->r_start;
9380 		rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
9381 		rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
9382 		entered_recovery = 1;
9383 		rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
9384 		/*
9385 		 * When we enter recovery we need to assure we send
9386 		 * one packet.
9387 		 */
9388 		if (rack->rack_no_prr == 0) {
9389 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
9390 			rack_log_to_prr(rack, 8, 0);
9391 		}
9392 		rack->r_timer_override = 1;
9393 		rack->r_early = 0;
9394 		rack->r_ctl.rc_agg_early = 0;
9395 	} else if (IN_FASTRECOVERY(tp->t_flags) &&
9396 		   rsm &&
9397 		   (rack->r_rr_config == 3)) {
9398 		/*
9399 		 * Assure we can output and we get no
9400 		 * remembered pace time except the retransmit.
9401 		 */
9402 		rack->r_timer_override = 1;
9403 		rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
9404 		rack->r_ctl.rc_resend = rsm;
9405 	}
9406 	if (IN_FASTRECOVERY(tp->t_flags) &&
9407 	    (rack->rack_no_prr == 0) &&
9408 	    (entered_recovery == 0)) {
9409 		rack_update_prr(tp, rack, changed, th_ack);
9410 		if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
9411 		     ((rack->rc_inp->inp_in_hpts == 0) &&
9412 		      ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
9413 			/*
9414 			 * If you are pacing output you don't want
9415 			 * to override.
9416 			 */
9417 			rack->r_early = 0;
9418 			rack->r_ctl.rc_agg_early = 0;
9419 			rack->r_timer_override = 1;
9420 		}
9421 	}
9422 }
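
/*
 * Editor's illustrative sketch -- not part of the original source.
 * rack_log_ack() above sorts the SACK blocks by their end sequence
 * with a simple exchange sort and then collapses blocks sharing the
 * same end, keeping the widest (smallest start).  A standalone
 * restatement (hypothetical _sketch name; struct sackblk and the
 * SEQ_* macros come from headers this file already includes):
 */
static __unused int
rack_sack_sort_dedup_sketch(struct sackblk *blks, int nblks)
{
	struct sackblk tmp;
	int i, j, k;

	/* Exchange sort on the block end sequence numbers. */
	for (i = 0; i < nblks; i++) {
		for (j = i + 1; j < nblks; j++) {
			if (SEQ_GT(blks[i].end, blks[j].end)) {
				tmp = blks[i];
				blks[i] = blks[j];
				blks[j] = tmp;
			}
		}
	}
restart:
	for (i = 0; i < nblks; i++) {
		for (j = i + 1; j < nblks; j++) {
			if (blks[i].end != blks[j].end)
				continue;
			/* Same end: keep the earlier start (covers more). */
			if (SEQ_LT(blks[j].start, blks[i].start))
				blks[i].start = blks[j].start;
			/* Collapse out the duplicate and start over. */
			for (k = j + 1; k < nblks; k++)
				blks[k - 1] = blks[k];
			nblks--;
			goto restart;
		}
	}
	return (nblks);
}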
9423 
9424 static void
9425 rack_strike_dupack(struct tcp_rack *rack)
9426 {
9427 	struct rack_sendmap *rsm;
9428 
9429 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9430 	while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
9431 		rsm = TAILQ_NEXT(rsm, r_tnext);
9432 	}
9433 	if (rsm && (rsm->r_dupack < 0xff)) {
9434 		rsm->r_dupack++;
9435 		if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
9436 			struct timeval tv;
9437 			uint32_t cts;
9438 			/*
9439 			 * Here we see if we need to retransmit. For
9440 			 * a SACK type connection if enough time has passed
9441 			 * we will get a return of the rsm. For a non-sack
9442 			 * connection we will get the rsm returned if the
9443 			 * dupack value is 3 or more.
9444 			 */
9445 			cts = tcp_get_usecs(&tv);
9446 			rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
9447 			if (rack->r_ctl.rc_resend != NULL) {
9448 				if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
9449 					rack_cong_signal(rack->rc_tp, CC_NDUPACK,
9450 							 rack->rc_tp->snd_una);
9451 				}
9452 				rack->r_wanted_output = 1;
9453 				rack->r_timer_override = 1;
9454 				rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
9455 			}
9456 		} else {
9457 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
9458 		}
9459 	}
9460 }
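
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The strike above lands on the first transmit-map entry still under
 * the threshold; entries already at or past DUP_ACK_THRESHOLD have
 * been struck enough and are skipped.  The selection in isolation
 * (hypothetical _sketch name, using this file's real types):
 */
static __unused struct rack_sendmap *
rack_dupack_target_sketch(struct tcp_rack *rack)
{
	struct rack_sendmap *rsm;

	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
	while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD))
		rsm = TAILQ_NEXT(rsm, r_tnext);
	return (rsm);	/* NULL if every entry was already struck out */
}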
9461 
9462 static void
9463 rack_check_bottom_drag(struct tcpcb *tp,
9464 		       struct tcp_rack *rack,
9465 		       struct socket *so, int32_t acked)
9466 {
9467 	uint32_t segsiz, minseg;
9468 
9469 	segsiz = ctf_fixed_maxseg(tp);
9470 	minseg = segsiz;
9471 
9472 	if (tp->snd_max == tp->snd_una) {
9473 		/*
9474 		 * We are doing dynamic pacing and we are way
9475 		 * under. Basically everything got acked while
9476 		 * we were still waiting on the pacer to expire.
9477 		 *
9478 		 * This means we need to boost the b/w in
9479 		 * addition to any earlier boosting of
9480 		 * the multiplier.
9481 		 */
9482 		rack->rc_dragged_bottom = 1;
9483 		rack_validate_multipliers_at_or_above100(rack);
9484 		/*
9485 		 * Let's use the segment bytes acked plus
9486 		 * the lowest RTT seen as the basis to
9487 		 * form a b/w estimate. This will be off
9488 		 * due to the fact that the true estimate
9489 		 * should be around 1/2 the time of the RTT
9490 		 * but we can settle for that.
9491 		 */
9492 		if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
9493 		    acked) {
9494 			uint64_t bw, calc_bw, rtt;
9495 
9496 			rtt = rack->r_ctl.rack_rs.rs_us_rtt;
9497 			if (rtt == 0) {
9498 				/* no us sample; is there a ms one? */
9499 				if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
9500 					rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
9501 				} else {
9502 					goto no_measurement;
9503 				}
9504 			}
9505 			bw = acked;
9506 			calc_bw = bw * 1000000;
9507 			calc_bw /= rtt;
9508 			if (rack->r_ctl.last_max_bw &&
9509 			    (rack->r_ctl.last_max_bw < calc_bw)) {
9510 				/*
9511 				 * If we have a last calculated max bw
9512 				 * enforce it.
9513 				 */
9514 				calc_bw = rack->r_ctl.last_max_bw;
9515 			}
9516 			/* now plop it in */
9517 			if (rack->rc_gp_filled == 0) {
9518 				if (calc_bw > ONE_POINT_TWO_MEG) {
9519 					/*
9520 					 * If we have no measurement
9521 					 * don't let us set in more than
9522 					 * 1.2Mbps. If we are still too
9523 					 * low after pacing with this we
9524 					 * will hopefully have a max b/w
9525 					 * available to sanity check things.
9526 					 */
9527 					calc_bw = ONE_POINT_TWO_MEG;
9528 				}
9529 				rack->r_ctl.rc_rtt_diff = 0;
9530 				rack->r_ctl.gp_bw = calc_bw;
9531 				rack->rc_gp_filled = 1;
9532 				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9533 					rack->r_ctl.num_measurements = RACK_REQ_AVG;
9534 				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9535 			} else if (calc_bw > rack->r_ctl.gp_bw) {
9536 				rack->r_ctl.rc_rtt_diff = 0;
9537 				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9538 					rack->r_ctl.num_measurements = RACK_REQ_AVG;
9539 				rack->r_ctl.gp_bw = calc_bw;
9540 				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9541 			} else
9542 				rack_increase_bw_mul(rack, -1, 0, 0, 1);
9543 			if ((rack->gp_ready == 0) &&
9544 			    (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
9545 				/* We have enough measurements now */
9546 				rack->gp_ready = 1;
9547 				rack_set_cc_pacing(rack);
9548 				if (rack->defer_options)
9549 					rack_apply_deferred_options(rack);
9550 			}
9551 			/*
9552 			 * For acks over 1 MSS we do an extra boost to simulate
9553 			 * where we would get 2 acks (we want 110 for the mul).
9554 			 */
9555 			if (acked > segsiz)
9556 				rack_increase_bw_mul(rack, -1, 0, 0, 1);
9557 		} else {
9558 			/*
9559 			 * Zero RTT possibly? Settle for just an old increase.
9560 			 */
9561 no_measurement:
9562 			rack_increase_bw_mul(rack, -1, 0, 0, 1);
9563 		}
9564 	} else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
9565 		   (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
9566 					       minseg)) &&
9567 		   (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9568 		   (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9569 		   (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
9570 		    (segsiz * rack_req_segs))) {
9571 		/*
9572 		 * We are doing dynamic GP pacing and
9573 		 * we have everything except 1MSS or less
9574 		 * bytes left out. We are still pacing away.
9575 		 * And there is data that could be sent, This
9576 		 * And there is data that could be sent. This
9577 		 * our measurements because we are pacing too slow.
9578 		 */
9579 		rack_validate_multipliers_at_or_above100(rack);
9580 		rack->rc_dragged_bottom = 1;
9581 		rack_increase_bw_mul(rack, -1, 0, 0, 1);
9582 	}
9583 }
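
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The estimate formed above is simply bytes acked over the lowest
 * RTT, bw = acked * 1,000,000 / rtt_us (bytes per second), optionally
 * clamped by the last observed maximum.  Hypothetical _sketch name:
 */
static __unused uint64_t
rack_bottom_drag_bw_sketch(uint32_t acked, uint64_t rtt_us,
    uint64_t last_max_bw)
{
	uint64_t calc_bw;

	if (rtt_us == 0)
		return (0);	/* caller falls back to a plain multiplier boost */
	calc_bw = (uint64_t)acked * 1000000;
	calc_bw /= rtt_us;
	if (last_max_bw && (last_max_bw < calc_bw))
		calc_bw = last_max_bw;	/* enforce the known ceiling */
	return (calc_bw);
}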
9584 
9585 
9586 
9587 static void
9588 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
9589 {
9590 	/*
9591 	 * The fast output path is enabled and we
9592 	 * have moved the cumack forward. Lets see if
9593 	 * we can expand forward the fast path length by
9594 	 * that amount. What we would ideally like to
9595 	 * do is increase the number of bytes in the
9596 	 * fast path block (left_to_send) by the
9597 	 * acked amount. However we have to gate that
9598 	 * by two factors:
9599 	 * 1) The amount outstanding and the rwnd of the peer
9600 	 *    (i.e. we don't want to exceed the rwnd of the peer).
9601 	 *    <and>
9602 	 * 2) The amount of data left in the socket buffer (i.e.
9603 	 *    we can't send beyond what is in the buffer).
9604 	 *
9605 	 * Note that this does not take into account any increase
9606 	 * in the cwnd. We will only extend the fast path by
9607 	 * what was acked.
9608 	 */
9609 	uint32_t new_total, gating_val;
9610 
9611 	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
9612 	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
9613 			 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
9614 	if (new_total <= gating_val) {
9615 		/* We can increase left_to_send by the acked amount */
9616 		counter_u64_add(rack_extended_rfo, 1);
9617 		rack->r_ctl.fsb.left_to_send = new_total;
9618 		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
9619 			("rack:%p left_to_send:%u sbavail:%u out:%u",
9620 			 rack, rack->r_ctl.fsb.left_to_send,
9621 			 sbavail(&rack->rc_inp->inp_socket->so_snd),
9622 			 (tp->snd_max - tp->snd_una)));
9623 
9624 	}
9625 }
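
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The gate above is the smaller of the unsent socket-buffer bytes and
 * the unused receive window, both measured beyond what is already
 * outstanding.  Hypothetical _sketch name:
 */
static __unused uint32_t
rack_fo_gate_sketch(uint32_t sb_avail, uint32_t snd_wnd, uint32_t snd_max,
    uint32_t snd_una)
{
	uint32_t out;

	out = snd_max - snd_una;	/* bytes outstanding */
	return (min(sb_avail - out, snd_wnd - out));
}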
9626 
9627 static void
9628 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
9629 {
9630 	/*
9631 	 * Here any sendmap entry that points to the
9632 	 * beginning mbuf must be adjusted to the correct
9633 	 * offset. This must be called with:
9634 	 * 1) The socket buffer locked
9635 	 * 2) snd_una adjusted to its new position.
9636 	 *
9637 	 * Note that (2) implies rack_ack_received has also
9638 	 * been called.
9639 	 *
9640 	 * We grab the first mbuf in the socket buffer and
9641 	 * then go through the front of the sendmap, recalculating
9642 	 * the stored offset for any sendmap entry that has
9643 	 * that mbuf. We must use the sb functions to do this
9644 	 * since it's possible an add was done as well as
9645 	 * the subtraction we may have just completed. This should
9646 	 * not be a penalty though, since we just referenced the sb
9647 	 * to go in and trim off the mbufs that we freed (of course
9648 	 * there will be a penalty for the sendmap references though).
9649 	 */
9650 	struct mbuf *m;
9651 	struct rack_sendmap *rsm;
9652 
9653 	SOCKBUF_LOCK_ASSERT(sb);
9654 	m = sb->sb_mb;
9655 	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9656 	if ((rsm == NULL) || (m == NULL)) {
9657 		/* Nothing outstanding */
9658 		return;
9659 	}
9660 	while (rsm->m && (rsm->m == m)) {
9661 		/* one to adjust */
9662 #ifdef INVARIANTS
9663 		struct mbuf *tm;
9664 		uint32_t soff;
9665 
9666 		tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
9667 		if (rsm->orig_m_len != m->m_len) {
9668 			rack_adjust_orig_mlen(rsm);
9669 		}
9670 		if (rsm->soff != soff) {
9671 			/*
9672 			 * This is not a fatal error, we anticipate it
9673 			 * might happen (the else code), so we count it here
9674 			 * so that under INVARIANTS we can see that it really
9675 			 * does happen.
9676 			 */
9677 			counter_u64_add(rack_adjust_map_bw, 1);
9678 		}
9679 		rsm->m = tm;
9680 		rsm->soff = soff;
9681 		if (tm)
9682 			rsm->orig_m_len = rsm->m->m_len;
9683 		else
9684 			rsm->orig_m_len = 0;
9685 #else
9686 		rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
9687 		if (rsm->m)
9688 			rsm->orig_m_len = rsm->m->m_len;
9689 		else
9690 			rsm->orig_m_len = 0;
9691 #endif
9692 		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
9693 			      rsm);
9694 		if (rsm == NULL)
9695 			break;
9696 	}
9697 }
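
/*
 * Editor's illustrative sketch -- not part of the original source.
 * What the recalculated (mbuf, soff) pair above amounts to: walk the
 * chain from the head of the socket buffer until the sequence offset
 * lands inside one mbuf.  Hypothetical _sketch name:
 */
static __unused struct mbuf *
rack_seq_to_mbuf_sketch(struct mbuf *m, uint32_t seq_off, uint32_t *soff)
{
	while ((m != NULL) && (seq_off >= (uint32_t)m->m_len)) {
		seq_off -= m->m_len;
		m = m->m_next;
	}
	*soff = seq_off;	/* offset within the returned mbuf */
	return (m);
}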
9698 
9699 /*
9700  * Return value of 1, we do not need to call rack_process_data().
9701  * return value of 0, rack_process_data can be called.
9702  * For ret_val: if it's 0 the TCP is locked, if it's non-zero
9703  * it's unlocked and probably unsafe to touch the TCB.
9704  */
9705 static int
9706 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
9707     struct tcpcb *tp, struct tcpopt *to,
9708     uint32_t tiwin, int32_t tlen,
9709     int32_t * ofia, int32_t thflags, int32_t *ret_val)
9710 {
9711 	int32_t ourfinisacked = 0;
9712 	int32_t nsegs, acked_amount;
9713 	int32_t acked;
9714 	struct mbuf *mfree;
9715 	struct tcp_rack *rack;
9716 	int32_t under_pacing = 0;
9717 	int32_t recovery = 0;
9718 
9719 	rack = (struct tcp_rack *)tp->t_fb_ptr;
9720 	if (SEQ_GT(th->th_ack, tp->snd_max)) {
9721 		__ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
9722 				      &rack->r_ctl.challenge_ack_ts,
9723 				      &rack->r_ctl.challenge_ack_cnt);
9724 		rack->r_wanted_output = 1;
9725 		return (1);
9726 	}
9727 	if (rack->gp_ready &&
9728 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
9729 		under_pacing = 1;
9730 	}
9731 	if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
9732 		int in_rec, dup_ack_struck = 0;
9733 
9734 		in_rec = IN_FASTRECOVERY(tp->t_flags);
9735 		if (rack->rc_in_persist) {
9736 			tp->t_rxtshift = 0;
9737 			RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9738 				      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
9739 		}
9740 		if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) {
9741 			rack_strike_dupack(rack);
9742 			dup_ack_struck = 1;
9743 		}
9744 		rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
9745 	}
9746 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
9747 		/*
9748 		 * Old ack, behind (or duplicate to) the last one rcv'd
9749 		 * Note: We mark reordering as occurring if it is
9750 		 * less than and we have not closed our window.
9751 		 */
9752 		if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
9753 			counter_u64_add(rack_reorder_seen, 1);
9754 			rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
9755 		}
9756 		return (0);
9757 	}
9758 	/*
9759 	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
9760 	 * something we sent.
9761 	 */
9762 	if (tp->t_flags & TF_NEEDSYN) {
9763 		/*
9764 		 * T/TCP: Connection was half-synchronized, and our SYN has
9765 		 * been ACK'd (so connection is now fully synchronized).  Go
9766 		 * to non-starred state, increment snd_una for ACK of SYN,
9767 		 * and check if we can do window scaling.
9768 		 */
9769 		tp->t_flags &= ~TF_NEEDSYN;
9770 		tp->snd_una++;
9771 		/* Do window scaling? */
9772 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9773 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9774 			tp->rcv_scale = tp->request_r_scale;
9775 			/* Send window already scaled. */
9776 		}
9777 	}
9778 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
9779 	INP_WLOCK_ASSERT(tp->t_inpcb);
9780 
9781 	acked = BYTES_THIS_ACK(tp, th);
9782 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
9783 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
9784 	/*
9785 	 * If we just performed our first retransmit, and the ACK arrives
9786 	 * within our recovery window, then it was a mistake to do the
9787 	 * retransmit in the first place.  Recover our original cwnd and
9788 	 * ssthresh, and proceed to transmit where we left off.
9789 	 */
9790 	if ((tp->t_flags & TF_PREVVALID) &&
9791 	    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
9792 		tp->t_flags &= ~TF_PREVVALID;
9793 		if (tp->t_rxtshift == 1 &&
9794 		    (int)(ticks - tp->t_badrxtwin) < 0)
9795 			rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
9796 	}
9797 	if (acked) {
9798 		/* assure we are not backed off */
9799 		tp->t_rxtshift = 0;
9800 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9801 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
9802 		rack->rc_tlp_in_progress = 0;
9803 		rack->r_ctl.rc_tlp_cnt_out = 0;
9804 		/*
9805 		 * If it is the RXT timer we want to
9806 		 * stop it, so we can restart a TLP.
9807 		 */
9808 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
9809 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9810 #ifdef NETFLIX_HTTP_LOGGING
9811 		tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
9812 #endif
9813 	}
9814 	/*
9815 	 * If we have a timestamp reply, update smoothed round trip time. If
9816 	 * no timestamp is present but transmit timer is running and timed
9817 	 * sequence number was acked, update smoothed round trip time. Since
9818 	 * we now have an rtt measurement, cancel the timer backoff (cf.,
9819 	 * Phil Karn's retransmit alg.). Recompute the initial retransmit
9820 	 * timer.
9821 	 *
9822 	 * Some boxes send broken timestamp replies during the SYN+ACK
9823 	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
9824 	 * and blow up the retransmit timer.
9825 	 */
9826 	/*
9827 	 * If all outstanding data is acked, stop retransmit timer and
9828 	 * remember to restart (more output or persist). If there is more
9829 	 * data to be acked, restart retransmit timer, using current
9830 	 * (possibly backed-off) value.
9831 	 */
9832 	if (acked == 0) {
9833 		if (ofia)
9834 			*ofia = ourfinisacked;
9835 		return (0);
9836 	}
9837 	if (IN_RECOVERY(tp->t_flags)) {
9838 		if (SEQ_LT(th->th_ack, tp->snd_recover) &&
9839 		    (SEQ_LT(th->th_ack, tp->snd_max))) {
9840 			tcp_rack_partialack(tp);
9841 		} else {
9842 			rack_post_recovery(tp, th->th_ack);
9843 			recovery = 1;
9844 		}
9845 	}
9846 	/*
9847 	 * Let the congestion control algorithm update congestion control
9848 	 * related information. This typically means increasing the
9849 	 * congestion window.
9850 	 */
9851 	rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
9852 	SOCKBUF_LOCK(&so->so_snd);
9853 	acked_amount = min(acked, (int)sbavail(&so->so_snd));
9854 	tp->snd_wnd -= acked_amount;
9855 	mfree = sbcut_locked(&so->so_snd, acked_amount);
9856 	if ((sbused(&so->so_snd) == 0) &&
9857 	    (acked > acked_amount) &&
9858 	    (tp->t_state >= TCPS_FIN_WAIT_1) &&
9859 	    (tp->t_flags & TF_SENTFIN)) {
9860 		/*
9861 		 * We must be sure our fin
9862 		 * was sent and acked (we can be
9863 		 * in FIN_WAIT_1 without having
9864 		 * sent the fin).
9865 		 */
9866 		ourfinisacked = 1;
9867 	}
9868 	tp->snd_una = th->th_ack;
9869 	if (acked_amount && sbavail(&so->so_snd))
9870 		rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
9871 	rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
9872 	/* NB: sowwakeup_locked() does an implicit unlock. */
9873 	sowwakeup_locked(so);
9874 	m_freem(mfree);
9875 	if (SEQ_GT(tp->snd_una, tp->snd_recover))
9876 		tp->snd_recover = tp->snd_una;
9877 
9878 	if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
9879 		tp->snd_nxt = tp->snd_una;
9880 	}
9881 	if (under_pacing &&
9882 	    (rack->use_fixed_rate == 0) &&
9883 	    (rack->in_probe_rtt == 0) &&
9884 	    rack->rc_gp_dyn_mul &&
9885 	    rack->rc_always_pace) {
9886 		/* Check if we are dragging bottom */
9887 		rack_check_bottom_drag(tp, rack, so, acked);
9888 	}
9889 	if (tp->snd_una == tp->snd_max) {
9890 		/* Nothing left outstanding */
9891 		tp->t_flags &= ~TF_PREVVALID;
9892 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
9893 		rack->r_ctl.retran_during_recovery = 0;
9894 		rack->r_ctl.dsack_byte_cnt = 0;
9895 		if (rack->r_ctl.rc_went_idle_time == 0)
9896 			rack->r_ctl.rc_went_idle_time = 1;
9897 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
9898 		if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
9899 			tp->t_acktime = 0;
9900 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9901 		/* Set need output so persist might get set */
9902 		rack->r_wanted_output = 1;
9903 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
9904 		if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
9905 		    (sbavail(&so->so_snd) == 0) &&
9906 		    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
9907 			/*
9908 			 * The socket was gone and the
9909 			 * peer sent data (now or in the past), time to
9910 			 * reset him.
9911 			 */
9912 			*ret_val = 1;
9913 			/* tcp_close will kill the inp pre-log the Reset */
9914 			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
9915 			tp = tcp_close(tp);
9916 			ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
9917 			return (1);
9918 		}
9919 	}
9920 	if (ofia)
9921 		*ofia = ourfinisacked;
9922 	return (0);
9923 }
9924 
9925 static void
9926 rack_collapsed_window(struct tcp_rack *rack)
9927 {
9928 	/*
9929 	 * Now we must walk the
9930 	 * send map and divide the
9931 	 * ones left stranded. These
9932 	 * guys can't cause us to abort
9933 	 * the connection and are really
9934 	 * "unsent". However, if a buggy
9935 	 * client actually did keep some
9936 	 * of the data, i.e. collapsed the win,
9937 	 * refused to ack, and then opened
9938 	 * the win and acked that data, we would
9939 	 * get into an ack war; so the simpler
9940 	 * method of just pretending we
9941 	 * did not send those segments
9942 	 * won't work.
9943 	 */
9944 	struct rack_sendmap *rsm, *nrsm, fe, *insret;
9945 	tcp_seq max_seq;
9946 
9947 	max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
9948 	memset(&fe, 0, sizeof(fe));
9949 	fe.r_start = max_seq;
9950 	/* Find the first seq past or at maxseq */
9951 	rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
9952 	if (rsm == NULL) {
9953 		/* Nothing to do, strange */
9954 		rack->rc_has_collapsed = 0;
9955 		return;
9956 	}
9957 	/*
9958 	 * Now do we need to split at
9959 	 * the collapse point?
9960 	 */
9961 	if (SEQ_GT(max_seq, rsm->r_start)) {
9962 		nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9963 		if (nrsm == NULL) {
9964 			/* We can't get a rsm, mark all? */
9965 			nrsm = rsm;
9966 			goto no_split;
9967 		}
9968 		/* Clone it */
9969 		rack_clone_rsm(rack, nrsm, rsm, max_seq);
9970 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
9971 #ifdef INVARIANTS
9972 		if (insret != NULL) {
9973 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
9974 			      nrsm, insret, rack, rsm);
9975 		}
9976 #endif
9977 		rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__);
9978 		if (rsm->r_in_tmap) {
9979 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9980 			nrsm->r_in_tmap = 1;
9981 		}
9982 		/*
9983 		 * Set in the new RSM as the
9984 		 * collapsed starting point
9985 		 */
9986 		rsm = nrsm;
9987 	}
9988 no_split:
9989 	counter_u64_add(rack_collapsed_win, 1);
9990 	RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
9991 		nrsm->r_flags |= RACK_RWND_COLLAPSED;
9992 	}
9993 	rack->rc_has_collapsed = 1;
9994 }
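
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The collapse point is the right edge of the offered window,
 * max_seq = snd_una + snd_wnd; an entry straddling it is split there
 * and everything at or past it is flagged RACK_RWND_COLLAPSED.  The
 * split arithmetic on a bare [start, end) range (hypothetical
 * _sketch names):
 */
struct rack_seq_range_sketch {
	tcp_seq start;	/* inclusive */
	tcp_seq end;	/* exclusive */
};

static __unused int
rack_split_at_collapse_sketch(struct rack_seq_range_sketch *r,
    tcp_seq max_seq, struct rack_seq_range_sketch *stranded)
{
	if (!SEQ_GT(max_seq, r->start) || !SEQ_LT(max_seq, r->end))
		return (0);	/* no straddle, nothing to split */
	stranded->start = max_seq;	/* the "unsent" half */
	stranded->end = r->end;
	r->end = max_seq;		/* the half still inside the window */
	return (1);
}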
9995 
9996 static void
9997 rack_un_collapse_window(struct tcp_rack *rack)
9998 {
9999 	struct rack_sendmap *rsm;
10000 
10001 	RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
10002 		if (rsm->r_flags & RACK_RWND_COLLAPSED)
10003 			rsm->r_flags &= ~RACK_RWND_COLLAPSED;
10004 		else
10005 			break;
10006 	}
10007 	rack->rc_has_collapsed = 0;
10008 }
10009 
10010 static void
10011 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
10012 			int32_t tlen, int32_t tfo_syn)
10013 {
10014 	if (DELAY_ACK(tp, tlen) || tfo_syn) {
10015 		if (rack->rc_dack_mode &&
10016 		    (tlen > 500) &&
10017 		    (rack->rc_dack_toggle == 1)) {
10018 			goto no_delayed_ack;
10019 		}
10020 		rack_timer_cancel(tp, rack,
10021 				  rack->r_ctl.rc_rcvtime, __LINE__);
10022 		tp->t_flags |= TF_DELACK;
10023 	} else {
10024 no_delayed_ack:
10025 		rack->r_wanted_output = 1;
10026 		tp->t_flags |= TF_ACKNOW;
10027 		if (rack->rc_dack_mode) {
10028 			if (tp->t_flags & TF_DELACK)
10029 				rack->rc_dack_toggle = 1;
10030 			else
10031 				rack->rc_dack_toggle = 0;
10032 		}
10033 	}
10034 }
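
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The toggle above alternates large (> 500 byte) in-order segments
 * between a delayed and an immediate ACK when rc_dack_mode is set,
 * roughly halving the ACK rate.  Hypothetical _sketch name, where
 * can_delay stands for DELAY_ACK(tp, tlen) || tfo_syn:
 */
static __unused int
rack_should_delay_ack_sketch(int can_delay, int dack_mode, int32_t tlen,
    int toggle)
{
	if (can_delay == 0)
		return (0);	/* always ack now */
	if (dack_mode && (tlen > 500) && (toggle == 1))
		return (0);	/* forced immediate ACK this round */
	return (1);		/* set TF_DELACK */
}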
10035 
10036 static void
10037 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
10038 {
10039 	/*
10040 	 * If fast output is in progress, lets validate that
10041 	 * the new window did not shrink on us and make it
10042 	 * so fast output should end.
10043 	 */
10044 	if (rack->r_fast_output) {
10045 		uint32_t out;
10046 
10047 		/*
10048 		 * Calculate what we will send if left as is
10049 		 * and compare that to our send window.
10050 		 */
10051 		out = ctf_outstanding(tp);
10052 		if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
10053 			/* ok we have an issue */
10054 			if (out >= tp->snd_wnd) {
10055 				/* Turn off fast output, the window is met or collapsed */
10056 				rack->r_fast_output = 0;
10057 			} else {
10058 				/* we have some room left */
10059 				rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
10060 				if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
10061 					/* If not at least 1 full segment never mind */
10062 					rack->r_fast_output = 0;
10063 				}
10064 			}
10065 		}
10066 	}
10067 }
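
/*
 * Editor's illustrative sketch -- not part of the original source.
 * The validation above in pure form: if what fast output would send no
 * longer fits the (possibly shrunken) window, trim it, and give up
 * entirely when less than one full segment of room remains.
 * Hypothetical _sketch name; a return of 0 means fast output stops:
 */
static __unused uint32_t
rack_fo_left_after_win_sketch(uint32_t out, uint32_t left_to_send,
    uint32_t snd_wnd, uint32_t maxseg)
{
	if ((out + left_to_send) <= snd_wnd)
		return (left_to_send);	/* still fits, unchanged */
	if (out >= snd_wnd)
		return (0);		/* window met or collapsed */
	left_to_send = snd_wnd - out;	/* trim to the room left */
	return ((left_to_send < maxseg) ? 0 : left_to_send);
}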
10068 
10069 
10070 /*
10071  * Return value of 1, the TCB is unlocked and most
10072  * likely gone, return value of 0, the TCP is still
10073  * locked.
10074  */
10075 static int
10076 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
10077     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
10078     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
10079 {
10080 	/*
10081 	 * Update window information. Don't look at window if no ACK: TAC's
10082 	 * send garbage on first SYN.
10083 	 */
10084 	int32_t nsegs;
10085 	int32_t tfo_syn;
10086 	struct tcp_rack *rack;
10087 
10088 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10089 	INP_WLOCK_ASSERT(tp->t_inpcb);
10090 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
10091 	if ((thflags & TH_ACK) &&
10092 	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
10093 	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
10094 	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
10095 		/* keep track of pure window updates */
10096 		if (tlen == 0 &&
10097 		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
10098 			KMOD_TCPSTAT_INC(tcps_rcvwinupd);
10099 		tp->snd_wnd = tiwin;
10100 		rack_validate_fo_sendwin_up(tp, rack);
10101 		tp->snd_wl1 = th->th_seq;
10102 		tp->snd_wl2 = th->th_ack;
10103 		if (tp->snd_wnd > tp->max_sndwnd)
10104 			tp->max_sndwnd = tp->snd_wnd;
10105 		rack->r_wanted_output = 1;
10106 	} else if (thflags & TH_ACK) {
10107 		if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
10108 			tp->snd_wnd = tiwin;
10109 			rack_validate_fo_sendwin_up(tp, rack);
10110 			tp->snd_wl1 = th->th_seq;
10111 			tp->snd_wl2 = th->th_ack;
10112 		}
10113 	}
10114 	if (tp->snd_wnd < ctf_outstanding(tp))
10115 		/* The peer collapsed the window */
10116 		rack_collapsed_window(rack);
10117 	else if (rack->rc_has_collapsed)
10118 		rack_un_collapse_window(rack);
10119 	/* Was persist timer active and now we have window space? */
10120 	if ((rack->rc_in_persist != 0) &&
10121 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10122 				rack->r_ctl.rc_pace_min_segs))) {
10123 		rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10124 		tp->snd_nxt = tp->snd_max;
10125 		/* Make sure we output to start the timer */
10126 		rack->r_wanted_output = 1;
10127 	}
10128 	/* Do we enter persists? */
10129 	if ((rack->rc_in_persist == 0) &&
10130 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10131 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
10132 	    (tp->snd_max == tp->snd_una) &&
10133 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10134 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10135 		/*
10136 		 * Here the rwnd is less than
10137 		 * the pacing size, we are established,
10138 		 * nothing is outstanding, and there is
10139 		 * data to send. Enter persists.
10140 		 */
10141 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10142 	}
10143 	if (tp->t_flags2 & TF2_DROP_AF_DATA) {
10144 		m_freem(m);
10145 		return (0);
10146 	}
10147 	/*
10148 	 * Don't process the URG bit; ignore it and drag
10149 	 * along the urgent pointer (rcv_up).
10150 	 */
10151 	tp->rcv_up = tp->rcv_nxt;
10152 	INP_WLOCK_ASSERT(tp->t_inpcb);
10153 
10154 	/*
10155 	 * Process the segment text, merging it into the TCP sequencing
10156 	 * queue, and arranging for acknowledgment of receipt if necessary.
10157 	 * This process logically involves adjusting tp->rcv_wnd as data is
10158 	 * presented to the user (this happens in tcp_usrreq.c, case
10159 	 * PRU_RCVD).  If a FIN has already been received on this connection
10160 	 * then we just ignore the text.
10161 	 */
10162 	tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
10163 		   IS_FASTOPEN(tp->t_flags));
10164 	if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
10165 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10166 		tcp_seq save_start = th->th_seq;
10167 		tcp_seq save_rnxt  = tp->rcv_nxt;
10168 		int     save_tlen  = tlen;
10169 
10170 		m_adj(m, drop_hdrlen);	/* delayed header drop */
10171 		/*
10172 		 * Insert segment which includes th into TCP reassembly
10173 		 * queue with control block tp.  Set thflags to whether
10174 		 * reassembly now includes a segment with FIN.  This handles
10175 		 * the common case inline (segment is the next to be
10176 		 * received on an established connection, and the queue is
10177 		 * empty), avoiding linkage into and removal from the queue
10178 		 * and repetition of various conversions. Set DELACK for
10179 		 * segments received in order, but ack immediately when
10180 		 * segments are out of order (so fast retransmit can work).
10181 		 */
10182 		if (th->th_seq == tp->rcv_nxt &&
10183 		    SEGQ_EMPTY(tp) &&
10184 		    (TCPS_HAVEESTABLISHED(tp->t_state) ||
10185 		    tfo_syn)) {
10186 #ifdef NETFLIX_SB_LIMITS
10187 			u_int mcnt, appended;
10188 
10189 			if (so->so_rcv.sb_shlim) {
10190 				mcnt = m_memcnt(m);
10191 				appended = 0;
10192 				if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10193 				    CFO_NOSLEEP, NULL) == false) {
10194 					counter_u64_add(tcp_sb_shlim_fails, 1);
10195 					m_freem(m);
10196 					return (0);
10197 				}
10198 			}
10199 #endif
10200 			rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
10201 			tp->rcv_nxt += tlen;
10202 			if (tlen &&
10203 			    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10204 			    (tp->t_fbyte_in == 0)) {
10205 				tp->t_fbyte_in = ticks;
10206 				if (tp->t_fbyte_in == 0)
10207 					tp->t_fbyte_in = 1;
10208 				if (tp->t_fbyte_out && tp->t_fbyte_in)
10209 					tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10210 			}
10211 			thflags = th->th_flags & TH_FIN;
10212 			KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10213 			KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10214 			SOCKBUF_LOCK(&so->so_rcv);
10215 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10216 				m_freem(m);
10217 			} else
10218 #ifdef NETFLIX_SB_LIMITS
10219 				appended =
10220 #endif
10221 					sbappendstream_locked(&so->so_rcv, m, 0);
10222 
10223 			rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
10224 			tp->t_flags |= TF_WAKESOR;
10225 #ifdef NETFLIX_SB_LIMITS
10226 			if (so->so_rcv.sb_shlim && appended != mcnt)
10227 				counter_fo_release(so->so_rcv.sb_shlim,
10228 				    mcnt - appended);
10229 #endif
10230 		} else {
10231 			/*
10232 			 * XXX: Due to the header drop above "th" is
10233 			 * theoretically invalid by now.  Fortunately
10234 			 * m_adj() doesn't actually free any mbufs when
10235 			 * trimming from the head.
10236 			 */
10237 			tcp_seq temp = save_start;
10238 			if (tlen || (th->th_seq != tp->rcv_nxt)) {
10239 				/*
10240 				 * We add the th_seq != rcv_nxt to
10241 				 * catch the case of a stand alone out
10242 				 * of order FIN.
10243 				 */
10244 				thflags = tcp_reass(tp, th, &temp, &tlen, m);
10245 				tp->t_flags |= TF_ACKNOW;
10246 			}
10247 		}
10248 		if ((tp->t_flags & TF_SACK_PERMIT) &&
10249 		    (save_tlen > 0) &&
10250 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
10251 			if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
10252 				/*
10253 				 * DSACK actually handled in the fastpath
10254 				 * above.
10255 				 */
10256 				RACK_OPTS_INC(tcp_sack_path_1);
10257 				tcp_update_sack_list(tp, save_start,
10258 				    save_start + save_tlen);
10259 			} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
10260 				if ((tp->rcv_numsacks >= 1) &&
10261 				    (tp->sackblks[0].end == save_start)) {
10262 					/*
10263 					 * Partial overlap, recorded at todrop
10264 					 * above.
10265 					 */
10266 					RACK_OPTS_INC(tcp_sack_path_2a);
10267 					tcp_update_sack_list(tp,
10268 					    tp->sackblks[0].start,
10269 					    tp->sackblks[0].end);
10270 				} else {
10271 					RACK_OPTS_INC(tcp_sack_path_2b);
10272 					tcp_update_dsack_list(tp, save_start,
10273 					    save_start + save_tlen);
10274 				}
10275 			} else if (tlen >= save_tlen) {
10276 				/* Update of sackblks. */
10277 				RACK_OPTS_INC(tcp_sack_path_3);
10278 				tcp_update_dsack_list(tp, save_start,
10279 				    save_start + save_tlen);
10280 			} else if (tlen > 0) {
10281 				RACK_OPTS_INC(tcp_sack_path_4);
10282 				tcp_update_dsack_list(tp, save_start,
10283 				    save_start + tlen);
10284 			}
10285 		}
10286 		tcp_handle_wakeup(tp, so);
10287 	} else {
10288 		m_freem(m);
10289 		thflags &= ~TH_FIN;
10290 	}
10291 
10292 	/*
10293 	 * If FIN is received ACK the FIN and let the user know that the
10294 	 * connection is closing.
10295 	 */
10296 	if (thflags & TH_FIN) {
10297 		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10298 			/* The socket upcall is handled by socantrcvmore. */
10299 			socantrcvmore(so);
10300 			/*
10301 			 * If connection is half-synchronized (ie NEEDSYN
10302 			 * flag on) then delay ACK, so it may be piggybacked
10303 			 * when SYN is sent. Otherwise, since we received a
10304 			 * FIN then no more input can be expected, send ACK
10305 			 * now.
10306 			 */
10307 			if (tp->t_flags & TF_NEEDSYN) {
10308 				rack_timer_cancel(tp, rack,
10309 				    rack->r_ctl.rc_rcvtime, __LINE__);
10310 				tp->t_flags |= TF_DELACK;
10311 			} else {
10312 				tp->t_flags |= TF_ACKNOW;
10313 			}
10314 			tp->rcv_nxt++;
10315 		}
10316 		switch (tp->t_state) {
10317 			/*
10318 			 * In SYN_RECEIVED and ESTABLISHED STATES enter the
10319 			 * CLOSE_WAIT state.
10320 			 */
10321 		case TCPS_SYN_RECEIVED:
10322 			tp->t_starttime = ticks;
10323 			/* FALLTHROUGH */
10324 		case TCPS_ESTABLISHED:
10325 			rack_timer_cancel(tp, rack,
10326 			    rack->r_ctl.rc_rcvtime, __LINE__);
10327 			tcp_state_change(tp, TCPS_CLOSE_WAIT);
10328 			break;
10329 
10330 			/*
10331 			 * If still in FIN_WAIT_1 STATE FIN has not been
10332 			 * acked so enter the CLOSING state.
10333 			 */
10334 		case TCPS_FIN_WAIT_1:
10335 			rack_timer_cancel(tp, rack,
10336 			    rack->r_ctl.rc_rcvtime, __LINE__);
10337 			tcp_state_change(tp, TCPS_CLOSING);
10338 			break;
10339 
10340 			/*
10341 			 * In FIN_WAIT_2 state enter the TIME_WAIT state,
10342 			 * starting the time-wait timer, turning off the
10343 			 * other standard timers.
10344 			 */
10345 		case TCPS_FIN_WAIT_2:
10346 			rack_timer_cancel(tp, rack,
10347 			    rack->r_ctl.rc_rcvtime, __LINE__);
10348 			tcp_twstart(tp);
10349 			return (1);
10350 		}
10351 	}
10352 	/*
10353 	 * Return any desired output.
10354 	 */
10355 	if ((tp->t_flags & TF_ACKNOW) ||
10356 	    (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
10357 		rack->r_wanted_output = 1;
10358 	}
10359 	INP_WLOCK_ASSERT(tp->t_inpcb);
10360 	return (0);
10361 }
10362 
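/*
 * Editor's illustrative sketch -- not part of the original source.
 * The window-update acceptance test used above (and in the fast path)
 * is the classic one: take a new window only from a segment no older
 * than the last one used, guarding against reordered updates.
 * Hypothetical _sketch name:
 */
static __unused int
rack_win_update_ok_sketch(tcp_seq seq, tcp_seq ack, uint32_t tiwin,
    tcp_seq snd_wl1, tcp_seq snd_wl2, uint32_t snd_wnd)
{
	return (SEQ_LT(snd_wl1, seq) ||
	    ((snd_wl1 == seq) && (SEQ_LT(snd_wl2, ack) ||
	    ((snd_wl2 == ack) && (tiwin > snd_wnd)))));
}
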
10363 /*
10364  * Here nothing is really faster, it's just that we
10365  * have broken out the fast-data path, just like
10366  * the fast-ack.
10367  */
10368 static int
10369 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
10370     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10371     uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
10372 {
10373 	int32_t nsegs;
10374 	int32_t newsize = 0;	/* automatic sockbuf scaling */
10375 	struct tcp_rack *rack;
10376 #ifdef NETFLIX_SB_LIMITS
10377 	u_int mcnt, appended;
10378 #endif
10379 #ifdef TCPDEBUG
10380 	/*
10381 	 * The size of tcp_saveipgen must be the size of the max ip header,
10382 	 * now IPv6.
10383 	 */
10384 	u_char tcp_saveipgen[IP6_HDR_LEN];
10385 	struct tcphdr tcp_savetcp;
10386 	short ostate = 0;
10387 
10388 #endif
10389 	/*
10390 	 * If last ACK falls within this segment's sequence numbers, record
10391 	 * the timestamp. NOTE that the test is modified according to the
10392 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10393 	 */
10394 	if (__predict_false(th->th_seq != tp->rcv_nxt)) {
10395 		return (0);
10396 	}
10397 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10398 		return (0);
10399 	}
10400 	if (tiwin && tiwin != tp->snd_wnd) {
10401 		return (0);
10402 	}
10403 	if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
10404 		return (0);
10405 	}
10406 	if (__predict_false((to->to_flags & TOF_TS) &&
10407 	    (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
10408 		return (0);
10409 	}
10410 	if (__predict_false((th->th_ack != tp->snd_una))) {
10411 		return (0);
10412 	}
10413 	if (__predict_false(tlen > sbspace(&so->so_rcv))) {
10414 		return (0);
10415 	}
10416 	if ((to->to_flags & TOF_TS) != 0 &&
10417 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10418 		tp->ts_recent_age = tcp_ts_getticks();
10419 		tp->ts_recent = to->to_tsval;
10420 	}
10421 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10422 	/*
10423 	 * This is a pure, in-sequence data packet with nothing on the
10424 	 * reassembly queue and we have enough buffer space to take it.
10425 	 */
10426 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
10427 
10428 #ifdef NETFLIX_SB_LIMITS
10429 	if (so->so_rcv.sb_shlim) {
10430 		mcnt = m_memcnt(m);
10431 		appended = 0;
10432 		if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10433 		    CFO_NOSLEEP, NULL) == false) {
10434 			counter_u64_add(tcp_sb_shlim_fails, 1);
10435 			m_freem(m);
10436 			return (1);
10437 		}
10438 	}
10439 #endif
10440 	/* Clean receiver SACK report if present */
10441 	if (tp->rcv_numsacks)
10442 		tcp_clean_sackreport(tp);
10443 	KMOD_TCPSTAT_INC(tcps_preddat);
10444 	tp->rcv_nxt += tlen;
10445 	if (tlen &&
10446 	    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10447 	    (tp->t_fbyte_in == 0)) {
10448 		tp->t_fbyte_in = ticks;
10449 		if (tp->t_fbyte_in == 0)
10450 			tp->t_fbyte_in = 1;
10451 		if (tp->t_fbyte_out && tp->t_fbyte_in)
10452 			tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10453 	}
10454 	/*
10455 	 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
10456 	 */
10457 	tp->snd_wl1 = th->th_seq;
10458 	/*
10459 	 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
10460 	 */
10461 	tp->rcv_up = tp->rcv_nxt;
10462 	KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10463 	KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10464 #ifdef TCPDEBUG
10465 	if (so->so_options & SO_DEBUG)
10466 		tcp_trace(TA_INPUT, ostate, tp,
10467 		    (void *)tcp_saveipgen, &tcp_savetcp, 0);
10468 #endif
10469 	newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
10470 
10471 	/* Add data to socket buffer. */
10472 	SOCKBUF_LOCK(&so->so_rcv);
10473 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10474 		m_freem(m);
10475 	} else {
10476 		/*
10477 		 * Set new socket buffer size. Give up when limit is
10478 		 * reached.
10479 		 */
10480 		if (newsize)
10481 			if (!sbreserve_locked(&so->so_rcv,
10482 			    newsize, so, NULL))
10483 				so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
10484 		m_adj(m, drop_hdrlen);	/* delayed header drop */
10485 #ifdef NETFLIX_SB_LIMITS
10486 		appended =
10487 #endif
10488 			sbappendstream_locked(&so->so_rcv, m, 0);
10489 		ctf_calc_rwin(so, tp);
10490 	}
10491 	rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
10492 	tp->t_flags |= TF_WAKESOR;
10493 #ifdef NETFLIX_SB_LIMITS
10494 	if (so->so_rcv.sb_shlim && mcnt != appended)
10495 		counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
10496 #endif
10497 	rack_handle_delayed_ack(tp, rack, tlen, 0);
10498 	if (tp->snd_una == tp->snd_max)
10499 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10500 	tcp_handle_wakeup(tp, so);
10501 	return (1);
10502 }
10503 
10504 /*
10505  * This subfunction is used to try to highly optimize the
10506  * fast path. We again allow window updates that are
10507  * in sequence to remain in the fast-path. We also add
10508  * in the __predict's to attempt to help the compiler.
10509  * Note that if we return a 0, then we can *not* process
10510  * it and the caller should push the packet into the
10511  * slow-path.
10512  */
10513 static int
10514 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10515     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10516     uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
10517 {
10518 	int32_t acked;
10519 	int32_t nsegs;
10520 #ifdef TCPDEBUG
10521 	/*
10522 	 * The size of tcp_saveipgen must be the size of the max ip header,
10523 	 * now IPv6.
10524 	 */
10525 	u_char tcp_saveipgen[IP6_HDR_LEN];
10526 	struct tcphdr tcp_savetcp;
10527 	short ostate = 0;
10528 #endif
10529 	int32_t under_pacing = 0;
10530 	struct tcp_rack *rack;
10531 
10532 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10533 		/* Old ack, behind (or duplicate to) the last one rcv'd */
10534 		return (0);
10535 	}
10536 	if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
10537 		/* Above what we have sent? */
10538 		return (0);
10539 	}
10540 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10541 		/* We are retransmitting */
10542 		return (0);
10543 	}
10544 	if (__predict_false(tiwin == 0)) {
10545 		/* zero window */
10546 		return (0);
10547 	}
10548 	if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
10549 		/* We need a SYN or a FIN, unlikely.. */
10550 		return (0);
10551 	}
10552 	if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
10553 		/* Timestamp is behind .. old ack with seq wrap? */
10554 		return (0);
10555 	}
10556 	if (__predict_false(IN_RECOVERY(tp->t_flags))) {
10557 		/* Still recovering */
10558 		return (0);
10559 	}
10560 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10561 	if (rack->r_ctl.rc_sacked) {
10562 		/* We have sack holes on our scoreboard */
10563 		return (0);
10564 	}
10565 	/* Ok if we reach here, we can process a fast-ack */
10566 	if (rack->gp_ready &&
10567 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10568 		under_pacing = 1;
10569 	}
10570 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
10571 	rack_log_ack(tp, to, th, 0, 0);
10572 	/* Did the window get updated? */
10573 	if (tiwin != tp->snd_wnd) {
10574 		tp->snd_wnd = tiwin;
10575 		rack_validate_fo_sendwin_up(tp, rack);
10576 		tp->snd_wl1 = th->th_seq;
10577 		if (tp->snd_wnd > tp->max_sndwnd)
10578 			tp->max_sndwnd = tp->snd_wnd;
10579 	}
10580 	/* Do we exit persists? */
10581 	if ((rack->rc_in_persist != 0) &&
10582 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10583 			       rack->r_ctl.rc_pace_min_segs))) {
10584 		rack_exit_persist(tp, rack, cts);
10585 	}
10586 	/* Do we enter persists? */
10587 	if ((rack->rc_in_persist == 0) &&
10588 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10589 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
10590 	    (tp->snd_max == tp->snd_una) &&
10591 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10592 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10593 		/*
10594 		 * Here the rwnd is less than
10595 		 * the pacing size, we are established,
10596 		 * nothing is outstanding, and there is
10597 		 * data to send. Enter persists.
10598 		 */
10599 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10600 	}
10601 	/*
10602 	 * If last ACK falls within this segment's sequence numbers, record
10603 	 * the timestamp. NOTE that the test is modified according to the
10604 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10605 	 */
10606 	if ((to->to_flags & TOF_TS) != 0 &&
10607 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10608 		tp->ts_recent_age = tcp_ts_getticks();
10609 		tp->ts_recent = to->to_tsval;
10610 	}
10611 	/*
10612 	 * This is a pure ack for outstanding data.
10613 	 */
10614 	KMOD_TCPSTAT_INC(tcps_predack);
10615 
10616 	/*
10617 	 * "bad retransmit" recovery.
10618 	 */
10619 	if ((tp->t_flags & TF_PREVVALID) &&
10620 	    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10621 		tp->t_flags &= ~TF_PREVVALID;
10622 		if (tp->t_rxtshift == 1 &&
10623 		    (int)(ticks - tp->t_badrxtwin) < 0)
10624 			rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
10625 	}
10626 	/*
10627 	 * Recalculate the transmit timer / rtt.
10628 	 *
10629 	 * Some boxes send broken timestamp replies during the SYN+ACK
10630 	 * phase; ignore timestamps of 0, or we could calculate a huge RTT
10631 	 * and blow up the retransmit timer.
10632 	 */
10633 	acked = BYTES_THIS_ACK(tp, th);
10634 
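	/*
	 * A sketch of the accounting above (hypothetical values, not
	 * compiled): BYTES_THIS_ACK() is the ack point minus the oldest
	 * unacked byte.
	 */
#if 0
	tp->snd_una = 1000;
	th->th_ack = 2448;
	acked = BYTES_THIS_ACK(tp, th);	/* 2448 - 1000 = 1448 bytes */
#endif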
10635 #ifdef TCP_HHOOK
10636 	/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
10637 	hhook_run_tcp_est_in(tp, th, to);
10638 #endif
10639 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10640 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10641 	if (acked) {
10642 		struct mbuf *mfree;
10643 
10644 		rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
10645 		SOCKBUF_LOCK(&so->so_snd);
10646 		mfree = sbcut_locked(&so->so_snd, acked);
10647 		tp->snd_una = th->th_ack;
10648 		/* Note we want to hold the sb lock through the sendmap adjust */
10649 		rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10650 		/* Wake up the socket if we have room to write more */
10651 		rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
10652 		sowwakeup_locked(so);
10653 		m_freem(mfree);
10654 		tp->t_rxtshift = 0;
10655 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10656 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
10657 		rack->rc_tlp_in_progress = 0;
10658 		rack->r_ctl.rc_tlp_cnt_out = 0;
10659 		/*
10660 		 * If it is the RXT timer we want to
10661 		 * stop it, so we can restart a TLP.
10662 		 */
10663 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10664 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10665 #ifdef NETFLIX_HTTP_LOGGING
10666 		tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10667 #endif
10668 	}
10669 	/*
10670 	 * Check whether the peer shrank its advertised window below
10671 	 * what is already outstanding (a collapsed window) and enter
10672 	 * or leave collapsed-window handling accordingly.
10673 	 */
10674 	if (tp->snd_wnd < ctf_outstanding(tp)) {
10675 		/* The peer collapsed the window */
10676 		rack_collapsed_window(rack);
10677 	} else if (rack->rc_has_collapsed)
10678 		rack_un_collapse_window(rack);
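	/*
	 * For example (hypothetical numbers): with snd_una = 1000 and
	 * snd_max = 6000 there are roughly 5000 bytes outstanding; if
	 * the peer now advertises snd_wnd = 1000, the window collapsed
	 * below the data in flight and rack_collapsed_window() marks
	 * the send-map entries that now sit beyond the offered window.
	 */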
10679 
10680 	/*
10681 	 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
10682 	 */
10683 	tp->snd_wl2 = th->th_ack;
10684 	tp->t_dupacks = 0;
10685 	m_freem(m);
10686 	/* ND6_HINT(tp);	 *//* Some progress has been made. */
10687 
10688 	/*
10689 	 * If all outstanding data are acked, stop retransmit timer,
10690 	 * otherwise restart timer using current (possibly backed-off)
10691 	 * value. If process is waiting for space, wakeup/selwakeup/signal.
10692 	 * If data are ready to send, let tcp_output decide between more
10693 	 * output or persist.
10694 	 */
10695 #ifdef TCPDEBUG
10696 	if (so->so_options & SO_DEBUG)
10697 		tcp_trace(TA_INPUT, ostate, tp,
10698 		    (void *)tcp_saveipgen,
10699 		    &tcp_savetcp, 0);
10700 #endif
10701 	if (under_pacing &&
10702 	    (rack->use_fixed_rate == 0) &&
10703 	    (rack->in_probe_rtt == 0) &&
10704 	    rack->rc_gp_dyn_mul &&
10705 	    rack->rc_always_pace) {
10706 		/* Check if we are dragging bottom */
10707 		rack_check_bottom_drag(tp, rack, so, acked);
10708 	}
10709 	if (tp->snd_una == tp->snd_max) {
10710 		tp->t_flags &= ~TF_PREVVALID;
10711 		rack->r_ctl.retran_during_recovery = 0;
10712 		rack->r_ctl.dsack_byte_cnt = 0;
10713 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
10714 		if (rack->r_ctl.rc_went_idle_time == 0)
10715 			rack->r_ctl.rc_went_idle_time = 1;
10716 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
10717 		if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
10718 			tp->t_acktime = 0;
10719 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10720 	}
10721 	if (acked && rack->r_fast_output)
10722 		rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
10723 	if (sbavail(&so->so_snd)) {
10724 		rack->r_wanted_output = 1;
10725 	}
10726 	return (1);
10727 }
10728 
10729 /*
10730  * A return value of 1 means the TCB is unlocked and most
10731  * likely gone; a return value of 0 means the TCP is still
10732  * locked.
10733  */
10734 static int
10735 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
10736     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10737     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10738 {
10739 	int32_t ret_val = 0;
10740 	int32_t todrop;
10741 	int32_t ourfinisacked = 0;
10742 	struct tcp_rack *rack;
10743 
10744 	ctf_calc_rwin(so, tp);
10745 	/*
10746 	 * If the state is SYN_SENT: if seg contains an ACK, but not for
10747 	 * our SYN, drop the input; if seg contains a RST, drop the
10748 	 * connection; if seg does not contain a SYN, drop it. Otherwise
10749 	 * this is an acceptable SYN segment: initialize tp->rcv_nxt and
10750 	 * tp->irs; if seg contains an ack, advance tp->snd_una; if seg
10751 	 * contains an ECE and ECN support is enabled, the stream is ECN
10752 	 * capable; if the SYN has been acked, change to ESTABLISHED,
10753 	 * else to SYN_RCVD; arrange for the segment to be acked
10754 	 * (eventually); and continue processing the rest of the data and controls.
10755 	 */
10756 	if ((thflags & TH_ACK) &&
10757 	    (SEQ_LEQ(th->th_ack, tp->iss) ||
10758 	    SEQ_GT(th->th_ack, tp->snd_max))) {
10759 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10760 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10761 		return (1);
10762 	}
10763 	if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
10764 		TCP_PROBE5(connect__refused, NULL, tp,
10765 		    mtod(m, const char *), tp, th);
10766 		tp = tcp_drop(tp, ECONNREFUSED);
10767 		ctf_do_drop(m, tp);
10768 		return (1);
10769 	}
10770 	if (thflags & TH_RST) {
10771 		ctf_do_drop(m, tp);
10772 		return (1);
10773 	}
10774 	if (!(thflags & TH_SYN)) {
10775 		ctf_do_drop(m, tp);
10776 		return (1);
10777 	}
10778 	tp->irs = th->th_seq;
10779 	tcp_rcvseqinit(tp);
10780 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10781 	if (thflags & TH_ACK) {
10782 		int tfo_partial = 0;
10783 
10784 		KMOD_TCPSTAT_INC(tcps_connects);
10785 		soisconnected(so);
10786 #ifdef MAC
10787 		mac_socketpeer_set_from_mbuf(m, so);
10788 #endif
10789 		/* Do window scaling on this connection? */
10790 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
10791 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
10792 			tp->rcv_scale = tp->request_r_scale;
10793 		}
10794 		tp->rcv_adv += min(tp->rcv_wnd,
10795 		    TCP_MAXWIN << tp->rcv_scale);
10796 		/*
10797 		 * If not all the data that was sent in the TFO SYN
10798 		 * has been acked, resend the remainder right away.
10799 		 */
10800 		if (IS_FASTOPEN(tp->t_flags) &&
10801 		    (tp->snd_una != tp->snd_max)) {
10802 			tp->snd_nxt = th->th_ack;
10803 			tfo_partial = 1;
10804 		}
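		/*
		 * For example (hypothetical numbers): if the TFO SYN
		 * carried 500 bytes of data and the peer's SYN|ACK only
		 * covers the SYN plus the first 100, snd_una != snd_max,
		 * so snd_nxt is pulled back to th_ack and the remaining
		 * 400 bytes are sent again right away.
		 */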
10805 		/*
10806 		 * If there's data, delay ACK; if there's also a FIN, ACKNOW
10807 		 * will be turned on later.
10808 		 */
10809 		if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
10810 			rack_timer_cancel(tp, rack,
10811 					  rack->r_ctl.rc_rcvtime, __LINE__);
10812 			tp->t_flags |= TF_DELACK;
10813 		} else {
10814 			rack->r_wanted_output = 1;
10815 			tp->t_flags |= TF_ACKNOW;
10816 			rack->rc_dack_toggle = 0;
10817 		}
10818 		if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
10819 		    (V_tcp_do_ecn == 1)) {
10820 			tp->t_flags2 |= TF2_ECN_PERMIT;
10821 			KMOD_TCPSTAT_INC(tcps_ecn_shs);
10822 		}
10823 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
10824 			/*
10825 			 * We advance snd_una for the
10826 			 * fast open case. If th_ack is
10827 			 * acknowledging data beyond
10828 			 * snd_una we can't just call
10829 			 * ack-processing since the
10830 			 * data stream in our send-map
10831 			 * will start at snd_una + 1 (one
10832 			 * beyond the SYN). If it's just
10833 			 * equal we don't need to do that
10834 			 * and there is no send_map.
10835 			 */
10836 			tp->snd_una++;
10837 		}
10838 		/*
10839 		 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
10840 		 * SYN_SENT  --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
10841 		 */
10842 		tp->t_starttime = ticks;
10843 		if (tp->t_flags & TF_NEEDFIN) {
10844 			tcp_state_change(tp, TCPS_FIN_WAIT_1);
10845 			tp->t_flags &= ~TF_NEEDFIN;
10846 			thflags &= ~TH_SYN;
10847 		} else {
10848 			tcp_state_change(tp, TCPS_ESTABLISHED);
10849 			TCP_PROBE5(connect__established, NULL, tp,
10850 			    mtod(m, const char *), tp, th);
10851 			rack_cc_conn_init(tp);
10852 		}
10853 	} else {
10854 		/*
10855 		 * Received initial SYN in SYN-SENT[*] state => simultaneous
10856 		 * open.  If segment contains CC option and there is a
10857 		 * cached CC, apply TAO test. If it succeeds, connection is *
10858 		 * half-synchronized. Otherwise, do 3-way handshake:
10859 		 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
10860 		 * there was no CC option, clear cached CC value.
10861 		 */
10862 		tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
10863 		tcp_state_change(tp, TCPS_SYN_RECEIVED);
10864 	}
10865 	INP_WLOCK_ASSERT(tp->t_inpcb);
10866 	/*
10867 	 * Advance th->th_seq to correspond to first data byte. If data,
10868 	 * trim to stay within window, dropping FIN if necessary.
10869 	 */
10870 	th->th_seq++;
10871 	if (tlen > tp->rcv_wnd) {
10872 		todrop = tlen - tp->rcv_wnd;
10873 		m_adj(m, -todrop);
10874 		tlen = tp->rcv_wnd;
10875 		thflags &= ~TH_FIN;
10876 		KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
10877 		KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
10878 	}
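	/*
	 * Trim example (hypothetical numbers): with tlen = 2000 and
	 * rcv_wnd = 1500, todrop = 500; m_adj(m, -500) trims 500 bytes
	 * off the tail of the mbuf chain, tlen becomes 1500, and TH_FIN
	 * is cleared since the FIN would lie beyond the window.
	 */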
10879 	tp->snd_wl1 = th->th_seq - 1;
10880 	tp->rcv_up = th->th_seq;
10881 	/*
10882 	 * Client side of transaction: already sent SYN and data. If the
10883 	 * remote host used T/TCP to validate the SYN, our data will be
10884 	 * ACK'd; if so, enter normal data segment processing in the middle
10885 	 * of step 5, ack processing. Otherwise, goto step 6.
10886 	 */
10887 	if (thflags & TH_ACK) {
10888 		/* For syn-sent we need to possibly update the rtt */
10889 		if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
10890 			uint32_t t, mcts;
10891 
10892 			mcts = tcp_ts_getticks();
10893 			t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
10894 			if (!tp->t_rttlow || tp->t_rttlow > t)
10895 				tp->t_rttlow = t;
10896 			rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
10897 			tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
10898 			tcp_rack_xmit_timer_commit(rack, tp);
10899 		}
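		/*
		 * Worked example (hypothetical values, not compiled):
		 * the timestamps tick in milliseconds, so the echo
		 * delta is scaled by HPTS_USEC_IN_MSEC to get usecs.
		 */
#if 0
		uint32_t mcts = 2050, tsecr = 2000;
		uint32_t t = (mcts - tsecr) * HPTS_USEC_IN_MSEC; /* 50000 us */
#endif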
10900 		if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
10901 			return (ret_val);
10902 		/* We may have changed to FIN_WAIT_1 above */
10903 		if (tp->t_state == TCPS_FIN_WAIT_1) {
10904 			/*
10905 			 * In FIN_WAIT_1 STATE in addition to the processing
10906 			 * for the ESTABLISHED state if our FIN is now
10907 			 * acknowledged then enter FIN_WAIT_2.
10908 			 */
10909 			if (ourfinisacked) {
10910 				/*
10911 				 * If we can't receive any more data, then
10912 				 * closing user can proceed. Starting the
10913 				 * timer is contrary to the specification,
10914 				 * but if we don't get a FIN we'll hang
10915 				 * forever.
10916 				 *
10917 				 * XXXjl: we should release the tp also, and
10918 				 * use a compressed state.
10919 				 */
10920 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10921 					soisdisconnected(so);
10922 					tcp_timer_activate(tp, TT_2MSL,
10923 					    (tcp_fast_finwait2_recycle ?
10924 					    tcp_finwait2_timeout :
10925 					    TP_MAXIDLE(tp)));
10926 				}
10927 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
10928 			}
10929 		}
10930 	}
10931 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10932 	   tiwin, thflags, nxt_pkt));
10933 }
10934 
10935 /*
10936  * A return value of 1 means the TCB is unlocked and most
10937  * likely gone; a return value of 0 means the TCP is still
10938  * locked.
10939  */
10940 static int
10941 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
10942     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10943     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10944 {
10945 	struct tcp_rack *rack;
10946 	int32_t ret_val = 0;
10947 	int32_t ourfinisacked = 0;
10948 
10949 	ctf_calc_rwin(so, tp);
10950 	if ((thflags & TH_ACK) &&
10951 	    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
10952 	    SEQ_GT(th->th_ack, tp->snd_max))) {
10953 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10954 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10955 		return (1);
10956 	}
10957 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10958 	if (IS_FASTOPEN(tp->t_flags)) {
10959 		/*
10960 		 * When a TFO connection is in SYN_RECEIVED, the
10961 		 * only valid packets are the initial SYN, a
10962 		 * retransmit/copy of the initial SYN (possibly with
10963 		 * a subset of the original data), a valid ACK, a
10964 		 * FIN, or a RST.
10965 		 */
10966 		if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
10967 			tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10968 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10969 			return (1);
10970 		} else if (thflags & TH_SYN) {
10971 			/* non-initial SYN is ignored */
10972 			if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
10973 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
10974 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
10975 				ctf_do_drop(m, NULL);
10976 				return (0);
10977 			}
10978 		} else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
10979 			ctf_do_drop(m, NULL);
10980 			return (0);
10981 		}
10982 	}
10983 	if ((thflags & TH_RST) ||
10984 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
10985 		return (ctf_process_rst(m, th, so, tp));
10986 	/*
10987 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10988 	 * it's less than ts_recent, drop it.
10989 	 */
10990 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10991 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10992 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10993 			return (ret_val);
10994 	}
10995 	/*
10996 	 * In the SYN-RECEIVED state, validate that the packet belongs to
10997 	 * this connection before trimming the data to fit the receive
10998 	 * window.  Check the sequence number versus IRS since we know the
10999 	 * sequence numbers haven't wrapped.  This is a partial fix for the
11000 	 * "LAND" DoS attack.
11001 	 */
11002 	if (SEQ_LT(th->th_seq, tp->irs)) {
11003 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
11004 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11005 		return (1);
11006 	}
11007 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11008 			      &rack->r_ctl.challenge_ack_ts,
11009 			      &rack->r_ctl.challenge_ack_cnt)) {
11010 		return (ret_val);
11011 	}
11012 	/*
11013 	 * If last ACK falls within this segment's sequence numbers, record
11014 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11015 	 * from the latest proposal of the tcplw@cray.com list (Braden
11016 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11017 	 * with our earlier PAWS tests, so this check should be solely
11018 	 * predicated on the sequence space of this segment. 3) That we
11019 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11020 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11021 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11022 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11023 	 * p.869. In such cases, we can still calculate the RTT correctly
11024 	 * when RCV.NXT == Last.ACK.Sent.
11025 	 */
11026 	if ((to->to_flags & TOF_TS) != 0 &&
11027 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11028 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11029 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11030 		tp->ts_recent_age = tcp_ts_getticks();
11031 		tp->ts_recent = to->to_tsval;
11032 	}
11033 	tp->snd_wnd = tiwin;
11034 	rack_validate_fo_sendwin_up(tp, rack);
11035 	/*
11036 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11037 	 * is on (half-synchronized state), then queue data for later
11038 	 * processing; else drop segment and return.
11039 	 */
11040 	if ((thflags & TH_ACK) == 0) {
11041 		if (IS_FASTOPEN(tp->t_flags)) {
11042 			rack_cc_conn_init(tp);
11043 		}
11044 		return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11045 		    tiwin, thflags, nxt_pkt));
11046 	}
11047 	KMOD_TCPSTAT_INC(tcps_connects);
11048 	soisconnected(so);
11049 	/* Do window scaling? */
11050 	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11051 	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11052 		tp->rcv_scale = tp->request_r_scale;
11053 	}
11054 	/*
11055 	 * Make transitions: SYN-RECEIVED  -> ESTABLISHED SYN-RECEIVED* ->
11056 	 * FIN-WAIT-1
11057 	 */
11058 	tp->t_starttime = ticks;
11059 	if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
11060 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
11061 		tp->t_tfo_pending = NULL;
11062 	}
11063 	if (tp->t_flags & TF_NEEDFIN) {
11064 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
11065 		tp->t_flags &= ~TF_NEEDFIN;
11066 	} else {
11067 		tcp_state_change(tp, TCPS_ESTABLISHED);
11068 		TCP_PROBE5(accept__established, NULL, tp,
11069 		    mtod(m, const char *), tp, th);
11070 		/*
11071 		 * TFO connections call cc_conn_init() during SYN
11072 		 * processing.  Calling it again here for such connections
11073 		 * is not harmless as it would undo the snd_cwnd reduction
11074 		 * that occurs when a TFO SYN|ACK is retransmitted.
11075 		 */
11076 		if (!IS_FASTOPEN(tp->t_flags))
11077 			rack_cc_conn_init(tp);
11078 	}
11079 	/*
11080 	 * Account for the ACK of our SYN prior to
11081 	 * regular ACK processing below, except for
11082 	 * simultaneous SYN, which is handled later.
11083 	 */
11084 	if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
11085 		tp->snd_una++;
11086 	/*
11087 	 * If segment contains data or ACK, will call tcp_reass() later; if
11088 	 * not, do so now to pass queued data to user.
11089 	 */
11090 	if (tlen == 0 && (thflags & TH_FIN) == 0) {
11091 		(void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
11092 		    (struct mbuf *)0);
11093 		tcp_handle_wakeup(tp, so);
11094 	}
11095 	tp->snd_wl1 = th->th_seq - 1;
11096 	/* For syn-recv we need to possibly update the rtt */
11097 	if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11098 		uint32_t t, mcts;
11099 
11100 		mcts = tcp_ts_getticks();
11101 		t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
11102 		if (!tp->t_rttlow || tp->t_rttlow > t)
11103 			tp->t_rttlow = t;
11104 		rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
11105 		tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11106 		tcp_rack_xmit_timer_commit(rack, tp);
11107 	}
11108 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11109 		return (ret_val);
11110 	}
11111 	if (tp->t_state == TCPS_FIN_WAIT_1) {
11112 		/* We could have gone to FIN_WAIT_1 (or EST) above */
11113 		/*
11114 		 * In FIN_WAIT_1 STATE in addition to the processing for the
11115 		 * ESTABLISHED state if our FIN is now acknowledged then
11116 		 * enter FIN_WAIT_2.
11117 		 */
11118 		if (ourfinisacked) {
11119 			/*
11120 			 * If we can't receive any more data, then closing
11121 			 * user can proceed. Starting the timer is contrary
11122 			 * to the specification, but if we don't get a FIN
11123 			 * we'll hang forever.
11124 			 *
11125 			 * XXXjl: we should release the tp also, and use a
11126 			 * compressed state.
11127 			 */
11128 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11129 				soisdisconnected(so);
11130 				tcp_timer_activate(tp, TT_2MSL,
11131 				    (tcp_fast_finwait2_recycle ?
11132 				    tcp_finwait2_timeout :
11133 				    TP_MAXIDLE(tp)));
11134 			}
11135 			tcp_state_change(tp, TCPS_FIN_WAIT_2);
11136 		}
11137 	}
11138 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11139 	    tiwin, thflags, nxt_pkt));
11140 }
11141 
11142 /*
11143  * A return value of 1 means the TCB is unlocked and most
11144  * likely gone; a return value of 0 means the TCP is still
11145  * locked.
11146  */
11147 static int
11148 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
11149     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11150     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11151 {
11152 	int32_t ret_val = 0;
11153 	struct tcp_rack *rack;
11154 
11155 	/*
11156 	 * Header prediction: check for the two common cases of a
11157 	 * uni-directional data xfer.  If the packet has no control flags,
11158 	 * is in-sequence, the window didn't change and we're not
11159 	 * retransmitting, it's a candidate.  If the length is zero and the
11160 	 * ack moved forward, we're the sender side of the xfer.  Just free
11161 	 * the data acked & wake any higher level process that was blocked
11162 	 * waiting for space.  If the length is non-zero and the ack didn't
11163 	 * move, we're the receiver side.  If we're getting packets in-order
11164 	 * (the reassembly queue is empty), add the data to the socket
11165 	 * buffer and note that we need a delayed ack. Make sure that the
11166 	 * hidden state-flags are also off. Since we check for
11167 	 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
11168 	 */
11169 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11170 	if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
11171 	    __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
11172 	    __predict_true(SEGQ_EMPTY(tp)) &&
11173 	    __predict_true(th->th_seq == tp->rcv_nxt)) {
11174 		if (tlen == 0) {
11175 			if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
11176 			    tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
11177 				return (0);
11178 			}
11179 		} else {
11180 			if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
11181 			    tiwin, nxt_pkt, iptos)) {
11182 				return (0);
11183 			}
11184 		}
11185 	}
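	/*
	 * Fast-path dispatch sketch for the block above (assuming all
	 * four __predict_true() guards held): tlen == 0 means we are
	 * the sending side and try rack_fastack(); tlen != 0 means we
	 * are the receiving side and try rack_do_fastnewdata().  A
	 * nonzero return from either means the segment was consumed in
	 * full; otherwise we fall through to the slow path below.
	 */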
11186 	ctf_calc_rwin(so, tp);
11187 
11188 	if ((thflags & TH_RST) ||
11189 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11190 		return (ctf_process_rst(m, th, so, tp));
11191 
11192 	/*
11193 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11194 	 * synchronized state.
11195 	 */
11196 	if (thflags & TH_SYN) {
11197 		ctf_challenge_ack(m, th, tp, &ret_val);
11198 		return (ret_val);
11199 	}
11200 	/*
11201 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11202 	 * it's less than ts_recent, drop it.
11203 	 */
11204 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11205 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11206 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11207 			return (ret_val);
11208 	}
11209 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11210 			      &rack->r_ctl.challenge_ack_ts,
11211 			      &rack->r_ctl.challenge_ack_cnt)) {
11212 		return (ret_val);
11213 	}
11214 	/*
11215 	 * If last ACK falls within this segment's sequence numbers, record
11216 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11217 	 * from the latest proposal of the tcplw@cray.com list (Braden
11218 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11219 	 * with our earlier PAWS tests, so this check should be solely
11220 	 * predicated on the sequence space of this segment. 3) That we
11221 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11222 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11223 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11224 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11225 	 * p.869. In such cases, we can still calculate the RTT correctly
11226 	 * when RCV.NXT == Last.ACK.Sent.
11227 	 */
11228 	if ((to->to_flags & TOF_TS) != 0 &&
11229 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11230 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11231 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11232 		tp->ts_recent_age = tcp_ts_getticks();
11233 		tp->ts_recent = to->to_tsval;
11234 	}
11235 	/*
11236 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11237 	 * is on (half-synchronized state), then queue data for later
11238 	 * processing; else drop segment and return.
11239 	 */
11240 	if ((thflags & TH_ACK) == 0) {
11241 		if (tp->t_flags & TF_NEEDSYN) {
11242 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11243 			    tiwin, thflags, nxt_pkt));
11244 
11245 		} else if (tp->t_flags & TF_ACKNOW) {
11246 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11247 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11248 			return (ret_val);
11249 		} else {
11250 			ctf_do_drop(m, NULL);
11251 			return (0);
11252 		}
11253 	}
11254 	/*
11255 	 * Ack processing.
11256 	 */
11257 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11258 		return (ret_val);
11259 	}
11260 	if (sbavail(&so->so_snd)) {
11261 		if (ctf_progress_timeout_check(tp, true)) {
11262 			rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
11263 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11264 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11265 			return (1);
11266 		}
11267 	}
11268 	/* State changes only happen in rack_process_data() */
11269 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11270 	    tiwin, thflags, nxt_pkt));
11271 }
11272 
11273 /*
11274  * A return value of 1 means the TCB is unlocked and most
11275  * likely gone; a return value of 0 means the TCP is still
11276  * locked.
11277  */
11278 static int
11279 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
11280     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11281     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11282 {
11283 	int32_t ret_val = 0;
11284 	struct tcp_rack *rack;
11285 
11286 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11287 	ctf_calc_rwin(so, tp);
11288 	if ((thflags & TH_RST) ||
11289 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11290 		return (ctf_process_rst(m, th, so, tp));
11291 	/*
11292 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11293 	 * synchronized state.
11294 	 */
11295 	if (thflags & TH_SYN) {
11296 		ctf_challenge_ack(m, th, tp, &ret_val);
11297 		return (ret_val);
11298 	}
11299 	/*
11300 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11301 	 * it's less than ts_recent, drop it.
11302 	 */
11303 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11304 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11305 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11306 			return (ret_val);
11307 	}
11308 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11309 			      &rack->r_ctl.challenge_ack_ts,
11310 			      &rack->r_ctl.challenge_ack_cnt)) {
11311 		return (ret_val);
11312 	}
11313 	/*
11314 	 * If last ACK falls within this segment's sequence numbers, record
11315 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11316 	 * from the latest proposal of the tcplw@cray.com list (Braden
11317 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11318 	 * with our earlier PAWS tests, so this check should be solely
11319 	 * predicated on the sequence space of this segment. 3) That we
11320 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11321 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11322 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11323 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11324 	 * p.869. In such cases, we can still calculate the RTT correctly
11325 	 * when RCV.NXT == Last.ACK.Sent.
11326 	 */
11327 	if ((to->to_flags & TOF_TS) != 0 &&
11328 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11329 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11330 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11331 		tp->ts_recent_age = tcp_ts_getticks();
11332 		tp->ts_recent = to->to_tsval;
11333 	}
11334 	/*
11335 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11336 	 * is on (half-synchronized state), then queue data for later
11337 	 * processing; else drop segment and return.
11338 	 */
11339 	if ((thflags & TH_ACK) == 0) {
11340 		if (tp->t_flags & TF_NEEDSYN) {
11341 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11342 			    tiwin, thflags, nxt_pkt));
11343 
11344 		} else if (tp->t_flags & TF_ACKNOW) {
11345 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11346 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11347 			return (ret_val);
11348 		} else {
11349 			ctf_do_drop(m, NULL);
11350 			return (0);
11351 		}
11352 	}
11353 	/*
11354 	 * Ack processing.
11355 	 */
11356 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11357 		return (ret_val);
11358 	}
11359 	if (sbavail(&so->so_snd)) {
11360 		if (ctf_progress_timeout_check(tp, true)) {
11361 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11362 						tp, tick, PROGRESS_DROP, __LINE__);
11363 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11364 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11365 			return (1);
11366 		}
11367 	}
11368 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11369 	    tiwin, thflags, nxt_pkt));
11370 }
11371 
11372 static int
11373 rack_check_data_after_close(struct mbuf *m,
11374     struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
11375 {
11376 	struct tcp_rack *rack;
11377 
11378 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11379 	if (rack->rc_allow_data_af_clo == 0) {
11380 	close_now:
11381 		tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11382 		/* tcp_close will kill the inp pre-log the Reset */
11383 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
11384 		tp = tcp_close(tp);
11385 		KMOD_TCPSTAT_INC(tcps_rcvafterclose);
11386 		ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
11387 		return (1);
11388 	}
11389 	if (sbavail(&so->so_snd) == 0)
11390 		goto close_now;
11391 	/* OK, we allow data that is ignored, followed by a reset */
11392 	tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11393 	tp->rcv_nxt = th->th_seq + *tlen;
11394 	tp->t_flags2 |= TF2_DROP_AF_DATA;
11395 	rack->r_wanted_output = 1;
11396 	*tlen = 0;
11397 	return (0);
11398 }
11399 
11400 /*
11401  * A return value of 1 means the TCB is unlocked and most
11402  * likely gone; a return value of 0 means the TCP is still
11403  * locked.
11404  */
11405 static int
11406 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
11407     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11408     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11409 {
11410 	int32_t ret_val = 0;
11411 	int32_t ourfinisacked = 0;
11412 	struct tcp_rack *rack;
11413 
11414 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11415 	ctf_calc_rwin(so, tp);
11416 
11417 	if ((thflags & TH_RST) ||
11418 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11419 		return (ctf_process_rst(m, th, so, tp));
11420 	/*
11421 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11422 	 * synchronized state.
11423 	 */
11424 	if (thflags & TH_SYN) {
11425 		ctf_challenge_ack(m, th, tp, &ret_val);
11426 		return (ret_val);
11427 	}
11428 	/*
11429 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11430 	 * it's less than ts_recent, drop it.
11431 	 */
11432 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11433 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11434 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11435 			return (ret_val);
11436 	}
11437 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11438 			      &rack->r_ctl.challenge_ack_ts,
11439 			      &rack->r_ctl.challenge_ack_cnt)) {
11440 		return (ret_val);
11441 	}
11442 	/*
11443 	 * If new data are received on a connection after the user processes
11444 	 * are gone, then RST the other end.
11445 	 */
11446 	if ((so->so_state & SS_NOFDREF) && tlen) {
11447 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
11448 			return (1);
11449 	}
11450 	/*
11451 	 * If last ACK falls within this segment's sequence numbers, record
11452 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11453 	 * from the latest proposal of the tcplw@cray.com list (Braden
11454 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11455 	 * with our earlier PAWS tests, so this check should be solely
11456 	 * predicated on the sequence space of this segment. 3) That we
11457 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11458 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11459 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11460 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11461 	 * p.869. In such cases, we can still calculate the RTT correctly
11462 	 * when RCV.NXT == Last.ACK.Sent.
11463 	 */
11464 	if ((to->to_flags & TOF_TS) != 0 &&
11465 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11466 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11467 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11468 		tp->ts_recent_age = tcp_ts_getticks();
11469 		tp->ts_recent = to->to_tsval;
11470 	}
11471 	/*
11472 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11473 	 * is on (half-synchronized state), then queue data for later
11474 	 * processing; else drop segment and return.
11475 	 */
11476 	if ((thflags & TH_ACK) == 0) {
11477 		if (tp->t_flags & TF_NEEDSYN) {
11478 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11479 			    tiwin, thflags, nxt_pkt));
11480 		} else if (tp->t_flags & TF_ACKNOW) {
11481 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11482 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11483 			return (ret_val);
11484 		} else {
11485 			ctf_do_drop(m, NULL);
11486 			return (0);
11487 		}
11488 	}
11489 	/*
11490 	 * Ack processing.
11491 	 */
11492 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11493 		return (ret_val);
11494 	}
11495 	if (ourfinisacked) {
11496 		/*
11497 		 * If we can't receive any more data, then closing user can
11498 		 * proceed. Starting the timer is contrary to the
11499 		 * specification, but if we don't get a FIN we'll hang
11500 		 * forever.
11501 		 *
11502 		 * XXXjl: we should release the tp also, and use a
11503 		 * compressed state.
11504 		 */
11505 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11506 			soisdisconnected(so);
11507 			tcp_timer_activate(tp, TT_2MSL,
11508 			    (tcp_fast_finwait2_recycle ?
11509 			    tcp_finwait2_timeout :
11510 			    TP_MAXIDLE(tp)));
11511 		}
11512 		tcp_state_change(tp, TCPS_FIN_WAIT_2);
11513 	}
11514 	if (sbavail(&so->so_snd)) {
11515 		if (ctf_progress_timeout_check(tp, true)) {
11516 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11517 						tp, tick, PROGRESS_DROP, __LINE__);
11518 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11519 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11520 			return (1);
11521 		}
11522 	}
11523 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11524 	    tiwin, thflags, nxt_pkt));
11525 }
11526 
11527 /*
11528  * A return value of 1 means the TCB is unlocked and most
11529  * likely gone; a return value of 0 means the TCP is still
11530  * locked.
11531  */
11532 static int
11533 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
11534     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11535     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11536 {
11537 	int32_t ret_val = 0;
11538 	int32_t ourfinisacked = 0;
11539 	struct tcp_rack *rack;
11540 
11541 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11542 	ctf_calc_rwin(so, tp);
11543 
11544 	if ((thflags & TH_RST) ||
11545 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11546 		return (ctf_process_rst(m, th, so, tp));
11547 	/*
11548 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11549 	 * synchronized state.
11550 	 */
11551 	if (thflags & TH_SYN) {
11552 		ctf_challenge_ack(m, th, tp, &ret_val);
11553 		return (ret_val);
11554 	}
11555 	/*
11556 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11557 	 * it's less than ts_recent, drop it.
11558 	 */
11559 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11560 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11561 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11562 			return (ret_val);
11563 	}
11564 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11565 			      &rack->r_ctl.challenge_ack_ts,
11566 			      &rack->r_ctl.challenge_ack_cnt)) {
11567 		return (ret_val);
11568 	}
11569 	/*
11570 	 * If new data are received on a connection after the user processes
11571 	 * are gone, then RST the other end.
11572 	 */
11573 	if ((so->so_state & SS_NOFDREF) && tlen) {
11574 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
11575 			return (1);
11576 	}
11577 	/*
11578 	 * If last ACK falls within this segment's sequence numbers, record
11579 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11580 	 * from the latest proposal of the tcplw@cray.com list (Braden
11581 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11582 	 * with our earlier PAWS tests, so this check should be solely
11583 	 * predicated on the sequence space of this segment. 3) That we
11584 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11585 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11586 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11587 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11588 	 * p.869. In such cases, we can still calculate the RTT correctly
11589 	 * when RCV.NXT == Last.ACK.Sent.
11590 	 */
11591 	if ((to->to_flags & TOF_TS) != 0 &&
11592 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11593 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11594 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11595 		tp->ts_recent_age = tcp_ts_getticks();
11596 		tp->ts_recent = to->to_tsval;
11597 	}
11598 	/*
11599 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11600 	 * is on (half-synchronized state), then queue data for later
11601 	 * processing; else drop segment and return.
11602 	 */
11603 	if ((thflags & TH_ACK) == 0) {
11604 		if (tp->t_flags & TF_NEEDSYN) {
11605 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11606 			    tiwin, thflags, nxt_pkt));
11607 		} else if (tp->t_flags & TF_ACKNOW) {
11608 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11609 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11610 			return (ret_val);
11611 		} else {
11612 			ctf_do_drop(m, NULL);
11613 			return (0);
11614 		}
11615 	}
11616 	/*
11617 	 * Ack processing.
11618 	 */
11619 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11620 		return (ret_val);
11621 	}
11622 	if (ourfinisacked) {
11623 		tcp_twstart(tp);
11624 		m_freem(m);
11625 		return (1);
11626 	}
11627 	if (sbavail(&so->so_snd)) {
11628 		if (ctf_progress_timeout_check(tp, true)) {
11629 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11630 						tp, tick, PROGRESS_DROP, __LINE__);
11631 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11632 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11633 			return (1);
11634 		}
11635 	}
11636 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11637 	    tiwin, thflags, nxt_pkt));
11638 }
11639 
11640 /*
11641  * A return value of 1 means the TCB is unlocked and most
11642  * likely gone; a return value of 0 means the TCP is still
11643  * locked.
11644  */
11645 static int
11646 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11647     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11648     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11649 {
11650 	int32_t ret_val = 0;
11651 	int32_t ourfinisacked = 0;
11652 	struct tcp_rack *rack;
11653 
11654 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11655 	ctf_calc_rwin(so, tp);
11656 
11657 	if ((thflags & TH_RST) ||
11658 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11659 		return (ctf_process_rst(m, th, so, tp));
11660 	/*
11661 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11662 	 * synchronized state.
11663 	 */
11664 	if (thflags & TH_SYN) {
11665 		ctf_challenge_ack(m, th, tp, &ret_val);
11666 		return (ret_val);
11667 	}
11668 	/*
11669 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11670 	 * it's less than ts_recent, drop it.
11671 	 */
11672 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11673 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11674 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11675 			return (ret_val);
11676 	}
11677 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11678 			      &rack->r_ctl.challenge_ack_ts,
11679 			      &rack->r_ctl.challenge_ack_cnt)) {
11680 		return (ret_val);
11681 	}
11682 	/*
11683 	 * If new data are received on a connection after the user processes
11684 	 * are gone, then RST the other end.
11685 	 */
11686 	if ((so->so_state & SS_NOFDREF) && tlen) {
11687 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
11688 			return (1);
11689 	}
11690 	/*
11691 	 * If last ACK falls within this segment's sequence numbers, record
11692 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11693 	 * from the latest proposal of the tcplw@cray.com list (Braden
11694 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11695 	 * with our earlier PAWS tests, so this check should be solely
11696 	 * predicated on the sequence space of this segment. 3) That we
11697 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11698 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11699 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11700 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11701 	 * p.869. In such cases, we can still calculate the RTT correctly
11702 	 * when RCV.NXT == Last.ACK.Sent.
11703 	 */
11704 	if ((to->to_flags & TOF_TS) != 0 &&
11705 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11706 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11707 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11708 		tp->ts_recent_age = tcp_ts_getticks();
11709 		tp->ts_recent = to->to_tsval;
11710 	}
11711 	/*
11712 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11713 	 * is on (half-synchronized state), then queue data for later
11714 	 * processing; else drop segment and return.
11715 	 */
11716 	if ((thflags & TH_ACK) == 0) {
11717 		if (tp->t_flags & TF_NEEDSYN) {
11718 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11719 			    tiwin, thflags, nxt_pkt));
11720 		} else if (tp->t_flags & TF_ACKNOW) {
11721 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11722 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11723 			return (ret_val);
11724 		} else {
11725 			ctf_do_drop(m, NULL);
11726 			return (0);
11727 		}
11728 	}
11729 	/*
11730 	 * case TCPS_LAST_ACK: Ack processing.
11731 	 */
11732 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11733 		return (ret_val);
11734 	}
11735 	if (ourfinisacked) {
11736 		tp = tcp_close(tp);
11737 		ctf_do_drop(m, tp);
11738 		return (1);
11739 	}
11740 	if (sbavail(&so->so_snd)) {
11741 		if (ctf_progress_timeout_check(tp, true)) {
11742 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11743 						tp, tick, PROGRESS_DROP, __LINE__);
11744 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11745 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11746 			return (1);
11747 		}
11748 	}
11749 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11750 	    tiwin, thflags, nxt_pkt));
11751 }
11752 
11753 /*
11754  * A return value of 1 means the TCB is unlocked and most
11755  * likely gone; a return value of 0 means the TCP is still
11756  * locked.
11757  */
11758 static int
11759 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
11760     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11761     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11762 {
11763 	int32_t ret_val = 0;
11764 	int32_t ourfinisacked = 0;
11765 	struct tcp_rack *rack;
11766 
11767 	rack = (struct tcp_rack *)tp->t_fb_ptr;
11768 	ctf_calc_rwin(so, tp);
11769 
11770 	/* Reset receive buffer auto scaling when not in bulk receive mode. */
11771 	if ((thflags & TH_RST) ||
11772 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
11773 		return (ctf_process_rst(m, th, so, tp));
11774 	/*
11775 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11776 	 * synchronized state.
11777 	 */
11778 	if (thflags & TH_SYN) {
11779 		ctf_challenge_ack(m, th, tp, &ret_val);
11780 		return (ret_val);
11781 	}
11782 	/*
11783 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11784 	 * it's less than ts_recent, drop it.
11785 	 */
11786 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11787 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11788 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11789 			return (ret_val);
11790 	}
11791 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11792 			      &rack->r_ctl.challenge_ack_ts,
11793 			      &rack->r_ctl.challenge_ack_cnt)) {
11794 		return (ret_val);
11795 	}
11796 	/*
11797 	 * If new data are received on a connection after the user processes
11798 	 * are gone, then RST the other end.
11799 	 */
11800 	if ((so->so_state & SS_NOFDREF) &&
11801 	    tlen) {
11802 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
11803 			return (1);
11804 	}
11805 	/*
11806 	 * If last ACK falls within this segment's sequence numbers, record
11807 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
11808 	 * from the latest proposal of the tcplw@cray.com list (Braden
11809 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
11810 	 * with our earlier PAWS tests, so this check should be solely
11811 	 * predicated on the sequence space of this segment. 3) That we
11812 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11813 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11814 	 * SEG.Len. This modified check allows us to overcome RFC1323's
11815 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11816 	 * p.869. In such cases, we can still calculate the RTT correctly
11817 	 * when RCV.NXT == Last.ACK.Sent.
11818 	 */
11819 	if ((to->to_flags & TOF_TS) != 0 &&
11820 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11821 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11822 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11823 		tp->ts_recent_age = tcp_ts_getticks();
11824 		tp->ts_recent = to->to_tsval;
11825 	}
11826 	/*
11827 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
11828 	 * is on (half-synchronized state), then queue data for later
11829 	 * processing; else drop segment and return.
11830 	 */
11831 	if ((thflags & TH_ACK) == 0) {
11832 		if (tp->t_flags & TF_NEEDSYN) {
11833 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11834 			    tiwin, thflags, nxt_pkt));
11835 		} else if (tp->t_flags & TF_ACKNOW) {
11836 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11837 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11838 			return (ret_val);
11839 		} else {
11840 			ctf_do_drop(m, NULL);
11841 			return (0);
11842 		}
11843 	}
11844 	/*
11845 	 * Ack processing.
11846 	 */
11847 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11848 		return (ret_val);
11849 	}
11850 	if (sbavail(&so->so_snd)) {
11851 		if (ctf_progress_timeout_check(tp, true)) {
11852 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11853 						tp, tick, PROGRESS_DROP, __LINE__);
11854 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11855 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11856 			return (1);
11857 		}
11858 	}
11859 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11860 	    tiwin, thflags, nxt_pkt));
11861 }
11862 
11863 static void inline
11864 rack_clear_rate_sample(struct tcp_rack *rack)
11865 {
11866 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
11867 	rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
11868 	rack->r_ctl.rack_rs.rs_rtt_tot = 0;
11869 }
11870 
11871 static void
11872 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
11873 {
11874 	uint64_t bw_est, rate_wanted;
11875 	int chged = 0;
11876 	uint32_t user_max, orig_min, orig_max;
11877 
11878 	orig_min = rack->r_ctl.rc_pace_min_segs;
11879 	orig_max = rack->r_ctl.rc_pace_max_segs;
11880 	user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
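	/*
	 * For example (hypothetical values): a 1448-byte MSS with
	 * rc_user_set_max_segs = 40 caps user-driven pacing bursts at
	 * user_max = 1448 * 40 = 57920 bytes.
	 */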
11881 	if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
11882 		chged = 1;
11883 	rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
11884 	if (rack->use_fixed_rate || rack->rc_force_max_seg) {
11885 		if (user_max != rack->r_ctl.rc_pace_max_segs)
11886 			chged = 1;
11887 	}
11888 	if (rack->rc_force_max_seg) {
11889 		rack->r_ctl.rc_pace_max_segs = user_max;
11890 	} else if (rack->use_fixed_rate) {
11891 		bw_est = rack_get_bw(rack);
11892 		if ((rack->r_ctl.crte == NULL) ||
11893 		    (bw_est != rack->r_ctl.crte->rate)) {
11894 			rack->r_ctl.rc_pace_max_segs = user_max;
11895 		} else {
11896 			/* We are pacing right at the hardware rate */
11897 			uint32_t segsiz;
11898 
11899 			segsiz = min(ctf_fixed_maxseg(tp),
11900 				     rack->r_ctl.rc_pace_min_segs);
11901 			rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
11902 				                           tp, bw_est, segsiz, 0,
11903 							   rack->r_ctl.crte, NULL);
11904 		}
11905 	} else if (rack->rc_always_pace) {
11906 		if (rack->r_ctl.gp_bw ||
11907 #ifdef NETFLIX_PEAKRATE
11908 		    rack->rc_tp->t_maxpeakrate ||
11909 #endif
11910 		    rack->r_ctl.init_rate) {
11911 			/* We have a rate of some sort set */
11912 			uint32_t  orig;
11913 
11914 			bw_est = rack_get_bw(rack);
11915 			orig = rack->r_ctl.rc_pace_max_segs;
11916 			if (fill_override)
11917 				rate_wanted = *fill_override;
11918 			else
11919 				rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
11920 			if (rate_wanted) {
11921 				/* We have something */
11922 				rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
11923 										   rate_wanted,
11924 										   ctf_fixed_maxseg(rack->rc_tp));
11925 			} else
11926 				rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
11927 			if (orig != rack->r_ctl.rc_pace_max_segs)
11928 				chged = 1;
11929 		} else if ((rack->r_ctl.gp_bw == 0) &&
11930 			   (rack->r_ctl.rc_pace_max_segs == 0)) {
11931 			/*
11932 			 * If we have nothing, limit us to bursting
11933 			 * out IW-sized pieces.
11934 			 */
11935 			chged = 1;
11936 			rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
11937 		}
11938 	}
11939 	if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
11940 		chged = 1;
11941 		rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
11942 	}
11943 	if (chged)
11944 		rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
11945 }
11946 
11947 
11948 static void
11949 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack)
11950 {
11951 #ifdef INET6
11952 	struct ip6_hdr *ip6 = NULL;
11953 #endif
11954 #ifdef INET
11955 	struct ip *ip = NULL;
11956 #endif
11957 	struct udphdr *udp = NULL;
11958 
11959 	/* OK, let's fill in the fast block; it can only be used with no IP options! */
11960 #ifdef INET6
11961 	if (rack->r_is_v6) {
11962 		rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
11963 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
11964 		if (tp->t_port) {
11965 			rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11966 			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
11967 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11968 			udp->uh_dport = tp->t_port;
11969 			rack->r_ctl.fsb.udp = udp;
11970 			rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
11971 		} else
11972 		{
11973 			rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
11974 			rack->r_ctl.fsb.udp = NULL;
11975 		}
11976 		tcpip_fillheaders(rack->rc_inp,
11977 				  tp->t_port,
11978 				  ip6, rack->r_ctl.fsb.th);
11979 	} else
11980 #endif				/* INET6 */
11981 	{
11982 		rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
11983 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
11984 		if (tp->t_port) {
11985 			rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11986 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
11987 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11988 			udp->uh_dport = tp->t_port;
11989 			rack->r_ctl.fsb.udp = udp;
11990 			rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
11991 		} else
11992 		{
11993 			rack->r_ctl.fsb.udp = NULL;
11994 			rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
11995 		}
11996 		tcpip_fillheaders(rack->rc_inp,
11997 				  tp->t_port,
11998 				  ip, rack->r_ctl.fsb.th);
11999 	}
12000 	rack->r_fsb_inited = 1;
12001 }
12002 
12003 static int
12004 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
12005 {
12006 	/*
12007 	 * Allocate the larger of the two spaces (V6 if available, else
12008 	 * just V4) and include a udphdr (overbooked for either case)
12009 	 */
12010 #ifdef INET6
12011 	rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
12012 #else
12013 	rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
12014 #endif
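	/*
	 * Size sketch: the INET6 case reserves 40 (ip6_hdr) + 20
	 * (tcphdr) + 8 (udphdr) = 68 bytes; the INET case reserves
	 * sizeof(struct tcpiphdr) + 8 = 48 bytes.  The udphdr is always
	 * included so the same buffer can also serve a UDP-tunneled
	 * connection without reallocation.
	 */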
12015 	rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
12016 					    M_TCPFSB, M_NOWAIT|M_ZERO);
12017 	if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
12018 		return (ENOMEM);
12019 	}
12020 	rack->r_fsb_inited = 0;
12021 	return (0);
12022 }
12023 
12024 static int
12025 rack_init(struct tcpcb *tp)
12026 {
12027 	struct tcp_rack *rack = NULL;
12028 	struct rack_sendmap *insret;
12029 	uint32_t iwin, snt, us_cts;
12030 	int err;
12031 
12032 	tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
12033 	if (tp->t_fb_ptr == NULL) {
12034 		/*
12035 		 * We need to allocate memory but can't. We hold the INP
12036 		 * and INP_INFO locks, and they are recursive (this happens
12037 		 * during setup), so a scheme that drops the locks here
12038 		 * fails :(
12039 		 */
12040 		return (ENOMEM);
12041 	}
12042 	memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
12043 
12044 	rack = (struct tcp_rack *)tp->t_fb_ptr;
12045 	RB_INIT(&rack->r_ctl.rc_mtree);
12046 	TAILQ_INIT(&rack->r_ctl.rc_free);
12047 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
12048 	rack->rc_tp = tp;
12049 	rack->rc_inp = tp->t_inpcb;
12050 	/* Set the flag */
12051 	rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
12052 	/* Probably not needed, but let's be sure */
12053 	rack_clear_rate_sample(rack);
12054 	/*
12055 	 * Save off the default values; socket options will poke at
12056 	 * these if pacing is not on, or if we have not yet reached
12057 	 * the point where pacing turns on (gp_ready or fixed-rate
12058 	 * pacing enabled). When the values get set into the CC
12059 	 * module (when gp_ready is enabled or we enable fixed-rate
12060 	 * pacing), we place the old values in here so we have a
12061 	 * restoral, and we set the flag rc_pacing_cc_set. That
12062 	 * way, whenever we turn off pacing or switch away from
12063 	 * this stack, we will know to go restore the saved
12064 	 * values.
12065 	 */
12066 	rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
12067 	rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
12068 	/* We want abe like behavior as well */
12069 	rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
12070 	rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
12071 	rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
12072 	rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
12073 	if (use_rack_rr)
12074 		rack->use_rack_rr = 1;
12075 	if (V_tcp_delack_enabled)
12076 		tp->t_delayed_ack = 1;
12077 	else
12078 		tp->t_delayed_ack = 0;
12079 #ifdef TCP_ACCOUNTING
12080 	if (rack_tcp_accounting) {
12081 		tp->t_flags2 |= TF2_TCP_ACCOUNTING;
12082 	}
12083 #endif
12084 	if (rack_enable_shared_cwnd)
12085 		rack->rack_enable_scwnd = 1;
12086 	rack->rc_user_set_max_segs = rack_hptsi_segments;
12087 	rack->rc_force_max_seg = 0;
12088 	if (rack_use_imac_dack)
12089 		rack->rc_dack_mode = 1;
12090 	TAILQ_INIT(&rack->r_ctl.opt_list);
12091 	rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
12092 	rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
12093 	rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
12094 	rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
12095 	rack->r_ctl.rc_highest_us_rtt = 0;
12096 	rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
12097 	rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
12098 	if (rack_use_cmp_acks)
12099 		rack->r_use_cmp_ack = 1;
12100 	if (rack_disable_prr)
12101 		rack->rack_no_prr = 1;
12102 	if (rack_gp_no_rec_chg)
12103 		rack->rc_gp_no_rec_chg = 1;
12104 	if (rack_pace_every_seg && tcp_can_enable_pacing()) {
12105 		rack->rc_always_pace = 1;
12106 		if (rack->use_fixed_rate || rack->gp_ready)
12107 			rack_set_cc_pacing(rack);
12108 	} else
12109 		rack->rc_always_pace = 0;
12110 	if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
12111 		rack->r_mbuf_queue = 1;
12112 	else
12113 		rack->r_mbuf_queue = 0;
12114 	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
12115 		tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
12116 	else
12117 		tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
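	/*
	 * Summary of the mbuf-queue decision just made (derived from the
	 * code above, not an original comment): INP_SUPPORTS_MBUFQ is set
	 * when any of rc_always_pace, r_use_cmp_ack or the
	 * rack_enable_mqueue_for_nonpaced sysctl is true, and cleared
	 * otherwise.
	 */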
12118 	rack_set_pace_segments(tp, rack, __LINE__, NULL);
12119 	if (rack_limits_scwnd)
12120 		rack->r_limit_scw = 1;
12121 	else
12122 		rack->r_limit_scw = 0;
12123 	rack->rc_labc = V_tcp_abc_l_var;
12124 	rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
12125 	rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12126 	rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
12127 	rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
12128 	rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
12129 	rack->r_ctl.rc_min_to = rack_min_to;
12130 	microuptime(&rack->r_ctl.act_rcv_time);
12131 	rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
12132 	rack->r_running_late = 0;
12133 	rack->r_running_early = 0;
12134 	rack->rc_init_win = rack_default_init_window;
12135 	rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
12136 	if (rack_hw_up_only)
12137 		rack->r_up_only = 1;
12138 	if (rack_do_dyn_mul) {
12139 		/* When dynamic adjustment is on, CA needs to start at 100% */
12140 		rack->rc_gp_dyn_mul = 1;
12141 		if (rack_do_dyn_mul >= 100)
12142 			rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
12143 	} else
12144 		rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
12145 	rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
12146 	rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
12147 	rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
12148 	setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
12149 				rack_probertt_filter_life);
12150 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12151 	rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12152 	rack->r_ctl.rc_time_of_last_probertt = us_cts;
12153 	rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
12154 	rack->r_ctl.rc_time_probertt_starts = 0;
12155 	/* We require at least one measurement, even if the sysctl is 0 */
12156 	if (rack_req_measurements)
12157 		rack->r_ctl.req_measurements = rack_req_measurements;
12158 	else
12159 		rack->r_ctl.req_measurements = 1;
12160 	if (rack_enable_hw_pacing)
12161 		rack->rack_hdw_pace_ena = 1;
12162 	if (rack_hw_rate_caps)
12163 		rack->r_rack_hw_rate_caps = 1;
12164 	/* Do we force detection on? */
12165 #ifdef NETFLIX_EXP_DETECTION
12166 	if (tcp_force_detection)
12167 		rack->do_detection = 1;
12168 	else
12169 #endif
12170 		rack->do_detection = 0;
12171 	if (rack_non_rxt_use_cr)
12172 		rack->rack_rec_nonrxt_use_cr = 1;
12173 	err = rack_init_fsb(tp, rack);
12174 	if (err) {
12175 		uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12176 		tp->t_fb_ptr = NULL;
12177 		return (err);
12178 	}
12179 	if (tp->snd_una != tp->snd_max) {
12180 		/* Create a send map for the current outstanding data */
12181 		struct rack_sendmap *rsm;
12182 
12183 		rsm = rack_alloc(rack);
12184 		if (rsm == NULL) {
12185 			uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12186 			tp->t_fb_ptr = NULL;
12187 			return (ENOMEM);
12188 		}
12189 		rsm->r_no_rtt_allowed = 1;
12190 		rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
12191 		rsm->r_rtr_cnt = 1;
12192 		rsm->r_rtr_bytes = 0;
12193 		if (tp->t_flags & TF_SENTFIN) {
12194 			rsm->r_end = tp->snd_max - 1;
12195 			rsm->r_flags |= RACK_HAS_FIN;
12196 		} else {
12197 			rsm->r_end = tp->snd_max;
12198 		}
12199 		if (tp->snd_una == tp->iss) {
12200 			/* The data space is one beyond snd_una */
12201 			rsm->r_flags |= RACK_HAS_SYN;
12202 			rsm->r_start = tp->iss;
12203 			rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
12204 		} else
12205 			rsm->r_start = tp->snd_una;
12206 		rsm->r_dupack = 0;
12207 		if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
12208 			rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
12209 			if (rsm->m)
12210 				rsm->orig_m_len = rsm->m->m_len;
12211 			else
12212 				rsm->orig_m_len = 0;
12213 		} else {
12214 			/*
12215 			 * This can happen if we have a stand-alone FIN
12216 			 * or SYN.
12217 			 */
12218 			rsm->m = NULL;
12219 			rsm->orig_m_len = 0;
12220 			rsm->soff = 0;
12221 		}
12222 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12223 #ifdef INVARIANTS
12224 		if (insret != NULL) {
12225 			panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
12226 			      insret, rack, rsm);
12227 		}
12228 #endif
12229 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
12230 		rsm->r_in_tmap = 1;
12231 	}
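	/*
	 * Worked example (annotation with illustrative numbers): handing
	 * off with snd_una = 1000, snd_max = 2000 and no FIN sent builds a
	 * single rsm covering [1000, 2000) with r_rtr_cnt = 1, linked into
	 * both the RB tree (rc_mtree) and the tail of the transmit map
	 * (rc_tmap) so the new stack can time and, if needed, retransmit it.
	 */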
12232 	/*
12233 	 * Timers in Rack are kept in microseconds, so let's
12234 	 * convert any initial incoming variables
12235 	 * from ticks into usecs. Note that we
12236 	 * also change the values of t_srtt and t_rttvar, if
12237 	 * they are non-zero. They are kept with a 5
12238 	 * bit decimal so we have to carefully convert
12239 	 * these to get the full precision.
12240 	 */
12241 	rack_convert_rtts(tp);
12242 	tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
12243 	if (rack_def_profile)
12244 		rack_set_profile(rack, rack_def_profile);
12245 	/* Cancel the GP measurement in progress */
12246 	tp->t_flags &= ~TF_GPUTINPROG;
12247 	if (SEQ_GT(tp->snd_max, tp->iss))
12248 		snt = tp->snd_max - tp->iss;
12249 	else
12250 		snt = 0;
12251 	iwin = rc_init_window(rack);
12252 	if (snt < iwin) {
12253 		/*
12254 		 * We are not past the initial window so we
12255 		 * need to make sure cwnd is correct.
12256 		 */
12257 		if (tp->snd_cwnd < iwin)
12258 			tp->snd_cwnd = iwin;
12259 		/*
12260 		 * If we are within the initial window
12261 		 * we want ssthresh to be unlimited. Setting
12262 		 * it to the rwnd (which the default stack does
12263 		 * and older racks) is not really a good idea
12264 		 * since we want to be in SS and grow both the
12265 		 * cwnd and the rwnd (via dynamic rwnd growth). If
12266 		 * we set it to the rwnd then as the peer grows its
12267 		 * rwnd we will be stuck in CA and never hit SS.
12268 		 *
12269 		 * It's far better to raise it up high (this takes the
12270 		 * risk that there has been a loss already; probably
12271 		 * we should have an indicator of loss in all stacks,
12272 		 * but we don't), but considering the normal use this
12273 		 * is a risk worth taking. The consequences of not
12274 		 * hitting SS are far worse than going one more time
12275 		 * into it early on (before we have sent even an IW).
12276 		 * It is highly unlikely that we will have had a loss
12277 		 * before getting the IW out.
12278 		 */
12279 		tp->snd_ssthresh = 0xffffffff;
12280 	}
12281 	rack_stop_all_timers(tp);
12282 	/* Let's set up the fsb block */
12283 	rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12284 	rack_log_rtt_shrinks(rack,  us_cts,  tp->t_rxtcur,
12285 			     __LINE__, RACK_RTTS_INIT);
12286 	return (0);
12287 }
12288 
12289 static int
12290 rack_handoff_ok(struct tcpcb *tp)
12291 {
12292 	if ((tp->t_state == TCPS_CLOSED) ||
12293 	    (tp->t_state == TCPS_LISTEN)) {
12294 		/* Sure no problem though it may not stick */
12295 		return (0);
12296 	}
12297 	if ((tp->t_state == TCPS_SYN_SENT) ||
12298 	    (tp->t_state == TCPS_SYN_RECEIVED)) {
12299 		/*
12300 		 * We really don't know if the peer supports SACK;
12301 		 * we have to get to ESTAB or beyond to tell.
12302 		 */
12303 		return (EAGAIN);
12304 	}
12305 	if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
12306 		/*
12307 		 * Rack will only send a FIN after all data is acknowledged.
12308 		 * So in this case we have more data outstanding. We can't
12309 		 * switch stacks until either all data and only the FIN
12310 		 * is left (in which case rack_init() now knows how
12311 		 * to deal with that) <or> all is acknowledged and we
12312 		 * are only left with incoming data, though why you
12313 		 * would want to switch to rack after all data is acknowledged
12314 		 * I have no idea (rrs)!
12315 		 */
12316 		return (EAGAIN);
12317 	}
12318 	if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
12319 		return (0);
12320 	}
12321 	/*
12322 	 * If we reach here we don't do SACK on this connection so we can
12323 	 * never do rack.
12324 	 */
12325 	return (EINVAL);
12326 }
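
/*
 * Handoff decision summary (an annotation restating the checks above):
 * CLOSED/LISTEN -> 0, SYN_SENT/SYN_RECEIVED -> EAGAIN (SACK support
 * still unknown), FIN sent with more than the FIN outstanding ->
 * EAGAIN, SACK permitted (or not required) -> 0, otherwise -> EINVAL.
 */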
12327 
12329 static void
12330 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
12331 {
12332 	int ack_cmp = 0;
12333 
12334 	if (tp->t_fb_ptr) {
12335 		struct tcp_rack *rack;
12336 		struct rack_sendmap *rsm, *nrsm, *rm;
12337 
12338 		rack = (struct tcp_rack *)tp->t_fb_ptr;
12339 		if (tp->t_in_pkt) {
12340 			/*
12341 			 * Since we are switching we need to process any
12342 			 * inbound packets in case a compressed ack is
12343 			 * in queue or the new stack does not support
12344 			 * mbuf queuing. These packets in theory should
12345 			 * have been handled by the old stack anyway.
12346 			 */
12347 			if ((rack->rc_inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) ||
12348 			    (rack->rc_inp->inp_flags2 & INP_FREED)) {
12349 				/* Kill all the packets */
12350 				struct mbuf *save, *m;
12351 
12352 				m = tp->t_in_pkt;
12353 				tp->t_in_pkt = NULL;
12354 				tp->t_tail_pkt = NULL;
12355 				while (m) {
12356 					save = m->m_nextpkt;
12357 					m->m_nextpkt = NULL;
12358 					m_freem(m);
12359 					m = save;
12360 				}
12361 			} else {
12362 				/* Process all the packets */
12363 				ctf_do_queued_segments(rack->rc_inp->inp_socket, rack->rc_tp, 0);
12364 			}
12365 			if ((tp->t_inpcb) &&
12366 			    (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP))
12367 				ack_cmp = 1;
12368 			if (ack_cmp) {
12369 				/* Tally whether we used large or small mbufs (ack-cmp was used). */
12370 				if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS)
12371 					counter_u64_add(rack_large_ackcmp, 1);
12372 				else
12373 					counter_u64_add(rack_small_ackcmp, 1);
12374 			}
12375 		}
12376 		tp->t_flags &= ~TF_FORCEDATA;	/* rack does not use force data */
12377 #ifdef NETFLIX_SHARED_CWND
12378 		if (rack->r_ctl.rc_scw) {
12379 			uint32_t limit;
12380 
12381 			if (rack->r_limit_scw)
12382 				limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
12383 			else
12384 				limit = 0;
12385 			tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
12386 						  rack->r_ctl.rc_scw_index,
12387 						  limit);
12388 			rack->r_ctl.rc_scw = NULL;
12389 		}
12390 #endif
12391 		if (rack->r_ctl.fsb.tcp_ip_hdr) {
12392 			free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12393 			rack->r_ctl.fsb.tcp_ip_hdr = NULL;
12394 			rack->r_ctl.fsb.th = NULL;
12395 		}
12396 		/* Convert back to ticks, with the 5 bit fractional remainder */
12397 		if (tp->t_srtt > 1) {
12398 			uint32_t val, frac;
12399 
12400 			val = USEC_2_TICKS(tp->t_srtt);
12401 			frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12402 			tp->t_srtt = val << TCP_RTT_SHIFT;
12403 			/*
12404 			 * frac is the fractional part that is left
12405 			 * over from converting usecs to hz ticks.
12406 			 * We need to scale it into the 5 bit
12407 			 * remainder.
12408 			 */
12409 			if (frac) {
12410 				if (hz == 1000) {
12411 					frac = (((uint64_t)frac *  (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12412 				} else {
12413 					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12414 				}
12415 				tp->t_srtt += frac;
12416 			}
12417 		}
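		/*
		 * Worked example of the conversion above (an annotation
		 * with illustrative numbers; assumes hz = 1000 and
		 * TCP_RTT_SCALE = 32): t_srtt = 30500 usec gives
		 * val = 30 ticks and frac = 500, so t_srtt becomes
		 * (30 << TCP_RTT_SHIFT) + 500 * 32 / 1000 = 960 + 16 =
		 * 976, i.e. 30.5 ticks in 5-bit fixed point.
		 */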
12418 		if (tp->t_rttvar) {
12419 			uint32_t val, frac;
12420 
12421 			val = USEC_2_TICKS(tp->t_rttvar);
12422 			frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
12423 			tp->t_rttvar = val <<  TCP_RTTVAR_SHIFT;
12424 			/*
12425 			 * frac is the fractional part that is left
12426 			 * over from converting usecs to hz ticks.
12427 			 * We need to scale it into the 5 bit
12428 			 * remainder.
12429 			 */
12430 			if (frac) {
12431 				if (hz == 1000) {
12432 					frac = (((uint64_t)frac *  (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12433 				} else {
12434 					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12435 				}
12436 				tp->t_rttvar += frac;
12437 			}
12438 		}
12439 		tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur);
12440 		tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
12441 		if (rack->rc_always_pace) {
12442 			tcp_decrement_paced_conn();
12443 			rack_undo_cc_pacing(rack);
12444 			rack->rc_always_pace = 0;
12445 		}
12446 		/* Clean up any options if they were not applied */
12447 		while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
12448 			struct deferred_opt_list *dol;
12449 
12450 			dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
12451 			TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
12452 			free(dol, M_TCPDO);
12453 		}
12454 		/* Release any hardware pacing rate entry we may hold */
12455 		if (rack->r_ctl.crte != NULL) {
12456 			tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
12457 			rack->rack_hdrw_pacing = 0;
12458 			rack->r_ctl.crte = NULL;
12459 		}
12460 #ifdef TCP_BLACKBOX
12461 		tcp_log_flowend(tp);
12462 #endif
12463 		RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
12464 			rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12465 #ifdef INVARIANTS
12466 			if (rm != rsm) {
12467 				panic("At fini, rack:%p rsm:%p rm:%p",
12468 				      rack, rsm, rm);
12469 			}
12470 #endif
12471 			uma_zfree(rack_zone, rsm);
12472 		}
12473 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12474 		while (rsm) {
12475 			TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
12476 			uma_zfree(rack_zone, rsm);
12477 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12478 		}
12479 		rack->rc_free_cnt = 0;
12480 		uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12481 		tp->t_fb_ptr = NULL;
12482 	}
12483 	if (tp->t_inpcb) {
12484 		tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12485 		tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
12486 		tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
12487 		tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
12488 		/* Cancel the GP measurement in progress */
12489 		tp->t_flags &= ~TF_GPUTINPROG;
12490 		tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS;
12491 	}
12492 	/* Make sure snd_nxt is correctly set */
12493 	tp->snd_nxt = tp->snd_max;
12494 }
12495 
12496 static void
12497 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
12498 {
12499 	switch (tp->t_state) {
12500 	case TCPS_SYN_SENT:
12501 		rack->r_state = TCPS_SYN_SENT;
12502 		rack->r_substate = rack_do_syn_sent;
12503 		break;
12504 	case TCPS_SYN_RECEIVED:
12505 		rack->r_state = TCPS_SYN_RECEIVED;
12506 		rack->r_substate = rack_do_syn_recv;
12507 		break;
12508 	case TCPS_ESTABLISHED:
12509 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12510 		rack->r_state = TCPS_ESTABLISHED;
12511 		rack->r_substate = rack_do_established;
12512 		break;
12513 	case TCPS_CLOSE_WAIT:
12514 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12515 		rack->r_state = TCPS_CLOSE_WAIT;
12516 		rack->r_substate = rack_do_close_wait;
12517 		break;
12518 	case TCPS_FIN_WAIT_1:
12519 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12520 		rack->r_state = TCPS_FIN_WAIT_1;
12521 		rack->r_substate = rack_do_fin_wait_1;
12522 		break;
12523 	case TCPS_CLOSING:
12524 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12525 		rack->r_state = TCPS_CLOSING;
12526 		rack->r_substate = rack_do_closing;
12527 		break;
12528 	case TCPS_LAST_ACK:
12529 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12530 		rack->r_state = TCPS_LAST_ACK;
12531 		rack->r_substate = rack_do_lastack;
12532 		break;
12533 	case TCPS_FIN_WAIT_2:
12534 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
12535 		rack->r_state = TCPS_FIN_WAIT_2;
12536 		rack->r_substate = rack_do_fin_wait_2;
12537 		break;
12538 	case TCPS_LISTEN:
12539 	case TCPS_CLOSED:
12540 	case TCPS_TIME_WAIT:
12541 	default:
12542 		break;
12543 	};
12544 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
12545 		rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
12546 
12547 }
12548 
12549 static void
12550 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
12551 {
12552 	/*
12553 	 * We received an ack, and then did not call send
12554 	 * or were bounced out because the hpts was
12555 	 * running. Now a timer is up as well; is
12556 	 * it the right timer?
12557 	 */
12558 	struct rack_sendmap *rsm;
12559 	int tmr_up;
12560 
12561 	tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
12562 	if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
12563 		return;
12564 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
12565 	if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
12566 	    (tmr_up == PACE_TMR_RXT)) {
12567 		/* Should be an RXT */
12568 		return;
12569 	}
12570 	if (rsm == NULL) {
12571 		/* Nothing outstanding? */
12572 		if (tp->t_flags & TF_DELACK) {
12573 			if (tmr_up == PACE_TMR_DELACK)
12574 				/* We are supposed to have delayed ack up and we do */
12575 				return;
12576 		} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
12577 			/*
12578 			 * If we hit ENOBUFS then we would expect the possibility
12579 			 * of nothing outstanding and the RXT up (and the hptsi timer).
12580 			 */
12581 			return;
12582 		} else if (((V_tcp_always_keepalive ||
12583 			     rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
12584 			    (tp->t_state <= TCPS_CLOSING)) &&
12585 			   (tmr_up == PACE_TMR_KEEP) &&
12586 			   (tp->snd_max == tp->snd_una)) {
12587 			/* We should have keep alive up and we do */
12588 			return;
12589 		}
12590 	}
12591 	if (SEQ_GT(tp->snd_max, tp->snd_una) &&
12592 		   ((tmr_up == PACE_TMR_TLP) ||
12593 		    (tmr_up == PACE_TMR_RACK) ||
12594 		    (tmr_up == PACE_TMR_RXT))) {
12595 		/*
12596 		 * Either a Rack, TLP or RXT is fine if we
12597 		 * have outstanding data.
12598 		 */
12599 		return;
12600 	} else if (tmr_up == PACE_TMR_DELACK) {
12601 		/*
12602 		 * If the delayed ack was going to go off
12603 		 * before the rtx/tlp/rack timer were going to
12604 		 * expire, then that would be the timer in control.
12605 		 * Note we don't check the time here trusting the
12606 		 * code is correct.
12607 		 */
12608 		return;
12609 	}
12610 	/*
12611 	 * Ok the timer originally started is not what we want now.
12612 	 * We will force the hpts to be stopped if any, and restart
12613 	 * with the slot set to what was in the saved slot.
12614 	 */
12615 	if (rack->rc_inp->inp_in_hpts) {
12616 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
12617 			uint32_t us_cts;
12618 
12619 			us_cts = tcp_get_usecs(NULL);
12620 			if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
12621 				rack->r_early = 1;
12622 				rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
12623 			}
12624 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
12625 		}
12626 		tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
12627 	}
12628 	rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12629 	rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12630 }
12631 
12633 static void
12634 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq)
12635 {
12636 	tp->snd_wnd = tiwin;
12637 	rack_validate_fo_sendwin_up(tp, rack);
12638 	tp->snd_wl1 = seq;
12639 	tp->snd_wl2 = ack;
12640 	if (tp->snd_wnd > tp->max_sndwnd)
12641 		tp->max_sndwnd = tp->snd_wnd;
12642 	if (tp->snd_wnd < (tp->snd_max - high_seq)) {
12643 		/* The peer collapsed the window */
12644 		rack_collapsed_window(rack);
12645 	} else if (rack->rc_has_collapsed)
12646 		rack_un_collapse_window(rack);
12647 	/* Do we exit persists? */
12648 	if ((rack->rc_in_persist != 0) &&
12649 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12650 				rack->r_ctl.rc_pace_min_segs))) {
12651 		rack_exit_persist(tp, rack, cts);
12652 	}
12653 	/* Do we enter persists? */
12654 	if ((rack->rc_in_persist == 0) &&
12655 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12656 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
12657 	    (tp->snd_max == tp->snd_una) &&
12658 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
12659 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
12660 		/*
12661 		 * Here the rwnd is less than
12662 		 * the pacing size, we are established,
12663 		 * nothing is outstanding, and there is
12664 		 * data to send. Enter persists.
12665 		 */
12666 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
12667 	}
12668 }
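
/*
 * Illustrative numbers for the persist checks above (an annotation):
 * with rc_high_rwnd = 64000 and rc_pace_min_segs = 1448 the threshold
 * is min(32000, 1448) = 1448 bytes; we exit persists once the peer
 * opens the window to at least that, and enter persists when the
 * window falls below it while data is queued and nothing is
 * outstanding.
 */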
12669 
12670 static void
12671 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
12672 {
12673 
12674 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
12675 		union tcp_log_stackspecific log;
12676 		struct timeval ltv;
12677 		char tcp_hdr_buf[60];
12678 		struct tcphdr *th;
12679 		struct timespec ts;
12680 		uint32_t orig_snd_una;
12681 		uint8_t xx = 0;
12682 
12683 #ifdef NETFLIX_HTTP_LOGGING
12684 		struct http_sendfile_track *http_req;
12685 
12686 		if (SEQ_GT(ae->ack, tp->snd_una)) {
12687 			http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
12688 		} else {
12689 			http_req = tcp_http_find_req_for_seq(tp, ae->ack);
12690 		}
12691 #endif
12692 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
12693 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
12694 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
12695 		if (rack->rack_no_prr == 0)
12696 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
12697 		else
12698 			log.u_bbr.flex1 = 0;
12699 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
12700 		log.u_bbr.use_lt_bw <<= 1;
12701 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
12702 		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
12703 		log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
12704 		log.u_bbr.pkts_out = tp->t_maxseg;
12705 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
12706 		log.u_bbr.flex7 = 1;
12707 		log.u_bbr.lost = ae->flags;
12708 		log.u_bbr.cwnd_gain = ackval;
12709 		log.u_bbr.pacing_gain = 0x2;
12710 		if (ae->flags & TSTMP_HDWR) {
12711 			/* Record the hardware timestamp if present */
12712 			log.u_bbr.flex3 = M_TSTMP;
12713 			ts.tv_sec = ae->timestamp / 1000000000;
12714 			ts.tv_nsec = ae->timestamp % 1000000000;
12715 			ltv.tv_sec = ts.tv_sec;
12716 			ltv.tv_usec = ts.tv_nsec / 1000;
12717 			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
12718 		} else if (ae->flags & TSTMP_LRO) {
12719 			/* Record the LRO arrival timestamp */
12720 			log.u_bbr.flex3 = M_TSTMP_LRO;
12721 			ts.tv_sec = ae->timestamp / 1000000000;
12722 			ts.tv_nsec = ae->timestamp % 1000000000;
12723 			ltv.tv_sec = ts.tv_sec;
12724 			ltv.tv_usec = ts.tv_nsec / 1000;
12725 			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
12726 		}
12727 		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
12728 		/* Log the rcv time */
12729 		log.u_bbr.delRate = ae->timestamp;
12730 #ifdef NETFLIX_HTTP_LOGGING
12731 		log.u_bbr.applimited = tp->t_http_closed;
12732 		log.u_bbr.applimited <<= 8;
12733 		log.u_bbr.applimited |= tp->t_http_open;
12734 		log.u_bbr.applimited <<= 8;
12735 		log.u_bbr.applimited |= tp->t_http_req;
12736 		if (http_req) {
12737 			/* Copy out any client req info */
12738 			/* seconds */
12739 			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
12740 			/* useconds */
12741 			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
12742 			log.u_bbr.rttProp = http_req->timestamp;
12743 			log.u_bbr.cur_del_rate = http_req->start;
12744 			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
12745 				log.u_bbr.flex8 |= 1;
12746 			} else {
12747 				log.u_bbr.flex8 |= 2;
12748 				log.u_bbr.bw_inuse = http_req->end;
12749 			}
12750 			log.u_bbr.flex6 = http_req->start_seq;
12751 			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
12752 				log.u_bbr.flex8 |= 4;
12753 				log.u_bbr.epoch = http_req->end_seq;
12754 			}
12755 		}
12756 #endif
12757 		memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
12758 		th = (struct tcphdr *)tcp_hdr_buf;
12759 		th->th_seq = ae->seq;
12760 		th->th_ack = ae->ack;
12761 		th->th_win = ae->win;
12762 		/* Now fill in the ports */
12763 		th->th_sport = tp->t_inpcb->inp_fport;
12764 		th->th_dport = tp->t_inpcb->inp_lport;
12765 		th->th_flags = ae->flags & 0xff;
12766 		/* Now do we have a timestamp option? */
12767 		if (ae->flags & HAS_TSTMP) {
12768 			u_char *cp;
12769 			uint32_t val;
12770 
12771 			th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
12772 			cp = (u_char *)(th + 1);
12773 			*cp = TCPOPT_NOP;
12774 			cp++;
12775 			*cp = TCPOPT_NOP;
12776 			cp++;
12777 			*cp = TCPOPT_TIMESTAMP;
12778 			cp++;
12779 			*cp = TCPOLEN_TIMESTAMP;
12780 			cp++;
12781 			val = htonl(ae->ts_value);
12782 			bcopy((char *)&val,
12783 			      (char *)cp, sizeof(uint32_t));
12784 			val = htonl(ae->ts_echo);
12785 			bcopy((char *)&val,
12786 			      (char *)(cp + 4), sizeof(uint32_t));
12787 		} else
12788 			th->th_off = (sizeof(struct tcphdr) >> 2);
12789 
12790 		/*
12791 		 * For sane logging we need to play a little trick.
12792 		 * If the ack were fully processed we would have moved
12793 		 * snd_una to high_seq, but since compressed acks are
12794 		 * processed in two phases, at this point (logging) snd_una
12795 		 * won't be advanced. So we would see multiple acks showing
12796 		 * the advancement. We can prevent that by "pretending" that
12797 		 * snd_una was advanced and then un-advancing it so that the
12798 		 * logging code has the right value for tlb_snd_una.
12799 		 */
12800 		if (tp->snd_una != high_seq) {
12801 			orig_snd_una = tp->snd_una;
12802 			tp->snd_una = high_seq;
12803 			xx = 1;
12804 		} else
12805 			xx = 0;
12806 		TCP_LOG_EVENTP(tp, th,
12807 			       &tp->t_inpcb->inp_socket->so_rcv,
12808 			       &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
12809 			       0, &log, true, &ltv);
12810 		if (xx) {
12811 			tp->snd_una = orig_snd_una;
12812 		}
12813 	}
12814 
12815 }
12816 
12817 static int
12818 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
12819 {
12820 	/*
12821 	 * Handle a "special" compressed ack mbuf. Each incoming
12822 	 * ack has only four possible dispositions:
12823 	 *
12824 	 * A) It moves the cum-ack forward
12825 	 * B) It is behind the cum-ack.
12826 	 * C) It is a window-update ack.
12827 	 * D) It is a dup-ack.
12828 	 *
12829 	 * Note that we can have between 1 and TCP_COMP_ACK_ENTRIES
12830 	 * acks in the incoming mbuf. We also need to still pay attention
12831 	 * to nxt_pkt since there may be another packet after this
12832 	 * one.
12833 	 */
12834 #ifdef TCP_ACCOUNTING
12835 	uint64_t ts_val;
12836 	uint64_t rdstc;
12837 #endif
12838 	int segsiz;
12839 	struct timespec ts;
12840 	struct tcp_rack *rack;
12841 	struct tcp_ackent *ae;
12842 	uint32_t tiwin, us_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
12843 	int cnt, i, did_out, ourfinisacked = 0;
12844 	int win_up_req = 0;
12845 	struct tcpopt to_holder, *to = NULL;
12846 	int nsegs = 0;
12847 	int under_pacing = 1;
12848 	int recovery = 0;
12849 	int idx;
12850 #ifdef TCP_ACCOUNTING
12851 	sched_pin();
12852 #endif
12853 	rack = (struct tcp_rack *)tp->t_fb_ptr;
12854 	if (rack->gp_ready &&
12855 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
12856 		under_pacing = 0;
12857 	else
12858 		under_pacing = 1;
12859 
12860 	if (rack->r_state != tp->t_state)
12861 		rack_set_state(tp, rack);
12862 	to = &to_holder;
12863 	to->to_flags = 0;
12864 	KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
12865 		("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
12866 	cnt = m->m_len / sizeof(struct tcp_ackent);
12867 	idx = cnt / 5;
12868 	if (idx >= MAX_NUM_OF_CNTS)
12869 		idx = MAX_NUM_OF_CNTS - 1;
12870 	counter_u64_add(rack_proc_comp_ack[idx], 1);
12871 	counter_u64_add(rack_multi_single_eq, cnt);
12872 	high_seq = tp->snd_una;
12873 	the_win = tp->snd_wnd;
12874 	win_seq = tp->snd_wl1;
12875 	win_upd_ack = tp->snd_wl2;
12876 	cts = us_cts = tcp_tv_to_usectick(tv);
12877 	segsiz = ctf_fixed_maxseg(tp);
12878 	if ((rack->rc_gp_dyn_mul) &&
12879 	    (rack->use_fixed_rate == 0) &&
12880 	    (rack->rc_always_pace)) {
12881 		/* Check in on probertt */
12882 		rack_check_probe_rtt(rack, us_cts);
12883 	}
12884 	for (i = 0; i < cnt; i++) {
12885 #ifdef TCP_ACCOUNTING
12886 		ts_val = get_cyclecount();
12887 #endif
12888 		rack_clear_rate_sample(rack);
12889 		ae = ((mtod(m, struct tcp_ackent *)) + i);
12890 		/* Setup the window */
12891 		tiwin = ae->win << tp->snd_scale;
12892 		/* figure out the type of ack */
12893 		if (SEQ_LT(ae->ack, high_seq)) {
12894 			/* Case B*/
12895 			ae->ack_val_set = ACK_BEHIND;
12896 		} else if (SEQ_GT(ae->ack, high_seq)) {
12897 			/* Case A */
12898 			ae->ack_val_set = ACK_CUMACK;
12899 		} else if (tiwin == the_win) {
12900 			/* Case D */
12901 			ae->ack_val_set = ACK_DUPACK;
12902 		} else {
12903 			/* Case C */
12904 			ae->ack_val_set = ACK_RWND;
12905 		}
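		/*
		 * Classification example (an annotation with illustrative
		 * numbers): with high_seq = 1000 and an unchanged window,
		 * ack = 900 is ACK_BEHIND (B), ack = 1500 is ACK_CUMACK
		 * (A), ack = 1000 with the same window is ACK_DUPACK (D),
		 * and ack = 1000 with a changed window is ACK_RWND (C).
		 */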
12906 		rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
12907 		/* Validate timestamp */
12908 		if (ae->flags & HAS_TSTMP) {
12909 			/* Setup for a timestamp */
12910 			to->to_flags = TOF_TS;
12911 			ae->ts_echo -= tp->ts_offset;
12912 			to->to_tsecr = ae->ts_echo;
12913 			to->to_tsval = ae->ts_value;
12914 			/*
12915 			 * If echoed timestamp is later than the current time, fall back to
12916 			 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
12917 			 * were used when this connection was established.
12918 			 */
12919 			if (TSTMP_GT(ae->ts_echo, cts))
12920 				ae->ts_echo = 0;
12921 			if (tp->ts_recent &&
12922 			    TSTMP_LT(ae->ts_value, tp->ts_recent)) {
12923 				if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
12924 #ifdef TCP_ACCOUNTING
12925 					rdstc = get_cyclecount();
12926 					if (rdstc > ts_val) {
12927 						counter_u64_add(tcp_proc_time[ae->ack_val_set] ,
12928 								(rdstc - ts_val));
12929 						if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12930 							tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
12931 						}
12932 					}
12933 #endif
12934 					continue;
12935 				}
12936 			}
12937 			if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
12938 			    SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
12939 				tp->ts_recent_age = tcp_ts_getticks();
12940 				tp->ts_recent = ae->ts_value;
12941 			}
12942 		} else {
12943 			/* Setup for no options */
12944 			to->to_flags = 0;
12945 		}
12946 		/* Update the rcv time and perform idle reduction possibly */
12947 		if (tp->t_idle_reduce &&
12948 		     (tp->snd_max == tp->snd_una) &&
12949 		     ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
12950 			counter_u64_add(rack_input_idle_reduces, 1);
12951 			rack_cc_after_idle(rack, tp);
12952 		}
12953 		tp->t_rcvtime = ticks;
12954 		/* Now what about ECN? */
12955 		if (tp->t_flags2 & TF2_ECN_PERMIT) {
12956 			if (ae->flags & TH_CWR) {
12957 				tp->t_flags2 &= ~TF2_ECN_SND_ECE;
12958 				tp->t_flags |= TF_ACKNOW;
12959 			}
12960 			switch (ae->codepoint & IPTOS_ECN_MASK) {
12961 			case IPTOS_ECN_CE:
12962 				tp->t_flags2 |= TF2_ECN_SND_ECE;
12963 				KMOD_TCPSTAT_INC(tcps_ecn_ce);
12964 				break;
12965 			case IPTOS_ECN_ECT0:
12966 				KMOD_TCPSTAT_INC(tcps_ecn_ect0);
12967 				break;
12968 			case IPTOS_ECN_ECT1:
12969 				KMOD_TCPSTAT_INC(tcps_ecn_ect1);
12970 				break;
12971 			}
12972 
12973 			/* Process a packet differently from RFC3168. */
12974 			cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint);
12975 			/* Congestion experienced. */
12976 			if (ae->flags & TH_ECE) {
12977 				rack_cong_signal(tp,  CC_ECN, ae->ack);
12978 			}
12979 		}
12980 #ifdef TCP_ACCOUNTING
12981 		/* Count for the specific type of ack in */
12982 		counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1);
12983 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12984 			tp->tcp_cnt_counters[ae->ack_val_set]++;
12985 		}
12986 #endif
12987 		/*
12988 		 * Note how we could move these up into the determination
12989 		 * above, but we don't, so that the timestamp checks (and ECN)
12990 		 * are done first, before we do any processing on the ACK.
12991 		 * The non-compressed path through the code has this
12992 		 * weakness (noted by @jtl) that it actually does some
12993 		 * processing before verifying the timestamp information.
12994 		 * We don't take that path here which is why we set
12995 		 * the ack_val_set first, do the timestamp and ecn
12996 		 * processing, and then look at what we have setup.
12997 		 */
12998 		if (ae->ack_val_set == ACK_BEHIND) {
12999 			/*
13000 			 * Case B: flag reordering if the window is not
13001 			 * closed, or it could be a keep-alive or persists probe
13002 			 */
13003 			if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
13004 				counter_u64_add(rack_reorder_seen, 1);
13005 				rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13006 			}
13007 		} else if (ae->ack_val_set == ACK_DUPACK) {
13008 			/* Case D */
13009 
13010 			rack_strike_dupack(rack);
13011 		} else if (ae->ack_val_set == ACK_RWND) {
13012 			/* Case C */
13013 
13014 			win_up_req = 1;
13015 			win_upd_ack = ae->ack;
13016 			win_seq = ae->seq;
13017 			the_win = tiwin;
13018 		} else {
13019 			/* Case A */
13020 
13021 			if (SEQ_GT(ae->ack, tp->snd_max)) {
13022 				/*
13023 				 * We just send an ack since the incoming
13024 				 * ack is beyond the largest seq we sent.
13025 				 */
13026 				if ((tp->t_flags & TF_ACKNOW) == 0) {
13027 					ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
13028 					if (tp->t_flags & TF_ACKNOW)
13029 						rack->r_wanted_output = 1;
13030 				}
13031 			} else {
13032 				nsegs++;
13033 				/* If the window changed setup to update */
13034 				if (tiwin != tp->snd_wnd) {
13035 					win_up_req = 1;
13036 					win_upd_ack = ae->ack;
13037 					win_seq = ae->seq;
13038 					the_win = tiwin;
13039 				}
13040 #ifdef TCP_ACCOUNTING
13041 				/* Account for the acks */
13042 				if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13043 					tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
13044 				}
13045 				counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
13046 						(((ae->ack - high_seq) + segsiz - 1) / segsiz));
13047 #endif
13048 				high_seq = ae->ack;
13049 				/* Setup our act_rcv_time */
13050 				if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13051 					ts.tv_sec = ae->timestamp / 1000000000;
13052 					ts.tv_nsec = ae->timestamp % 1000000000;
13053 					rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13054 					rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13055 				} else {
13056 					rack->r_ctl.act_rcv_time = *tv;
13057 				}
13058 				rack_process_to_cumack(tp, rack, ae->ack, cts, to);
13059 			}
13060 		}
13061 		/* And lets be sure to commit the rtt measurements for this ack */
13062 		tcp_rack_xmit_timer_commit(rack, tp);
13063 #ifdef TCP_ACCOUNTING
13064 		rdstc = get_cyclecount();
13065 		if (rdstc > ts_val) {
13066 			counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val));
13067 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13068 				tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13069 				if (ae->ack_val_set == ACK_CUMACK)
13070 					tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
13071 			}
13072 		}
13073 #endif
13074 	}
13075 #ifdef TCP_ACCOUNTING
13076 	ts_val = get_cyclecount();
13077 #endif
13078 	acked_amount = acked = (high_seq - tp->snd_una);
13079 	if (win_up_req) {
13080 		rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13081 	}
13082 	if (acked) {
13083 		if (rack->sack_attack_disable == 0)
13084 			rack_do_decay(rack);
13085 		if (acked >= segsiz) {
13086 			/*
13087 			 * You only get credit for
13088 			 * MSS and greater (and you get extra
13089 			 * credit for larger cum-ack moves).
13090 			 */
13091 			int ac;
13092 
13093 			ac = acked / segsiz;
13094 			rack->r_ctl.ack_count += ac;
13095 			counter_u64_add(rack_ack_total, ac);
13096 		}
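		/*
		 * E.g. (annotation with illustrative numbers): a cum-ack
		 * move of 4344 bytes with segsiz = 1448 credits ac = 3 to
		 * ack_count and rack_ack_total; moves smaller than one
		 * MSS earn no credit.
		 */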
13097 		if (rack->r_ctl.ack_count > 0xfff00000) {
13098 			/*
13099 			 * reduce the number to keep us under
13100 			 * a uint32_t.
13101 			 */
13102 			rack->r_ctl.ack_count /= 2;
13103 			rack->r_ctl.sack_count /= 2;
13104 		}
13105 		if (tp->t_flags & TF_NEEDSYN) {
13106 			/*
13107 			 * T/TCP: Connection was half-synchronized, and our SYN has
13108 			 * been ACK'd (so connection is now fully synchronized).  Go
13109 			 * to non-starred state, increment snd_una for ACK of SYN,
13110 			 * and check if we can do window scaling.
13111 			 */
13112 			tp->t_flags &= ~TF_NEEDSYN;
13113 			tp->snd_una++;
13114 			acked_amount = acked = (high_seq - tp->snd_una);
13115 		}
13116 		if (acked > sbavail(&so->so_snd))
13117 			acked_amount = sbavail(&so->so_snd);
13118 #ifdef NETFLIX_EXP_DETECTION
13119 		/*
13120 		 * We only care about a cum-ack move if we are in a sack-disabled
13121 		 * state. We have already added in to the ack_count, and we never
13122 		 * would disable on a cum-ack move, so we only care to do the
13123 		 * detection if it may "undo" it, i.e., we were already disabled.
13124 		 */
13125 		if (rack->sack_attack_disable)
13126 			rack_do_detection(tp, rack, acked_amount, segsiz);
13127 #endif
13128 		if (IN_FASTRECOVERY(tp->t_flags) &&
13129 		    (rack->rack_no_prr == 0))
13130 			rack_update_prr(tp, rack, acked_amount, high_seq);
13131 		if (IN_RECOVERY(tp->t_flags)) {
13132 			if (SEQ_LT(high_seq, tp->snd_recover) &&
13133 			    (SEQ_LT(high_seq, tp->snd_max))) {
13134 				tcp_rack_partialack(tp);
13135 			} else {
13136 				rack_post_recovery(tp, high_seq);
13137 				recovery = 1;
13138 			}
13139 		}
13140 		/* Handle the rack-log-ack part (sendmap) */
13141 		if ((sbused(&so->so_snd) == 0) &&
13142 		    (acked > acked_amount) &&
13143 		    (tp->t_state >= TCPS_FIN_WAIT_1) &&
13144 		    (tp->t_flags & TF_SENTFIN)) {
13145 			/*
13146 			 * We must be sure our fin
13147 			 * was sent and acked (we can be
13148 			 * in FIN_WAIT_1 without having
13149 			 * sent the fin).
13150 			 */
13151 			ourfinisacked = 1;
13152 			/*
13153 			 * Lets make sure snd_una is updated
13154 			 * since most likely acked_amount = 0 (it
13155 			 * should be).
13156 			 */
13157 			tp->snd_una = high_seq;
13158 		}
13159 		/* Did we make a RTO error? */
13160 		if ((tp->t_flags & TF_PREVVALID) &&
13161 		    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
13162 			tp->t_flags &= ~TF_PREVVALID;
13163 			if (tp->t_rxtshift == 1 &&
13164 			    (int)(ticks - tp->t_badrxtwin) < 0)
13165 				rack_cong_signal(tp, CC_RTO_ERR, high_seq);
13166 		}
13167 		/* Handle the data in the socket buffer */
13168 		KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
13169 		KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
13170 		if (acked_amount > 0) {
13171 			struct mbuf *mfree;
13172 
13173 			rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
13174 			SOCKBUF_LOCK(&so->so_snd);
13175 			mfree = sbcut_locked(&so->so_snd, acked);
13176 			tp->snd_una = high_seq;
13177 			/* Note we want to hold the sb lock through the sendmap adjust */
13178 			rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
13179 			/* Wake up the socket if we have room to write more */
13180 			rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
13181 			sowwakeup_locked(so);
13182 			m_freem(mfree);
13183 		}
13184 		/* update progress */
13185 		tp->t_acktime = ticks;
13186 		rack_log_progress_event(rack, tp, tp->t_acktime,
13187 					PROGRESS_UPDATE, __LINE__);
13188 		/* Clear out shifts and such */
13189 		tp->t_rxtshift = 0;
13190 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
13191 				   rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
13192 		rack->rc_tlp_in_progress = 0;
13193 		rack->r_ctl.rc_tlp_cnt_out = 0;
13194 		/* Send recover and snd_nxt must be dragged along */
13195 		if (SEQ_GT(tp->snd_una, tp->snd_recover))
13196 			tp->snd_recover = tp->snd_una;
13197 		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
13198 			tp->snd_nxt = tp->snd_una;
13199 		/*
13200 		 * If the RXT timer is running we want to
13201 		 * stop it, so we can restart a TLP (or new RXT).
13202 		 */
13203 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
13204 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13205 #ifdef NETFLIX_HTTP_LOGGING
13206 		tcp_http_check_for_comp(rack->rc_tp, high_seq);
13207 #endif
13208 		tp->snd_wl2 = high_seq;
13209 		tp->t_dupacks = 0;
13210 		if (under_pacing &&
13211 		    (rack->use_fixed_rate == 0) &&
13212 		    (rack->in_probe_rtt == 0) &&
13213 		    rack->rc_gp_dyn_mul &&
13214 		    rack->rc_always_pace) {
13215 			/* Check if we are dragging bottom */
13216 			rack_check_bottom_drag(tp, rack, so, acked);
13217 		}
13218 		if (tp->snd_una == tp->snd_max) {
13219 			tp->t_flags &= ~TF_PREVVALID;
13220 			rack->r_ctl.retran_during_recovery = 0;
13221 			rack->r_ctl.dsack_byte_cnt = 0;
13222 			rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13223 			if (rack->r_ctl.rc_went_idle_time == 0)
13224 				rack->r_ctl.rc_went_idle_time = 1;
13225 			rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13226 			if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
13227 				tp->t_acktime = 0;
13228 			/* Set so we might enter persists... */
13229 			rack->r_wanted_output = 1;
13230 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13231 			sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
13232 			if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13233 			    (sbavail(&so->so_snd) == 0) &&
13234 			    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
13235 				/*
13236 				 * The socket is gone and the peer
13237 				 * sent data (not now, in the past);
13238 				 * time to reset it.
13239 				 */
13240 				rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13241 				/* tcp_close will kill the inp pre-log the Reset */
13242 				tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13243 #ifdef TCP_ACCOUNTING
13244 				rdstc = get_cyclecount();
13245 				if (rdstc > ts_val) {
13246 					counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13247 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13248 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13249 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13250 					}
13251 				}
13252 #endif
13253 				m_freem(m);
13254 				tp = tcp_close(tp);
13255 				if (tp == NULL) {
13256 #ifdef TCP_ACCOUNTING
13257 					sched_unpin();
13258 #endif
13259 					return (1);
13260 				}
13261 				/*
13262 				 * We would normally do drop-with-reset which would
13263 				 * send back a reset. We can't since we don't have
13264 				 * all the needed bits. Instead lets arrange for
13265 				 * a call to tcp_output(). That way since we
13266 				 * are in the closed state we will generate a reset.
13267 				 *
13268 				 * Note if tcp_accounting is on we don't unpin since
13269 				 * we do that after the goto label.
13270 				 */
13271 				goto send_out_a_rst;
13272 			}
13273 			if ((sbused(&so->so_snd) == 0) &&
13274 			    (tp->t_state >= TCPS_FIN_WAIT_1) &&
13275 			    (tp->t_flags & TF_SENTFIN)) {
13276 				/*
13277 				 * If we can't receive any more data, then closing user can
13278 				 * proceed. Starting the timer is contrary to the
13279 				 * specification, but if we don't get a FIN we'll hang
13280 				 * forever.
13281 				 *
13282 				 */
13283 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13284 					soisdisconnected(so);
13285 					tcp_timer_activate(tp, TT_2MSL,
13286 							   (tcp_fast_finwait2_recycle ?
13287 							    tcp_finwait2_timeout :
13288 							    TP_MAXIDLE(tp)));
13289 				}
13290 				if (ourfinisacked == 0) {
13291 					/*
13292 					 * We don't change to fin-wait-2 if we have our fin acked
13293 					 * which means we are probably in TCPS_CLOSING.
13294 					 */
13295 					tcp_state_change(tp, TCPS_FIN_WAIT_2);
13296 				}
13297 			}
13298 		}
13299 		/* Wake up the socket if we have room to write more */
13300 		if (sbavail(&so->so_snd)) {
13301 			rack->r_wanted_output = 1;
13302 			if (ctf_progress_timeout_check(tp, true)) {
13303 				rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13304 							tp, tick, PROGRESS_DROP, __LINE__);
13305 				tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
13306 				/*
13307 				 * We cheat here and don't send a RST, we should send one
13308 				 * when the pacer drops the connection.
13309 				 */
13310 #ifdef TCP_ACCOUNTING
13311 				rdstc = get_cyclecount();
13312 				if (rdstc > ts_val) {
13313 					counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13314 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13315 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13316 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13317 					}
13318 				}
13319 				sched_unpin();
13320 #endif
13321 				INP_WUNLOCK(rack->rc_inp);
13322 				m_freem(m);
13323 				return (1);
13324 			}
13325 		}
13326 		if (ourfinisacked) {
13327 			switch(tp->t_state) {
13328 			case TCPS_CLOSING:
13329 #ifdef TCP_ACCOUNTING
13330 				rdstc = get_cyclecount();
13331 				if (rdstc > ts_val) {
13332 					counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13333 							(rdstc - ts_val));
13334 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13335 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13336 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13337 					}
13338 				}
13339 				sched_unpin();
13340 #endif
13341 				tcp_twstart(tp);
13342 				m_freem(m);
13343 				return (1);
13344 				break;
13345 			case TCPS_LAST_ACK:
13346 #ifdef TCP_ACCOUNTING
13347 				rdstc = get_cyclecount();
13348 				if (rdstc > ts_val) {
13349 					counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13350 							(rdstc - ts_val));
13351 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13352 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13353 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13354 					}
13355 				}
13356 				sched_unpin();
13357 #endif
13358 				tp = tcp_close(tp);
13359 				ctf_do_drop(m, tp);
13360 				return (1);
13361 				break;
13362 			case TCPS_FIN_WAIT_1:
13363 #ifdef TCP_ACCOUNTING
13364 				rdstc = get_cyclecount();
13365 				if (rdstc > ts_val) {
13366 					counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13367 							(rdstc - ts_val));
13368 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13369 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13370 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13371 					}
13372 				}
13373 #endif
13374 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13375 					soisdisconnected(so);
13376 					tcp_timer_activate(tp, TT_2MSL,
13377 							   (tcp_fast_finwait2_recycle ?
13378 							    tcp_finwait2_timeout :
13379 							    TP_MAXIDLE(tp)));
13380 				}
13381 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
13382 				break;
13383 			default:
13384 				break;
13385 			}
13386 		}
13387 		if (rack->r_fast_output) {
13388 			/*
13389 			 * We re doing fast output.. can we expand that?
13390 			 */
13391 			rack_gain_for_fastoutput(rack, tp, so, acked_amount);
13392 		}
13393 #ifdef TCP_ACCOUNTING
13394 		rdstc = get_cyclecount();
13395 		if (rdstc > ts_val) {
13396 			counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13397 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13398 				tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13399 				tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13400 			}
13401 		}
13402 
13403 	} else if (win_up_req) {
13404 		rdstc = get_cyclecount();
13405 		if (rdstc > ts_val) {
13406 			counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val));
13407 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13408 				tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
13409 			}
13410 		}
13411 #endif
13412 	}
13413 	/* Now is there a next packet, if so we are done */
13414 	m_freem(m);
13415 	did_out = 0;
13416 	if (nxt_pkt) {
13417 #ifdef TCP_ACCOUNTING
13418 		sched_unpin();
13419 #endif
13420 		rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
13421 		return (0);
13422 	}
13423 	rack_handle_might_revert(tp, rack);
13424 	ctf_calc_rwin(so, tp);
13425 	if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13426 	send_out_a_rst:
13427 		(void)tp->t_fb->tfb_tcp_output(tp);
13428 		did_out = 1;
13429 	}
13430 	rack_free_trim(rack);
13431 #ifdef TCP_ACCOUNTING
13432 	sched_unpin();
13433 #endif
13434 	rack_timer_audit(tp, rack, &so->so_snd);
13435 	rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
13436 	return (0);
13437 }
13438 
13440 static int
13441 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
13442     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
13443     int32_t nxt_pkt, struct timeval *tv)
13444 {
13445 #ifdef TCP_ACCOUNTING
13446 	uint64_t ts_val;
13447 #endif
13448 	int32_t thflags, retval, did_out = 0;
13449 	int32_t way_out = 0;
13450 	uint32_t cts;
13451 	uint32_t tiwin;
13452 	struct timespec ts;
13453 	struct tcpopt to;
13454 	struct tcp_rack *rack;
13455 	struct rack_sendmap *rsm;
13456 	int32_t prev_state = 0;
13457 #ifdef TCP_ACCOUNTING
13458 	int ack_val_set = 0xf;
13459 #endif
13460 	uint32_t us_cts;
13461 	/*
13462 	 * tv passed from common code is from either M_TSTMP_LRO or
13463 	 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
13464 	 */
13465 	if (m->m_flags & M_ACKCMP) {
13466 		return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
13467 	}
13471 	counter_u64_add(rack_proc_non_comp_ack, 1);
13472 	thflags = th->th_flags;
13473 #ifdef TCP_ACCOUNTING
13474 	sched_pin();
13475 	if (thflags & TH_ACK)
13476 		ts_val = get_cyclecount();
13477 #endif
13478 	cts = tcp_tv_to_usectick(tv);
13479 	rack = (struct tcp_rack *)tp->t_fb_ptr;
13480 
13481 	if ((m->m_flags & M_TSTMP) ||
13482 	    (m->m_flags & M_TSTMP_LRO)) {
13483 		mbuf_tstmp2timespec(m, &ts);
13484 		rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13485 		rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13486 	} else
13487 		rack->r_ctl.act_rcv_time = *tv;
13488 	kern_prefetch(rack, &prev_state);
13489 	prev_state = 0;
13490 	/*
13491 	 * Unscale the window into a 32-bit value. For the SYN_SENT state
13492 	 * the scale is zero.
13493 	 */
13494 	tiwin = th->th_win << tp->snd_scale;
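	/*
	 * E.g. (annotation with illustrative numbers): th_win = 500 with
	 * snd_scale = 7 unscales to tiwin = 500 << 7 = 64000 bytes.
	 */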
13495 	/*
13496 	 * Parse options on any incoming segment.
13497 	 */
13498 	memset(&to, 0, sizeof(to));
13499 	tcp_dooptions(&to, (u_char *)(th + 1),
13500 	    (th->th_off << 2) - sizeof(struct tcphdr),
13501 	    (thflags & TH_SYN) ? TO_SYN : 0);
13502 #ifdef TCP_ACCOUNTING
13503 	if (thflags & TH_ACK) {
13504 		/*
13505 		 * We have a tradeoff here. We can either do what we are
13506 		 * doing i.e. pinning to this CPU and then doing the accounting
13507 		 * <or> we could do a critical enter, setup the rdtsc and cpu
13508 		 * as in below, and then validate we are on the same CPU on
13509 		 * exit. I have chosen not to do the critical enter since
13510 		 * that often will gain you a context switch, and instead lock
13511 		 * us (line above this if) to the same CPU with sched_pin(). This
13512 		 * means we may be context switched out for a higher priority
13513 		 * interrupt but we won't be moved to another CPU.
13514 		 *
13515 		 * If this occurs (which it won't very often since we most likely
13516 		 * are running this code in interrupt context and only a higher
13517 		 * priority will bump us ... clock?) we will falsely add the
13518 		 * interrupt processing time in with the ack processing
13519 		 * time. This is ok since it's a rare event.
13520 		 */
13521 		ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
13522 						    ctf_fixed_maxseg(tp));
13523 	}
13524 #endif
13525 	NET_EPOCH_ASSERT();
13526 	INP_WLOCK_ASSERT(tp->t_inpcb);
13527 	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
13528 	    __func__));
13529 	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
13530 	    __func__));
13531 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
13532 		union tcp_log_stackspecific log;
13533 		struct timeval ltv;
13534 #ifdef NETFLIX_HTTP_LOGGING
13535 		struct http_sendfile_track *http_req;
13536 
13537 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
13538 			http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
13539 		} else {
13540 			http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
13541 		}
13542 #endif
13543 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13544 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
13545 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
13546 		if (rack->rack_no_prr == 0)
13547 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13548 		else
13549 			log.u_bbr.flex1 = 0;
13550 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
13551 		log.u_bbr.use_lt_bw <<= 1;
13552 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
13553 		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
13554 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
13555 		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
13556 		log.u_bbr.flex3 = m->m_flags;
13557 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
13558 		log.u_bbr.lost = thflags;
13559 		log.u_bbr.pacing_gain = 0x1;
13560 #ifdef TCP_ACCOUNTING
13561 		log.u_bbr.cwnd_gain = ack_val_set;
13562 #endif
13563 		log.u_bbr.flex7 = 2;
13564 		if (m->m_flags & M_TSTMP) {
13565 			/* Record the hardware timestamp if present */
13566 			mbuf_tstmp2timespec(m, &ts);
13567 			ltv.tv_sec = ts.tv_sec;
13568 			ltv.tv_usec = ts.tv_nsec / 1000;
13569 			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
13570 		} else if (m->m_flags & M_TSTMP_LRO) {
13571 			/* Record the LRO the arrival timestamp */
13572 			mbuf_tstmp2timespec(m, &ts);
13573 			ltv.tv_sec = ts.tv_sec;
13574 			ltv.tv_usec = ts.tv_nsec / 1000;
13575 			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
13576 		}
13577 		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
13578 		/* Log the rcv time */
13579 		log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
13580 #ifdef NETFLIX_HTTP_LOGGING
13581 		log.u_bbr.applimited = tp->t_http_closed;
13582 		log.u_bbr.applimited <<= 8;
13583 		log.u_bbr.applimited |= tp->t_http_open;
13584 		log.u_bbr.applimited <<= 8;
13585 		log.u_bbr.applimited |= tp->t_http_req;
13586 		if (http_req) {
13587 			/* Copy out any client req info */
13588 			/* seconds */
13589 			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
13590 			/* useconds */
13591 			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
13592 			log.u_bbr.rttProp = http_req->timestamp;
13593 			log.u_bbr.cur_del_rate = http_req->start;
13594 			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
13595 				log.u_bbr.flex8 |= 1;
13596 			} else {
13597 				log.u_bbr.flex8 |= 2;
13598 				log.u_bbr.bw_inuse = http_req->end;
13599 			}
13600 			log.u_bbr.flex6 = http_req->start_seq;
13601 			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
13602 				log.u_bbr.flex8 |= 4;
13603 				log.u_bbr.epoch = http_req->end_seq;
13604 			}
13605 		}
13606 #endif
13607 		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
13608 		    tlen, &log, true, &ltv);
13609 	}
13610 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
13611 		way_out = 4;
13612 		retval = 0;
13613 		goto done_with_input;
13614 	}
13615 	/*
13616 	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
13617 	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
13618 	 */
13619 	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
13620 	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
13621 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13622 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13623 #ifdef TCP_ACCOUNTING
13624 		sched_unpin();
13625 #endif
13626 		return (1);
13627 	}
13628 
13629 	/*
13630 	 * Parse options on any incoming segment.
13631 	 */
13632 	tcp_dooptions(&to, (u_char *)(th + 1),
13633 	    (th->th_off << 2) - sizeof(struct tcphdr),
13634 	    (thflags & TH_SYN) ? TO_SYN : 0);
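	/*
	 * th_off is in 32-bit words, so (th_off << 2) is the full header
	 * length in bytes; subtracting the fixed header leaves just the
	 * options, e.g. a th_off of 8 yields 32 - 20 = 12 option bytes.
	 */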
13635 
13636 	/*
13637 	 * If timestamps were negotiated during SYN/ACK and a
13638 	 * segment without a timestamp is received, silently drop
13639 	 * the segment, unless it is a RST segment or missing timestamps are
13640 	 * tolerated.
13641 	 * See section 3.2 of RFC 7323.
13642 	 */
13643 	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
13644 	    ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
13645 		way_out = 5;
13646 		retval = 0;
13647 		goto done_with_input;
13648 	}
13649 
13650 	/*
13651 	 * Segment received on connection. Reset idle time and keep-alive
13652 	 * timer. XXX: This should be done after segment validation to
13653 	 * ignore broken/spoofed segs.
13654 	 */
13655 	if (tp->t_idle_reduce &&
13656 	     (tp->snd_max == tp->snd_una) &&
13657 	     ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
13658 		counter_u64_add(rack_input_idle_reduces, 1);
13659 		rack_cc_after_idle(rack, tp);
13660 	}
13661 	tp->t_rcvtime = ticks;
13662 #ifdef STATS
13663 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
13664 #endif
13665 	if (tiwin > rack->r_ctl.rc_high_rwnd)
13666 		rack->r_ctl.rc_high_rwnd = tiwin;
13667 	/*
13668 	 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
13669 	 * this to occur after we've validated the segment.
13670 	 */
13671 	if (tp->t_flags2 & TF2_ECN_PERMIT) {
13672 		if (thflags & TH_CWR) {
13673 			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
13674 			tp->t_flags |= TF_ACKNOW;
13675 		}
13676 		switch (iptos & IPTOS_ECN_MASK) {
13677 		case IPTOS_ECN_CE:
13678 			tp->t_flags2 |= TF2_ECN_SND_ECE;
13679 			KMOD_TCPSTAT_INC(tcps_ecn_ce);
13680 			break;
13681 		case IPTOS_ECN_ECT0:
13682 			KMOD_TCPSTAT_INC(tcps_ecn_ect0);
13683 			break;
13684 		case IPTOS_ECN_ECT1:
13685 			KMOD_TCPSTAT_INC(tcps_ecn_ect1);
13686 			break;
13687 		}
13688 
13689 		/* Process a packet differently from RFC3168. */
13690 		cc_ecnpkt_handler(tp, th, iptos);
13691 
13692 		/* Congestion experienced. */
13693 		if (thflags & TH_ECE) {
13694 			rack_cong_signal(tp, CC_ECN, th->th_ack);
13695 		}
13696 	}
13697 
13698 	/*
13699 	 * If echoed timestamp is later than the current time, fall back to
13700 	 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
13701 	 * were used when this connection was established.
13702 	 */
13703 	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
13704 		to.to_tsecr -= tp->ts_offset;
13705 		if (TSTMP_GT(to.to_tsecr, cts))
13706 			to.to_tsecr = 0;
13707 	}
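	/*
	 * E.g., if syncookies added a ts_offset of 1000000 and the peer
	 * echoed 5000000, the normalized tsecr is 4000000; if that still
	 * exceeds cts the echo cannot be ours, so it is zeroed and this
	 * ack falls back to non-RFC1323 RTT measurement.
	 */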
13708 
13709 	/*
13710 	 * If it's the first time in, we need to take care of options and
13711 	 * verify we can do SACK for rack!
13712 	 */
13713 	if (rack->r_state == 0) {
13714 		/* Should be init'd by rack_init() */
13715 		KASSERT(rack->rc_inp != NULL,
13716 		    ("%s: rack->rc_inp unexpectedly NULL", __func__));
13717 		if (rack->rc_inp == NULL) {
13718 			rack->rc_inp = tp->t_inpcb;
13719 		}
13720 
13721 		/*
13722 		 * Process options only when we get SYN/ACK back. The SYN
13723 		 * case for incoming connections is handled in tcp_syncache.
13724 		 * According to RFC1323 the window field in a SYN (i.e., a
13725 		 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
13726 		 * this is traditional behavior, may need to be cleaned up.
13727 		 */
13728 		if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
13729 			/* Handle parallel SYN for ECN */
13730 			if (!(thflags & TH_ACK) &&
13731 			    ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
13732 			    ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
13733 				tp->t_flags2 |= TF2_ECN_PERMIT;
13734 				tp->t_flags2 |= TF2_ECN_SND_ECE;
13735 				TCPSTAT_INC(tcps_ecn_shs);
13736 			}
13737 			if ((to.to_flags & TOF_SCALE) &&
13738 			    (tp->t_flags & TF_REQ_SCALE)) {
13739 				tp->t_flags |= TF_RCVD_SCALE;
13740 				tp->snd_scale = to.to_wscale;
13741 			} else
13742 				tp->t_flags &= ~TF_REQ_SCALE;
13743 			/*
13744 			 * Initial send window.  It will be updated with the
13745 			 * next incoming segment to the scaled value.
13746 			 */
13747 			tp->snd_wnd = th->th_win;
13748 			rack_validate_fo_sendwin_up(tp, rack);
13749 			if ((to.to_flags & TOF_TS) &&
13750 			    (tp->t_flags & TF_REQ_TSTMP)) {
13751 				tp->t_flags |= TF_RCVD_TSTMP;
13752 				tp->ts_recent = to.to_tsval;
13753 				tp->ts_recent_age = cts;
13754 			} else
13755 				tp->t_flags &= ~TF_REQ_TSTMP;
13756 			if (to.to_flags & TOF_MSS) {
13757 				tcp_mss(tp, to.to_mss);
13758 			}
13759 			if ((tp->t_flags & TF_SACK_PERMIT) &&
13760 			    (to.to_flags & TOF_SACKPERM) == 0)
13761 				tp->t_flags &= ~TF_SACK_PERMIT;
13762 			if (IS_FASTOPEN(tp->t_flags)) {
13763 				if (to.to_flags & TOF_FASTOPEN) {
13764 					uint16_t mss;
13765 
13766 					if (to.to_flags & TOF_MSS)
13767 						mss = to.to_mss;
13768 					else
13769 						if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
13770 							mss = TCP6_MSS;
13771 						else
13772 							mss = TCP_MSS;
13773 					tcp_fastopen_update_cache(tp, mss,
13774 					    to.to_tfo_len, to.to_tfo_cookie);
13775 				} else
13776 					tcp_fastopen_disable_path(tp);
13777 			}
13778 		}
13779 		/*
13780 		 * At this point we are at the initial call. Here we decide
13781 		 * if we are doing RACK or not. We do this by seeing if
13782 		 * TF_SACK_PERMIT is set and sack-not-required is clear.
13783 		 * The code now does dup-ack counting, so if you don't
13784 		 * switch back you won't get rack & TLP, but you will still
13785 		 * get this stack.
13786 		 */
13787 
13788 		if ((rack_sack_not_required == 0) &&
13789 		    ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
13790 			tcp_switch_back_to_default(tp);
13791 			(*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
13792 			    tlen, iptos);
13793 #ifdef TCP_ACCOUNTING
13794 			sched_unpin();
13795 #endif
13796 			return (1);
13797 		}
13798 		tcp_set_hpts(tp->t_inpcb);
13799 		sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
13800 	}
13801 	if (thflags & TH_FIN)
13802 		tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
13803 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13804 	if ((rack->rc_gp_dyn_mul) &&
13805 	    (rack->use_fixed_rate == 0) &&
13806 	    (rack->rc_always_pace)) {
13807 		/* Check in on probertt */
13808 		rack_check_probe_rtt(rack, us_cts);
13809 	}
13810 	if (rack->forced_ack) {
13811 		uint32_t us_rtt;
13812 
13813 		/*
13814 		 * A persist or keep-alive was forced out; update our
13815 		 * min rtt time. Note we do not worry about lost
13816 		 * retransmissions since keep-alives and persists
13817 		 * are usually sent at long intervals (though if we
13818 		 * were really paranoid or worried we could at least
13819 		 * use timestamps, if available, to validate).
13820 		 */
13821 		rack->forced_ack = 0;
13822 		us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
13823 		if (us_rtt == 0)
13824 			us_rtt = 1;
13825 		rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3);
13826 		rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13827 	}
13828 	/*
13829 	 * This is the one exception case where we set the rack state
13830 	 * always. All other times (timers, etc.) we must have a rack state
13831 	 * set (so we ensure we have done the checks above for SACK).
13832 	 */
13833 	rack->r_ctl.rc_rcvtime = cts;
13834 	if (rack->r_state != tp->t_state)
13835 		rack_set_state(tp, rack);
13836 	if (SEQ_GT(th->th_ack, tp->snd_una) &&
13837 	    (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
13838 		kern_prefetch(rsm, &prev_state);
13839 	prev_state = rack->r_state;
13840 	rack_clear_rate_sample(rack);
13841 	retval = (*rack->r_substate) (m, th, so,
13842 	    tp, &to, drop_hdrlen,
13843 	    tlen, tiwin, thflags, nxt_pkt, iptos);
13844 #ifdef INVARIANTS
13845 	if ((retval == 0) &&
13846 	    (tp->t_inpcb == NULL)) {
13847 		panic("retval:%d tp:%p t_inpcb:NULL state:%d",
13848 		    retval, tp, prev_state);
13849 	}
13850 #endif
13851 	if (retval == 0) {
13852 		/*
13853 		 * If retval is 1 the tcb is unlocked and most likely the tp
13854 		 * is gone.
13855 		 */
13856 		INP_WLOCK_ASSERT(tp->t_inpcb);
13857 		if ((rack->rc_gp_dyn_mul) &&
13858 		    (rack->rc_always_pace) &&
13859 		    (rack->use_fixed_rate == 0) &&
13860 		    rack->in_probe_rtt &&
13861 		    (rack->r_ctl.rc_time_probertt_starts == 0)) {
13862 			/*
13863 			 * If we are going for target, lets recheck before
13864 			 * we output.
13865 			 */
13866 			rack_check_probe_rtt(rack, us_cts);
13867 		}
13868 		if (rack->set_pacing_done_a_iw == 0) {
13869 			/* How much has been acked? */
13870 			if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
13871 				/* We have enough to set in the pacing segment size */
13872 				rack->set_pacing_done_a_iw = 1;
13873 				rack_set_pace_segments(tp, rack, __LINE__, NULL);
13874 			}
13875 		}
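		/*
		 * E.g., with a 1448-byte maxseg this waits until roughly
		 * 14.5KB (about an initial window) has been cumulatively
		 * acked before sizing the pacing segments.
		 */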
13876 		tcp_rack_xmit_timer_commit(rack, tp);
13877 #ifdef TCP_ACCOUNTING
13878 		/*
13879 		 * If we set ack_val_set to the type of ack processing we are doing
13880 		 * we also want to track how many cycles we burned. Note
13881 		 * the bits after tcp_output we let be "free". This is because
13882 		 * we are also tracking the tcp_output times as well. Note the
13883 		 * use of 0xf here since we only have 11 counters (0 - 0xa) and
13884 		 * 0xf cannot be returned and is what we initialize it to, to
13885 		 * indicate we are not doing the tabulations.
13886 		 */
13887 		if (ack_val_set != 0xf) {
13888 			uint64_t crtsc;
13889 
13890 			crtsc = get_cyclecount();
13891 			counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13892 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13893 				tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
13894 			}
13895 		}
13896 #endif
13897 		if (nxt_pkt == 0) {
13898 			if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13899 do_output_now:
13900 				did_out = 1;
13901 				(void)tp->t_fb->tfb_tcp_output(tp);
13902 			}
13903 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
13904 			rack_free_trim(rack);
13905 		}
13906 		if ((nxt_pkt == 0) &&
13907 		    ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
13908 		    (SEQ_GT(tp->snd_max, tp->snd_una) ||
13909 		     (tp->t_flags & TF_DELACK) ||
13910 		     ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
13911 		      (tp->t_state <= TCPS_CLOSING)))) {
13912 			/* We could not send (probably in the hpts but stopped the timer earlier)? */
13913 			if ((tp->snd_max == tp->snd_una) &&
13914 			    ((tp->t_flags & TF_DELACK) == 0) &&
13915 			    (rack->rc_inp->inp_in_hpts) &&
13916 			    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
13917 				/* keep-alive not needed if we are still awaiting hpts output */
13918 				;
13919 			} else {
13920 				int late = 0;
13921 				if (rack->rc_inp->inp_in_hpts) {
13922 					if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
13923 						us_cts = tcp_get_usecs(NULL);
13924 						if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
13925 							rack->r_early = 1;
13926 							rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
13927 						} else
13928 							late = 1;
13929 						rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
13930 					}
13931 					tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
13932 				}
13933 				if (late && (did_out == 0)) {
13934 					/*
13935 					 * We are late in the sending
13936 					 * and we did not call the output
13937 					 * (this probably should not happen).
13938 					 */
13939 					goto do_output_now;
13940 				}
13941 				rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
13942 			}
13943 			way_out = 1;
13944 		} else if (nxt_pkt == 0) {
13945 			/* Do we have the correct timer running? */
13946 			rack_timer_audit(tp, rack, &so->so_snd);
13947 			way_out = 2;
13948 		}
13949 	done_with_input:
13950 		rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, m->m_pkthdr.lro_nsegs));
13951 		if (did_out)
13952 			rack->r_wanted_output = 0;
13953 #ifdef INVARIANTS
13954 		if (tp->t_inpcb == NULL) {
13955 			panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
13956 			      did_out,
13957 			      retval, tp, prev_state);
13958 		}
13959 #endif
13960 #ifdef TCP_ACCOUNTING
13961 	} else {
13962 		/*
13963 		 * Track the time (see above).
13964 		 */
13965 		if (ack_val_set != 0xf) {
13966 			uint64_t crtsc;
13967 
13968 			crtsc = get_cyclecount();
13969 			counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13970 			/*
13971 			 * Note we *DO NOT* increment the per-tcb counters since
13972 			 * in the else the TP may be gone!!
13973 			 */
13974 		}
13975 #endif
13976 	}
13977 #ifdef TCP_ACCOUNTING
13978 	sched_unpin();
13979 #endif
13980 	return (retval);
13981 }
13982 
13983 void
13984 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
13985     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
13986 {
13987 	struct timeval tv;
13988 
13989 	/* First let's see if we have old packets */
13990 	if (tp->t_in_pkt) {
13991 		if (ctf_do_queued_segments(so, tp, 1)) {
13992 			m_freem(m);
13993 			return;
13994 		}
13995 	}
13996 	if (m->m_flags & M_TSTMP_LRO) {
13997 		tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000;
13998 		tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000;
13999 	} else {
14000 		/* Should not happen; should we KASSERT instead? */
14001 		tcp_get_usecs(&tv);
14002 	}
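	/*
	 * The LRO timestamp is in nanoseconds, e.g. rcv_tstmp =
	 * 1000000123456 splits into tv_sec = 1000 and
	 * tv_usec = 123456 / 1000 = 123.
	 */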
14003 	if (rack_do_segment_nounlock(m, th, so, tp,
14004 				     drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
14005 		tcp_handle_wakeup(tp, so);
14006 		INP_WUNLOCK(tp->t_inpcb);
14007 	}
14008 }
14009 
14010 struct rack_sendmap *
14011 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
14012 {
14013 	struct rack_sendmap *rsm = NULL;
14014 	int32_t idx;
14015 	uint32_t srtt = 0, thresh = 0, ts_low = 0;
14016 
14017 	/* Return the next guy to be re-transmitted */
14018 	if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
14019 		return (NULL);
14020 	}
14021 	if (tp->t_flags & TF_SENTFIN) {
14022 		/* retran the end FIN? */
14023 		return (NULL);
14024 	}
14025 	/* ok, let's look at this one */
14026 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
14027 	if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
14028 		goto check_it;
14029 	}
14030 	rsm = rack_find_lowest_rsm(rack);
14031 	if (rsm == NULL) {
14032 		return (NULL);
14033 	}
14034 check_it:
14035 	if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
14036 	    (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
14037 		/*
14038 		 * No sack so we automatically do the 3 strikes and
14039 		 * retransmit (no rack timer would be started).
14040 		 */
14041 
14042 		return (rsm);
14043 	}
14044 	if (rsm->r_flags & RACK_ACKED) {
14045 		return (NULL);
14046 	}
14047 	if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
14048 	    (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
14049 		/* It's not yet ready */
14050 		return (NULL);
14051 	}
14052 	srtt = rack_grab_rtt(tp, rack);
14053 	idx = rsm->r_rtr_cnt - 1;
14054 	ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
14055 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
14056 	if ((tsused == ts_low) ||
14057 	    (TSTMP_LT(tsused, ts_low))) {
14058 		/* No time since sending */
14059 		return (NULL);
14060 	}
14061 	if ((tsused - ts_low) < thresh) {
14062 		/* It has not been long enough yet */
14063 		return (NULL);
14064 	}
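	/*
	 * Enough time has passed; e.g., with an srtt of 40ms the rack
	 * threshold is roughly the srtt plus a small reordering
	 * allowance, so a segment last sent ~50ms ago is eligible here.
	 */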
14065 	if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
14066 	    ((rsm->r_flags & RACK_SACK_PASSED) &&
14067 	     (rack->sack_attack_disable == 0))) {
14068 		/*
14069 		 * We have passed the dup-ack threshold <or>
14070 		 * a SACK has indicated this is missing.
14071 		 * Note that if you are a declared attacker
14072 		 * it is only the dup-ack threshold that
14073 		 * will cause retransmits.
14074 		 */
14075 		/* log retransmit reason */
14076 		rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
14077 		rack->r_fast_output = 0;
14078 		return (rsm);
14079 	}
14080 	return (NULL);
14081 }
14082 
14083 static void
14084 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
14085 			   uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
14086 			   int line, struct rack_sendmap *rsm)
14087 {
14088 	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
14089 		union tcp_log_stackspecific log;
14090 		struct timeval tv;
14091 
14092 		memset(&log, 0, sizeof(log));
14093 		log.u_bbr.flex1 = slot;
14094 		log.u_bbr.flex2 = len;
14095 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
14096 		log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
14097 		log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
14098 		log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
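		/*
		 * Pack eight state flags into use_lt_bw, MSB to LSB:
		 * rc_ack_can_sendout_data, r_late, r_early,
		 * app_limited_needs_set, rc_gp_filled,
		 * measure_saw_probe_rtt, in_probe_rtt, gp_ready.
		 */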
14099 		log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
14100 		log.u_bbr.use_lt_bw <<= 1;
14101 		log.u_bbr.use_lt_bw |= rack->r_late;
14102 		log.u_bbr.use_lt_bw <<= 1;
14103 		log.u_bbr.use_lt_bw |= rack->r_early;
14104 		log.u_bbr.use_lt_bw <<= 1;
14105 		log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
14106 		log.u_bbr.use_lt_bw <<= 1;
14107 		log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
14108 		log.u_bbr.use_lt_bw <<= 1;
14109 		log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
14110 		log.u_bbr.use_lt_bw <<= 1;
14111 		log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
14112 		log.u_bbr.use_lt_bw <<= 1;
14113 		log.u_bbr.use_lt_bw |= rack->gp_ready;
14114 		log.u_bbr.pkt_epoch = line;
14115 		log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
14116 		log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
14117 		log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
14118 		log.u_bbr.bw_inuse = bw_est;
14119 		log.u_bbr.delRate = bw;
14120 		if (rack->r_ctl.gp_bw == 0)
14121 			log.u_bbr.cur_del_rate = 0;
14122 		else
14123 			log.u_bbr.cur_del_rate = rack_get_bw(rack);
14124 		log.u_bbr.rttProp = len_time;
14125 		log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
14126 		log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
14127 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
14128 		if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
14129 			/* We are in slow start */
14130 			log.u_bbr.flex7 = 1;
14131 		} else {
14132 			/* we are on congestion avoidance */
14133 			log.u_bbr.flex7 = 0;
14134 		}
14135 		log.u_bbr.flex8 = method;
14136 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14137 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14138 		log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
14139 		log.u_bbr.cwnd_gain <<= 1;
14140 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
14141 		log.u_bbr.cwnd_gain <<= 1;
14142 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
14143 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
14144 		    &rack->rc_inp->inp_socket->so_rcv,
14145 		    &rack->rc_inp->inp_socket->so_snd,
14146 		    BBR_LOG_HPTSI_CALC, 0,
14147 		    0, &log, false, &tv);
14148 	}
14149 }
14150 
14151 static uint32_t
14152 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
14153 {
14154 	uint32_t new_tso, user_max;
14155 
14156 	user_max = rack->rc_user_set_max_segs * mss;
14157 	if (rack->rc_force_max_seg) {
14158 		return (user_max);
14159 	}
14160 	if (rack->use_fixed_rate &&
14161 	    ((rack->r_ctl.crte == NULL) ||
14162 	     (bw != rack->r_ctl.crte->rate))) {
14163 		/* Use the user mss since we are not exactly matched */
14164 		return (user_max);
14165 	}
14166 	new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
14167 	if (new_tso > user_max)
14168 		new_tso = user_max;
14169 	return (new_tso);
14170 }
14171 
14172 static int32_t
14173 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
14174 {
14175 	uint64_t lentim, fill_bw;
14176 
14177 	/* Let's first see if we are full; if so, continue with the normal rate */
14178 	rack->r_via_fill_cw = 0;
14179 	if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
14180 		return (slot);
14181 	if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
14182 		return (slot);
14183 	if (rack->r_ctl.rc_last_us_rtt == 0)
14184 		return (slot);
14185 	if (rack->rc_pace_fill_if_rttin_range &&
14186 	    (rack->r_ctl.rc_last_us_rtt >=
14187 	     (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
14188 		/* The rtt is huge, N * smallest; let's not fill */
14189 		return (slot);
14190 	}
14191 	/*
14192 	 * First let's calculate the b/w based on the last us-rtt
14193 	 * and the sndwnd.
14194 	 */
14195 	fill_bw = rack->r_ctl.cwnd_to_use;
14196 	/* Take the rwnd if it's smaller */
14197 	if (fill_bw > rack->rc_tp->snd_wnd)
14198 		fill_bw = rack->rc_tp->snd_wnd;
14199 	if (rack->r_fill_less_agg) {
14200 		/*
14201 		 * Now take away the inflight (this will reduce our
14202 		 * aggressiveness and yeah, if we get that much out in 1RTT
14203 		 * we will have had acks come back and still be behind).
14204 		 */
14205 		fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14206 	}
14207 	/* Now let's make it into a b/w */
14208 	fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
14209 	fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
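	/*
	 * E.g., a 64000-byte window over a 10000us rtt gives
	 * fill_bw = 64000 * 1000000 / 10000 = 6,400,000 bytes/sec.
	 */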
14210 	/* We are below the min b/w */
14211 	if (non_paced)
14212 		*rate_wanted = fill_bw;
14213 	if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
14214 		return (slot);
14215 	if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap))
14216 		fill_bw = rack->r_ctl.bw_rate_cap;
14217 	rack->r_via_fill_cw = 1;
14218 	if (rack->r_rack_hw_rate_caps &&
14219 	    (rack->r_ctl.crte != NULL)) {
14220 		uint64_t high_rate;
14221 
14222 		high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
14223 		if (fill_bw > high_rate) {
14224 			/* We are capping bw at the highest rate table entry */
14225 			if (*rate_wanted > high_rate) {
14226 				/* The original rate was also capped */
14227 				rack->r_via_fill_cw = 0;
14228 			}
14229 			rack_log_hdwr_pacing(rack,
14230 					     fill_bw, high_rate, __LINE__,
14231 					     0, 3);
14232 			fill_bw = high_rate;
14233 			if (capped)
14234 				*capped = 1;
14235 		}
14236 	} else if ((rack->r_ctl.crte == NULL) &&
14237 		   (rack->rack_hdrw_pacing == 0) &&
14238 		   (rack->rack_hdw_pace_ena) &&
14239 		   rack->r_rack_hw_rate_caps &&
14240 		   (rack->rack_attempt_hdwr_pace == 0) &&
14241 		   (rack->rc_inp->inp_route.ro_nh != NULL) &&
14242 		   (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14243 		/*
14244 		 * Ok, we may have a first attempt that is greater than our top
14245 		 * rate; let's check.
14246 		 */
14247 		uint64_t high_rate;
14248 
14249 		high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
14250 		if (high_rate) {
14251 			if (fill_bw > high_rate) {
14252 				fill_bw = high_rate;
14253 				if (capped)
14254 					*capped = 1;
14255 			}
14256 		}
14257 	}
14258 	/*
14259 	 * Ok, fill_bw holds our mythical b/w to fill the cwnd
14260 	 * in an rtt; what does that equate to time-wise?
14261 	 */
14262 	lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
14263 	lentim /= fill_bw;
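	/*
	 * Continuing the example above, 1448 bytes at 6,400,000
	 * bytes/sec works out to 1448 * 1000000 / 6400000 ~= 226us.
	 */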
14264 	*rate_wanted = fill_bw;
14265 	if (non_paced || (lentim < slot)) {
14266 		rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
14267 					   0, lentim, 12, __LINE__, NULL);
14268 		return ((int32_t)lentim);
14269 	} else
14270 		return (slot);
14271 }
14272 
14273 static int32_t
14274 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
14275 {
14276 	struct rack_sendmap *lrsm;
14277 	int32_t slot = 0;
14278 	int can_start_hw_pacing = 1;
14279 	int err;
14280 
14281 	if (rack->rc_always_pace == 0) {
14282 		/*
14283 		 * We use the most optimistic possible cwnd/srtt for
14284 		 * sending calculations. This will make our
14285 		 * calculation anticipate getting more through
14286 		 * quicker than possible. But that's ok; we don't want
14287 		 * the peer to have a gap in data sending.
14288 		 */
14289 		uint32_t srtt, cwnd, tr_perms = 0;
14290 		int32_t reduce = 0;
14291 
14292 	old_method:
14293 		/*
14294 		 * We keep no precise pacing with the old method;
14295 		 * instead we use the pacer to mitigate bursts.
14296 		 */
14297 		if (rack->r_ctl.rc_rack_min_rtt)
14298 			srtt = rack->r_ctl.rc_rack_min_rtt;
14299 		else
14300 			srtt = max(tp->t_srtt, 1);
14301 		if (rack->r_ctl.rc_rack_largest_cwnd)
14302 			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
14303 		else
14304 			cwnd = rack->r_ctl.cwnd_to_use;
14305 		/* Inflate cwnd by 1000 so srtt of usecs is in ms */
14306 		tr_perms = (cwnd * 1000) / srtt;
14307 		if (tr_perms == 0) {
14308 			tr_perms = ctf_fixed_maxseg(tp);
14309 		}
14310 		/*
14311 		 * Calculate how long this will take to drain. If
14312 		 * the calculation comes out to zero, that's ok; we
14313 		 * will use send_a_lot to possibly spin around for
14314 		 * more, increasing tot_len_this_send to the point
14315 		 * that it's going to require a pace, or we hit the
14316 		 * cwnd. In that case we are just waiting for
14317 		 * an ACK.
14318 		 */
14319 		slot = len / tr_perms;
14320 		/* Now do we reduce the time so we don't run dry? */
14321 		if (slot && rack_slot_reduction) {
14322 			reduce = (slot / rack_slot_reduction);
14323 			if (reduce < slot) {
14324 				slot -= reduce;
14325 			} else
14326 				slot = 0;
14327 		}
14328 		slot *= HPTS_USEC_IN_MSEC;
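		/*
		 * E.g., cwnd 14600 and srtt 100000us give tr_perms =
		 * 14600 * 1000 / 100000 = 146 bytes/ms, so a 1460-byte
		 * send drains in 10ms, i.e. a 10000us slot before the
		 * rack_slot_reduction trim above.
		 */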
14329 		if (rsm == NULL) {
14330 			/*
14331 			 * We always consider ourselves app limited with old-style
14332 			 * sends that are not retransmits. This could be the initial
14333 			 * measurement, but that's ok; it's all set up and specially
14334 			 * handled. If another send leaks out, then that too will
14335 			 * be marked app-limited.
14336 			 */
14337 			lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14338 			if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) {
14339 				rack->r_ctl.rc_first_appl = lrsm;
14340 				lrsm->r_flags |= RACK_APP_LIMITED;
14341 				rack->r_ctl.rc_app_limited_cnt++;
14342 			}
14343 		}
14344 		if (rack->rc_pace_to_cwnd) {
14345 			uint64_t rate_wanted = 0;
14346 
14347 			slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
14348 			rack->rc_ack_can_sendout_data = 1;
14349 			rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL);
14350 		} else
14351 			rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL);
14352 	} else {
14353 		uint64_t bw_est, res, lentim, rate_wanted;
14354 		uint32_t orig_val, srtt, segs, oh;
14355 		int capped = 0;
14356 		int prev_fill;
14357 
14358 		if ((rack->r_rr_config == 1) && rsm) {
14359 			return (rack->r_ctl.rc_min_to);
14360 		}
14361 		if (rack->use_fixed_rate) {
14362 			rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
14363 		} else if ((rack->r_ctl.init_rate == 0) &&
14364 #ifdef NETFLIX_PEAKRATE
14365 			   (rack->rc_tp->t_maxpeakrate == 0) &&
14366 #endif
14367 			   (rack->r_ctl.gp_bw == 0)) {
14368 			/* no way yet to do an estimate */
14369 			bw_est = rate_wanted = 0;
14370 		} else {
14371 			bw_est = rack_get_bw(rack);
14372 			rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
14373 		}
14374 		if ((bw_est == 0) || (rate_wanted == 0) ||
14375 		    ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
14376 			/*
14377 			 * No way yet to make a b/w estimate or
14378 			 * our rate is set incorrectly.
14379 			 */
14380 			goto old_method;
14381 		}
14382 		/* We need to account for all the overheads */
14383 		segs = (len + segsiz - 1) / segsiz;
14384 		/*
14385 		 * We need the diff between 1514 bytes (e-mtu with e-hdr)
14386 		 * and how much data we put in each packet. Yes this
14387 		 * means we may be off if we are larger than 1500 bytes
14388 		 * or smaller. But this just makes us more conservative.
14389 		 */
14390 		if (rack_hw_rate_min &&
14391 		    (bw_est < rack_hw_rate_min))
14392 			can_start_hw_pacing = 0;
14393 		if (ETHERNET_SEGMENT_SIZE > segsiz)
14394 			oh = ETHERNET_SEGMENT_SIZE - segsiz;
14395 		else
14396 			oh = 0;
14397 		segs *= oh;
14398 		lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
14399 		res = lentim / rate_wanted;
14400 		slot = (uint32_t)res;
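		/*
		 * E.g., len 14480 with segsiz 1448 gives segs = 10; with an
		 * assumed ETHERNET_SEGMENT_SIZE of 1514 the overhead is
		 * oh = 66, i.e. 660 extra bytes, so at a rate_wanted of
		 * 6,400,000 bytes/sec the slot is
		 * (14480 + 660) * 1000000 / 6400000 ~= 2365us.
		 */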
14401 		orig_val = rack->r_ctl.rc_pace_max_segs;
14402 		if (rack->r_ctl.crte == NULL) {
14403 			/*
14404 			 * Only do this if we are not hardware pacing
14405 			 * since if we are doing hw-pacing below we will
14406 			 * make a call after setting up or changing
14407 			 * the rate.
14408 			 */
14409 			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
14410 		} else if (rack->rc_inp->inp_snd_tag == NULL) {
14411 			/*
14412 			 * We lost our rate somehow, this can happen
14413 			 * if the interface changed underneath us.
14414 			 */
14415 			tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14416 			rack->r_ctl.crte = NULL;
14417 			/* Let's re-allow attempting to set up pacing */
14418 			rack->rack_hdrw_pacing = 0;
14419 			rack->rack_attempt_hdwr_pace = 0;
14420 			rack_log_hdwr_pacing(rack,
14421 					     rate_wanted, bw_est, __LINE__,
14422 					     0, 6);
14423 		}
14424 		/* Did we change the TSO size, if so log it */
14425 		if (rack->r_ctl.rc_pace_max_segs != orig_val)
14426 			rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL);
14427 		prev_fill = rack->r_via_fill_cw;
14428 		if ((rack->rc_pace_to_cwnd) &&
14429 		    (capped == 0) &&
14430 		    (rack->use_fixed_rate == 0) &&
14431 		    (rack->in_probe_rtt == 0) &&
14432 		    (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
14433 			/*
14434 			 * We want to pace at our rate *or* faster to
14435 			 * fill the cwnd to the max if its not full.
14436 			 */
14437 			slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
14438 		}
14439 		if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
14440 		    (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14441 			if ((rack->rack_hdw_pace_ena) &&
14442 			    (can_start_hw_pacing > 0) &&
14443 			    (rack->rack_hdrw_pacing == 0) &&
14444 			    (rack->rack_attempt_hdwr_pace == 0)) {
14445 				/*
14446 				 * Let's attempt to turn on hardware pacing
14447 				 * if we can.
14448 				 */
14449 				rack->rack_attempt_hdwr_pace = 1;
14450 				rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
14451 								       rack->rc_inp->inp_route.ro_nh->nh_ifp,
14452 								       rate_wanted,
14453 								       RS_PACING_GEQ,
14454 								       &err, &rack->r_ctl.crte_prev_rate);
14455 				if (rack->r_ctl.crte) {
14456 					rack->rack_hdrw_pacing = 1;
14457 					rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz,
14458 												 0, rack->r_ctl.crte,
14459 												 NULL);
14460 					rack_log_hdwr_pacing(rack,
14461 							     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14462 							     err, 0);
14463 					rack->r_ctl.last_hw_bw_req = rate_wanted;
14464 				} else {
14465 					counter_u64_add(rack_hw_pace_init_fail, 1);
14466 				}
14467 			} else if (rack->rack_hdrw_pacing &&
14468 				   (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
14469 				/* Do we need to adjust our rate? */
14470 				const struct tcp_hwrate_limit_table *nrte;
14471 
14472 				if (rack->r_up_only &&
14473 				    (rate_wanted < rack->r_ctl.crte->rate)) {
14474 					/**
14475 					 * We have four possible states here
14476 					 * having to do with the previous time
14477 					 * and this time.
14478 					 *   previous  |  this-time
14479 					 * A)     0      |     0   -- fill_cw not in the picture
14480 					 * B)     1      |     0   -- we were doing a fill-cw but now are not
14481 					 * C)     1      |     1   -- all rates from fill_cw
14482 					 * D)     0      |     1   -- we were doing non-fill and now we are filling
14483 					 *
14484 					 * For case A, C and D we don't allow a drop. But for
14485 					 * case B, where we are now on our steady rate, we do
14486 					 * allow a drop.
14487 					 *
14488 					 */
14489 					if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
14490 						goto done_w_hdwr;
14491 				}
14492 				if ((rate_wanted > rack->r_ctl.crte->rate) ||
14493 				    (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
14494 					if (rack_hw_rate_to_low &&
14495 					    (bw_est < rack_hw_rate_to_low)) {
14496 						/*
14497 						 * The pacing rate is too low for hardware, but
14498 						 * do allow hardware pacing to be restarted.
14499 						 */
14500 						rack_log_hdwr_pacing(rack,
14501 							     bw_est, rack->r_ctl.crte->rate, __LINE__,
14502 							     0, 5);
14503 						tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14504 						rack->r_ctl.crte = NULL;
14505 						rack->rack_attempt_hdwr_pace = 0;
14506 						rack->rack_hdrw_pacing = 0;
14507 						rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14508 						goto done_w_hdwr;
14509 					}
14510 					nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
14511 								   rack->rc_tp,
14512 								   rack->rc_inp->inp_route.ro_nh->nh_ifp,
14513 								   rate_wanted,
14514 								   RS_PACING_GEQ,
14515 								   &err, &rack->r_ctl.crte_prev_rate);
14516 					if (nrte == NULL) {
14517 						/* Lost the rate */
14518 						rack->rack_hdrw_pacing = 0;
14519 						rack->r_ctl.crte = NULL;
14520 						rack_log_hdwr_pacing(rack,
14521 								     rate_wanted, 0, __LINE__,
14522 								     err, 1);
14523 						rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14524 						counter_u64_add(rack_hw_pace_lost, 1);
14525 					} else if (nrte != rack->r_ctl.crte) {
14526 						rack->r_ctl.crte = nrte;
14527 						rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted,
14528 													 segsiz, 0,
14529 													 rack->r_ctl.crte,
14530 													 NULL);
14531 						rack_log_hdwr_pacing(rack,
14532 								     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14533 								     err, 2);
14534 						rack->r_ctl.last_hw_bw_req = rate_wanted;
14535 					}
14536 				} else {
14537 					/* We just need to adjust the segment size */
14538 					rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14539 					rack_log_hdwr_pacing(rack,
14540 							     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14541 							     0, 4);
14542 					rack->r_ctl.last_hw_bw_req = rate_wanted;
14543 				}
14544 			}
14545 		}
14546 		if ((rack->r_ctl.crte != NULL) &&
14547 		    (rack->r_ctl.crte->rate == rate_wanted)) {
14548 			/*
14549 			 * We need to add an extra delay if the rates
14550 			 * are exactly matched. The idea is
14551 			 * we want the software to make sure the
14552 			 * queue is empty before adding more; this
14553 			 * gives us N MSS extra pace times, where
14554 			 * N is our sysctl.
14555 			 */
14556 			slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots);
14557 		}
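		/*
		 * E.g., if crte->time_between (roughly the hardware's
		 * inter-send gap at this rate) is 116us and the sysctl
		 * asks for 2 extra slots, we pad the pacing time by 232us
		 * so software never runs ahead of the NIC queue.
		 */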
14558 done_w_hdwr:
14559 		if (rack_limit_time_with_srtt &&
14560 		    (rack->use_fixed_rate == 0) &&
14561 #ifdef NETFLIX_PEAKRATE
14562 		    (rack->rc_tp->t_maxpeakrate == 0) &&
14563 #endif
14564 		    (rack->rack_hdrw_pacing == 0)) {
14565 			/*
14566 			 * Sanity check, we do not allow the pacing delay
14567 			 * to be longer than the SRTT of the path. If it is
14568 			 * a slow path, then adding a packet should increase
14569 			 * the RTT and compensate for this i.e. the srtt will
14570 			 * be greater so the allowed pacing time will be greater.
14571 			 *
14572 			 * Note this restriction does not apply where a peak rate
14573 			 * is set, or we are doing fixed pacing or hardware pacing.
14574 			 */
14575 			if (rack->rc_tp->t_srtt)
14576 				srtt = rack->rc_tp->t_srtt;
14577 			else
14578 				srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC;	/* it's in ms, convert */
14579 			if (srtt < slot) {
14580 				rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL);
14581 				slot = srtt;
14582 			}
14583 		}
14584 		rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm);
14585 	}
14586 	if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
14587 		/*
14588 		 * If this rate is seeing enobufs when it
14589 		 * goes to send then either the NIC is out
14590 		 * of gas or we are mis-estimating the time
14591 		 * somehow and not letting the queue empty
14592 		 * completely. Let's add to the pacing time.
14593 		 */
14594 		int hw_boost_delay;
14595 
14596 		hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
14597 		if (hw_boost_delay > rack_enobuf_hw_max)
14598 			hw_boost_delay = rack_enobuf_hw_max;
14599 		else if (hw_boost_delay < rack_enobuf_hw_min)
14600 			hw_boost_delay = rack_enobuf_hw_min;
14601 		slot += hw_boost_delay;
14602 	}
14603 	if (slot)
14604 		counter_u64_add(rack_calc_nonzero, 1);
14605 	else
14606 		counter_u64_add(rack_calc_zero, 1);
14607 	return (slot);
14608 }
14609 
14610 static void
14611 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
14612     tcp_seq startseq, uint32_t sb_offset)
14613 {
14614 	struct rack_sendmap *my_rsm = NULL;
14615 	struct rack_sendmap fe;
14616 
14617 	if (tp->t_state < TCPS_ESTABLISHED) {
14618 		/*
14619 		 * We don't start any measurements if we are
14620 		 * not at least established.
14621 		 */
14622 		return;
14623 	}
14624 	tp->t_flags |= TF_GPUTINPROG;
14625 	rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
14626 	rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
14627 	tp->gput_seq = startseq;
14628 	rack->app_limited_needs_set = 0;
14629 	if (rack->in_probe_rtt)
14630 		rack->measure_saw_probe_rtt = 1;
14631 	else if ((rack->measure_saw_probe_rtt) &&
14632 		 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
14633 		rack->measure_saw_probe_rtt = 0;
14634 	if (rack->rc_gp_filled)
14635 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14636 	else {
14637 		/* Special case initial measurement */
14638 		struct timeval tv;
14639 
14640 		tp->gput_ts = tcp_get_usecs(&tv);
14641 		rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14642 	}
14643 	/*
14644 	 * We take a guess out into the future:
14645 	 * if we have no measurement and no
14646 	 * initial rate, we measure the first
14647 	 * initial window's worth of data to
14648 	 * speed up getting some GP measurement and
14649 	 * thus start pacing.
14650 	 */
14651 	if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
14652 		rack->app_limited_needs_set = 1;
14653 		tp->gput_ack = startseq + max(rc_init_window(rack),
14654 					      (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
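		/*
		 * E.g., assuming MIN_GP_WIN is 6, a 1448-byte maxseg and a
		 * 10-segment initial window, gput_ack is startseq +
		 * max(14480, 6 * 1448) = startseq + 14480.
		 */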
14655 		rack_log_pacing_delay_calc(rack,
14656 					   tp->gput_seq,
14657 					   tp->gput_ack,
14658 					   0,
14659 					   tp->gput_ts,
14660 					   rack->r_ctl.rc_app_limited_cnt,
14661 					   9,
14662 					   __LINE__, NULL);
14663 		return;
14664 	}
14665 	if (sb_offset) {
14666 		/*
14667 		 * We are out somewhere in the sb;
14668 		 * can we use the already outstanding data?
14669 		 */
14670 
14671 		if (rack->r_ctl.rc_app_limited_cnt == 0) {
14672 			/*
14673 			 * Yes, the first one is good, and in this case
14674 			 * the tp->gput_ts is correctly set based on
14675 			 * the last ack that arrived (no need to
14676 			 * set things up when an ack comes in).
14677 			 */
14678 			my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14679 			if ((my_rsm == NULL) ||
14680 			    (my_rsm->r_rtr_cnt != 1)) {
14681 				/* retransmission? */
14682 				goto use_latest;
14683 			}
14684 		} else {
14685 			if (rack->r_ctl.rc_first_appl == NULL) {
14686 				/*
14687 				 * If rc_first_appl is NULL
14688 				 * then the cnt should be 0.
14689 				 * This is probably an error, maybe
14690 				 * a KASSERT would be appropriate.
14691 				 */
14692 				goto use_latest;
14693 			}
14694 			/*
14695 			 * If we have a marker pointer to the last one that is
14696 			 * app limited we can use that, but we need to set
14697 			 * things up so that when it gets ack'ed we record
14698 			 * the ack time (if it's not already acked).
14699 			 */
14700 			rack->app_limited_needs_set = 1;
14701 			/*
14702 			 * We want to get to the rsm that is either
14703 			 * next with space, i.e. over 1 MSS, or the one
14704 			 * after that (after the app-limited).
14705 			 */
14706 			my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14707 					 rack->r_ctl.rc_first_appl);
14708 			if (my_rsm) {
14709 				if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
14710 					/* Have to use the next one */
14711 					my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14712 							 my_rsm);
14713 				else {
14714 					/* Use after the first MSS of it is acked */
14715 					tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
14716 					goto start_set;
14717 				}
14718 			}
14719 			if ((my_rsm == NULL) ||
14720 			    (my_rsm->r_rtr_cnt != 1)) {
14721 				/*
14722 				 * Either it's a retransmit or
14723 				 * the last is the app-limited one.
14724 				 */
14725 				goto use_latest;
14726 			}
14727 		}
14728 		tp->gput_seq = my_rsm->r_start;
14729 start_set:
14730 		if (my_rsm->r_flags & RACK_ACKED) {
14731 			/*
14732 		 * This one has been acked; use the arrival ack time
14733 			 */
14734 			tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14735 			rack->app_limited_needs_set = 0;
14736 		}
14737 		rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14738 		tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
14739 		rack_log_pacing_delay_calc(rack,
14740 					   tp->gput_seq,
14741 					   tp->gput_ack,
14742 					   (uint64_t)my_rsm,
14743 					   tp->gput_ts,
14744 					   rack->r_ctl.rc_app_limited_cnt,
14745 					   9,
14746 					   __LINE__, NULL);
14747 		return;
14748 	}
14749 
14750 use_latest:
14751 	/*
14752 	 * We don't know how long we may have been
14753 	 * idle or if this is the first send. Let's
14754 	 * set up the flag so we will trim off
14755 	 * the first ack'd data so we get a true
14756 	 * measurement.
14757 	 */
14758 	rack->app_limited_needs_set = 1;
14759 	tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
14760 	/* Find this guy so we can pull the send time */
14761 	fe.r_start = startseq;
14762 	my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
14763 	if (my_rsm) {
14764 		rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14765 		if (my_rsm->r_flags & RACK_ACKED) {
14766 			/*
14767 			 * Unlikely, since it's probably what was
14768 			 * just transmitted (but I am paranoid).
14769 			 */
14770 			tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14771 			rack->app_limited_needs_set = 0;
14772 		}
14773 		if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
14774 			/* This also is unlikely */
14775 			tp->gput_seq = my_rsm->r_start;
14776 		}
14777 	} else {
14778 		/*
14779 		 * TSNH unless we have some send-map limit,
14780 		 * and even at that it should not be hitting
14781 		 * that limit (we should have stopped sending).
14782 		 */
14783 		struct timeval tv;
14784 
14785 		microuptime(&tv);
14786 		rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14787 	}
14788 	rack_log_pacing_delay_calc(rack,
14789 				   tp->gput_seq,
14790 				   tp->gput_ack,
14791 				   (uint64_t)my_rsm,
14792 				   tp->gput_ts,
14793 				   rack->r_ctl.rc_app_limited_cnt,
14794 				   9, __LINE__, NULL);
14795 }
14796 
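/*
 * In effect this computes len = min(sendwin - flight,
 * tp->snd_wnd - outstanding, avail - sb_offset); e.g., sendwin 20000,
 * flight 5000, snd_wnd 30000, outstanding 5000 and only 8000 bytes in
 * the sb past sb_offset leaves len = 8000, the socket-buffer limit.
 */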
14797 static inline uint32_t
14798 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack,  uint32_t cwnd_to_use,
14799     uint32_t avail, int32_t sb_offset)
14800 {
14801 	uint32_t len;
14802 	uint32_t sendwin;
14803 
14804 	if (tp->snd_wnd > cwnd_to_use)
14805 		sendwin = cwnd_to_use;
14806 	else
14807 		sendwin = tp->snd_wnd;
14808 	if (ctf_outstanding(tp) >= tp->snd_wnd) {
14809 		/* We never want to go over our peer's rcv-window */
14810 		len = 0;
14811 	} else {
14812 		uint32_t flight;
14813 
14814 		flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
14815 		if (flight >= sendwin) {
14816 			/*
14817 			 * We have in flight what we are allowed by cwnd (if
14818 			 * it was rwnd blocking it would have hit the above
14819 			 * check on >= tp->snd_wnd).
14820 			 */
14821 			return (0);
14822 		}
14823 		len = sendwin - flight;
14824 		if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
14825 			/* We would send too much (beyond the rwnd) */
14826 			len = tp->snd_wnd - ctf_outstanding(tp);
14827 		}
14828 		if ((len + sb_offset) > avail) {
14829 			/*
14830 			 * We don't have that much in the SB; how much is
14831 			 * there?
14832 			 */
14833 			len = avail - sb_offset;
14834 		}
14835 	}
14836 	return (len);
14837 }
14838 
14839 static void
14840 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
14841 	     unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
14842 	     int rsm_is_null, int optlen, int line, uint16_t mode)
14843 {
14844 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
14845 		union tcp_log_stackspecific log;
14846 		struct timeval tv;
14847 
14848 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14849 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
14850 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
14851 		log.u_bbr.flex1 = error;
14852 		log.u_bbr.flex2 = flags;
14853 		log.u_bbr.flex3 = rsm_is_null;
14854 		log.u_bbr.flex4 = ipoptlen;
14855 		log.u_bbr.flex5 = tp->rcv_numsacks;
14856 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
14857 		log.u_bbr.flex7 = optlen;
14858 		log.u_bbr.flex8 = rack->r_fsb_inited;
14859 		log.u_bbr.applimited = rack->r_fast_output;
14860 		log.u_bbr.bw_inuse = rack_get_bw(rack);
14861 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
14862 		log.u_bbr.cwnd_gain = mode;
14863 		log.u_bbr.pkts_out = orig_len;
14864 		log.u_bbr.lt_epoch = len;
14865 		log.u_bbr.delivered = line;
14866 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14867 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14868 		tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
14869 			       len, &log, false, NULL, NULL, 0, &tv);
14870 	}
14871 }
14872 
14873 
14874 static struct mbuf *
14875 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
14876 		   struct rack_fast_send_blk *fsb,
14877 		   int32_t seglimit, int32_t segsize)
14878 {
14879 #ifdef KERN_TLS
14880 	struct ktls_session *tls, *ntls;
14881 	struct mbuf *start;
14882 #endif
14883 	struct mbuf *m, *n, **np, *smb;
14884 	struct mbuf *top;
14885 	int32_t off, soff;
14886 	int32_t len = *plen;
14887 	int32_t fragsize;
14888 	int32_t len_cp = 0;
14889 	uint32_t mlen, frags;
14890 
14891 	soff = off = the_off;
14892 	smb = m = the_m;
14893 	np = &top;
14894 	top = NULL;
14895 #ifdef KERN_TLS
14896 	if (hw_tls && (m->m_flags & M_EXTPG))
14897 		tls = m->m_epg_tls;
14898 	else
14899 		tls = NULL;
14900 	start = m;
14901 #endif
14902 	while (len > 0) {
14903 		if (m == NULL) {
14904 			*plen = len_cp;
14905 			break;
14906 		}
14907 #ifdef KERN_TLS
14908 		if (hw_tls) {
14909 			if (m->m_flags & M_EXTPG)
14910 				ntls = m->m_epg_tls;
14911 			else
14912 				ntls = NULL;
14913 
14914 			/*
14915 			 * Avoid mixing TLS records with handshake
14916 			 * data or TLS records from different
14917 			 * sessions.
14918 			 */
14919 			if (tls != ntls) {
14920 				MPASS(m != start);
14921 				*plen = len_cp;
14922 				break;
14923 			}
14924 		}
14925 #endif
14926 		mlen = min(len, m->m_len - off);
14927 		if (seglimit) {
14928 			/*
14929 			 * For M_EXTPG mbufs, add 3 segments
14930 			 * + 1 in case we are crossing page boundaries
14931 			 * + 2 in case the TLS hdr/trailer are used
14932 			 * It is cheaper to just add the segments
14933 			 * than it is to take the cache miss to look
14934 			 * at the mbuf ext_pgs state in detail.
14935 			 */
14936 			if (m->m_flags & M_EXTPG) {
14937 				fragsize = min(segsize, PAGE_SIZE);
14938 				frags = 3;
14939 			} else {
14940 				fragsize = segsize;
14941 				frags = 0;
14942 			}
14943 
14944 			/* Break if we really can't fit anymore. */
14945 			if ((frags + 1) >= seglimit) {
14946 				*plen =	len_cp;
14947 				break;
14948 			}
14949 
14950 			/*
14951 			 * Reduce size if you can't copy the whole
14952 			 * mbuf. If we can't copy the whole mbuf, also
14953 			 * adjust len so the loop will end after this
14954 			 * mbuf.
14955 			 */
14956 			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
14957 				mlen = (seglimit - frags - 1) * fragsize;
14958 				len = mlen;
14959 				*plen = len_cp + len;
14960 			}
14961 			frags += howmany(mlen, fragsize);
14962 			if (frags == 0)
14963 				frags++;
14964 			seglimit -= frags;
14965 			KASSERT(seglimit > 0,
14966 			    ("%s: seglimit went too low", __func__));
14967 		}
14968 		n = m_get(M_NOWAIT, m->m_type);
14969 		*np = n;
14970 		if (n == NULL)
14971 			goto nospace;
14972 		n->m_len = mlen;
14973 		soff += mlen;
14974 		len_cp += n->m_len;
14975 		if (m->m_flags & (M_EXT|M_EXTPG)) {
14976 			n->m_data = m->m_data + off;
14977 			mb_dupcl(n, m);
14978 		} else {
14979 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
14980 			    (u_int)n->m_len);
14981 		}
14982 		len -= n->m_len;
14983 		off = 0;
14984 		m = m->m_next;
14985 		np = &n->m_next;
14986 		if (len || (soff == smb->m_len)) {
14987 			/*
14988 			 * We have more so we move forward, or
14989 			 * we have consumed the entire mbuf and
14990 			 * len has fallen to 0.
14991 			 */
14992 			soff = 0;
14993 			smb = m;
14994 		}
14995 
14996 	}
14997 	if (fsb != NULL) {
14998 		fsb->m = smb;
14999 		fsb->off = soff;
15000 		if (smb) {
15001 			/*
15002 			 * Save off the size of the mbuf. We do
15003 			 * this so that we can recognize when it
15004 			 * has been trimmed by sbcut() as acks
15005 			 * come in.
15006 			 */
15007 			fsb->o_m_len = smb->m_len;
15008 		} else {
15009 			/*
15010 			 * This is the case where the next mbuf went to NULL. This
15011 			 * means with this copy we have sent everything in the sb.
15012 			 * In theory we could clear the fast_output flag, but let's
15013 			 * not, since it's possible that we could get more added
15014 			 * and acks that call the extend function which would let
15015 			 * us send more.
15016 			 */
15017 			fsb->o_m_len = 0;
15018 		}
15019 	}
15020 	return (top);
15021 nospace:
15022 	if (top)
15023 		m_freem(top);
15024 	return (NULL);
15025 
15026 }
15027 
15028 /*
15029  * This is a copy of m_copym(), taking the TSO segment size/limit
15030  * constraints into account, and advancing the sndptr as it goes.
15031  */
15032 static struct mbuf *
15033 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15034 		int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15035 {
15036 	struct mbuf *m, *n;
15037 	int32_t soff;
15038 
15039 	soff = rack->r_ctl.fsb.off;
15040 	m = rack->r_ctl.fsb.m;
15041 	if (rack->r_ctl.fsb.o_m_len != m->m_len) {
15042 		/*
15043 		 * The mbuf had the front of it chopped off by an ack;
15044 		 * we need to adjust the soff/off by that difference.
15045 		 */
15046 		uint32_t delta;
15047 
15048 		delta = rack->r_ctl.fsb.o_m_len - m->m_len;
15049 		soff -= delta;
15050 	}
15051 	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15052 	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15053 	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15054 				 __FUNCTION__,
15055 				 rack, *plen, m, m->m_len));
15056 	/* Save off the right location before we copy and advance */
15057 	*s_soff = soff;
15058 	*s_mb = rack->r_ctl.fsb.m;
15059 	n = rack_fo_base_copym(m, soff, plen,
15060 			       &rack->r_ctl.fsb,
15061 			       seglimit, segsize);
15062 	return (n);
15063 }
15064 
15065 static int
15066 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15067 		     uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len)
15068 {
15069 	/*
15070 	 * Enter the fast retransmit path. We are given that a sched_pin is
15071 	 * in place (if accounting is compiled in) and the cycle count taken
15072 	 * at the entry is in ts_val. The concept here is that the rsm
15073 	 * now holds the mbuf offsets and such, so we can directly transmit
15074 	 * without a lot of overhead; the len field is already set for
15075 	 * us to prohibit us from sending too much (usually it's 1 MSS).
15076 	 */
15077 	struct ip *ip = NULL;
15078 	struct udphdr *udp = NULL;
15079 	struct tcphdr *th = NULL;
15080 	struct mbuf *m = NULL;
15081 	struct inpcb *inp;
15082 	uint8_t *cpto;
15083 	struct tcp_log_buffer *lgb;
15084 #ifdef TCP_ACCOUNTING
15085 	uint64_t crtsc;
15086 	int cnt_thru = 1;
15087 #endif
15088 	int doing_tlp = 0;
15089 	struct tcpopt to;
15090 	u_char opt[TCP_MAXOLEN];
15091 	uint32_t hdrlen, optlen;
15092 	int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0;
15093 	uint32_t us_cts;
15094 	uint32_t if_hw_tsomaxsegcount = 0, startseq;
15095 	uint32_t if_hw_tsomaxsegsize;
15096 
15097 #ifdef INET6
15098 	struct ip6_hdr *ip6 = NULL;
15099 
15100 	if (rack->r_is_v6) {
15101 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15102 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15103 	} else
15104 #endif				/* INET6 */
15105 	{
15106 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15107 		hdrlen = sizeof(struct tcpiphdr);
15108 	}
15109 	if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15110 		goto failed;
15111 	}
15112 	if (rsm->r_flags & RACK_TLP)
15113 		doing_tlp = 1;
15114 	startseq = rsm->r_start;
15115 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15116 	inp = rack->rc_inp;
15117 	to.to_flags = 0;
15118 	flags = tcp_outflags[tp->t_state];
15119 	if (flags & (TH_SYN|TH_RST)) {
15120 		goto failed;
15121 	}
15122 	if (rsm->r_flags & RACK_HAS_FIN) {
15123 		/* We can't send a FIN here */
15124 		goto failed;
15125 	}
15126 	if (flags & TH_FIN) {
15127 		/* We never send a FIN */
15128 		flags &= ~TH_FIN;
15129 	}
15130 	if (tp->t_flags & TF_RCVD_TSTMP) {
15131 		to.to_tsval = ms_cts + tp->ts_offset;
15132 		to.to_tsecr = tp->ts_recent;
15133 		to.to_flags = TOF_TS;
15134 	}
15135 	optlen = tcp_addoptions(&to, opt);
15136 	hdrlen += optlen;
15137 	udp = rack->r_ctl.fsb.udp;
15138 	if (udp)
15139 		hdrlen += sizeof(struct udphdr);
15140 	if (rack->r_ctl.rc_pace_max_segs)
15141 		max_val = rack->r_ctl.rc_pace_max_segs;
15142 	else if (rack->rc_user_set_max_segs)
15143 		max_val = rack->rc_user_set_max_segs * segsiz;
15144 	else
15145 		max_val = len;
15146 	if ((tp->t_flags & TF_TSO) &&
15147 	    V_tcp_do_tso &&
15148 	    (len > segsiz) &&
15149 	    (tp->t_port == 0))
15150 		tso = 1;
15151 #ifdef INET6
15152 	if (MHLEN < hdrlen + max_linkhdr)
15153 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15154 	else
15155 #endif
15156 		m = m_gethdr(M_NOWAIT, MT_DATA);
15157 	if (m == NULL)
15158 		goto failed;
15159 	m->m_data += max_linkhdr;
15160 	m->m_len = hdrlen;
15161 	th = rack->r_ctl.fsb.th;
15162 	/* Establish the len to send */
15163 	if (len > max_val)
15164 		len = max_val;
15165 	if ((tso) && (len + optlen > tp->t_maxseg)) {
15166 		uint32_t if_hw_tsomax;
15167 		int32_t max_len;
15168 
15169 		/* extract TSO information */
15170 		if_hw_tsomax = tp->t_tsomax;
15171 		if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15172 		if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15173 		/*
15174 		 * Check if we should limit by maximum payload
15175 		 * length:
15176 		 */
15177 		if (if_hw_tsomax != 0) {
15178 			/* compute maximum TSO length */
15179 			max_len = (if_hw_tsomax - hdrlen -
15180 				   max_linkhdr);
15181 			if (max_len <= 0) {
15182 				goto failed;
15183 			} else if (len > max_len) {
15184 				len = max_len;
15185 			}
15186 		}
15187 		if (len <= segsiz) {
15188 			/*
15189 			 * In case there are too many small fragments don't
15190 			 * use TSO:
15191 			 */
15192 			tso = 0;
15193 		}
15194 	} else {
15195 		tso = 0;
15196 	}
15197 	if ((tso == 0) && (len > segsiz))
15198 		len = segsiz;
15199 	us_cts = tcp_get_usecs(tv);
15200 	if ((len == 0) ||
15201 	    (len <= MHLEN - hdrlen - max_linkhdr)) {
15202 		goto failed;
15203 	}
15204 	th->th_seq = htonl(rsm->r_start);
15205 	th->th_ack = htonl(tp->rcv_nxt);
15206 	/*
15207 	 * The PUSH bit should only be applied
15208 	 * if the full retransmission is made. If
15209 	 * we are sending less than this is the
15210 	 * left hand edge and should not have
15211 	 * the PUSH bit.
15212 	 */
15213 	if ((rsm->r_flags & RACK_HAD_PUSH) &&
15214 	    (len == (rsm->r_end - rsm->r_start)))
15215 		flags |= TH_PUSH;
15216 	th->th_flags = flags;
15217 	th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15218 	if (th->th_win == 0) {
15219 		tp->t_sndzerowin++;
15220 		tp->t_flags |= TF_RXWIN0SENT;
15221 	} else
15222 		tp->t_flags &= ~TF_RXWIN0SENT;
15223 	if (rsm->r_flags & RACK_TLP) {
15224 		/*
15225 		 * A TLP should not count in the retransmit count, but
15226 		 * in its own bin.
15227 		 */
15228 		counter_u64_add(rack_tlp_retran, 1);
15229 		counter_u64_add(rack_tlp_retran_bytes, len);
15230 	} else {
15231 		tp->t_sndrexmitpack++;
15232 		KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
15233 		KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
15234 	}
15235 #ifdef STATS
15236 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
15237 				 len);
15238 #endif
15239 	if (rsm->m == NULL)
15240 		goto failed;
15241 	if (rsm->orig_m_len != rsm->m->m_len) {
15242 		/* Fix up the orig_m_len and possibly the mbuf offset */
15243 		rack_adjust_orig_mlen(rsm);
15244 	}
15245 	m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize);
15246 	if (len <= segsiz) {
15247 		/*
15248 		 * We must have run out of mbufs for the copy;
15249 		 * shorten it so we no longer need TSO. Let's
15250 		 * not set sendalot since we are low on
15251 		 * mbufs.
15252 		 */
15253 		tso = 0;
15254 	}
15255 	if ((m->m_next == NULL) || (len <= 0)){
15256 		goto failed;
15257 	}
15258 	if (udp) {
15259 		if (rack->r_is_v6)
15260 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
15261 		else
15262 			ulen = hdrlen + len - sizeof(struct ip);
15263 		udp->uh_ulen = htons(ulen);
15264 	}
15265 	m->m_pkthdr.rcvif = (struct ifnet *)0;
15266 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
15267 #ifdef INET6
15268 	if (rack->r_is_v6) {
15269 		if (tp->t_port) {
15270 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15271 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15272 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15273 			th->th_sum = htons(0);
15274 			UDPSTAT_INC(udps_opackets);
15275 		} else {
15276 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15277 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15278 			th->th_sum = in6_cksum_pseudo(ip6,
15279 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15280 						      0);
15281 		}
15282 	}
15283 #endif
15284 #if defined(INET6) && defined(INET)
15285 	else
15286 #endif
15287 #ifdef INET
15288 	{
15289 		if (tp->t_port) {
15290 			m->m_pkthdr.csum_flags = CSUM_UDP;
15291 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15292 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15293 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15294 			th->th_sum = htons(0);
15295 			UDPSTAT_INC(udps_opackets);
15296 		} else {
15297 			m->m_pkthdr.csum_flags = CSUM_TCP;
15298 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15299 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
15300 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15301 									IPPROTO_TCP + len + optlen));
15302 		}
15303 		/* IP version must be set here for ipv4/ipv6 checking later */
15304 		KASSERT(ip->ip_v == IPVERSION,
15305 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
15306 	}
15307 #endif
15308 	if (tso) {
15309 		KASSERT(len > tp->t_maxseg - optlen,
15310 			("%s: len <= tso_segsz tp:%p", __func__, tp));
15311 		m->m_pkthdr.csum_flags |= CSUM_TSO;
15312 		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15313 	}
15314 #ifdef INET6
15315 	if (rack->r_is_v6) {
15316 		ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15317 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15318 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15319 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15320 		else
15321 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15322 	}
15323 #endif
15324 #if defined(INET) && defined(INET6)
15325 	else
15326 #endif
15327 #ifdef INET
15328 	{
15329 		ip->ip_len = htons(m->m_pkthdr.len);
15330 		ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15331 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15332 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15333 			if (tp->t_port == 0 || len < V_tcp_minmss) {
15334 				ip->ip_off |= htons(IP_DF);
15335 			}
15336 		} else {
15337 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15338 		}
15339 	}
15340 #endif
15341 	/* Time to copy in our header */
15342 	cpto = mtod(m, uint8_t *);
15343 	memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15344 	th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15345 	if (optlen) {
15346 		bcopy(opt, th + 1, optlen);
15347 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15348 	} else {
15349 		th->th_off = sizeof(struct tcphdr) >> 2;
15350 	}
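	/*
	 * Note on the copy above: the prebuilt fsb header template was
	 * cloned into the fresh mbuf and th was re-based by applying the
	 * template's th offset to cpto, which is why the options could
	 * be bcopy'd to (th + 1), directly behind the TCP header.
	 */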
15351 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15352 		union tcp_log_stackspecific log;
15353 
15354 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15355 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15356 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15357 		if (rack->rack_no_prr)
15358 			log.u_bbr.flex1 = 0;
15359 		else
15360 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15361 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15362 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15363 		log.u_bbr.flex4 = max_val;
15364 		log.u_bbr.flex5 = 0;
15365 		/* Save off the early/late values */
15366 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15367 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15368 		log.u_bbr.bw_inuse = rack_get_bw(rack);
15369 		log.u_bbr.flex8 = 1;
15370 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15371 		log.u_bbr.flex7 = 55;
15372 		log.u_bbr.pkts_out = tp->t_maxseg;
15373 		log.u_bbr.timeStamp = cts;
15374 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15375 		log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15376 		log.u_bbr.delivered = 0;
15377 		lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15378 				     len, &log, false, NULL, NULL, 0, tv);
15379 	} else
15380 		lgb = NULL;
15381 #ifdef INET6
15382 	if (rack->r_is_v6) {
15383 		error = ip6_output(m, NULL,
15384 				   &inp->inp_route6,
15385 				   0, NULL, NULL, inp);
15386 	}
15387 #endif
15388 #if defined(INET) && defined(INET6)
15389 	else
15390 #endif
15391 #ifdef INET
15392 	{
15393 		error = ip_output(m, NULL,
15394 				  &inp->inp_route,
15395 				  0, 0, inp);
15396 	}
15397 #endif
15398 	m = NULL;
15399 	if (lgb) {
15400 		lgb->tlb_errno = error;
15401 		lgb = NULL;
15402 	}
15403 	if (error) {
15404 		goto failed;
15405 	}
15406 	rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
15407 			rsm, RACK_SENT_FP, rsm->m, rsm->soff);
15408 	if (doing_tlp && (rack->fast_rsm_hack == 0)) {
15409 		rack->rc_tlp_in_progress = 1;
15410 		rack->r_ctl.rc_tlp_cnt_out++;
15411 	}
15412 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15413 	rack->forced_ack = 0;	/* If we send something zap the FA flag */
15414 	if (IN_FASTRECOVERY(tp->t_flags) && rsm)
15415 		rack->r_ctl.retran_during_recovery += len;
15416 	{
15417 		int idx;
15418 
15419 		idx = (len / segsiz) + 3;
15420 		if (idx >= TCP_MSS_ACCT_ATIMER)
15421 			counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15422 		else
15423 			counter_u64_add(rack_out_size[idx], 1);
15424 	}
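	/*
	 * Bucket arithmetic, for illustration only: a 4000 byte send
	 * with segsiz = 1448 gives idx = (4000 / 1448) + 3 = 5, bumping
	 * that slot of rack_out_size; any idx at or past
	 * TCP_MSS_ACCT_ATIMER folds into the final bucket.
	 */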
15425 	if (tp->t_rtttime == 0) {
15426 		tp->t_rtttime = ticks;
15427 		tp->t_rtseq = startseq;
15428 		KMOD_TCPSTAT_INC(tcps_segstimed);
15429 	}
15430 	counter_u64_add(rack_fto_rsm_send, 1);
15431 	if (error && (error == ENOBUFS)) {
15432 		slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
15433 		if (rack->rc_enobuf < 0x7f)
15434 			rack->rc_enobuf++;
15435 		if (slot < (10 * HPTS_USEC_IN_MSEC))
15436 			slot = 10 * HPTS_USEC_IN_MSEC;
15437 	} else
15438 		slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
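	/*
	 * Backoff sketch (illustrative): on a third consecutive ENOBUFS,
	 * rc_enobuf is 2 on entry, so slot = (1 + 2) * HPTS_USEC_IN_MSEC
	 * = 3000 usec, which the 10 ms floor above then raises to 10000
	 * usec before the hpts timer is armed below.
	 */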
15439 	if ((slot == 0) ||
15440 	    (rack->rc_always_pace == 0) ||
15441 	    (rack->r_rr_config == 1)) {
15442 		/*
15443 		 * We have no pacing set, or we
15444 		 * are using old-style rack, or
15445 		 * we are overridden to use the old 1ms pacing.
15446 		 */
15447 		slot = rack->r_ctl.rc_min_to;
15448 	}
15449 	rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
15450 	if (rack->r_must_retran) {
15451 		rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
15452 		if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
15453 			/*
15454 			 * We have retransmitted all we need.
15455 			 */
15456 			rack->r_must_retran = 0;
15457 			rack->r_ctl.rc_out_at_rto = 0;
15458 		}
15459 	}
15460 #ifdef TCP_ACCOUNTING
15461 	crtsc = get_cyclecount();
15462 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15463 		tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15464 	}
15465 	counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15466 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15467 		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15468 	}
15469 	counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15470 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15471 		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
15472 	}
15473 	counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz));
15474 	sched_unpin();
15475 #endif
15476 	return (0);
15477 failed:
15478 	if (m)
15479 		m_free(m);
15480 	return (-1);
15481 }
15482 
15483 static void
15484 rack_sndbuf_autoscale(struct tcp_rack *rack)
15485 {
15486 	/*
15487 	 * Automatic sizing of send socket buffer.  Often the send buffer
15488 	 * size is not optimally adjusted to the actual network conditions
15489 	 * at hand (delay bandwidth product).  Setting the buffer size too
15490 	 * small limits throughput on links with high bandwidth and high
15491 	 * delay (eg. trans-continental/oceanic links).  Setting the
15492 	 * buffer size too big consumes too much real kernel memory,
15493 	 * especially with many connections on busy servers.
15494 	 *
15495 	 * The criteria to step up the send buffer one notch are:
15496 	 *  1. receive window of remote host is larger than send buffer
15497 	 *     (with a fudge factor of 5/4th);
15498 	 *  2. send buffer is filled to 7/8th with data (so we actually
15499 	 *     have data to make use of it);
15500 	 *  3. send buffer fill has not hit maximal automatic size;
15501 	 *  4. our send window (slow start and congestion controlled) is
15502 	 *     larger than sent but unacknowledged data in send buffer.
15503 	 *
15504 	 * Note that the rack version moves things much faster since
15505 	 * we want to avoid hitting cache lines in the rack_fast_output()
15506 	 * path so this is called much less often and thus moves
15507 	 * the SB forward by a percentage.
15508 	 */
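	/*
	 * Worked example of the step-up test (illustrative numbers): with
	 * sb_hiwat = 64k, the peer's window must satisfy
	 * snd_wnd / 4 * 5 >= 64k (i.e. snd_wnd >= ~51.2k), at least
	 * 7/8th of 64k (56k) must be sitting in the buffer, and the send
	 * window must cover the unsent backlog; only then is sb_hiwat
	 * grown by rack_autosndbuf_inc percent of itself (floored at
	 * V_tcp_autosndbuf_inc and capped at V_tcp_autosndbuf_max).
	 */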
15509 	struct socket *so;
15510 	struct tcpcb *tp;
15511 	uint32_t sendwin, scaleup;
15512 
15513 	tp = rack->rc_tp;
15514 	so = rack->rc_inp->inp_socket;
15515 	sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
15516 	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
15517 		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
15518 		    sbused(&so->so_snd) >=
15519 		    (so->so_snd.sb_hiwat / 8 * 7) &&
15520 		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
15521 		    sendwin >= (sbused(&so->so_snd) -
15522 		    (tp->snd_nxt - tp->snd_una))) {
15523 			if (rack_autosndbuf_inc)
15524 				scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
15525 			else
15526 				scaleup = V_tcp_autosndbuf_inc;
15527 			if (scaleup < V_tcp_autosndbuf_inc)
15528 				scaleup = V_tcp_autosndbuf_inc;
15529 			scaleup += so->so_snd.sb_hiwat;
15530 			if (scaleup > V_tcp_autosndbuf_max)
15531 				scaleup = V_tcp_autosndbuf_max;
15532 			if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread))
15533 				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
15534 		}
15535 	}
15536 }
15537 
15538 static int
15539 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
15540 		 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
15541 {
15542 	/*
15543 	 * Enter to do fast output. We are given that the sched_pin is
15544 	 * in place (if accounting is compiled in) and the cycle count taken
15545 	 * at entry is in place in ts_val. The idea here is that
15546 	 * we know how many more bytes need to be sent (presumably either
15547 	 * during pacing or to fill the cwnd and that was greater than
15548 	 * the max-burst). We have how much to send and all the info we
15549 	 * need to just send.
15550 	 */
15551 	struct ip *ip = NULL;
15552 	struct udphdr *udp = NULL;
15553 	struct tcphdr *th = NULL;
15554 	struct mbuf *m, *s_mb;
15555 	struct inpcb *inp;
15556 	uint8_t *cpto;
15557 	struct tcp_log_buffer *lgb;
15558 #ifdef TCP_ACCOUNTING
15559 	uint64_t crtsc;
15560 #endif
15561 	struct tcpopt to;
15562 	u_char opt[TCP_MAXOLEN];
15563 	uint32_t hdrlen, optlen;
15564 	int cnt_thru = 1;
15565 	int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0;
15566 	uint32_t us_cts, s_soff;
15567 	uint32_t if_hw_tsomaxsegcount = 0, startseq;
15568 	uint32_t if_hw_tsomaxsegsize;
15569 	uint16_t add_flag = RACK_SENT_FP;
15570 #ifdef INET6
15571 	struct ip6_hdr *ip6 = NULL;
15572 
15573 	if (rack->r_is_v6) {
15574 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15575 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15576 	} else
15577 #endif				/* INET6 */
15578 	{
15579 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15580 		hdrlen = sizeof(struct tcpiphdr);
15581 	}
15582 	if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15583 		m = NULL;
15584 		goto failed;
15585 	}
15586 	startseq = tp->snd_max;
15587 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15588 	inp = rack->rc_inp;
15589 	len = rack->r_ctl.fsb.left_to_send;
15590 	to.to_flags = 0;
15591 	flags = rack->r_ctl.fsb.tcp_flags;
15592 	if (tp->t_flags & TF_RCVD_TSTMP) {
15593 		to.to_tsval = ms_cts + tp->ts_offset;
15594 		to.to_tsecr = tp->ts_recent;
15595 		to.to_flags = TOF_TS;
15596 	}
15597 	optlen = tcp_addoptions(&to, opt);
15598 	hdrlen += optlen;
15599 	udp = rack->r_ctl.fsb.udp;
15600 	if (udp)
15601 		hdrlen += sizeof(struct udphdr);
15602 	if (rack->r_ctl.rc_pace_max_segs)
15603 		max_val = rack->r_ctl.rc_pace_max_segs;
15604 	else if (rack->rc_user_set_max_segs)
15605 		max_val = rack->rc_user_set_max_segs * segsiz;
15606 	else
15607 		max_val = len;
15608 	if ((tp->t_flags & TF_TSO) &&
15609 	    V_tcp_do_tso &&
15610 	    (len > segsiz) &&
15611 	    (tp->t_port == 0))
15612 		tso = 1;
15613 again:
15614 #ifdef INET6
15615 	if (MHLEN < hdrlen + max_linkhdr)
15616 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15617 	else
15618 #endif
15619 		m = m_gethdr(M_NOWAIT, MT_DATA);
15620 	if (m == NULL)
15621 		goto failed;
15622 	m->m_data += max_linkhdr;
15623 	m->m_len = hdrlen;
15624 	th = rack->r_ctl.fsb.th;
15625 	/* Establish the len to send */
15626 	if (len > max_val)
15627 		len = max_val;
15628 	if ((tso) && (len + optlen > tp->t_maxseg)) {
15629 		uint32_t if_hw_tsomax;
15630 		int32_t max_len;
15631 
15632 		/* extract TSO information */
15633 		if_hw_tsomax = tp->t_tsomax;
15634 		if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15635 		if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15636 		/*
15637 		 * Check if we should limit by maximum payload
15638 		 * length:
15639 		 */
15640 		if (if_hw_tsomax != 0) {
15641 			/* compute maximum TSO length */
15642 			max_len = (if_hw_tsomax - hdrlen -
15643 				   max_linkhdr);
15644 			if (max_len <= 0) {
15645 				goto failed;
15646 			} else if (len > max_len) {
15647 				len = max_len;
15648 			}
15649 		}
15650 		if (len <= segsiz) {
15651 			/*
15652 			 * In case there are too many small fragments don't
15653 			 * use TSO:
15654 			 */
15655 			tso = 0;
15656 		}
15657 	} else {
15658 		tso = 0;
15659 	}
15660 	if ((tso == 0) && (len > segsiz))
15661 		len = segsiz;
15662 	us_cts = tcp_get_usecs(tv);
15663 	if ((len == 0) ||
15664 	    (len <= MHLEN - hdrlen - max_linkhdr)) {
15665 		goto failed;
15666 	}
15667 	sb_offset = tp->snd_max - tp->snd_una;
15668 	th->th_seq = htonl(tp->snd_max);
15669 	th->th_ack = htonl(tp->rcv_nxt);
15670 	th->th_flags = flags;
15671 	th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15672 	if (th->th_win == 0) {
15673 		tp->t_sndzerowin++;
15674 		tp->t_flags |= TF_RXWIN0SENT;
15675 	} else
15676 		tp->t_flags &= ~TF_RXWIN0SENT;
15677 	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
15678 	KMOD_TCPSTAT_INC(tcps_sndpack);
15679 	KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
15680 #ifdef STATS
15681 	stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
15682 				 len);
15683 #endif
15684 	if (rack->r_ctl.fsb.m == NULL)
15685 		goto failed;
15686 
15687 	/* s_mb and s_soff are saved for rack_log_output */
15688 	m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, &s_mb, &s_soff);
15689 	if (len <= segsiz) {
15690 		/*
15691 		 * We must have run out of mbufs for the copy;
15692 		 * shorten it so we no longer need TSO. Let's
15693 		 * not set sendalot since we are low on
15694 		 * mbufs.
15695 		 */
15696 		tso = 0;
15697 	}
15698 	if (rack->r_ctl.fsb.rfo_apply_push &&
15699 	    (len == rack->r_ctl.fsb.left_to_send)) {
15700 		th->th_flags |= TH_PUSH;
15701 		add_flag |= RACK_HAD_PUSH;
15702 	}
15703 	if ((m->m_next == NULL) || (len <= 0)){
15704 		goto failed;
15705 	}
15706 	if (udp) {
15707 		if (rack->r_is_v6)
15708 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
15709 		else
15710 			ulen = hdrlen + len - sizeof(struct ip);
15711 		udp->uh_ulen = htons(ulen);
15712 	}
15713 	m->m_pkthdr.rcvif = (struct ifnet *)0;
15714 	if (tp->t_state == TCPS_ESTABLISHED &&
15715 	    (tp->t_flags2 & TF2_ECN_PERMIT)) {
15716 		/*
15717 		 * If the peer has ECN, mark data packets with ECN capable
15718 		 * transmission (ECT). Ignore pure ack packets,
15719 		 * retransmissions.
15720 		 */
15721 		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) {
15722 #ifdef INET6
15723 			if (rack->r_is_v6)
15724 				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
15725 			else
15726 #endif
15727 				ip->ip_tos |= IPTOS_ECN_ECT0;
15728 			KMOD_TCPSTAT_INC(tcps_ecn_ect0);
15729 			/*
15730 			 * Reply with proper ECN notifications.
15731 			 * Only set CWR on new data segments.
15732 			 */
15733 			if (tp->t_flags2 & TF2_ECN_SND_CWR) {
15734 				flags |= TH_CWR;
15735 				tp->t_flags2 &= ~TF2_ECN_SND_CWR;
15736 			}
15737 		}
15738 		if (tp->t_flags2 & TF2_ECN_SND_ECE)
15739 			flags |= TH_ECE;
15740 	}
15741 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
15742 #ifdef INET6
15743 	if (rack->r_is_v6) {
15744 		if (tp->t_port) {
15745 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15746 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15747 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15748 			th->th_sum = htons(0);
15749 			UDPSTAT_INC(udps_opackets);
15750 		} else {
15751 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15752 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15753 			th->th_sum = in6_cksum_pseudo(ip6,
15754 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15755 						      0);
15756 		}
15757 	}
15758 #endif
15759 #if defined(INET6) && defined(INET)
15760 	else
15761 #endif
15762 #ifdef INET
15763 	{
15764 		if (tp->t_port) {
15765 			m->m_pkthdr.csum_flags = CSUM_UDP;
15766 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15767 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15768 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15769 			th->th_sum = htons(0);
15770 			UDPSTAT_INC(udps_opackets);
15771 		} else {
15772 			m->m_pkthdr.csum_flags = CSUM_TCP;
15773 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15774 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
15775 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15776 									IPPROTO_TCP + len + optlen));
15777 		}
15778 		/* IP version must be set here for ipv4/ipv6 checking later */
15779 		KASSERT(ip->ip_v == IPVERSION,
15780 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
15781 	}
15782 #endif
15783 	if (tso) {
15784 		KASSERT(len > tp->t_maxseg - optlen,
15785 			("%s: len <= tso_segsz tp:%p", __func__, tp));
15786 		m->m_pkthdr.csum_flags |= CSUM_TSO;
15787 		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15788 	}
15789 #ifdef INET6
15790 	if (rack->r_is_v6) {
15791 		ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15792 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15793 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15794 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15795 		else
15796 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15797 	}
15798 #endif
15799 #if defined(INET) && defined(INET6)
15800 	else
15801 #endif
15802 #ifdef INET
15803 	{
15804 		ip->ip_len = htons(m->m_pkthdr.len);
15805 		ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15806 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15807 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15808 			if (tp->t_port == 0 || len < V_tcp_minmss) {
15809 				ip->ip_off |= htons(IP_DF);
15810 			}
15811 		} else {
15812 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15813 		}
15814 	}
15815 #endif
15816 	/* Time to copy in our header */
15817 	cpto = mtod(m, uint8_t *);
15818 	memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15819 	th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15820 	if (optlen) {
15821 		bcopy(opt, th + 1, optlen);
15822 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15823 	} else {
15824 		th->th_off = sizeof(struct tcphdr) >> 2;
15825 	}
15826 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15827 		union tcp_log_stackspecific log;
15828 
15829 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15830 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15831 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15832 		if (rack->rack_no_prr)
15833 			log.u_bbr.flex1 = 0;
15834 		else
15835 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15836 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15837 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15838 		log.u_bbr.flex4 = max_val;
15839 		log.u_bbr.flex5 = 0;
15840 		/* Save off the early/late values */
15841 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15842 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15843 		log.u_bbr.bw_inuse = rack_get_bw(rack);
15844 		log.u_bbr.flex8 = 0;
15845 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15846 		log.u_bbr.flex7 = 44;
15847 		log.u_bbr.pkts_out = tp->t_maxseg;
15848 		log.u_bbr.timeStamp = cts;
15849 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15850 		log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15851 		log.u_bbr.delivered = 0;
15852 		lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15853 				     len, &log, false, NULL, NULL, 0, tv);
15854 	} else
15855 		lgb = NULL;
15856 #ifdef INET6
15857 	if (rack->r_is_v6) {
15858 		error = ip6_output(m, NULL,
15859 				   &inp->inp_route6,
15860 				   0, NULL, NULL, inp);
15861 	}
15862 #endif
15863 #if defined(INET) && defined(INET6)
15864 	else
15865 #endif
15866 #ifdef INET
15867 	{
15868 		error = ip_output(m, NULL,
15869 				  &inp->inp_route,
15870 				  0, 0, inp);
15871 	}
15872 #endif
15873 	if (lgb) {
15874 		lgb->tlb_errno = error;
15875 		lgb = NULL;
15876 	}
15877 	if (error) {
15878 		*send_err = error;
15879 		m = NULL;
15880 		goto failed;
15881 	}
15882 	rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
15883 			NULL, add_flag, s_mb, s_soff);
15884 	m = NULL;
15885 	if (tp->snd_una == tp->snd_max) {
15886 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
15887 		rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
15888 		tp->t_acktime = ticks;
15889 	}
15890 	rack->forced_ack = 0;	/* If we send something zap the FA flag */
15891 	tot_len += len;
15892 	if ((tp->t_flags & TF_GPUTINPROG) == 0)
15893 		rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
15894 	tp->snd_max += len;
15895 	tp->snd_nxt = tp->snd_max;
15896 	{
15897 		int idx;
15898 
15899 		idx = (len / segsiz) + 3;
15900 		if (idx >= TCP_MSS_ACCT_ATIMER)
15901 			counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15902 		else
15903 			counter_u64_add(rack_out_size[idx], 1);
15904 	}
15905 	if (len <= rack->r_ctl.fsb.left_to_send)
15906 		rack->r_ctl.fsb.left_to_send -= len;
15907 	else
15908 		rack->r_ctl.fsb.left_to_send = 0;
15909 	if (rack->r_ctl.fsb.left_to_send < segsiz) {
15910 		rack->r_fast_output = 0;
15911 		rack->r_ctl.fsb.left_to_send = 0;
15912 		/* At the end of fast_output scale up the sb */
15913 		SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
15914 		rack_sndbuf_autoscale(rack);
15915 		SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
15916 	}
15917 	if (tp->t_rtttime == 0) {
15918 		tp->t_rtttime = ticks;
15919 		tp->t_rtseq = startseq;
15920 		KMOD_TCPSTAT_INC(tcps_segstimed);
15921 	}
15922 	if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
15923 	    (max_val > len) &&
15924 	    (tso == 0)) {
15925 		max_val -= len;
15926 		len = segsiz;
15927 		th = rack->r_ctl.fsb.th;
15928 		cnt_thru++;
15929 		goto again;
15930 	}
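	/*
	 * Loop sketch (illustrative): with left_to_send = 4344, segsiz =
	 * 1448 and TSO off, the "again" label is taken twice more after
	 * the first send, emitting three 1448 byte segments in all, with
	 * cnt_thru recording the trips for the accounting below.
	 */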
15931 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15932 	counter_u64_add(rack_fto_send, 1);
15933 	slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
15934 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
15935 #ifdef TCP_ACCOUNTING
15936 	crtsc = get_cyclecount();
15937 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15938 		tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15939 	}
15940 	counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15941 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15942 		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15943 	}
15944 	counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15945 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15946 		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
15947 	}
15948 	counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz));
15949 	sched_unpin();
15950 #endif
15951 	return (0);
15952 failed:
15953 	if (m)
15954 		m_free(m);
15955 	rack->r_fast_output = 0;
15956 	return (-1);
15957 }
15958 
15959 static int
15960 rack_output(struct tcpcb *tp)
15961 {
15962 	struct socket *so;
15963 	uint32_t recwin;
15964 	uint32_t sb_offset, s_moff = 0;
15965 	int32_t len, flags, error = 0;
15966 	struct mbuf *m, *s_mb = NULL;
15967 	struct mbuf *mb;
15968 	uint32_t if_hw_tsomaxsegcount = 0;
15969 	uint32_t if_hw_tsomaxsegsize;
15970 	int32_t segsiz, minseg;
15971 	long tot_len_this_send = 0;
15972 #ifdef INET
15973 	struct ip *ip = NULL;
15974 #endif
15975 #ifdef TCPDEBUG
15976 	struct ipovly *ipov = NULL;
15977 #endif
15978 	struct udphdr *udp = NULL;
15979 	struct tcp_rack *rack;
15980 	struct tcphdr *th;
15981 	uint8_t pass = 0;
15982 	uint8_t mark = 0;
15983 	uint8_t wanted_cookie = 0;
15984 	u_char opt[TCP_MAXOLEN];
15985 	unsigned ipoptlen, optlen, hdrlen, ulen=0;
15986 	uint32_t rack_seq;
15987 
15988 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
15989 	unsigned ipsec_optlen = 0;
15990 
15991 #endif
15992 	int32_t idle, sendalot;
15993 	int32_t sub_from_prr = 0;
15994 	volatile int32_t sack_rxmit;
15995 	struct rack_sendmap *rsm = NULL;
15996 	int32_t tso, mtu;
15997 	struct tcpopt to;
15998 	int32_t slot = 0;
15999 	int32_t sup_rack = 0;
16000 	uint32_t cts, ms_cts, delayed, early;
16001 	uint16_t add_flag = RACK_SENT_SP;
16002 	uint8_t hpts_calling,  doing_tlp = 0;
16003 	uint32_t cwnd_to_use, pace_max_seg;
16004 	int32_t do_a_prefetch = 0;
16005 	int32_t prefetch_rsm = 0;
16006 	int32_t orig_len = 0;
16007 	struct timeval tv;
16008 	int32_t prefetch_so_done = 0;
16009 	struct tcp_log_buffer *lgb;
16010 	struct inpcb *inp;
16011 	struct sockbuf *sb;
16012 	uint64_t ts_val = 0;
16013 #ifdef TCP_ACCOUNTING
16014 	uint64_t crtsc;
16015 #endif
16016 #ifdef INET6
16017 	struct ip6_hdr *ip6 = NULL;
16018 	int32_t isipv6;
16019 #endif
16020 	uint8_t filled_all = 0;
16021 	bool hw_tls = false;
16022 
16023 	/* setup and take the cache hits here */
16024 	rack = (struct tcp_rack *)tp->t_fb_ptr;
16025 #ifdef TCP_ACCOUNTING
16026 	sched_pin();
16027 	ts_val = get_cyclecount();
16028 #endif
16029 	hpts_calling = rack->rc_inp->inp_hpts_calls;
16030 	NET_EPOCH_ASSERT();
16031 	INP_WLOCK_ASSERT(rack->rc_inp);
16032 #ifdef TCP_OFFLOAD
16033 	if (tp->t_flags & TF_TOE) {
16034 #ifdef TCP_ACCOUNTING
16035 		sched_unpin();
16036 #endif
16037 		return (tcp_offload_output(tp));
16038 	}
16039 #endif
16040 	/*
16041 	 * For TFO connections in SYN_RECEIVED, only allow the initial
16042 	 * SYN|ACK and those sent by the retransmit timer.
16043 	 */
16044 	if (IS_FASTOPEN(tp->t_flags) &&
16045 	    (tp->t_state == TCPS_SYN_RECEIVED) &&
16046 	    SEQ_GT(tp->snd_max, tp->snd_una) &&    /* initial SYN|ACK sent */
16047 	    (rack->r_ctl.rc_resend == NULL)) {         /* not a retransmit */
16048 #ifdef TCP_ACCOUNTING
16049 		sched_unpin();
16050 #endif
16051 		return (0);
16052 	}
16053 #ifdef INET6
16054 	if (rack->r_state) {
16055 		/* Use the cache line loaded if possible */
16056 		isipv6 = rack->r_is_v6;
16057 	} else {
16058 		isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
16059 	}
16060 #endif
16061 	early = 0;
16062 	cts = tcp_get_usecs(&tv);
16063 	ms_cts = tcp_tv_to_mssectick(&tv);
16064 	if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
16065 	    rack->rc_inp->inp_in_hpts) {
16066 		/*
16067 		 * We are on the hpts for some timer but not hptsi output.
16068 		 * Remove from the hpts unconditionally.
16069 		 */
16070 		rack_timer_cancel(tp, rack, cts, __LINE__);
16071 	}
16072 	/* Are we pacing and late? */
16073 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16074 	    TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
16075 		/* We are delayed */
16076 		delayed = cts - rack->r_ctl.rc_last_output_to;
16077 	} else {
16078 		delayed = 0;
16079 	}
16080 	/* Do the timers, which may override the pacer */
16081 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
16082 		if (rack_process_timers(tp, rack, cts, hpts_calling)) {
16083 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
16084 #ifdef TCP_ACCOUNTING
16085 			sched_unpin();
16086 #endif
16087 			return (0);
16088 		}
16089 	}
16090 	if (rack->rc_in_persist) {
16091 		if (rack->rc_inp->inp_in_hpts == 0) {
16092 			/* Timer is not running */
16093 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16094 		}
16095 #ifdef TCP_ACCOUNTING
16096 		sched_unpin();
16097 #endif
16098 		return (0);
16099 	}
16100 	if ((rack->r_timer_override) ||
16101 	    (rack->rc_ack_can_sendout_data) ||
16102 	    (delayed) ||
16103 	    (tp->t_state < TCPS_ESTABLISHED)) {
16104 		rack->rc_ack_can_sendout_data = 0;
16105 		if (rack->rc_inp->inp_in_hpts)
16106 			tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
16107 	} else if (rack->rc_inp->inp_in_hpts) {
16108 		/*
16109 		 * While on the hpts you can't pass through, even if ACKNOW
16110 		 * is on; we will send when the hpts fires.
16111 		 */
16112 #ifdef TCP_ACCOUNTING
16113 		crtsc = get_cyclecount();
16114 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16115 			tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
16116 		}
16117 		counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val));
16118 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16119 			tp->tcp_cnt_counters[SND_BLOCKED]++;
16120 		}
16121 		counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1);
16122 		sched_unpin();
16123 #endif
16124 		counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
16125 		return (0);
16126 	}
16127 	rack->rc_inp->inp_hpts_calls = 0;
16128 	/* Finish out both pacing early and late accounting */
16129 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16130 	    TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
16131 		early = rack->r_ctl.rc_last_output_to - cts;
16132 	} else
16133 		early = 0;
16134 	if (delayed) {
16135 		rack->r_ctl.rc_agg_delayed += delayed;
16136 		rack->r_late = 1;
16137 	} else if (early) {
16138 		rack->r_ctl.rc_agg_early += early;
16139 		rack->r_early = 1;
16140 	}
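	/*
	 * Example of the accounting above (hypothetical timestamps): if
	 * the pacer asked to run at rc_last_output_to = 105000 usec and
	 * cts = 103000 usec, we are 2000 usec early and that is folded
	 * into rc_agg_early; arriving at 107000 usec instead would have
	 * added 2000 usec to rc_agg_delayed.
	 */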
16141 	/* Now that early/late accounting is done turn off the flag */
16142 	rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
16143 	rack->r_wanted_output = 0;
16144 	rack->r_timer_override = 0;
16145 	if ((tp->t_state != rack->r_state) &&
16146 	    TCPS_HAVEESTABLISHED(tp->t_state)) {
16147 		rack_set_state(tp, rack);
16148 	}
16149 	if ((rack->r_fast_output) &&
16150 	    (tp->rcv_numsacks == 0)) {
16151 		int ret;
16152 
16153 		error = 0;
16154 		ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
16155 		if (ret >= 0)
16156 			return(ret);
16157 		else if (error) {
16158 			inp = rack->rc_inp;
16159 			so = inp->inp_socket;
16160 			sb = &so->so_snd;
16161 			goto nomore;
16162 		}
16163 	}
16164 	inp = rack->rc_inp;
16165 	/*
16166 	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
16167 	 * only allow the initial SYN or SYN|ACK and those sent
16168 	 * by the retransmit timer.
16169 	 */
16170 	if (IS_FASTOPEN(tp->t_flags) &&
16171 	    ((tp->t_state == TCPS_SYN_RECEIVED) ||
16172 	     (tp->t_state == TCPS_SYN_SENT)) &&
16173 	    SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
16174 	    (tp->t_rxtshift == 0)) {              /* not a retransmit */
16175 		cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16176 		so = inp->inp_socket;
16177 		sb = &so->so_snd;
16178 		goto just_return_nolock;
16179 	}
16180 	/*
16181 	 * Determine length of data that should be transmitted, and flags
16182 	 * that will be used. If there is some data or critical controls
16183 	 * (SYN, RST) to send, then transmit; otherwise, investigate
16184 	 * further.
16185 	 */
16186 	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
16187 	if (tp->t_idle_reduce) {
16188 		if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
16189 			rack_cc_after_idle(rack, tp);
16190 	}
16191 	tp->t_flags &= ~TF_LASTIDLE;
16192 	if (idle) {
16193 		if (tp->t_flags & TF_MORETOCOME) {
16194 			tp->t_flags |= TF_LASTIDLE;
16195 			idle = 0;
16196 		}
16197 	}
16198 	if ((tp->snd_una == tp->snd_max) &&
16199 	    rack->r_ctl.rc_went_idle_time &&
16200 	    TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
16201 		idle = cts - rack->r_ctl.rc_went_idle_time;
16202 		if (idle > rack_min_probertt_hold) {
16203 			/* Count as a probe rtt */
16204 			if (rack->in_probe_rtt == 0) {
16205 				rack->r_ctl.rc_lower_rtt_us_cts = cts;
16206 				rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
16207 				rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
16208 				rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
16209 			} else {
16210 				rack_exit_probertt(rack, cts);
16211 			}
16212 		}
16213 		idle = 0;
16214 	}
16215 	if (rack_use_fsb && (rack->r_fsb_inited == 0))
16216 		rack_init_fsb_block(tp, rack);
16217 again:
16218 	/*
16219 	 * If we've recently taken a timeout, snd_max will be greater than
16220 	 * snd_nxt.  There may be SACK information that allows us to avoid
16221 	 * resending already delivered data.  Adjust snd_nxt accordingly.
16222 	 */
16223 	sendalot = 0;
16224 	cts = tcp_get_usecs(&tv);
16225 	ms_cts = tcp_tv_to_mssectick(&tv);
16226 	tso = 0;
16227 	mtu = 0;
16228 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16229 	minseg = segsiz;
16230 	if (rack->r_ctl.rc_pace_max_segs == 0)
16231 		pace_max_seg = rack->rc_user_set_max_segs * segsiz;
16232 	else
16233 		pace_max_seg = rack->r_ctl.rc_pace_max_segs;
16234 	sb_offset = tp->snd_max - tp->snd_una;
16235 	cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16236 	flags = tcp_outflags[tp->t_state];
16237 	while (rack->rc_free_cnt < rack_free_cache) {
16238 		rsm = rack_alloc(rack);
16239 		if (rsm == NULL) {
16240 			if (inp->inp_hpts_calls)
16241 				/* Retry in a ms */
16242 				slot = (1 * HPTS_USEC_IN_MSEC);
16243 			so = inp->inp_socket;
16244 			sb = &so->so_snd;
16245 			goto just_return_nolock;
16246 		}
16247 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
16248 		rack->rc_free_cnt++;
16249 		rsm = NULL;
16250 	}
16251 	if (inp->inp_hpts_calls)
16252 		inp->inp_hpts_calls = 0;
16253 	sack_rxmit = 0;
16254 	len = 0;
16255 	rsm = NULL;
16256 	if (flags & TH_RST) {
16257 		SOCKBUF_LOCK(&inp->inp_socket->so_snd);
16258 		so = inp->inp_socket;
16259 		sb = &so->so_snd;
16260 		goto send;
16261 	}
16262 	if (rack->r_ctl.rc_resend) {
16263 		/* Retransmit timer */
16264 		rsm = rack->r_ctl.rc_resend;
16265 		rack->r_ctl.rc_resend = NULL;
16266 		rsm->r_flags &= ~RACK_TLP;
16267 		len = rsm->r_end - rsm->r_start;
16268 		sack_rxmit = 1;
16269 		sendalot = 0;
16270 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16271 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16272 			 __func__, __LINE__,
16273 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
16274 		sb_offset = rsm->r_start - tp->snd_una;
16275 		if (len >= segsiz)
16276 			len = segsiz;
16277 	} else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
16278 		/* We have a retransmit that takes precedence */
16279 		rsm->r_flags &= ~RACK_TLP;
16280 		if ((!IN_FASTRECOVERY(tp->t_flags)) &&
16281 		    ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
16282 			/* Enter recovery if not induced by a time-out */
16283 			rack->r_ctl.rc_rsm_start = rsm->r_start;
16284 			rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
16285 			rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
16286 			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
16287 		}
16288 #ifdef INVARIANTS
16289 		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
16290 			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
16291 			      tp, rack, rsm, rsm->r_start, tp->snd_una);
16292 		}
16293 #endif
16294 		len = rsm->r_end - rsm->r_start;
16295 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16296 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16297 			 __func__, __LINE__,
16298 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
16299 		sb_offset = rsm->r_start - tp->snd_una;
16300 		sendalot = 0;
16301 		if (len >= segsiz)
16302 			len = segsiz;
16303 		if (len > 0) {
16304 			sack_rxmit = 1;
16305 			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
16306 			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
16307 			    min(len, segsiz));
16308 			counter_u64_add(rack_rtm_prr_retran, 1);
16309 		}
16310 	} else if (rack->r_ctl.rc_tlpsend) {
16311 		/* Tail loss probe */
16312 		long cwin;
16313 		long tlen;
16314 
16315 		doing_tlp = 1;
16316 		/*
16317 		 * Check if we can do a TLP with a RACK'd packet;
16318 		 * this can happen if we are not doing the rack
16319 		 * cheat, and we skipped to a TLP and it
16320 		 * went off.
16321 		 */
16322 		rsm = rack->r_ctl.rc_tlpsend;
16323 		rsm->r_flags |= RACK_TLP;
16324 
16325 		rack->r_ctl.rc_tlpsend = NULL;
16326 		sack_rxmit = 1;
16327 		tlen = rsm->r_end - rsm->r_start;
16328 		if (tlen > segsiz)
16329 			tlen = segsiz;
16330 		tp->t_sndtlppack++;
16331 		tp->t_sndtlpbyte += tlen;
16332 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16333 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16334 			 __func__, __LINE__,
16335 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
16336 		sb_offset = rsm->r_start - tp->snd_una;
16337 		cwin = min(tp->snd_wnd, tlen);
16338 		len = cwin;
16339 	}
16340 	if (rack->r_must_retran &&
16341 	    (rsm == NULL)) {
16342 		/*
16343 		 * Non-SACK, and we had an RTO or MTU change; we
16344 		 * need to retransmit until we reach
16345 		 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto).
16346 		 */
16347 		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
16348 			int sendwin, flight;
16349 
16350 			sendwin = min(tp->snd_wnd, tp->snd_cwnd);
16351 			flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
16352 			if (flight >= sendwin) {
16353 				so = inp->inp_socket;
16354 				sb = &so->so_snd;
16355 				goto just_return_nolock;
16356 			}
16357 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
16358 			KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack));
16359 			if (rsm == NULL) {
16360 				/* TSNH */
16361 				rack->r_must_retran = 0;
16362 				rack->r_ctl.rc_out_at_rto = 0;
16364 				so = inp->inp_socket;
16365 				sb = &so->so_snd;
16366 				goto just_return_nolock;
16367 			}
16368 			sack_rxmit = 1;
16369 			len = rsm->r_end - rsm->r_start;
16370 			sendalot = 0;
16371 			sb_offset = rsm->r_start - tp->snd_una;
16372 			if (len >= segsiz)
16373 				len = segsiz;
16374 		} else {
16375 			/* We must be done if there is nothing outstanding */
16376 			rack->r_must_retran = 0;
16377 			rack->r_ctl.rc_out_at_rto = 0;
16378 		}
16379 	}
16380 	/*
16381 	 * Enforce a connection sendmap count limit if set,
16382 	 * as long as we are not retransmitting.
16383 	 */
16384 	if ((rsm == NULL) &&
16385 	    (rack->do_detection == 0) &&
16386 	    (V_tcp_map_entries_limit > 0) &&
16387 	    (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
16388 		counter_u64_add(rack_to_alloc_limited, 1);
16389 		if (!rack->alloc_limit_reported) {
16390 			rack->alloc_limit_reported = 1;
16391 			counter_u64_add(rack_alloc_limited_conns, 1);
16392 		}
16393 		so = inp->inp_socket;
16394 		sb = &so->so_snd;
16395 		goto just_return_nolock;
16396 	}
16397 	if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
16398 		/* we are retransmitting the fin */
16399 		len--;
16400 		if (len) {
16401 			/*
16402 			 * When retransmitting data do *not* include the
16403 			 * FIN. This could happen from a TLP probe.
16404 			 */
16405 			flags &= ~TH_FIN;
16406 		}
16407 	}
16408 #ifdef INVARIANTS
16409 	/* For debugging */
16410 	rack->r_ctl.rc_rsm_at_retran = rsm;
16411 #endif
16412 	if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
16413 	    ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
16414 		int ret;
16415 
16416 		ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len);
16417 		if (ret == 0)
16418 			return (0);
16419 	}
16420 	so = inp->inp_socket;
16421 	sb = &so->so_snd;
16422 	if (do_a_prefetch == 0) {
16423 		kern_prefetch(sb, &do_a_prefetch);
16424 		do_a_prefetch = 1;
16425 	}
16426 #ifdef NETFLIX_SHARED_CWND
16427 	if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
16428 	    rack->rack_enable_scwnd) {
16429 		/* We are doing cwnd sharing */
16430 		if (rack->gp_ready &&
16431 		    (rack->rack_attempted_scwnd == 0) &&
16432 		    (rack->r_ctl.rc_scw == NULL) &&
16433 		    tp->t_lib) {
16434 			/* The pcbid is in, lets make an attempt */
16435 			counter_u64_add(rack_try_scwnd, 1);
16436 			rack->rack_attempted_scwnd = 1;
16437 			rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
16438 								   &rack->r_ctl.rc_scw_index,
16439 								   segsiz);
16440 		}
16441 		if (rack->r_ctl.rc_scw &&
16442 		    (rack->rack_scwnd_is_idle == 1) &&
16443 		    sbavail(&so->so_snd)) {
16444 			/* we are no longer out of data */
16445 			tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
16446 			rack->rack_scwnd_is_idle = 0;
16447 		}
16448 		if (rack->r_ctl.rc_scw) {
16449 			/* First lets update and get the cwnd */
16450 			rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
16451 								    rack->r_ctl.rc_scw_index,
16452 								    tp->snd_cwnd, tp->snd_wnd, segsiz);
16453 		}
16454 	}
16455 #endif
16456 	/*
16457 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
16458 	 * state flags.
16459 	 */
16460 	if (tp->t_flags & TF_NEEDFIN)
16461 		flags |= TH_FIN;
16462 	if (tp->t_flags & TF_NEEDSYN)
16463 		flags |= TH_SYN;
16464 	if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
16465 		void *end_rsm;
16466 		end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
16467 		if (end_rsm)
16468 			kern_prefetch(end_rsm, &prefetch_rsm);
16469 		prefetch_rsm = 1;
16470 	}
16471 	SOCKBUF_LOCK(sb);
16472 	/*
16473 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
16474 	 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
16475 	 * negative length.  This can also occur when TCP opens up its
16476 	 * congestion window while receiving additional duplicate acks after
16477 	 * fast-retransmit because TCP will reset snd_nxt to snd_max after
16478 	 * the fast-retransmit.
16479 	 *
16480 	 * In the normal retransmit-FIN-only case, however, snd_nxt will be
16481 	 * set to snd_una, the sb_offset will be 0, and the length may wind
16482 	 * up 0.
16483 	 *
16484 	 * If sack_rxmit is true we are retransmitting from the scoreboard
16485 	 * in which case len is already set.
16486 	 */
16487 	if ((sack_rxmit == 0) &&
16488 	    (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
16489 		uint32_t avail;
16490 
16491 		avail = sbavail(sb);
16492 		if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
16493 			sb_offset = tp->snd_nxt - tp->snd_una;
16494 		else
16495 			sb_offset = 0;
16496 		if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
16497 			if (rack->r_ctl.rc_tlp_new_data) {
16498 				/* TLP is forcing out new data */
16499 				if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
16500 					rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
16501 				}
16502 				if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
16503 					if (tp->snd_wnd > sb_offset)
16504 						len = tp->snd_wnd - sb_offset;
16505 					else
16506 						len = 0;
16507 				} else {
16508 					len = rack->r_ctl.rc_tlp_new_data;
16509 				}
16510 				rack->r_ctl.rc_tlp_new_data = 0;
16511 				doing_tlp = 1;
16512 			}  else {
16513 				len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
16514 			}
16515 			if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
16516 				/*
16517 				 * For prr=off, we need to send only 1 MSS
16518 				 * at a time. We do this because another sack could
16519 				 * be arriving that causes us to send retransmits and
16520 				 * we don't want to be on a long pace due to a larger send
16521 				 * that keeps us from sending out the retransmit.
16522 				 */
16523 				len = segsiz;
16524 			}
16525 		} else {
16526 			uint32_t outstanding;
16527 			/*
16528 			 * We are inside of a fast recovery episode; this
16529 			 * is caused by a SACK or 3 dup acks. At this point
16530 			 * we have sent all the retransmissions and we rely
16531 			 * on PRR to dictate what we will send in the form of
16532 			 * new data.
16533 			 */
16534 
16535 			outstanding = tp->snd_max - tp->snd_una;
16536 			if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
16537 				if (tp->snd_wnd > outstanding) {
16538 					len = tp->snd_wnd - outstanding;
16539 					/* Check to see if we have the data */
16540 					if ((sb_offset + len) > avail) {
16541 						/* It does not all fit */
16542 						if (avail > sb_offset)
16543 							len = avail - sb_offset;
16544 						else
16545 							len = 0;
16546 					}
16547 				} else {
16548 					len = 0;
16549 				}
16550 			} else if (avail > sb_offset) {
16551 				len = avail - sb_offset;
16552 			} else {
16553 				len = 0;
16554 			}
16555 			if (len > 0) {
16556 				if (len > rack->r_ctl.rc_prr_sndcnt) {
16557 					len = rack->r_ctl.rc_prr_sndcnt;
16558 				}
16559 				if (len > 0) {
16560 					sub_from_prr = 1;
16561 					counter_u64_add(rack_rtm_prr_newdata, 1);
16562 				}
16563 			}
16564 			if (len > segsiz) {
16565 				/*
16566 				 * We should never send more than a MSS when
16567 				 * retransmitting or sending new data in prr
16568 				 * mode unless the override flag is on. Most
16569 				 * likely the PRR algorithm is not going to
16570 				 * let us send a lot as well :-)
16571 				 */
16572 				if (rack->r_ctl.rc_prr_sendalot == 0) {
16573 					len = segsiz;
16574 				}
16575 			} else if (len < segsiz) {
16576 				/*
16577 				 * Do we send any? The idea here is if the
16578 				 * send empties the socket buffer we want to
16579 				 * do it. However, if not, then let's just wait
16580 				 * for our prr_sndcnt to get bigger.
16581 				 */
16582 				long leftinsb;
16583 
16584 				leftinsb = sbavail(sb) - sb_offset;
16585 				if (leftinsb > len) {
16586 					/* This send does not empty the sb */
16587 					len = 0;
16588 				}
16589 			}
16590 		}
16591 	} else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
16592 		/*
16593 		 * If the connection is not established
16594 		 * and we are not doing FAST OPEN,
16595 		 * no data please.
16596 		 */
16597 		if ((sack_rxmit == 0) &&
16598 		    (!IS_FASTOPEN(tp->t_flags))){
16599 			len = 0;
16600 			sb_offset = 0;
16601 		}
16602 	}
16603 	if (prefetch_so_done == 0) {
16604 		kern_prefetch(so, &prefetch_so_done);
16605 		prefetch_so_done = 1;
16606 	}
16607 	/*
16608 	 * Lop off SYN bit if it has already been sent.  However, if this is
16609 	 * SYN-SENT state and if segment contains data and if we don't know
16610 	 * that foreign host supports TAO, suppress sending segment.
16611 	 */
16612 	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
16613 	    ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
16614 		/*
16615 		 * When sending additional segments following a TFO SYN|ACK,
16616 		 * do not include the SYN bit.
16617 		 */
16618 		if (IS_FASTOPEN(tp->t_flags) &&
16619 		    (tp->t_state == TCPS_SYN_RECEIVED))
16620 			flags &= ~TH_SYN;
16621 	}
16622 	/*
16623 	 * Be careful not to send data and/or FIN on SYN segments. This
16624 	 * measure is needed to prevent interoperability problems with not
16625 	 * fully conformant TCP implementations.
16626 	 */
16627 	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
16628 		len = 0;
16629 		flags &= ~TH_FIN;
16630 	}
16631 	/*
16632 	 * On TFO sockets, ensure no data is sent in the following cases:
16633 	 *
16634 	 *  - When retransmitting SYN|ACK on a passively-created socket
16635 	 *
16636 	 *  - When retransmitting SYN on an actively created socket
16637 	 *
16638 	 *  - When sending a zero-length cookie (cookie request) on an
16639 	 *    actively created socket
16640 	 *
16641 	 *  - When the socket is in the CLOSED state (RST is being sent)
16642 	 */
16643 	if (IS_FASTOPEN(tp->t_flags) &&
16644 	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
16645 	     ((tp->t_state == TCPS_SYN_SENT) &&
16646 	      (tp->t_tfo_client_cookie_len == 0)) ||
16647 	     (flags & TH_RST))) {
16648 		sack_rxmit = 0;
16649 		len = 0;
16650 	}
16651 	/* Without fast-open there should never be data sent on a SYN */
16652 	if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
16653 		tp->snd_nxt = tp->iss;
16654 		len = 0;
16655 	}
16656 	if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
16657 		/* We only send 1 MSS if we have a DSACK block */
16658 		add_flag |= RACK_SENT_W_DSACK;
16659 		len = segsiz;
16660 	}
16661 	orig_len = len;
16662 	if (len <= 0) {
16663 		/*
16664 		 * If FIN has been sent but not acked, but we haven't been
16665 		 * called to retransmit, len will be < 0.  Otherwise, window
16666 		 * shrank after we sent into it.  If window shrank to 0,
16667 		 * cancel pending retransmit, pull snd_nxt back to (closed)
16668 		 * window, and set the persist timer if it isn't already
16669 		 * going.  If the window didn't close completely, just wait
16670 		 * for an ACK.
16671 		 *
16672 		 * We also do a general check here to ensure that we will
16673 		 * set the persist timer when we have data to send, but a
16674 		 * 0-byte window. This makes sure the persist timer is set
16675 		 * even if the packet hits one of the "goto send" lines
16676 		 * below.
16677 		 */
16678 		len = 0;
16679 		if ((tp->snd_wnd == 0) &&
16680 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16681 		    (tp->snd_una == tp->snd_max) &&
16682 		    (sb_offset < (int)sbavail(sb))) {
16683 			rack_enter_persist(tp, rack, cts);
16684 		}
16685 	} else if ((rsm == NULL) &&
16686 		   (doing_tlp == 0) &&
16687 		   (len < pace_max_seg)) {
16688 		/*
16689 		 * We are not sending a maximum sized segment for
16690 		 * some reason. Should we not send anything (think
16691 		 * sws or persists)?
16692 		 */
16693 		if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16694 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16695 		    (len < minseg) &&
16696 		    (len < (int)(sbavail(sb) - sb_offset))) {
16697 			/*
16698 			 * Here the rwnd is less than
16699 			 * the minimum pacing size; this is not a retransmit,
16700 			 * we are established, and
16701 			 * the send is not the last in the socket buffer,
16702 			 * so we send nothing, and we may enter persists
16703 			 * if nothing is outstanding.
16704 			 */
16705 			len = 0;
16706 			if (tp->snd_max == tp->snd_una) {
16707 				/*
16708 				 * Nothing out we can
16709 				 * go into persists.
16710 				 */
16711 				rack_enter_persist(tp, rack, cts);
16712 			}
16713 		     } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
16714 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16715 			   (len < (int)(sbavail(sb) - sb_offset)) &&
16716 			   (len < minseg)) {
16717 			/*
16718 			 * Here we are not retransmitting, and
16719 			 * the cwnd is not so small that we could
16720 			 * not send at least a min size (rxt timer
16721 			 * not having gone off). We have 2 segments or
16722 			 * more already in flight, it's not the tail end
16723 			 * of the socket buffer, and the cwnd is blocking
16724 			 * us from sending out a minimum pacing segment size.
16725 			 * Let's not send anything.
16726 			 */
16727 			len = 0;
16728 		} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
16729 			    min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16730 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16731 			   (len < (int)(sbavail(sb) - sb_offset)) &&
16732 			   (TCPS_HAVEESTABLISHED(tp->t_state))) {
16733 			/*
16734 			 * Here we have a send window but we have
16735 			 * filled it up and we can't send another pacing segment.
16736 			 * We also have in flight more than 2 segments
16737 			 * and we are not completing the sb, i.e. we allow
16738 			 * the last bytes of the sb to go out even if
16739 			 * it's not a full pacing segment.
16740 			 */
16741 			len = 0;
16742 		} else if ((rack->r_ctl.crte != NULL) &&
16743 			   (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
16744 			   (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
16745 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
16746 			   (len < (int)(sbavail(sb) - sb_offset))) {
16747 			/*
16748 			 * Here we are doing hardware pacing, this is not a TLP,
16749 			 * we are not sending a pace max segment size, there is rwnd
16750 			 * room to send at least N pace_max_seg, the cwnd is greater
16751 			 * than or equal to a full pacing segments plus 4 mss and we have 2 or
16752 			 * more segments in flight and its not the tail of the socket buffer.
16753 			 *
16754 			 * We don't want to send instead we need to get more ack's in to
16755 			 * allow us to send a full pacing segment. Normally, if we are pacing
16756 			 * about the right speed, we should have finished our pacing
16757 			 * send as most of the acks have come back if we are at the
16758 			 * right rate. This is a bit fuzzy since return path delay
16759 			 * can delay the acks, which is why we want to make sure we
16760 			 * have cwnd space to have a bit more than a max pace segments in flight.
16761 			 *
16762 			 * If we have not gotten our acks back, we are pacing at too high a
16763 			 * rate; delaying will not hurt and will bring our GP estimate down by
16764 			 * injecting the delay. If we don't do this we will send
16765 			 * 2 MSS out in response to the acks being clocked in which
16766 			 * defeats the point of hw-pacing (i.e. to help us get
16767 			 * larger TSO's out).
16768 			 */
16769 			len = 0;
16770 
16771 		}
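		/*
		 * Numeric sketch of the gate above (illustrative): with
		 * segsiz = 1448 and pace_max_seg = 10 * 1448 = 14480, we
		 * hold off only when snd_wnd can absorb at least
		 * rack_hw_rwnd_factor full bursts, cwnd_to_use >=
		 * 14480 + 4 * 1448, and 2 * 1448 bytes or more are
		 * already in flight.
		 */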
16772 
16773 	}
16774 	/* len will be >= 0 after this point. */
16775 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
16776 	rack_sndbuf_autoscale(rack);
16777 	/*
16778 	 * Decide if we can use TCP Segmentation Offloading (if supported by
16779 	 * hardware).
16780 	 *
16781 	 * TSO may only be used if we are in a pure bulk sending state.  The
16782 	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
16783 	 * options prevent using TSO.  With TSO the TCP header is the same
16784 	 * (except for the sequence number) for all generated packets.  This
16785 	 * makes it impossible to transmit any options which vary per
16786 	 * generated segment or packet.
16787 	 *
16788 	 * IPv4 handling has a clear separation of ip options and ip header
16789 	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
16790 	 * the right thing below to provide length of just ip options and thus
16791 	 * checking for ipoptlen is enough to decide if ip options are present.
16792 	 */
16793 	ipoptlen = 0;
16794 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16795 	/*
16796 	 * Pre-calculate here as we save another lookup into the darknesses
16797 	 * of IPsec that way and can actually decide if TSO is ok.
16798 	 */
16799 #ifdef INET6
16800 	if (isipv6 && IPSEC_ENABLED(ipv6))
16801 		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
16802 #ifdef INET
16803 	else
16804 #endif
16805 #endif				/* INET6 */
16806 #ifdef INET
16807 		if (IPSEC_ENABLED(ipv4))
16808 			ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
16809 #endif				/* INET */
16810 #endif
16811 
16812 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16813 	ipoptlen += ipsec_optlen;
16814 #endif
16815 	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
16816 	    (tp->t_port == 0) &&
16817 	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
16818 	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
16819 	    ipoptlen == 0)
16820 		tso = 1;
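	/*
	 * Decide whether TH_FIN may remain set: on a SACK retransmission
	 * keep it only if the rsm itself carried the FIN; otherwise keep it
	 * only if this send reaches the end of the data queued in the
	 * socket buffer.
	 */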
16821 	{
16822 		uint32_t outstanding;
16823 
16824 		outstanding = tp->snd_max - tp->snd_una;
16825 		if (tp->t_flags & TF_SENTFIN) {
16826 			/*
16827 			 * If we sent a fin, snd_max is 1 higher than
16828 			 * snd_una
16829 			 */
16830 			outstanding--;
16831 		}
16832 		if (sack_rxmit) {
16833 			if ((rsm->r_flags & RACK_HAS_FIN) == 0)
16834 				flags &= ~TH_FIN;
16835 		} else {
16836 			if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
16837 				   sbused(sb)))
16838 				flags &= ~TH_FIN;
16839 		}
16840 	}
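	/*
	 * Compute the receive window we could advertise: the free space in
	 * the receive socket buffer, clamped to [0, TCP_MAXWIN << rcv_scale].
	 */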
16841 	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
16842 	    (long)TCP_MAXWIN << tp->rcv_scale);
16843 
16844 	/*
16845 	 * Sender silly window avoidance.   We transmit under the following
16846 	 * conditions when len is non-zero:
16847 	 *
16848 	 * - We have a full segment (or more with TSO)
16849 	 * - This is the last buffer in a write()/send() and we are idle or running NODELAY
16850 	 * - We've timed out (e.g. persist timer)
16851 	 * - We have more than 1/2 the maximum send window's worth of data (receiver may have limited the window size)
16852 	 * - We need to retransmit
16853 	 */
16854 	if (len) {
16855 		if (len >= segsiz) {
16856 			goto send;
16857 		}
16858 		/*
16859 		 * NOTE! on localhost connections an 'ack' from the remote
16860 		 * end may occur synchronously with the output and cause us
16861 		 * to flush a buffer queued with moretocome.  XXX
16862 		 *
16863 		 */
16864 		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
16865 		    (idle || (tp->t_flags & TF_NODELAY)) &&
16866 		    ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
16867 		    (tp->t_flags & TF_NOPUSH) == 0) {
16868 			pass = 2;
16869 			goto send;
16870 		}
16871 		if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
16872 			pass = 22;
16873 			goto send;
16874 		}
16875 		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
16876 			pass = 4;
16877 			goto send;
16878 		}
16879 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {	/* retransmit case */
16880 			pass = 5;
16881 			goto send;
16882 		}
16883 		if (sack_rxmit) {
16884 			pass = 6;
16885 			goto send;
16886 		}
16887 		if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
16888 		    (ctf_outstanding(tp) < (segsiz * 2))) {
16889 			/*
16890 			 * We have less than two MSS outstanding (delayed ack)
16891 			 * and our rwnd will not let us send a full-sized
16892 			 * MSS. Let's go ahead and let this small segment
16893 			 * out, because we want to try to have at least two
16894 			 * packets in flight so we are not caught by delayed ack.
16895 			 */
16896 			pass = 12;
16897 			goto send;
16898 		}
16899 	}
16900 	/*
16901 	 * Sending of standalone window updates.
16902 	 *
16903 	 * Window updates are important when we close our window due to a
16904 	 * full socket buffer and are opening it again after the application
16905 	 * reads data from it.  Once the window has opened again and the
16906 	 * remote end starts to send again the ACK clock takes over and
16907 	 * provides the most current window information.
16908 	 *
16909 	 * We must avoid the silly window syndrome whereby every read from
16910 	 * the receive buffer, no matter how small, causes a window update
16911 	 * to be sent.  We also should avoid sending a flurry of window
16912 	 * updates when the socket buffer has queued a lot of data and the
16913 	 * application is doing small reads.
16914 	 *
16915 	 * Prevent a flurry of pointless window updates by only sending an
16916 	 * update when we can increase the advertised window by more than
16917 	 * 1/4th of the socket buffer capacity.  When the buffer is getting
16918 	 * full or is very small, be more aggressive and send an update
16919 	 * whenever we can increase by two MSS-sized segments. In all other
16920 	 * situations the ACK's to new incoming data will carry further
16921 	 * window increases.
16922 	 *
16923 	 * Don't send an independent window update if a delayed ACK is
16924 	 * pending (it will get piggy-backed on it) or the remote side
16925 	 * already has done a half-close and won't send more data.  Skip
16926 	 * this if the connection is in T/TCP half-open state.
16927 	 */
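	/*
	 * Illustrative example (hypothetical numbers): with a 64KB receive
	 * buffer and a segsiz of 1448, an update goes out once we can open
	 * the window by max(2 * 1448, 65536 / 4) = 16384 bytes, or sooner
	 * when the buffer is nearly full or very small (see below).
	 */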
16928 	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
16929 	    !(tp->t_flags & TF_DELACK) &&
16930 	    !TCPS_HAVERCVDFIN(tp->t_state)) {
16931 		/*
16932 		 * "adv" is the amount we could increase the window, taking
16933 		 * into account that we are limited by TCP_MAXWIN <<
16934 		 * tp->rcv_scale.
16935 		 */
16936 		int32_t adv;
16937 		int oldwin;
16938 
16939 		adv = recwin;
16940 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
16941 			oldwin = (tp->rcv_adv - tp->rcv_nxt);
16942 			if (adv > oldwin)
16943 			    adv -= oldwin;
16944 			else {
16945 				/* We can't increase the window */
16946 				adv = 0;
16947 			}
16948 		} else
16949 			oldwin = 0;
16950 
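		/*
		 * At this point oldwin is the portion of the previously
		 * advertised window the peer may still consume, and adv is
		 * only the amount beyond that we could newly offer.
		 */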
16951 		/*
16952 		 * If the new window size ends up being the same as or less
16953 		 * than the old size when it is scaled, then don't force
16954 		 * a window update.
16955 		 */
16956 		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
16957 			goto dontupdate;
16958 
16959 		if (adv >= (int32_t)(2 * segsiz) &&
16960 		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
16961 		     recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
16962 		     so->so_rcv.sb_hiwat <= 8 * segsiz)) {
16963 			pass = 7;
16964 			goto send;
16965 		}
16966 		if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
16967 			pass = 23;
16968 			goto send;
16969 		}
16970 	}
16971 dontupdate:
16972 
16973 	/*
16974 	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
16975 	 * is also a catch-all for the retransmit timer timeout case.
16976 	 */
16977 	if (tp->t_flags & TF_ACKNOW) {
16978 		pass = 8;
16979 		goto send;
16980 	}
16981 	if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
16982 		pass = 9;
16983 		goto send;
16984 	}
16985 	/*
16986 	 * If our state indicates that FIN should be sent and we have not
16987 	 * yet done so, then we need to send.
16988 	 */
16989 	if ((flags & TH_FIN) &&
16990 	    (tp->snd_nxt == tp->snd_una)) {
16991 		pass = 11;
16992 		goto send;
16993 	}
16994 	/*
16995 	 * No reason to send a segment, just return.
16996 	 */
16997 just_return:
16998 	SOCKBUF_UNLOCK(sb);
16999 just_return_nolock:
17000 	{
17001 		int app_limited = CTF_JR_SENT_DATA;
17002 
17003 		if (tot_len_this_send > 0) {
17004 			/* Record recwin for the fsb; snd_nxt is brought up to snd_max below */
17005 			rack->r_ctl.fsb.recwin = recwin;
17006 			slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
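			/*
			 * If everything lines up (no SYN/FIN, no IP options,
			 * no TCP options beyond an appended timestamp, nothing
			 * left to retransmit, established state, and at least
			 * one more full segment of data beyond what we just
			 * sent) arm the fast-send block (fsb) so the next
			 * output can take the fast path.
			 */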
17007 			if ((error == 0) &&
17008 			    rack_use_rfo &&
17009 			    ((flags & (TH_SYN|TH_FIN)) == 0) &&
17010 			    (ipoptlen == 0) &&
17011 			    (tp->snd_nxt == tp->snd_max) &&
17012 			    (tp->rcv_numsacks == 0) &&
17013 			    rack->r_fsb_inited &&
17014 			    TCPS_HAVEESTABLISHED(tp->t_state) &&
17015 			    (rack->r_must_retran == 0) &&
17016 			    ((tp->t_flags & TF_NEEDFIN) == 0) &&
17017 			    (len > 0) && (orig_len > 0) &&
17018 			    (orig_len > len) &&
17019 			    ((orig_len - len) >= segsiz) &&
17020 			    ((optlen == 0) ||
17021 			     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
17022 				/* We can send at least one more MSS using our fsb */
17023 
17024 				rack->r_fast_output = 1;
17025 				rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
17026 				rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
17027 				rack->r_ctl.fsb.tcp_flags = flags;
17028 				rack->r_ctl.fsb.left_to_send = orig_len - len;
17029 				KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
17030 					("rack:%p left_to_send:%u sbavail:%u out:%u",
17031 					rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
17032 					 (tp->snd_max - tp->snd_una)));
17033 				if (rack->r_ctl.fsb.left_to_send < segsiz)
17034 					rack->r_fast_output = 0;
17035 				else {
17036 					if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
17037 						rack->r_ctl.fsb.rfo_apply_push = 1;
17038 					else
17039 						rack->r_ctl.fsb.rfo_apply_push = 0;
17040 				}
17041 			} else
17042 				rack->r_fast_output = 0;
17043 
17044 
17045 			rack_log_fsb(rack, tp, so, flags,
17046 				     ipoptlen, orig_len, len, 0,
17047 				     1, optlen, __LINE__, 1);
17048 			if (SEQ_GT(tp->snd_max, tp->snd_nxt))
17049 				tp->snd_nxt = tp->snd_max;
17050 		} else {
17051 			int end_window = 0;
17052 			uint32_t seq = tp->gput_ack;
17053 
17054 			rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17055 			if (rsm) {
17056 				/*
17057 				 * Mark the last rsm sent before this just-return (hinting
17058 				 * that a delayed ACK may play a role in any RTT measurement).
17059 				 */
17060 				rsm->r_just_ret = 1;
17061 			}
17062 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
17063 			rack->r_ctl.rc_agg_delayed = 0;
17064 			rack->r_early = 0;
17065 			rack->r_late = 0;
17066 			rack->r_ctl.rc_agg_early = 0;
17067 			if ((ctf_outstanding(tp) +
17068 			     min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
17069 				 minseg)) >= tp->snd_wnd) {
17070 				/* We are limited by the rwnd */
17071 				app_limited = CTF_JR_RWND_LIMITED;
17072 				if (IN_FASTRECOVERY(tp->t_flags))
17073 				    rack->r_ctl.rc_prr_sndcnt = 0;
17074 			} else if (ctf_outstanding(tp) >= sbavail(sb)) {
17075 				/* We are limited by what's available -- app limited */
17076 				app_limited = CTF_JR_APP_LIMITED;
17077 				if (IN_FASTRECOVERY(tp->t_flags))
17078 				    rack->r_ctl.rc_prr_sndcnt = 0;
17079 			} else if ((idle == 0) &&
17080 				   ((tp->t_flags & TF_NODELAY) == 0) &&
17081 				   ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17082 				   (len < segsiz)) {
17083 				/*
17084 				 * TF_NODELAY is not on and the
17085 				 * user is sending less than 1 MSS. This
17086 				 * triggers SWS avoidance so we
17087 				 * don't send. Another app-limited case.
17088 				 */
17089 				app_limited = CTF_JR_APP_LIMITED;
17090 			} else if (tp->t_flags & TF_NOPUSH) {
17091 				/*
17092 				 * The user has requested no push of
17093 				 * the last segment and we are
17094 				 * at the last segment. Another app
17095 				 * limited case.
17096 				 */
17097 				app_limited = CTF_JR_APP_LIMITED;
17098 			} else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
17099 				/* It's the cwnd */
17100 				app_limited = CTF_JR_CWND_LIMITED;
17101 			} else if (IN_FASTRECOVERY(tp->t_flags) &&
17102 				   (rack->rack_no_prr == 0) &&
17103 				   (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
17104 				app_limited = CTF_JR_PRR;
17105 			} else {
17106 				/* Now why are we not sending here? */
17107 #ifdef NOW
17108 #ifdef INVARIANTS
17109 				panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
17110 #endif
17111 #endif
17112 				app_limited = CTF_JR_ASSESSING;
17113 			}
17114 			/*
17115 			 * App limited in some fashion; for our pacing GP
17116 			 * measurements we don't want any gap (even cwnd).
17117 			 * Close down the measurement window.
17118 			 */
17119 			if (rack_cwnd_block_ends_measure &&
17120 			    ((app_limited == CTF_JR_CWND_LIMITED) ||
17121 			     (app_limited == CTF_JR_PRR))) {
17122 				/*
17123 				 * The reason we are not sending is
17124 				 * the cwnd (or prr). We have been configured
17125 				 * to end the measurement window in
17126 				 * this case.
17127 				 */
17128 				end_window = 1;
17129 			} else if (rack_rwnd_block_ends_measure &&
17130 				   (app_limited == CTF_JR_RWND_LIMITED)) {
17131 				/*
17132 				 * We are rwnd limited and have been
17133 				 * configured to end the measurement
17134 				 * window in this case.
17135 				 */
17136 				end_window = 1;
17137 			} else if (app_limited == CTF_JR_APP_LIMITED) {
17138 				/*
17139 				 * A true application-limited period: we have
17140 				 * run out of data.
17141 				 */
17142 				end_window = 1;
17143 			} else if (app_limited == CTF_JR_ASSESSING) {
17144 				/*
17145 				 * In the assessing case we hit the end of
17146 				 * the if/else chain and had no known reason.
17147 				 * This will panic us under INVARIANTS.
17148 				 *
17149 				 * If we see this in the logs we need to
17150 				 * investigate which reason we missed.
17151 				 */
17152 				end_window = 1;
17153 			}
17154 			if (end_window) {
17155 				uint8_t log = 0;
17156 
17157 				if ((tp->t_flags & TF_GPUTINPROG) &&
17158 				    SEQ_GT(tp->gput_ack, tp->snd_max)) {
17159 					/* Mark the last packet as app limited */
17160 					tp->gput_ack = tp->snd_max;
17161 					log = 1;
17162 				}
17163 				rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17164 				if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
17165 					if (rack->r_ctl.rc_app_limited_cnt == 0)
17166 						rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
17167 					else {
17168 						 * Go out to the end of the app-limited chain, mark
17169 						 * this new one as the next, and move rc_end_appl
17170 						 * up to it.
17171 						 * to this guy.
17172 						 */
17173 						if (rack->r_ctl.rc_end_appl)
17174 							rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
17175 						rack->r_ctl.rc_end_appl = rsm;
17176 					}
17177 					rsm->r_flags |= RACK_APP_LIMITED;
17178 					rack->r_ctl.rc_app_limited_cnt++;
17179 				}
17180 				if (log)
17181 					rack_log_pacing_delay_calc(rack,
17182 								   rack->r_ctl.rc_app_limited_cnt, seq,
17183 								   tp->gput_ack, 0, 0, 4, __LINE__, NULL);
17184 			}
17185 		}
17186 		if (slot) {
17187 			/* set the rack tcb into the slot N */
17188 			counter_u64_add(rack_paced_segments, 1);
17189 		} else if (tot_len_this_send) {
17190 			counter_u64_add(rack_unpaced_segments, 1);
17191 		}
17192 		/* Check if we need to go into persists or not */
17193 		if ((tp->snd_max == tp->snd_una) &&
17194 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
17195 		    sbavail(sb) &&
17196 		    (sbavail(sb) > tp->snd_wnd) &&
17197 		    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
17198 			/* Yes, let's make sure to move to persist before timer-start */
17199 			rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
17200 		}
17201 		rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
17202 		rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
17203 	}
17204 #ifdef NETFLIX_SHARED_CWND
17205 	if ((sbavail(sb) == 0) &&
17206 	    rack->r_ctl.rc_scw) {
17207 		tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17208 		rack->rack_scwnd_is_idle = 1;
17209 	}
17210 #endif
17211 #ifdef TCP_ACCOUNTING
17212 	if (tot_len_this_send > 0) {
17213 		crtsc = get_cyclecount();
17214 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17215 			tp->tcp_cnt_counters[SND_OUT_DATA]++;
17216 		}
17217 		counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
17218 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17219 			tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
17220 		}
17221 		counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
17222 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17223 			tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
17224 		}
17225 		counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz));
17226 	} else {
17227 		crtsc = get_cyclecount();
17228 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17229 			tp->tcp_cnt_counters[SND_LIMITED]++;
17230 		}
17231 		counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1);
17232 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17233 			tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
17234 		}
17235 		counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val));
17236 	}
17237 	sched_unpin();
17238 #endif
17239 	return (0);
17240 
17241 send:
17242 	if (rsm || sack_rxmit)
17243 		counter_u64_add(rack_nfto_resend, 1);
17244 	else
17245 		counter_u64_add(rack_non_fto_send, 1);
17246 	if ((flags & TH_FIN) &&
17247 	    sbavail(sb)) {
17248 		/*
17249 		 * We do not transmit a FIN
17250 		 * with data outstanding. We
17251 		 * need to make it so all data
17252 		 * is acked first.
17253 		 */
17254 		flags &= ~TH_FIN;
17255 	}
17256 	/* Enforce stack imposed max seg size if we have one */
17257 	if (rack->r_ctl.rc_pace_max_segs &&
17258 	    (len > rack->r_ctl.rc_pace_max_segs)) {
17259 		mark = 1;
17260 		len = rack->r_ctl.rc_pace_max_segs;
17261 	}
17262 	SOCKBUF_LOCK_ASSERT(sb);
17263 	if (len > 0) {
17264 		if (len >= segsiz)
17265 			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
17266 		else
17267 			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
17268 	}
17269 	/*
17270 	 * Before ESTABLISHED, force sending of initial options unless TCP
17271 	 * set not to do any options. NOTE: we assume that the IP/TCP header
17272 	 * plus TCP options always fit in a single mbuf, leaving room for a
17273 	 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
17274 	 * + optlen <= MCLBYTES
17275 	 */
17276 	optlen = 0;
17277 #ifdef INET6
17278 	if (isipv6)
17279 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
17280 	else
17281 #endif
17282 		hdrlen = sizeof(struct tcpiphdr);
17283 
17284 	/*
17285 	 * Compute options for segment. We only have to care about SYN and
17286 	 * established connection segments.  Options for SYN-ACK segments
17287 	 * are handled in TCP syncache.
17288 	 */
17289 	to.to_flags = 0;
17290 	if ((tp->t_flags & TF_NOOPT) == 0) {
17291 		/* Maximum segment size. */
17292 		if (flags & TH_SYN) {
17293 			tp->snd_nxt = tp->iss;
17294 			to.to_mss = tcp_mssopt(&inp->inp_inc);
17295 			if (tp->t_port)
17296 				to.to_mss -= V_tcp_udp_tunneling_overhead;
17297 			to.to_flags |= TOF_MSS;
17298 
17299 			/*
17300 			 * On SYN or SYN|ACK transmits on TFO connections,
17301 			 * only include the TFO option if it is not a
17302 			 * retransmit, as the presence of the TFO option may
17303 			 * have caused the original SYN or SYN|ACK to have
17304 			 * been dropped by a middlebox.
17305 			 */
17306 			if (IS_FASTOPEN(tp->t_flags) &&
17307 			    (tp->t_rxtshift == 0)) {
17308 				if (tp->t_state == TCPS_SYN_RECEIVED) {
17309 					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
17310 					to.to_tfo_cookie =
17311 						(u_int8_t *)&tp->t_tfo_cookie.server;
17312 					to.to_flags |= TOF_FASTOPEN;
17313 					wanted_cookie = 1;
17314 				} else if (tp->t_state == TCPS_SYN_SENT) {
17315 					to.to_tfo_len =
17316 						tp->t_tfo_client_cookie_len;
17317 					to.to_tfo_cookie =
17318 						tp->t_tfo_cookie.client;
17319 					to.to_flags |= TOF_FASTOPEN;
17320 					wanted_cookie = 1;
17321 					/*
17322 					 * If we wind up having more data to
17323 					 * send with the SYN than can fit in
17324 					 * one segment, don't send any more
17325 					 * until the SYN|ACK comes back from
17326 					 * the other end.
17327 					 */
17328 					sendalot = 0;
17329 				}
17330 			}
17331 		}
17332 		/* Window scaling. */
17333 		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
17334 			to.to_wscale = tp->request_r_scale;
17335 			to.to_flags |= TOF_SCALE;
17336 		}
17337 		/* Timestamps. */
17338 		if ((tp->t_flags & TF_RCVD_TSTMP) ||
17339 		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
17340 			to.to_tsval = ms_cts + tp->ts_offset;
17341 			to.to_tsecr = tp->ts_recent;
17342 			to.to_flags |= TOF_TS;
17343 		}
17344 		/* Set receive buffer autosizing timestamp. */
17345 		if (tp->rfbuf_ts == 0 &&
17346 		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
17347 			tp->rfbuf_ts = tcp_ts_getticks();
17348 		/* Selective ACKs. */
17349 		if (tp->t_flags & TF_SACK_PERMIT) {
17350 			if (flags & TH_SYN)
17351 				to.to_flags |= TOF_SACKPERM;
17352 			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17353 				 tp->rcv_numsacks > 0) {
17354 				to.to_flags |= TOF_SACK;
17355 				to.to_nsacks = tp->rcv_numsacks;
17356 				to.to_sacks = (u_char *)tp->sackblks;
17357 			}
17358 		}
17359 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17360 		/* TCP-MD5 (RFC2385). */
17361 		if (tp->t_flags & TF_SIGNATURE)
17362 			to.to_flags |= TOF_SIGNATURE;
17363 #endif				/* TCP_SIGNATURE */
17364 
17365 		/* Processing the options. */
17366 		hdrlen += optlen = tcp_addoptions(&to, opt);
17367 		/*
17368 		 * If we wanted a TFO option to be added, but it was unable
17369 		 * to fit, ensure no data is sent.
17370 		 */
17371 		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
17372 		    !(to.to_flags & TOF_FASTOPEN))
17373 			len = 0;
17374 	}
17375 	if (tp->t_port) {
17376 		if (V_tcp_udp_tunneling_port == 0) {
17377 			/* The port was removed?? */
17378 			SOCKBUF_UNLOCK(&so->so_snd);
17379 #ifdef TCP_ACCOUNTING
17380 			crtsc = get_cyclecount();
17381 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17382 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
17383 			}
17384 			counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
17385 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17386 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
17387 			}
17388 			counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
17389 			sched_unpin();
17390 #endif
17391 			return (EHOSTUNREACH);
17392 		}
17393 		hdrlen += sizeof(struct udphdr);
17394 	}
17395 #ifdef INET6
17396 	if (isipv6)
17397 		ipoptlen = ip6_optlen(tp->t_inpcb);
17398 	else
17399 #endif
17400 		if (tp->t_inpcb->inp_options)
17401 			ipoptlen = tp->t_inpcb->inp_options->m_len -
17402 				offsetof(struct ipoption, ipopt_list);
17403 		else
17404 			ipoptlen = 0;
17405 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17406 	ipoptlen += ipsec_optlen;
17407 #endif
17408 
17409 	/*
17410 	 * Adjust data length if insertion of options will bump the packet
17411 	 * length beyond the t_maxseg length. Clear the FIN bit because we
17412 	 * cut off the tail of the segment.
17413 	 */
17414 	if (len + optlen + ipoptlen > tp->t_maxseg) {
17415 		if (tso) {
17416 			uint32_t if_hw_tsomax;
17417 			uint32_t moff;
17418 			int32_t max_len;
17419 
17420 			/* extract TSO information */
17421 			if_hw_tsomax = tp->t_tsomax;
17422 			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
17423 			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
17424 			KASSERT(ipoptlen == 0,
17425 				("%s: TSO can't do IP options", __func__));
17426 
17427 			/*
17428 			 * Check if we should limit by maximum payload
17429 			 * length:
17430 			 */
17431 			if (if_hw_tsomax != 0) {
17432 				/* compute maximum TSO length */
17433 				max_len = (if_hw_tsomax - hdrlen -
17434 					   max_linkhdr);
17435 				if (max_len <= 0) {
17436 					len = 0;
17437 				} else if (len > max_len) {
17438 					sendalot = 1;
17439 					len = max_len;
17440 					mark = 2;
17441 				}
17442 			}
17443 			/*
17444 			 * Prevent the last segment from being fractional
17445 			 * unless the send sockbuf can be emptied:
17446 			 */
17447 			max_len = (tp->t_maxseg - optlen);
17448 			if ((sb_offset + len) < sbavail(sb)) {
17449 				moff = len % (u_int)max_len;
17450 				if (moff != 0) {
17451 					mark = 3;
17452 					len -= moff;
17453 				}
17454 			}
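			/*
			 * Illustrative example: with t_maxseg 1460 and optlen
			 * 12, max_len is 1448; a len of 10000 leaves moff =
			 * 10000 % 1448 = 1312, so len is trimmed to 8688
			 * (six full segments).
			 */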
17455 			/*
17456 			 * In case there are too many small fragments don't
17457 			 * use TSO:
17458 			 */
17459 			if (len <= segsiz) {
17460 				mark = 4;
17461 				tso = 0;
17462 			}
17463 			/*
17464 			 * Send the FIN in a separate segment after the bulk
17465 			 * sending is done. We don't trust the TSO
17466 			 * implementations to clear the FIN flag on all but
17467 			 * the last segment.
17468 			 */
17469 			if (tp->t_flags & TF_NEEDFIN) {
17470 				sendalot = 4;
17471 			}
17472 		} else {
17473 			mark = 5;
17474 			if (optlen + ipoptlen >= tp->t_maxseg) {
17475 				/*
17476 				 * Since we don't have enough space to put
17477 				 * the IP header chain and the TCP header in
17478 				 * one packet as required by RFC 7112, don't
17479 				 * send it. Also ensure that at least one
17480 				 * byte of the payload can be put into the
17481 				 * TCP segment.
17482 				 */
17483 				SOCKBUF_UNLOCK(&so->so_snd);
17484 				error = EMSGSIZE;
17485 				sack_rxmit = 0;
17486 				goto out;
17487 			}
17488 			len = tp->t_maxseg - optlen - ipoptlen;
17489 			sendalot = 5;
17490 		}
17491 	} else {
17492 		tso = 0;
17493 		mark = 6;
17494 	}
17495 	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
17496 		("%s: len > IP_MAXPACKET", __func__));
17497 #ifdef DIAGNOSTIC
17498 #ifdef INET6
17499 	if (max_linkhdr + hdrlen > MCLBYTES)
17500 #else
17501 		if (max_linkhdr + hdrlen > MHLEN)
17502 #endif
17503 			panic("tcphdr too big");
17504 #endif
17505 
17506 	/*
17507 	 * This KASSERT is here to catch edge cases at a well defined place.
17508 	 * Before, those had triggered (random) panic conditions further
17509 	 * down.
17510 	 */
17511 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
17512 	if ((len == 0) &&
17513 	    (flags & TH_FIN) &&
17514 	    (sbused(sb))) {
17515 		/*
17516 		 * We have outstanding data, don't send a FIN by itself!
17517 		 */
17518 		goto just_return;
17519 	}
17520 	/*
17521 	 * Grab a header mbuf, attaching a copy of data to be transmitted,
17522 	 * and initialize the header from the template for sends on this
17523 	 * connection.
17524 	 */
17525 	hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0;
17526 	if (len) {
17527 		uint32_t max_val;
17528 		uint32_t moff;
17529 
17530 		if (rack->r_ctl.rc_pace_max_segs)
17531 			max_val = rack->r_ctl.rc_pace_max_segs;
17532 		else if (rack->rc_user_set_max_segs)
17533 			max_val = rack->rc_user_set_max_segs * segsiz;
17534 		else
17535 			max_val = len;
17536 		/*
17537 		 * We allow a limit on sending with hptsi.
17538 		 */
17539 		if (len > max_val) {
17540 			mark = 7;
17541 			len = max_val;
17542 		}
17543 #ifdef INET6
17544 		if (MHLEN < hdrlen + max_linkhdr)
17545 			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
17546 		else
17547 #endif
17548 			m = m_gethdr(M_NOWAIT, MT_DATA);
17549 
17550 		if (m == NULL) {
17551 			SOCKBUF_UNLOCK(sb);
17552 			error = ENOBUFS;
17553 			sack_rxmit = 0;
17554 			goto out;
17555 		}
17556 		m->m_data += max_linkhdr;
17557 		m->m_len = hdrlen;
17558 
17559 		/*
17560 		 * Start the m_copy functions from the closest mbuf to the
17561 		 * sb_offset in the socket buffer chain.
17562 		 */
17563 		mb = sbsndptr_noadv(sb, sb_offset, &moff);
17564 		s_mb = mb;
17565 		s_moff = moff;
17566 		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
17567 			m_copydata(mb, moff, (int)len,
17568 				   mtod(m, caddr_t)+hdrlen);
17569 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17570 				sbsndptr_adv(sb, mb, len);
17571 			m->m_len += len;
17572 		} else {
17573 			struct sockbuf *msb;
17574 
17575 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17576 				msb = NULL;
17577 			else
17578 				msb = sb;
17579 			m->m_next = tcp_m_copym(
17580 				mb, moff, &len,
17581 				if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
17582 				((rsm == NULL) ? hw_tls : 0)
17583 #ifdef NETFLIX_COPY_ARGS
17584 				, &filled_all
17585 #endif
17586 				);
17587 			if (len <= (tp->t_maxseg - optlen)) {
17588 				/*
17589 				 * Must have run out of mbufs for the copy,
17590 				 * shortening it so TSO is no longer needed. Let's
17591 				 * not set sendalot since we are low on
17592 				 * mbufs.
17593 				 */
17594 				tso = 0;
17595 			}
17596 			if (m->m_next == NULL) {
17597 				SOCKBUF_UNLOCK(sb);
17598 				(void)m_free(m);
17599 				error = ENOBUFS;
17600 				sack_rxmit = 0;
17601 				goto out;
17602 			}
17603 		}
17604 		if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
17605 			if (rsm && (rsm->r_flags & RACK_TLP)) {
17606 				/*
17607 				 * TLP should not count in retran count, but
17608 				 * in its own bin
17609 				 */
17610 				counter_u64_add(rack_tlp_retran, 1);
17611 				counter_u64_add(rack_tlp_retran_bytes, len);
17612 			} else {
17613 				tp->t_sndrexmitpack++;
17614 				KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
17615 				KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
17616 			}
17617 #ifdef STATS
17618 			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
17619 						 len);
17620 #endif
17621 		} else {
17622 			KMOD_TCPSTAT_INC(tcps_sndpack);
17623 			KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
17624 #ifdef STATS
17625 			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
17626 						 len);
17627 #endif
17628 		}
17629 		/*
17630 		 * If we're sending everything we've got, set PUSH. (This
17631 		 * will keep happy those implementations which only give
17632 		 * data to the user when a buffer fills or a PUSH comes in.)
17633 		 */
17634 		if (sb_offset + len == sbused(sb) &&
17635 		    sbused(sb) &&
17636 		    !(flags & TH_SYN)) {
17637 			flags |= TH_PUSH;
17638 			add_flag |= RACK_HAD_PUSH;
17639 		}
17640 
17641 		SOCKBUF_UNLOCK(sb);
17642 	} else {
17643 		SOCKBUF_UNLOCK(sb);
17644 		if (tp->t_flags & TF_ACKNOW)
17645 			KMOD_TCPSTAT_INC(tcps_sndacks);
17646 		else if (flags & (TH_SYN | TH_FIN | TH_RST))
17647 			KMOD_TCPSTAT_INC(tcps_sndctrl);
17648 		else
17649 			KMOD_TCPSTAT_INC(tcps_sndwinup);
17650 
17651 		m = m_gethdr(M_NOWAIT, MT_DATA);
17652 		if (m == NULL) {
17653 			error = ENOBUFS;
17654 			sack_rxmit = 0;
17655 			goto out;
17656 		}
17657 #ifdef INET6
17658 		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
17659 		    MHLEN >= hdrlen) {
17660 			M_ALIGN(m, hdrlen);
17661 		} else
17662 #endif
17663 			m->m_data += max_linkhdr;
17664 		m->m_len = hdrlen;
17665 	}
17666 	SOCKBUF_UNLOCK_ASSERT(sb);
17667 	m->m_pkthdr.rcvif = (struct ifnet *)0;
17668 #ifdef MAC
17669 	mac_inpcb_create_mbuf(inp, m);
17670 #endif
17671 	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) &&  rack->r_fsb_inited) {
17672 #ifdef INET6
17673 		if (isipv6)
17674 			ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
17675 		else
17676 #endif				/* INET6 */
17677 			ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
17678 		th = rack->r_ctl.fsb.th;
17679 		udp = rack->r_ctl.fsb.udp;
17680 		if (udp) {
17681 			if (isipv6)
17682 				ulen = hdrlen + len - sizeof(struct ip6_hdr);
17683 			else
17684 				ulen = hdrlen + len - sizeof(struct ip);
17685 			udp->uh_ulen = htons(ulen);
17686 		}
17687 	} else {
17688 #ifdef INET6
17689 		if (isipv6) {
17690 			ip6 = mtod(m, struct ip6_hdr *);
17691 			if (tp->t_port) {
17692 				udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
17693 				udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17694 				udp->uh_dport = tp->t_port;
17695 				ulen = hdrlen + len - sizeof(struct ip6_hdr);
17696 				udp->uh_ulen = htons(ulen);
17697 				th = (struct tcphdr *)(udp + 1);
17698 			} else
17699 				th = (struct tcphdr *)(ip6 + 1);
17700 			tcpip_fillheaders(inp, tp->t_port, ip6, th);
17701 		} else
17702 #endif				/* INET6 */
17703 		{
17704 			ip = mtod(m, struct ip *);
17705 #ifdef TCPDEBUG
17706 			ipov = (struct ipovly *)ip;
17707 #endif
17708 			if (tp->t_port) {
17709 				udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
17710 				udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17711 				udp->uh_dport = tp->t_port;
17712 				ulen = hdrlen + len - sizeof(struct ip);
17713 				udp->uh_ulen = htons(ulen);
17714 				th = (struct tcphdr *)(udp + 1);
17715 			} else
17716 				th = (struct tcphdr *)(ip + 1);
17717 			tcpip_fillheaders(inp, tp->t_port, ip, th);
17718 		}
17719 	}
17720 	/*
17721 	 * Fill in fields, remembering maximum advertised window for use in
17722 	 * delaying messages about window sizes. If resending a FIN, be sure
17723 	 * not to use a new sequence number.
17724 	 */
17725 	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
17726 	    tp->snd_nxt == tp->snd_max)
17727 		tp->snd_nxt--;
17728 	/*
17729 	 * If we are starting a connection, send ECN setup SYN packet. If we
17730 	 * are on a retransmit, we may resend those bits a number of times
17731 	 * as per RFC 3168.
17732 	 */
17733 	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
17734 		if (tp->t_rxtshift >= 1) {
17735 			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
17736 				flags |= TH_ECE | TH_CWR;
17737 		} else
17738 			flags |= TH_ECE | TH_CWR;
17739 	}
17740 	/* Handle parallel SYN for ECN */
17741 	if ((tp->t_state == TCPS_SYN_RECEIVED) &&
17742 	    (tp->t_flags2 & TF2_ECN_SND_ECE)) {
17743 		flags |= TH_ECE;
17744 		tp->t_flags2 &= ~TF2_ECN_SND_ECE;
17745 	}
17746 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17747 	    (tp->t_flags2 & TF2_ECN_PERMIT)) {
17748 		/*
17749 		 * If the peer has ECN, mark data packets with ECN capable
17750 		 * transmission (ECT). Ignore pure ack packets,
17751 		 * retransmissions.
17752 		 */
17753 		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
17754 		    (sack_rxmit == 0)) {
17755 #ifdef INET6
17756 			if (isipv6)
17757 				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
17758 			else
17759 #endif
17760 				ip->ip_tos |= IPTOS_ECN_ECT0;
17761 			KMOD_TCPSTAT_INC(tcps_ecn_ect0);
17762 			/*
17763 			 * Reply with proper ECN notifications.
17764 			 * Only set CWR on new data segments.
17765 			 */
17766 			if (tp->t_flags2 & TF2_ECN_SND_CWR) {
17767 				flags |= TH_CWR;
17768 				tp->t_flags2 &= ~TF2_ECN_SND_CWR;
17769 			}
17770 		}
17771 		if (tp->t_flags2 & TF2_ECN_SND_ECE)
17772 			flags |= TH_ECE;
17773 	}
17774 	/*
17775 	 * If we are doing retransmissions, then snd_nxt will not reflect
17776 	 * the first unsent octet.  For ACK only packets, we do not want the
17777 	 * sequence number of the retransmitted packet, we want the sequence
17778 	 * number of the next unsent octet.  So, if there is no data (and no
17779 	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
17780 	 * ti_seq.  But if we are in persist state, snd_max might reflect
17781 	 * one byte beyond the right edge of the window, so use snd_nxt in
17782 	 * that case, since we know we aren't doing a retransmission.
17783 	 * (retransmit and persist are mutually exclusive...)
17784 	 */
17785 	if (sack_rxmit == 0) {
17786 		if (len || (flags & (TH_SYN | TH_FIN))) {
17787 			th->th_seq = htonl(tp->snd_nxt);
17788 			rack_seq = tp->snd_nxt;
17789 		} else {
17790 			th->th_seq = htonl(tp->snd_max);
17791 			rack_seq = tp->snd_max;
17792 		}
17793 	} else {
17794 		th->th_seq = htonl(rsm->r_start);
17795 		rack_seq = rsm->r_start;
17796 	}
17797 	th->th_ack = htonl(tp->rcv_nxt);
17798 	th->th_flags = flags;
17799 	/*
17800 	 * Calculate receive window.  Don't shrink window, but avoid silly
17801 	 * window syndrome.
17802 	 * If a RST segment is sent, advertise a window of zero.
17803 	 */
17804 	if (flags & TH_RST) {
17805 		recwin = 0;
17806 	} else {
17807 		if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
17808 		    recwin < (long)segsiz) {
17809 			recwin = 0;
17810 		}
17811 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
17812 		    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
17813 			recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
17814 	}
17815 
17816 	/*
17817 	 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
17818 	 * <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK> case is
17819 	 * handled in syncache.
17820 	 */
17821 	if (flags & TH_SYN)
17822 		th->th_win = htons((u_short)
17823 				   (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
17824 	else {
17825 		/* Avoid shrinking window with window scaling. */
17826 		recwin = roundup2(recwin, 1 << tp->rcv_scale);
17827 		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
17828 	}
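	/*
	 * Illustrative example: with rcv_scale 7 a recwin of 60000 rounds
	 * up to 60032 (469 << 7), so scaling down and back up never shrinks
	 * the window we advertise.
	 */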
17829 	/*
17830 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
17831 	 * window.  This may cause the remote transmitter to stall.  This
17832 	 * flag tells soreceive() to disable delayed acknowledgements when
17833 	 * draining the buffer.  This can occur if the receiver is
17834 	 * attempting to read more data than can be buffered prior to
17835 	 * transmitting on the connection.
17836 	 */
17837 	if (th->th_win == 0) {
17838 		tp->t_sndzerowin++;
17839 		tp->t_flags |= TF_RXWIN0SENT;
17840 	} else
17841 		tp->t_flags &= ~TF_RXWIN0SENT;
17842 	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
17843 	/* Now are we using the fsb? If so, copy the template data to the mbuf */
17844 	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
17845 		uint8_t *cpto;
17846 
17847 		cpto = mtod(m, uint8_t *);
17848 		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
17849 		/*
17850 		 * We have just copied in:
17851 		 * IP/IP6
17852 		 * <optional udphdr>
17853 		 * tcphdr (no options)
17854 		 *
17855 		 * We need to grab the correct pointers into the mbuf
17856 		 * for both the tcp header, and possibly the udp header (if tunneling).
17857 		 * We do this by using the offset in the copy buffer and adding it
17858 		 * to the mbuf base pointer (cpto).
17859 		 */
17860 #ifdef INET6
17861 		if (isipv6)
17862 			ip6 = mtod(m, struct ip6_hdr *);
17863 		else
17864 #endif				/* INET6 */
17865 			ip = mtod(m, struct ip *);
17866 		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
17867 		/* If we have a udp header lets set it into the mbuf as well */
17868 		if (udp)
17869 			udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
17870 	}
17871 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17872 	if (to.to_flags & TOF_SIGNATURE) {
17873 		/*
17874 		 * Calculate MD5 signature and put it into the place
17875 		 * determined before.
17876 		 * NOTE: since TCP options buffer doesn't point into
17877 		 * mbuf's data, calculate offset and use it.
17878 		 */
17879 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
17880 						       (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
17881 			/*
17882 			 * Do not send segment if the calculation of MD5
17883 			 * digest has failed.
17884 			 */
17885 			goto out;
17886 		}
17887 	}
17888 #endif
17889 	if (optlen) {
17890 		bcopy(opt, th + 1, optlen);
17891 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
17892 	}
17893 	/*
17894 	 * Put TCP length in extended header, and then checksum extended
17895 	 * header and data.
17896 	 */
17897 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
17898 #ifdef INET6
17899 	if (isipv6) {
17900 		/*
17901 		 * ip6_plen does not need to be filled in now; it will be
17902 		 * filled in by ip6_output.
17903 		 */
17904 		if (tp->t_port) {
17905 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
17906 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17907 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
17908 			th->th_sum = htons(0);
17909 			UDPSTAT_INC(udps_opackets);
17910 		} else {
17911 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
17912 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17913 			th->th_sum = in6_cksum_pseudo(ip6,
17914 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
17915 						      0);
17916 		}
17917 	}
17918 #endif
17919 #if defined(INET6) && defined(INET)
17920 	else
17921 #endif
17922 #ifdef INET
17923 	{
17924 		if (tp->t_port) {
17925 			m->m_pkthdr.csum_flags = CSUM_UDP;
17926 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17927 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
17928 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
17929 			th->th_sum = htons(0);
17930 			UDPSTAT_INC(udps_opackets);
17931 		} else {
17932 			m->m_pkthdr.csum_flags = CSUM_TCP;
17933 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17934 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
17935 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
17936 									IPPROTO_TCP + len + optlen));
17937 		}
17938 		/* IP version must be set here for ipv4/ipv6 checking later */
17939 		KASSERT(ip->ip_v == IPVERSION,
17940 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
17941 	}
17942 #endif
17943 	/*
17944 	 * Enable TSO and specify the size of the segments. The TCP pseudo
17945 	 * header checksum is always provided. XXX: Fixme: This is currently
17946 	 * not the case for IPv6.
17947 	 */
17948 	if (tso) {
17949 		KASSERT(len > tp->t_maxseg - optlen,
17950 			("%s: len <= tso_segsz", __func__));
17951 		m->m_pkthdr.csum_flags |= CSUM_TSO;
17952 		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
17953 	}
17954 	KASSERT(len + hdrlen == m_length(m, NULL),
17955 		("%s: mbuf chain different than expected: %d + %u != %u",
17956 		 __func__, len, hdrlen, m_length(m, NULL)));
17957 
17958 #ifdef TCP_HHOOK
17959 	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
17960 	hhook_run_tcp_est_out(tp, th, &to, len, tso);
17961 #endif
17962 	/* We're getting ready to send; log now. */
17963 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
17964 		union tcp_log_stackspecific log;
17965 
17966 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
17967 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
17968 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
17969 		if (rack->rack_no_prr)
17970 			log.u_bbr.flex1 = 0;
17971 		else
17972 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
17973 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
17974 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
17975 		log.u_bbr.flex4 = orig_len;
17976 		if (filled_all)
17977 			log.u_bbr.flex5 = 0x80000000;
17978 		else
17979 			log.u_bbr.flex5 = 0;
17980 		/* Save off the early/late values */
17981 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
17982 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
17983 		log.u_bbr.bw_inuse = rack_get_bw(rack);
17984 		if (rsm || sack_rxmit) {
17985 			if (doing_tlp)
17986 				log.u_bbr.flex8 = 2;
17987 			else
17988 				log.u_bbr.flex8 = 1;
17989 		} else {
17990 			log.u_bbr.flex8 = 0;
17991 		}
17992 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
17993 		log.u_bbr.flex7 = mark;
17994 		log.u_bbr.flex7 <<= 8;
17995 		log.u_bbr.flex7 |= pass;
17996 		log.u_bbr.pkts_out = tp->t_maxseg;
17997 		log.u_bbr.timeStamp = cts;
17998 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
17999 		log.u_bbr.lt_epoch = cwnd_to_use;
18000 		log.u_bbr.delivered = sendalot;
18001 		lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
18002 				     len, &log, false, NULL, NULL, 0, &tv);
18003 	} else
18004 		lgb = NULL;
18005 
18006 	/*
18007 	 * Fill in IP length and desired time to live and send to IP level.
18008 	 * There should be a better way to handle ttl and tos; we could keep
18009 	 * them in the template, but need a way to checksum without them.
18010 	 */
18011 	/*
18012 	 * m->m_pkthdr.len should have been set before checksum calculation,
18013 	 * because in6_cksum() needs it.
18014 	 */
18015 #ifdef INET6
18016 	if (isipv6) {
18017 		/*
18018 		 * We separately set the hoplimit for every segment, since the
18019 		 * user might want to change the value via setsockopt. Also,
18020 		 * desired default hop limit might be changed via Neighbor
18021 		 * Discovery.
18022 		 */
18023 		rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
18024 
18025 		/*
18026 		 * Set the packet size here for the benefit of DTrace
18027 		 * probes. ip6_output() will set it properly; it's supposed
18028 		 * to include the option header lengths as well.
18029 		 */
18030 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18031 
18032 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18033 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18034 		else
18035 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18036 
18037 		if (tp->t_state == TCPS_SYN_SENT)
18038 			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
18039 
18040 		TCP_PROBE5(send, NULL, tp, ip6, tp, th);
18041 		/* TODO: IPv6 IP6TOS_ECT bit on */
18042 		error = ip6_output(m,
18043 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18044 				   inp->in6p_outputopts,
18045 #else
18046 				   NULL,
18047 #endif
18048 				   &inp->inp_route6,
18049 				   ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
18050 				   NULL, NULL, inp);
18051 
18052 		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
18053 			mtu = inp->inp_route6.ro_nh->nh_mtu;
18054 	}
18055 #endif				/* INET6 */
18056 #if defined(INET) && defined(INET6)
18057 	else
18058 #endif
18059 #ifdef INET
18060 	{
18061 		ip->ip_len = htons(m->m_pkthdr.len);
18062 #ifdef INET6
18063 		if (inp->inp_vflag & INP_IPV6PROTO)
18064 			ip->ip_ttl = in6_selecthlim(inp, NULL);
18065 #endif				/* INET6 */
18066 		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
18067 		/*
18068 		 * If we do path MTU discovery, then we set DF on every
18069 		 * packet. This might not be the best thing to do according
18070 		 * to RFC3390 Section 2. However, the tcp hostcache mitigates
18071 		 * the problem so it affects only the first tcp connection
18072 		 * with a host.
18073 		 *
18074 		 * NB: Don't set DF on small MTU/MSS to have a safe
18075 		 * fallback.
18076 		 */
18077 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18078 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18079 			if (tp->t_port == 0 || len < V_tcp_minmss) {
18080 				ip->ip_off |= htons(IP_DF);
18081 			}
18082 		} else {
18083 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18084 		}
18085 
18086 		if (tp->t_state == TCPS_SYN_SENT)
18087 			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
18088 
18089 		TCP_PROBE5(send, NULL, tp, ip, tp, th);
18090 
18091 		error = ip_output(m,
18092 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18093 				  inp->inp_options,
18094 #else
18095 				  NULL,
18096 #endif
18097 				  &inp->inp_route,
18098 				  ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
18099 				  inp);
18100 		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
18101 			mtu = inp->inp_route.ro_nh->nh_mtu;
18102 	}
18103 #endif				/* INET */
18104 
18105 out:
18106 	if (lgb) {
18107 		lgb->tlb_errno = error;
18108 		lgb = NULL;
18109 	}
18110 	/*
18111 	 * In transmit state, time the transmission and arrange for the
18112 	 * retransmit.  In persist state, just set snd_max.
18113 	 */
18114 	if (error == 0) {
18115 		rack->forced_ack = 0;	/* If we send something zap the FA flag */
18116 		if (rsm && (doing_tlp == 0)) {
18117 			/* Set we retransmitted */
18118 			rack->rc_gp_saw_rec = 1;
18119 		} else {
18120 			if (cwnd_to_use > tp->snd_ssthresh) {
18121 				/* Set we sent in CA */
18122 				rack->rc_gp_saw_ca = 1;
18123 			} else {
18124 				/* Set we sent in SS */
18125 				rack->rc_gp_saw_ss = 1;
18126 			}
18127 		}
18128 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18129 		    (tp->t_flags & TF_SACK_PERMIT) &&
18130 		    tp->rcv_numsacks > 0)
18131 			tcp_clean_dsack_blocks(tp);
18132 		tot_len_this_send += len;
18133 		if (len == 0)
18134 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
18135 		else if (len == 1) {
18136 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
18137 		} else if (len > 1) {
18138 			int idx;
18139 
18140 			idx = (len / segsiz) + 3;
18141 			if (idx >= TCP_MSS_ACCT_ATIMER)
18142 				counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18143 			else
18144 				counter_u64_add(rack_out_size[idx], 1);
18145 		}
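		/*
		 * rack_out_size buckets sends by how many segments they
		 * carry; idx = (len / segsiz) + 3, with anything at or past
		 * TCP_MSS_ACCT_ATIMER folded into the last bucket before it.
		 */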
18146 	}
18147 	if ((rack->rack_no_prr == 0) &&
18148 	    sub_from_prr &&
18149 	    (error == 0)) {
18150 		if (rack->r_ctl.rc_prr_sndcnt >= len)
18151 			rack->r_ctl.rc_prr_sndcnt -= len;
18152 		else
18153 			rack->r_ctl.rc_prr_sndcnt = 0;
18154 	}
18155 	sub_from_prr = 0;
18156 	if (doing_tlp && (rsm == NULL)) {
18157 		/* New send doing a TLP */
18158 		add_flag |= RACK_TLP;
18159 		tp->t_sndtlppack++;
18160 		tp->t_sndtlpbyte += len;
18161 	}
18162 	rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
18163 			rack_to_usec_ts(&tv),
18164 			rsm, add_flag, s_mb, s_moff);
18165 
18166 
18167 	if ((error == 0) &&
18168 	    (len > 0) &&
18169 	    (tp->snd_una == tp->snd_max))
18170 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
18171 	{
18172 		tcp_seq startseq = tp->snd_nxt;
18173 
18174 		/* Track our lost count */
18175 		if (rsm && (doing_tlp == 0))
18176 			rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
18177 		/*
18178 		 * Advance snd_nxt over sequence space of this segment.
18179 		 */
18180 		if (error)
18181 			/* We don't log or do anything with errors */
18182 			goto nomore;
18183 		if (doing_tlp == 0) {
18184 			if (rsm == NULL) {
18185 				/*
18186 				 * Not a retransmission of some
18187 				 * sort, new data is going out so
18188 				 * clear our TLP count and flag.
18189 				 */
18190 				rack->rc_tlp_in_progress = 0;
18191 				rack->r_ctl.rc_tlp_cnt_out = 0;
18192 			}
18193 		} else {
18194 			/*
18195 			 * We have just sent a TLP, mark that it is true
18196 			 * and make sure our in progress is set so we
18197 			 * continue to check the count.
18198 			 */
18199 			rack->rc_tlp_in_progress = 1;
18200 			rack->r_ctl.rc_tlp_cnt_out++;
18201 		}
18202 		if (flags & (TH_SYN | TH_FIN)) {
18203 			if (flags & TH_SYN)
18204 				tp->snd_nxt++;
18205 			if (flags & TH_FIN) {
18206 				tp->snd_nxt++;
18207 				tp->t_flags |= TF_SENTFIN;
18208 			}
18209 		}
18210 		/* In the ENOBUFS case we do *not* update snd_max */
18211 		if (sack_rxmit)
18212 			goto nomore;
18213 
18214 		tp->snd_nxt += len;
18215 		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
18216 			if (tp->snd_una == tp->snd_max) {
18217 				/*
18218 				 * Update the time we just added data since
18219 				 * none was outstanding.
18220 				 */
18221 				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
18222 				tp->t_acktime = ticks;
18223 			}
18224 			tp->snd_max = tp->snd_nxt;
18225 			/*
18226 			 * Time this transmission if not a retransmission and
18227 			 * not currently timing anything.
18228 			 * This is only relevant in case of switching back to
18229 			 * the base stack.
18230 			 */
18231 			if (tp->t_rtttime == 0) {
18232 				tp->t_rtttime = ticks;
18233 				tp->t_rtseq = startseq;
18234 				KMOD_TCPSTAT_INC(tcps_segstimed);
18235 			}
18236 			if (len &&
18237 			    ((tp->t_flags & TF_GPUTINPROG) == 0))
18238 				rack_start_gp_measurement(tp, rack, startseq, sb_offset);
18239 		}
18240 		/*
18241 		 * If we are doing FO we need to update the mbuf position and
18242 		 * subtract what we sent. This also happens when the peer sends us
18243 		 * duplicate information and we thus want to send a DSACK.
18244 		 *
18245 		 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO
18246 		 * turned off? If not then we are going to echo multiple DSACK blocks
18247 		 * out (with the TSO), which we should not be doing.
18248 		 */
18249 		if (rack->r_fast_output && len) {
18250 			if (rack->r_ctl.fsb.left_to_send > len)
18251 				rack->r_ctl.fsb.left_to_send -= len;
18252 			else
18253 				rack->r_ctl.fsb.left_to_send = 0;
18254 			if (rack->r_ctl.fsb.left_to_send < segsiz)
18255 				rack->r_fast_output = 0;
18256 			if (rack->r_fast_output) {
18257 				rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18258 				rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18259 			}
18260 		}
18261 	}
18262 nomore:
18263 	if (error) {
18264 		rack->r_ctl.rc_agg_delayed = 0;
18265 		rack->r_early = 0;
18266 		rack->r_late = 0;
18267 		rack->r_ctl.rc_agg_early = 0;
18268 		SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
18269 		/*
18270 		 * Failures do not advance the seq counter above. For the
18271 		 * case of ENOBUFS we will fall out and retry shortly via
18272 		 * the hpts. Everything else will just have to retransmit
18273 		 * with the timer.
18274 		 *
18275 		 * In any case, we do not want to loop around for another
18276 		 * send without a good reason.
18277 		 */
18278 		sendalot = 0;
18279 		switch (error) {
18280 		case EPERM:
18281 			tp->t_softerror = error;
18282 #ifdef TCP_ACCOUNTING
18283 			crtsc = get_cyclecount();
18284 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18285 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18286 			}
18287 			counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18288 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18289 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18290 			}
18291 			counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18292 			sched_unpin();
18293 #endif
18294 			return (error);
18295 		case ENOBUFS:
18296 			/*
18297 			 * Pace us right away to retry in some small
18298 			 * amount of time
18299 			 */
18300 			slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18301 			if (rack->rc_enobuf < 0x7f)
18302 				rack->rc_enobuf++;
18303 			if (slot < (10 * HPTS_USEC_IN_MSEC))
18304 				slot = 10 * HPTS_USEC_IN_MSEC;
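			/*
			 * The retry delay grows by 1ms per consecutive ENOBUFS
			 * (the counter is capped at 0x7f) but is never less
			 * than 10ms, a gentle linear backoff while buffers
			 * recover.
			 */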
18305 			if (rack->r_ctl.crte != NULL) {
18306 				counter_u64_add(rack_saw_enobuf_hw, 1);
18307 				tcp_rl_log_enobuf(rack->r_ctl.crte);
18308 			}
18309 			counter_u64_add(rack_saw_enobuf, 1);
18310 			goto enobufs;
18311 		case EMSGSIZE:
18312 			/*
18313 			 * For some reason the interface we used initially
18314 			 * to send segments changed to another or lowered
18315 			 * its MTU. If TSO was active we either got an
18316 			 * interface without TSO capabilities or TSO was
18317 			 * turned off. If we obtained mtu from ip_output()
18318 			 * then update it and try again.
18319 			 */
18320 			if (tso)
18321 				tp->t_flags &= ~TF_TSO;
18322 			if (mtu != 0) {
18323 				tcp_mss_update(tp, -1, mtu, NULL, NULL);
18324 				goto again;
18325 			}
18326 			slot = 10 * HPTS_USEC_IN_MSEC;
18327 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18328 #ifdef TCP_ACCOUNTING
18329 			crtsc = get_cyclecount();
18330 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18331 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18332 			}
18333 			counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18334 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18335 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18336 			}
18337 			counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18338 			sched_unpin();
18339 #endif
18340 			return (error);
18341 		case ENETUNREACH:
18342 			counter_u64_add(rack_saw_enetunreach, 1);
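			/* FALLTHROUGH */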
18343 		case EHOSTDOWN:
18344 		case EHOSTUNREACH:
18345 		case ENETDOWN:
18346 			if (TCPS_HAVERCVDSYN(tp->t_state)) {
18347 				tp->t_softerror = error;
18348 			}
18349 			/* FALLTHROUGH */
18350 		default:
18351 			slot = 10 * HPTS_USEC_IN_MSEC;
18352 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18353 #ifdef TCP_ACCOUNTING
18354 			crtsc = get_cyclecount();
18355 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18356 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18357 			}
18358 			counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18359 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18360 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18361 			}
18362 			counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18363 			sched_unpin();
18364 #endif
18365 			return (error);
18366 		}
18367 	} else {
18368 		rack->rc_enobuf = 0;
18369 		if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18370 			rack->r_ctl.retran_during_recovery += len;
18371 	}
18372 	KMOD_TCPSTAT_INC(tcps_sndtotal);
18373 
18374 	/*
18375 	 * Data sent (as far as we can tell). If this advertises a larger
18376 	 * window than any other segment, then remember the size of the
18377 	 * advertised window. Any pending ACK has now been sent.
18378 	 */
18379 	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
18380 		tp->rcv_adv = tp->rcv_nxt + recwin;
18381 
18382 	tp->last_ack_sent = tp->rcv_nxt;
18383 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18384 enobufs:
18385 	if (sendalot) {
18386 		/* Do we need to turn off sendalot? */
18387 		if (rack->r_ctl.rc_pace_max_segs &&
18388 		    (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
18389 			/* We hit our max. */
18390 			sendalot = 0;
18391 		} else if ((rack->rc_user_set_max_segs) &&
18392 			   (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
18393 			/* We hit the user defined max */
18394 			sendalot = 0;
18395 		}
18396 	}
18397 	if ((error == 0) && (flags & TH_FIN))
18398 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
18399 	if (flags & TH_RST) {
18400 		/*
18401 		 * We don't send again after sending a RST.
18402 		 */
18403 		slot = 0;
18404 		sendalot = 0;
18405 		if (error == 0)
18406 			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
18407 	} else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
18408 		/*
18409 		 * Get our pacing rate. If an error
18410 		 * occurred in sending (ENOBUFS) we would
18411 		 * reach this else-if with slot preset and skip it. Other
18412 		 * errors return.
18413 		 */
18414 		slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
18415 	}
18416 	if (rsm &&
18417 	    (rsm->r_flags & RACK_HAS_SYN) == 0 &&
18418 	    rack->use_rack_rr) {
18419 		/* It's a retransmit and we use the rack cheat? */
18420 		if ((slot == 0) ||
18421 		    (rack->rc_always_pace == 0) ||
18422 		    (rack->r_rr_config == 1)) {
18423 			/*
18424 			 * We have no pacing set or we
18425 			 * are using old-style rack or
18426 			 * we are overridden to use the old 1ms pacing.
18427 			 */
18428 			slot = rack->r_ctl.rc_min_to;
18429 		}
18430 	}
18431 	/* We have sent, clear the flag */
18432 	rack->r_ent_rec_ns = 0;
18433 	if (rack->r_must_retran) {
18434 		if (rsm) {
18435 			rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
18436 			if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
18437 				/*
18438 				 * We have retransmitted all.
18439 				 */
18440 				rack->r_must_retran = 0;
18441 				rack->r_ctl.rc_out_at_rto = 0;
18442 			}
18443 		} else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18444 			/*
18445 			 * Sending new data will also kill
18446 			 * the loop.
18447 			 */
18448 			rack->r_must_retran = 0;
18449 			rack->r_ctl.rc_out_at_rto = 0;
18450 		}
18451 	}
18452 	rack->r_ctl.fsb.recwin = recwin;
18453 	if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
18454 	    SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18455 		/*
18456 		 * We hit an RTO and have now passed snd_max at the RTO;
18457 		 * clear all the WAS flags.
18458 		 */
18459 		tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
18460 	}
18461 	if (slot) {
18462 		/* set the rack tcb into the slot N */
18463 		counter_u64_add(rack_paced_segments, 1);
18464 		if ((error == 0) &&
18465 		    rack_use_rfo &&
18466 		    ((flags & (TH_SYN|TH_FIN)) == 0) &&
18467 		    (rsm == NULL) &&
18468 		    (tp->snd_nxt == tp->snd_max) &&
18469 		    (ipoptlen == 0) &&
18470 		    (tp->rcv_numsacks == 0) &&
18471 		    rack->r_fsb_inited &&
18472 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
18473 		    (rack->r_must_retran == 0) &&
18474 		    ((tp->t_flags & TF_NEEDFIN) == 0) &&
18475 		    (len > 0) && (orig_len > 0) &&
18476 		    (orig_len > len) &&
18477 		    ((orig_len - len) >= segsiz) &&
18478 		    ((optlen == 0) ||
18479 		     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18480 			/* We can send at least one more MSS using our fsb */
18481 
18482 			rack->r_fast_output = 1;
18483 			rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18484 			rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18485 			rack->r_ctl.fsb.tcp_flags = flags;
18486 			rack->r_ctl.fsb.left_to_send = orig_len - len;
18487 			KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18488 				("rack:%p left_to_send:%u sbavail:%u out:%u",
18489 				 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18490 				 (tp->snd_max - tp->snd_una)));
18491 			if (rack->r_ctl.fsb.left_to_send < segsiz)
18492 				rack->r_fast_output = 0;
18493 			else {
18494 				if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18495 					rack->r_ctl.fsb.rfo_apply_push = 1;
18496 				else
18497 					rack->r_ctl.fsb.rfo_apply_push = 0;
18498 			}
18499 		} else
18500 			rack->r_fast_output = 0;
18501 		rack_log_fsb(rack, tp, so, flags,
18502 			     ipoptlen, orig_len, len, error,
18503 			     (rsm == NULL), optlen, __LINE__, 2);
18504 	} else if (sendalot) {
18505 		int ret;
18506 
18507 		if (len)
18508 			counter_u64_add(rack_unpaced_segments, 1);
18509 		sack_rxmit = 0;
18510 		if ((error == 0) &&
18511 		    rack_use_rfo &&
18512 		    ((flags & (TH_SYN|TH_FIN)) == 0) &&
18513 		    (rsm == NULL) &&
18514 		    (ipoptlen == 0) &&
18515 		    (tp->rcv_numsacks == 0) &&
18516 		    (tp->snd_nxt == tp->snd_max) &&
18517 		    (rack->r_must_retran == 0) &&
18518 		    rack->r_fsb_inited &&
18519 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
18520 		    ((tp->t_flags & TF_NEEDFIN) == 0) &&
18521 		    (len > 0) && (orig_len > 0) &&
18522 		    (orig_len > len) &&
18523 		    ((orig_len - len) >= segsiz) &&
18524 		    ((optlen == 0) ||
18525 		     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18526 			/* we can use fast_output for more */
18527 
18528 			rack->r_fast_output = 1;
18529 			rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18530 			rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18531 			rack->r_ctl.fsb.tcp_flags = flags;
18532 			rack->r_ctl.fsb.left_to_send = orig_len - len;
18533 			KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18534 				("rack:%p left_to_send:%u sbavail:%u out:%u",
18535 				 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18536 				 (tp->snd_max - tp->snd_una)));
18537 			if (rack->r_ctl.fsb.left_to_send < segsiz) {
18538 				rack->r_fast_output = 0;
18539 			}
18540 			if (rack->r_fast_output) {
18541 				if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18542 					rack->r_ctl.fsb.rfo_apply_push = 1;
18543 				else
18544 					rack->r_ctl.fsb.rfo_apply_push = 0;
18545 				rack_log_fsb(rack, tp, so, flags,
18546 					     ipoptlen, orig_len, len, error,
18547 					     (rsm == NULL), optlen, __LINE__, 3);
18548 				error = 0;
18549 				ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
18550 				if (ret >= 0)
18551 					return (ret);
18552 				else if (error)
18553 					goto nomore;
18554 
18555 			}
18556 		}
18557 		goto again;
18558 	} else if (len) {
18559 		counter_u64_add(rack_unpaced_segments, 1);
18560 	}
18561 	/* Assure that when we leave, snd_nxt will point to the top */
18562 	if (SEQ_GT(tp->snd_max, tp->snd_nxt))
18563 		tp->snd_nxt = tp->snd_max;
18564 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
18565 #ifdef TCP_ACCOUNTING
18566 	crtsc = get_cyclecount() - ts_val;
18567 	if (tot_len_this_send) {
18568 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18569 			tp->tcp_cnt_counters[SND_OUT_DATA]++;
18570 		}
18571 		counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
18572 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18573 			tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
18574 		}
18575 		counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc);
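		/*
		 * Editor's note: the expression below is integer ceiling
		 * division, counting how many MSS-sized segments the bytes
		 * sent amount to; e.g. with segsiz = 1448, 2896 bytes count
		 * as 2 segments and 2897 bytes as 3.
		 */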
18576 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18577 			tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
18578 		}
18579 		counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz));
18580 	} else {
18581 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18582 			tp->tcp_cnt_counters[SND_OUT_ACK]++;
18583 		}
18584 		counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1);
18585 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18586 			tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
18587 		}
18588 		counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc);
18589 	}
18590 	sched_unpin();
18591 #endif
18592 	if (error == ENOBUFS)
18593 		error = 0;
18594 	return (error);
18595 }
18596 
18597 static void
18598 rack_update_seg(struct tcp_rack *rack)
18599 {
18600 	uint32_t orig_val;
18601 
18602 	orig_val = rack->r_ctl.rc_pace_max_segs;
18603 	rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
18604 	if (orig_val != rack->r_ctl.rc_pace_max_segs)
18605 		rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL);
18606 }
18607 
18608 static void
18609 rack_mtu_change(struct tcpcb *tp)
18610 {
18611 	/*
18612 	 * The MSS may have changed
18613 	 */
18614 	struct tcp_rack *rack;
18615 
18616 	rack = (struct tcp_rack *)tp->t_fb_ptr;
18617 	if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
18618 		/*
18619 		 * The MTU has changed; we need to resend everything
18620 		 * since all we have sent is lost. We first fix
18621 		 * up the mtu though.
18622 		 */
18623 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
18624 		/* We treat this like a full retransmit timeout without the cwnd adjustment */
18625 		rack_remxt_tmr(tp);
18626 		rack->r_fast_output = 0;
18627 		rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
18628 						rack->r_ctl.rc_sacked);
18629 		rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
18630 		rack->r_must_retran = 1;
18631 
18632 	}
18633 	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
18634 	/* We don't use snd_nxt to retransmit */
18635 	tp->snd_nxt = tp->snd_max;
18636 }
18637 
18638 static int
18639 rack_set_profile(struct tcp_rack *rack, int prof)
18640 {
18641 	int err = EINVAL;
18642 	if (prof == 1) {
18643 		/* pace_always=1 */
18644 		if (rack->rc_always_pace == 0) {
18645 			if (tcp_can_enable_pacing() == 0)
18646 				return (EBUSY);
18647 		}
18648 		rack->rc_always_pace = 1;
18649 		if (rack->use_fixed_rate || rack->gp_ready)
18650 			rack_set_cc_pacing(rack);
18651 		rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18652 		rack->rack_attempt_hdwr_pace = 0;
18653 		/* cmpack=1 */
18654 		if (rack_use_cmp_acks)
18655 			rack->r_use_cmp_ack = 1;
18656 		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18657 		    rack->r_use_cmp_ack)
18658 			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18659 		/* scwnd=1 */
18660 		rack->rack_enable_scwnd = 1;
18661 		/* dynamic=100 */
18662 		rack->rc_gp_dyn_mul = 1;
18663 		/* gp_inc_ca */
18664 		rack->r_ctl.rack_per_of_gp_ca = 100;
18665 		/* rrr_conf=3 */
18666 		rack->r_rr_config = 3;
18667 		/* npush=2 */
18668 		rack->r_ctl.rc_no_push_at_mrtt = 2;
18669 		/* fillcw=1 */
18670 		rack->rc_pace_to_cwnd = 1;
18671 		rack->rc_pace_fill_if_rttin_range = 0;
18672 		rack->rtt_limit_mul = 0;
18673 		/* noprr=1 */
18674 		rack->rack_no_prr = 1;
18675 		/* lscwnd=1 */
18676 		rack->r_limit_scw = 1;
18677 		/* gp_inc_rec */
18678 		rack->r_ctl.rack_per_of_gp_rec = 90;
18679 		err = 0;
18680 
18681 	} else if (prof == 3) {
18682 		/* Same as profile one except fill_cw becomes 2 (less aggressive set) */
18683 		/* pace_always=1 */
18684 		if (rack->rc_always_pace == 0) {
18685 			if (tcp_can_enable_pacing() == 0)
18686 				return (EBUSY);
18687 		}
18688 		rack->rc_always_pace = 1;
18689 		if (rack->use_fixed_rate || rack->gp_ready)
18690 			rack_set_cc_pacing(rack);
18691 		rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18692 		rack->rack_attempt_hdwr_pace = 0;
18693 		/* cmpack=1 */
18694 		if (rack_use_cmp_acks)
18695 			rack->r_use_cmp_ack = 1;
18696 		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18697 		    rack->r_use_cmp_ack)
18698 			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18699 		/* scwnd=1 */
18700 		rack->rack_enable_scwnd = 1;
18701 		/* dynamic=100 */
18702 		rack->rc_gp_dyn_mul = 1;
18703 		/* gp_inc_ca */
18704 		rack->r_ctl.rack_per_of_gp_ca = 100;
18705 		/* rrr_conf=3 */
18706 		rack->r_rr_config = 3;
18707 		/* npush=2 */
18708 		rack->r_ctl.rc_no_push_at_mrtt = 2;
18709 		/* fillcw=2 */
18710 		rack->rc_pace_to_cwnd = 1;
18711 		rack->r_fill_less_agg = 1;
18712 		rack->rc_pace_fill_if_rttin_range = 0;
18713 		rack->rtt_limit_mul = 0;
18714 		/* noprr=1 */
18715 		rack->rack_no_prr = 1;
18716 		/* lscwnd=1 */
18717 		rack->r_limit_scw = 1;
18718 		/* gp_inc_rec */
18719 		rack->r_ctl.rack_per_of_gp_rec = 90;
18720 		err = 0;
18721 
18723 	} else if (prof == 2) {
18724 		/* cmpack=1 */
18725 		if (rack->rc_always_pace == 0) {
18726 			if (tcp_can_enable_pacing() == 0)
18727 				return (EBUSY);
18728 		}
18729 		rack->rc_always_pace = 1;
18730 		if (rack->use_fixed_rate || rack->gp_ready)
18731 			rack_set_cc_pacing(rack);
18732 		rack->r_use_cmp_ack = 1;
18733 		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18734 			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18735 		/* pace_always=1 */
18736 		rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18737 		/* scwnd=1 */
18738 		rack->rack_enable_scwnd = 1;
18739 		/* dynamic=100 */
18740 		rack->rc_gp_dyn_mul = 1;
18741 		rack->r_ctl.rack_per_of_gp_ca = 100;
18742 		/* rrr_conf=3 */
18743 		rack->r_rr_config = 3;
18744 		/* npush=2 */
18745 		rack->r_ctl.rc_no_push_at_mrtt = 2;
18746 		/* fillcw=1 */
18747 		rack->rc_pace_to_cwnd = 1;
18748 		rack->rc_pace_fill_if_rttin_range = 0;
18749 		rack->rtt_limit_mul = 0;
18750 		/* noprr=1 */
18751 		rack->rack_no_prr = 1;
18752 		/* lscwnd=0 */
18753 		rack->r_limit_scw = 0;
18754 		err = 0;
18755 	} else if (prof == 0) {
18756 		/* This changes things back to the default settings */
18757 		err = 0;
18758 		if (rack->rc_always_pace) {
18759 			tcp_decrement_paced_conn();
18760 			rack_undo_cc_pacing(rack);
18761 			rack->rc_always_pace = 0;
18762 		}
18763 		if (rack_pace_every_seg && tcp_can_enable_pacing()) {
18764 			rack->rc_always_pace = 1;
18765 			if (rack->use_fixed_rate || rack->gp_ready)
18766 				rack_set_cc_pacing(rack);
18767 		} else
18768 			rack->rc_always_pace = 0;
18769 		if (rack_use_cmp_acks)
18770 			rack->r_use_cmp_ack = 1;
18771 		else
18772 			rack->r_use_cmp_ack = 0;
18773 		if (rack_disable_prr)
18774 			rack->rack_no_prr = 1;
18775 		else
18776 			rack->rack_no_prr = 0;
18777 		if (rack_gp_no_rec_chg)
18778 			rack->rc_gp_no_rec_chg = 1;
18779 		else
18780 			rack->rc_gp_no_rec_chg = 0;
18781 		if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
18782 			rack->r_mbuf_queue = 1;
18783 			if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18784 				rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18785 			rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18786 		} else {
18787 			rack->r_mbuf_queue = 0;
18788 			rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
18789 		}
18790 		if (rack_enable_shared_cwnd)
18791 			rack->rack_enable_scwnd = 1;
18792 		else
18793 			rack->rack_enable_scwnd = 0;
18794 		if (rack_do_dyn_mul) {
18795 			/* When dynamic adjustment is on CA needs to start at 100% */
18796 			rack->rc_gp_dyn_mul = 1;
18797 			if (rack_do_dyn_mul >= 100)
18798 				rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
18799 		} else {
18800 			rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
18801 			rack->rc_gp_dyn_mul = 0;
18802 		}
18803 		rack->r_rr_config = 0;
18804 		rack->r_ctl.rc_no_push_at_mrtt = 0;
18805 		rack->rc_pace_to_cwnd = 0;
18806 		rack->rc_pace_fill_if_rttin_range = 0;
18807 		rack->rtt_limit_mul = 0;
18808 
18809 		if (rack_enable_hw_pacing)
18810 			rack->rack_hdw_pace_ena = 1;
18811 		else
18812 			rack->rack_hdw_pace_ena = 0;
18813 		if (rack_disable_prr)
18814 			rack->rack_no_prr = 1;
18815 		else
18816 			rack->rack_no_prr = 0;
18817 		if (rack_limits_scwnd)
18818 			rack->r_limit_scw = 1;
18819 		else
18820 			rack->r_limit_scw = 0;
18821 		err = 0;
18822 	}
18823 	return (err);
18824 }
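
/*
 * Example (editor's sketch, not part of the original source): a profile
 * is applied from userland with a plain setsockopt(2) on a socket that
 * is already attached to the rack stack:
 *
 *	int prof = 1;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE,
 *	    &prof, sizeof(prof)) == -1)
 *		warn("TCP_RACK_PROFILE");
 *
 * A return of EBUSY from profiles 1-3 means pacing could not be
 * enabled because tcp_can_enable_pacing() hit the global limit.
 */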
18825 
18826 static int
18827 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
18828 {
18829 	struct deferred_opt_list *dol;
18830 
18831 	dol = malloc(sizeof(struct deferred_opt_list),
18832 		     M_TCPFSB, M_NOWAIT|M_ZERO);
18833 	if (dol == NULL) {
18834 		/*
18835 		 * No space, yikes -- fail out.
18836 		 */
18837 		return (0);
18838 	}
18839 	dol->optname = sopt_name;
18840 	dol->optval = loptval;
18841 	TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
18842 	return (1);
18843 }
18844 
18845 static int
18846 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
18847 		    uint32_t optval, uint64_t loptval)
18848 {
18849 	struct epoch_tracker et;
18850 	struct sockopt sopt;
18851 	struct cc_newreno_opts opt;
18852 	uint64_t val;
18853 	int error = 0;
18854 	uint16_t ca, ss;
18855 
18856 	switch (sopt_name) {
18857 
18858 	case TCP_RACK_PACING_BETA:
18859 		RACK_OPTS_INC(tcp_rack_beta);
18860 		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18861 			/* This only works for newreno. */
18862 			error = EINVAL;
18863 			break;
18864 		}
18865 		if (rack->rc_pacing_cc_set) {
18866 			/*
18867 			 * Set them into the real CC module;
18868 			 * what's in the rack pcb are the old values
18869 			 * to be used on restoral.
18870 			 */
18871 			sopt.sopt_dir = SOPT_SET;
18872 			opt.name = CC_NEWRENO_BETA;
18873 			opt.val = optval;
18874 			if (CC_ALGO(tp)->ctl_output != NULL)
18875 				error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18876 			else {
18877 				error = ENOENT;
18878 				break;
18879 			}
18880 		} else {
18881 			/*
18882 			 * Not pacing yet so set it into our local
18883 			 * rack pcb storage.
18884 			 */
18885 			rack->r_ctl.rc_saved_beta.beta = optval;
18886 		}
18887 		break;
18888 	case TCP_RACK_TIMER_SLOP:
18889 		RACK_OPTS_INC(tcp_rack_timer_slop);
18890 		rack->r_ctl.timer_slop = optval;
18891 		if (rack->rc_tp->t_srtt) {
18892 			/*
18893 			 * If we have an SRTT lets update t_rxtcur
18894 			 * to have the new slop.
18895 			 */
18896 			RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
18897 					   rack_rto_min, rack_rto_max,
18898 					   rack->r_ctl.timer_slop);
18899 		}
18900 		break;
18901 	case TCP_RACK_PACING_BETA_ECN:
18902 		RACK_OPTS_INC(tcp_rack_beta_ecn);
18903 		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18904 			/* This only works for newreno. */
18905 			error = EINVAL;
18906 			break;
18907 		}
18908 		if (rack->rc_pacing_cc_set) {
18909 			/*
18910 			 * Set them into the real CC module;
18911 			 * what's in the rack pcb are the old values
18912 			 * to be used on restoral.
18913 			 */
18914 			sopt.sopt_dir = SOPT_SET;
18915 			opt.name = CC_NEWRENO_BETA_ECN;
18916 			opt.val = optval;
18917 			if (CC_ALGO(tp)->ctl_output != NULL)
18918 				error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18919 			else
18920 				error = ENOENT;
18921 		} else {
18922 			/*
18923 			 * Not pacing yet so set it into our local
18924 			 * rack pcb storage.
18925 			 */
18926 			rack->r_ctl.rc_saved_beta.beta_ecn = optval;
18927 			rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
18928 		}
18929 		break;
18930 	case TCP_DEFER_OPTIONS:
18931 		RACK_OPTS_INC(tcp_defer_opt);
18932 		if (optval) {
18933 			if (rack->gp_ready) {
18934 				/* Too late */
18935 				error = EINVAL;
18936 				break;
18937 			}
18938 			rack->defer_options = 1;
18939 		} else
18940 			rack->defer_options = 0;
18941 		break;
18942 	case TCP_RACK_MEASURE_CNT:
18943 		RACK_OPTS_INC(tcp_rack_measure_cnt);
18944 		if (optval && (optval <= 0xff)) {
18945 			rack->r_ctl.req_measurements = optval;
18946 		} else
18947 			error = EINVAL;
18948 		break;
18949 	case TCP_REC_ABC_VAL:
18950 		RACK_OPTS_INC(tcp_rec_abc_val);
18951 		if (optval > 0)
18952 			rack->r_use_labc_for_rec = 1;
18953 		else
18954 			rack->r_use_labc_for_rec = 0;
18955 		break;
18956 	case TCP_RACK_ABC_VAL:
18957 		RACK_OPTS_INC(tcp_rack_abc_val);
18958 		if ((optval > 0) && (optval < 255))
18959 			rack->rc_labc = optval;
18960 		else
18961 			error = EINVAL;
18962 		break;
18963 	case TCP_HDWR_UP_ONLY:
18964 		RACK_OPTS_INC(tcp_pacing_up_only);
18965 		if (optval)
18966 			rack->r_up_only = 1;
18967 		else
18968 			rack->r_up_only = 0;
18969 		break;
18970 	case TCP_PACING_RATE_CAP:
18971 		RACK_OPTS_INC(tcp_pacing_rate_cap);
18972 		rack->r_ctl.bw_rate_cap = loptval;
18973 		break;
18974 	case TCP_RACK_PROFILE:
18975 		RACK_OPTS_INC(tcp_profile);
18976 		error = rack_set_profile(rack, optval);
18977 		break;
18978 	case TCP_USE_CMP_ACKS:
18979 		RACK_OPTS_INC(tcp_use_cmp_acks);
18980 		if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
18981 			/* You can't turn it off once it's on! */
18982 			error = EINVAL;
18983 		} else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
18984 			rack->r_use_cmp_ack = 1;
18985 			rack->r_mbuf_queue = 1;
18986 			tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18987 		}
18988 		if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
18989 			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18990 		break;
18991 	case TCP_SHARED_CWND_TIME_LIMIT:
18992 		RACK_OPTS_INC(tcp_lscwnd);
18993 		if (optval)
18994 			rack->r_limit_scw = 1;
18995 		else
18996 			rack->r_limit_scw = 0;
18997 		break;
18998 	case TCP_RACK_PACE_TO_FILL:
18999 		RACK_OPTS_INC(tcp_fillcw);
19000 		if (optval == 0)
19001 			rack->rc_pace_to_cwnd = 0;
19002 		else {
19003 			rack->rc_pace_to_cwnd = 1;
19004 			if (optval > 1)
19005 				rack->r_fill_less_agg = 1;
19006 		}
19007 		if ((optval >= rack_gp_rtt_maxmul) &&
19008 		    rack_gp_rtt_maxmul &&
19009 		    (optval < 0xf)) {
19010 			rack->rc_pace_fill_if_rttin_range = 1;
19011 			rack->rtt_limit_mul = optval;
19012 		} else {
19013 			rack->rc_pace_fill_if_rttin_range = 0;
19014 			rack->rtt_limit_mul = 0;
19015 		}
19016 		break;
19017 	case TCP_RACK_NO_PUSH_AT_MAX:
19018 		RACK_OPTS_INC(tcp_npush);
19019 		if (optval == 0)
19020 			rack->r_ctl.rc_no_push_at_mrtt = 0;
19021 		else if (optval < 0xff)
19022 			rack->r_ctl.rc_no_push_at_mrtt = optval;
19023 		else
19024 			error = EINVAL;
19025 		break;
19026 	case TCP_SHARED_CWND_ENABLE:
19027 		RACK_OPTS_INC(tcp_rack_scwnd);
19028 		if (optval == 0)
19029 			rack->rack_enable_scwnd = 0;
19030 		else
19031 			rack->rack_enable_scwnd = 1;
19032 		break;
19033 	case TCP_RACK_MBUF_QUEUE:
19034 		/* Now do we use the LRO mbuf-queue feature */
19035 		RACK_OPTS_INC(tcp_rack_mbufq);
19036 		if (optval || rack->r_use_cmp_ack)
19037 			rack->r_mbuf_queue = 1;
19038 		else
19039 			rack->r_mbuf_queue = 0;
19040 		if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19041 			tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19042 		else
19043 			tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19044 		break;
19045 	case TCP_RACK_NONRXT_CFG_RATE:
19046 		RACK_OPTS_INC(tcp_rack_cfg_rate);
19047 		if (optval == 0)
19048 			rack->rack_rec_nonrxt_use_cr = 0;
19049 		else
19050 			rack->rack_rec_nonrxt_use_cr = 1;
19051 		break;
19052 	case TCP_NO_PRR:
19053 		RACK_OPTS_INC(tcp_rack_noprr);
19054 		if (optval == 0)
19055 			rack->rack_no_prr = 0;
19056 		else if (optval == 1)
19057 			rack->rack_no_prr = 1;
19058 		else if (optval == 2)
19059 			rack->no_prr_addback = 1;
19060 		else
19061 			error = EINVAL;
19062 		break;
19063 	case TCP_TIMELY_DYN_ADJ:
19064 		RACK_OPTS_INC(tcp_timely_dyn);
19065 		if (optval == 0)
19066 			rack->rc_gp_dyn_mul = 0;
19067 		else {
19068 			rack->rc_gp_dyn_mul = 1;
19069 			if (optval >= 100) {
19070 				/*
19071 				 * If the user sets something 100 or more
19072 				 * it's the gp_ca value.
19073 				 */
19074 				rack->r_ctl.rack_per_of_gp_ca  = optval;
19075 			}
19076 		}
19077 		break;
19078 	case TCP_RACK_DO_DETECTION:
19079 		RACK_OPTS_INC(tcp_rack_do_detection);
19080 		if (optval == 0)
19081 			rack->do_detection = 0;
19082 		else
19083 			rack->do_detection = 1;
19084 		break;
19085 	case TCP_RACK_TLP_USE:
19086 		if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
19087 			error = EINVAL;
19088 			break;
19089 		}
19090 		RACK_OPTS_INC(tcp_tlp_use);
19091 		rack->rack_tlp_threshold_use = optval;
19092 		break;
19093 	case TCP_RACK_TLP_REDUCE:
19094 		/* RACK TLP cwnd reduction (bool) */
19095 		RACK_OPTS_INC(tcp_rack_tlp_reduce);
19096 		rack->r_ctl.rc_tlp_cwnd_reduce = optval;
19097 		break;
19098 	/*  Pacing related ones */
19099 	case TCP_RACK_PACE_ALWAYS:
19100 		/*
19101 		 * Zero is the old rack method; 1 is the new
19102 		 * method using a pacing rate.
19103 		 */
19104 		RACK_OPTS_INC(tcp_rack_pace_always);
19105 		if (optval > 0) {
19106 			if (rack->rc_always_pace) {
19107 				error = EALREADY;
19108 				break;
19109 			} else if (tcp_can_enable_pacing()) {
19110 				rack->rc_always_pace = 1;
19111 				if (rack->use_fixed_rate || rack->gp_ready)
19112 					rack_set_cc_pacing(rack);
19113 			}
19114 			else {
19115 				error = ENOSPC;
19116 				break;
19117 			}
19118 		} else {
19119 			if (rack->rc_always_pace) {
19120 				tcp_decrement_paced_conn();
19121 				rack->rc_always_pace = 0;
19122 				rack_undo_cc_pacing(rack);
19123 			}
19124 		}
19125 		if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19126 			tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19127 		else
19128 			tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19129 		/* A rate may be set (irate or other); if so, set the seg size */
19130 		rack_update_seg(rack);
19131 		break;
19132 	case TCP_BBR_RACK_INIT_RATE:
19133 		RACK_OPTS_INC(tcp_initial_rate);
19134 		val = optval;
19135 		/* Change from kbits per second to bytes per second */
19136 		val *= 1000;
19137 		val /= 8;
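		/*
		 * Editor's note: e.g. optval = 800 (kbits/sec) becomes
		 * 800 * 1000 / 8 = 100000 bytes/sec.
		 */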
19138 		rack->r_ctl.init_rate = val;
19139 		if (rack->rc_init_win != rack_default_init_window) {
19140 			uint32_t win, snt;
19141 
19142 			/*
19143 			 * Options don't always get applied
19144 			 * in the order you think. So in order
19145 			 * to ensure we update the cwnd we need
19146 			 * to check and see if we are still
19147 			 * where we should raise the cwnd.
19148 			 */
19149 			win = rc_init_window(rack);
19150 			if (SEQ_GT(tp->snd_max, tp->iss))
19151 				snt = tp->snd_max - tp->iss;
19152 			else
19153 				snt = 0;
19154 			if ((snt < win) &&
19155 			    (tp->snd_cwnd < win))
19156 				tp->snd_cwnd = win;
19157 		}
19158 		if (rack->rc_always_pace)
19159 			rack_update_seg(rack);
19160 		break;
19161 	case TCP_BBR_IWINTSO:
19162 		RACK_OPTS_INC(tcp_initial_win);
19163 		if (optval && (optval <= 0xff)) {
19164 			uint32_t win, snt;
19165 
19166 			rack->rc_init_win = optval;
19167 			win = rc_init_window(rack);
19168 			if (SEQ_GT(tp->snd_max, tp->iss))
19169 				snt = tp->snd_max - tp->iss;
19170 			else
19171 				snt = 0;
19172 			if ((snt < win) &&
19173 			    (tp->t_srtt |
19174 #ifdef NETFLIX_PEAKRATE
19175 			     tp->t_maxpeakrate |
19176 #endif
19177 			     rack->r_ctl.init_rate)) {
19178 				/*
19179 				 * We are not past the initial window
19180 				 * and we have some bases for pacing,
19181 				 * so we need to possibly adjust up
19182 				 * the cwnd. Note even if we don't set
19183 				 * the cwnd, it's still ok to raise the rc_init_win
19184 				 * which can be used coming out of idle when we
19185 				 * would have a rate.
19186 				 */
19187 				if (tp->snd_cwnd < win)
19188 					tp->snd_cwnd = win;
19189 			}
19190 			if (rack->rc_always_pace)
19191 				rack_update_seg(rack);
19192 		} else
19193 			error = EINVAL;
19194 		break;
19195 	case TCP_RACK_FORCE_MSEG:
19196 		RACK_OPTS_INC(tcp_rack_force_max_seg);
19197 		if (optval)
19198 			rack->rc_force_max_seg = 1;
19199 		else
19200 			rack->rc_force_max_seg = 0;
19201 		break;
19202 	case TCP_RACK_PACE_MAX_SEG:
19203 		/* Max segments size in a pace in bytes */
19204 		RACK_OPTS_INC(tcp_rack_max_seg);
19205 		rack->rc_user_set_max_segs = optval;
19206 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
19207 		break;
19208 	case TCP_RACK_PACE_RATE_REC:
19209 		/* Set the fixed pacing rate in Bytes per second for recovery */
19210 		RACK_OPTS_INC(tcp_rack_pace_rate_rec);
19211 		rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19212 		if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19213 			rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19214 		if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19215 			rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19216 		rack->use_fixed_rate = 1;
19217 		if (rack->rc_always_pace)
19218 			rack_set_cc_pacing(rack);
19219 		rack_log_pacing_delay_calc(rack,
19220 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
19221 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
19222 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19223 					   __LINE__, NULL);
19224 		break;
19225 
19226 	case TCP_RACK_PACE_RATE_SS:
19227 		/* Set the fixed pacing rate in Bytes per second for slow start */
19228 		RACK_OPTS_INC(tcp_rack_pace_rate_ss);
19229 		rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19230 		if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19231 			rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19232 		if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19233 			rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19234 		rack->use_fixed_rate = 1;
19235 		if (rack->rc_always_pace)
19236 			rack_set_cc_pacing(rack);
19237 		rack_log_pacing_delay_calc(rack,
19238 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
19239 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
19240 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19241 					   __LINE__, NULL);
19242 		break;
19243 
19244 	case TCP_RACK_PACE_RATE_CA:
19245 		/* Set the fixed pacing rate in Bytes per second ca */
19246 		RACK_OPTS_INC(tcp_rack_pace_rate_ca);
19247 		rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19248 		if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19249 			rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19250 		if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19251 			rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19252 		rack->use_fixed_rate = 1;
19253 		if (rack->rc_always_pace)
19254 			rack_set_cc_pacing(rack);
19255 		rack_log_pacing_delay_calc(rack,
19256 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
19257 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
19258 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19259 					   __LINE__, NULL);
19260 		break;
19261 	case TCP_RACK_GP_INCREASE_REC:
19262 		RACK_OPTS_INC(tcp_gp_inc_rec);
19263 		rack->r_ctl.rack_per_of_gp_rec = optval;
19264 		rack_log_pacing_delay_calc(rack,
19265 					   rack->r_ctl.rack_per_of_gp_ss,
19266 					   rack->r_ctl.rack_per_of_gp_ca,
19267 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19268 					   __LINE__, NULL);
19269 		break;
19270 	case TCP_RACK_GP_INCREASE_CA:
19271 		RACK_OPTS_INC(tcp_gp_inc_ca);
19272 		ca = optval;
19273 		if (ca < 100) {
19274 			/*
19275 			 * We don't allow any reduction
19276 			 * over the GP b/w.
19277 			 */
19278 			error = EINVAL;
19279 			break;
19280 		}
19281 		rack->r_ctl.rack_per_of_gp_ca = ca;
19282 		rack_log_pacing_delay_calc(rack,
19283 					   rack->r_ctl.rack_per_of_gp_ss,
19284 					   rack->r_ctl.rack_per_of_gp_ca,
19285 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19286 					   __LINE__, NULL);
19287 		break;
19288 	case TCP_RACK_GP_INCREASE_SS:
19289 		RACK_OPTS_INC(tcp_gp_inc_ss);
19290 		ss = optval;
19291 		if (ss < 100) {
19292 			/*
19293 			 * We don't allow any reduction
19294 			 * over the GP b/w.
19295 			 */
19296 			error = EINVAL;
19297 			break;
19298 		}
19299 		rack->r_ctl.rack_per_of_gp_ss = ss;
19300 		rack_log_pacing_delay_calc(rack,
19301 					   rack->r_ctl.rack_per_of_gp_ss,
19302 					   rack->r_ctl.rack_per_of_gp_ca,
19303 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19304 					   __LINE__, NULL);
19305 		break;
19306 	case TCP_RACK_RR_CONF:
19307 		RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
19308 		if (optval && optval <= 3)
19309 			rack->r_rr_config = optval;
19310 		else
19311 			rack->r_rr_config = 0;
19312 		break;
19313 	case TCP_HDWR_RATE_CAP:
19314 		RACK_OPTS_INC(tcp_hdwr_rate_cap);
19315 		if (optval) {
19316 			if (rack->r_rack_hw_rate_caps == 0)
19317 				rack->r_rack_hw_rate_caps = 1;
19318 			else
19319 				error = EALREADY;
19320 		} else {
19321 			rack->r_rack_hw_rate_caps = 0;
19322 		}
19323 		break;
19324 	case TCP_BBR_HDWR_PACE:
19325 		RACK_OPTS_INC(tcp_hdwr_pacing);
19326 		if (optval) {
19327 			if (rack->rack_hdrw_pacing == 0) {
19328 				rack->rack_hdw_pace_ena = 1;
19329 				rack->rack_attempt_hdwr_pace = 0;
19330 			} else
19331 				error = EALREADY;
19332 		} else {
19333 			rack->rack_hdw_pace_ena = 0;
19334 #ifdef RATELIMIT
19335 			if (rack->r_ctl.crte != NULL) {
19336 				rack->rack_hdrw_pacing = 0;
19337 				rack->rack_attempt_hdwr_pace = 0;
19338 				tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
19339 				rack->r_ctl.crte = NULL;
19340 			}
19341 #endif
19342 		}
19343 		break;
19344 	/*  End Pacing related ones */
19345 	case TCP_RACK_PRR_SENDALOT:
19346 		/* Allow PRR to send more than one seg */
19347 		RACK_OPTS_INC(tcp_rack_prr_sendalot);
19348 		rack->r_ctl.rc_prr_sendalot = optval;
19349 		break;
19350 	case TCP_RACK_MIN_TO:
19351 		/* Minimum time between rack t-o's in ms */
19352 		RACK_OPTS_INC(tcp_rack_min_to);
19353 		rack->r_ctl.rc_min_to = optval;
19354 		break;
19355 	case TCP_RACK_EARLY_SEG:
19356 		/* If early recovery max segments */
19357 		RACK_OPTS_INC(tcp_rack_early_seg);
19358 		rack->r_ctl.rc_early_recovery_segs = optval;
19359 		break;
19360 	case TCP_RACK_REORD_THRESH:
19361 		/* RACK reorder threshold (shift amount) */
19362 		RACK_OPTS_INC(tcp_rack_reord_thresh);
19363 		if ((optval > 0) && (optval < 31))
19364 			rack->r_ctl.rc_reorder_shift = optval;
19365 		else
19366 			error = EINVAL;
19367 		break;
19368 	case TCP_RACK_REORD_FADE:
19369 		/* Does reordering fade after ms time */
19370 		RACK_OPTS_INC(tcp_rack_reord_fade);
19371 		rack->r_ctl.rc_reorder_fade = optval;
19372 		break;
19373 	case TCP_RACK_TLP_THRESH:
19374 		/* RACK TLP threshold i.e. srtt+(srtt/N) */
19375 		RACK_OPTS_INC(tcp_rack_tlp_thresh);
19376 		if (optval)
19377 			rack->r_ctl.rc_tlp_threshold = optval;
19378 		else
19379 			error = EINVAL;
19380 		break;
19381 	case TCP_BBR_USE_RACK_RR:
19382 		RACK_OPTS_INC(tcp_rack_rr);
19383 		if (optval)
19384 			rack->use_rack_rr = 1;
19385 		else
19386 			rack->use_rack_rr = 0;
19387 		break;
19388 	case TCP_FAST_RSM_HACK:
19389 		RACK_OPTS_INC(tcp_rack_fastrsm_hack);
19390 		if (optval)
19391 			rack->fast_rsm_hack = 1;
19392 		else
19393 			rack->fast_rsm_hack = 0;
19394 		break;
19395 	case TCP_RACK_PKT_DELAY:
19396 		/* RACK added ms i.e. rack-rtt + reord + N */
19397 		RACK_OPTS_INC(tcp_rack_pkt_delay);
19398 		rack->r_ctl.rc_pkt_delay = optval;
19399 		break;
19400 	case TCP_DELACK:
19401 		RACK_OPTS_INC(tcp_rack_delayed_ack);
19402 		if (optval == 0)
19403 			tp->t_delayed_ack = 0;
19404 		else
19405 			tp->t_delayed_ack = 1;
19406 		if (tp->t_flags & TF_DELACK) {
19407 			tp->t_flags &= ~TF_DELACK;
19408 			tp->t_flags |= TF_ACKNOW;
19409 			NET_EPOCH_ENTER(et);
19410 			rack_output(tp);
19411 			NET_EPOCH_EXIT(et);
19412 		}
19413 		break;
19414 
19415 	case TCP_BBR_RACK_RTT_USE:
19416 		RACK_OPTS_INC(tcp_rack_rtt_use);
19417 		if ((optval != USE_RTT_HIGH) &&
19418 		    (optval != USE_RTT_LOW) &&
19419 		    (optval != USE_RTT_AVG))
19420 			error = EINVAL;
19421 		else
19422 			rack->r_ctl.rc_rate_sample_method = optval;
19423 		break;
19424 	case TCP_DATA_AFTER_CLOSE:
19425 		RACK_OPTS_INC(tcp_data_after_close);
19426 		if (optval)
19427 			rack->rc_allow_data_af_clo = 1;
19428 		else
19429 			rack->rc_allow_data_af_clo = 0;
19430 		break;
19431 	default:
19432 		break;
19433 	}
19434 #ifdef NETFLIX_STATS
19435 	tcp_log_socket_option(tp, sopt_name, optval, error);
19436 #endif
19437 	return (error);
19438 }
19439 
19441 static void
19442 rack_apply_deferred_options(struct tcp_rack *rack)
19443 {
19444 	struct deferred_opt_list *dol, *sdol;
19445 	uint32_t s_optval;
19446 
19447 	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
19448 		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
19449 		/* The disadvantage of deferral is that you lose the error return */
19450 		s_optval = (uint32_t)dol->optval;
19451 		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
19452 		free(dol, M_TCPDO);
19453 	}
19454 }
19455 
19456 static int
19457 rack_pru_options(struct tcpcb *tp, int flags)
19458 {
19459 	if (flags & PRUS_OOB)
19460 		return (EOPNOTSUPP);
19461 	return (0);
19462 }
19463 
19464 static struct tcp_function_block __tcp_rack = {
19465 	.tfb_tcp_block_name = __XSTRING(STACKNAME),
19466 	.tfb_tcp_output = rack_output,
19467 	.tfb_do_queued_segments = ctf_do_queued_segments,
19468 	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
19469 	.tfb_tcp_do_segment = rack_do_segment,
19470 	.tfb_tcp_ctloutput = rack_ctloutput,
19471 	.tfb_tcp_fb_init = rack_init,
19472 	.tfb_tcp_fb_fini = rack_fini,
19473 	.tfb_tcp_timer_stop_all = rack_stopall,
19474 	.tfb_tcp_timer_activate = rack_timer_activate,
19475 	.tfb_tcp_timer_active = rack_timer_active,
19476 	.tfb_tcp_timer_stop = rack_timer_stop,
19477 	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
19478 	.tfb_tcp_handoff_ok = rack_handoff_ok,
19479 	.tfb_tcp_mtu_chg = rack_mtu_change,
19480 	.tfb_pru_options = rack_pru_options,
19481 
19482 };
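
/*
 * Example (editor's sketch, not part of the original source): once the
 * module is loaded, userland can move a socket onto this function block
 * by name before connecting:
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *	    &tfs, sizeof(tfs)) == -1)
 *		warn("TCP_FUNCTION_BLK");
 */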
19483 
19484 /*
19485  * rack_ctloutput() must drop the inpcb lock before performing copyin on
19486  * socket option arguments.  When it re-acquires the lock after the copy, it
19487  * has to revalidate that the connection is still valid for the socket
19488  * option.
19489  */
19490 static int
19491 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
19492     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19493 {
19494 	uint64_t loptval;
19495 	int32_t error = 0, optval;
19496 
19497 	switch (sopt->sopt_name) {
19498 	case TCP_RACK_TLP_REDUCE:		/*  URL:tlp_reduce */
19499 	/*  Pacing related ones */
19500 	case TCP_RACK_PACE_ALWAYS:		/*  URL:pace_always */
19501 	case TCP_BBR_RACK_INIT_RATE:		/*  URL:irate */
19502 	case TCP_BBR_IWINTSO:			/*  URL:tso_iwin */
19503 	case TCP_RACK_PACE_MAX_SEG:		/*  URL:pace_max_seg */
19504 	case TCP_RACK_FORCE_MSEG:		/*  URL:force_max_seg */
19505 	case TCP_RACK_PACE_RATE_CA:		/*  URL:pr_ca */
19506 	case TCP_RACK_PACE_RATE_SS:		/*  URL:pr_ss*/
19507 	case TCP_RACK_PACE_RATE_REC:		/*  URL:pr_rec */
19508 	case TCP_RACK_GP_INCREASE_CA:		/*  URL:gp_inc_ca */
19509 	case TCP_RACK_GP_INCREASE_SS:		/*  URL:gp_inc_ss */
19510 	case TCP_RACK_GP_INCREASE_REC:		/*  URL:gp_inc_rec */
19511 	case TCP_RACK_RR_CONF:			/*  URL:rrr_conf */
19512 	case TCP_BBR_HDWR_PACE:			/*  URL:hdwrpace */
19513 	case TCP_HDWR_RATE_CAP:			/*  URL: hdwrcap boolean */
19514 	case TCP_PACING_RATE_CAP:		/*  URL:cap-- used by side-channel */
19515 	case TCP_HDWR_UP_ONLY:			/*  URL:uponly -- hardware pacing  boolean */
19516 	/* End pacing related */
19517 	case TCP_FAST_RSM_HACK:			/*  URL:frsm_hack */
19518 	case TCP_DELACK:			/*  URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
19519 	case TCP_RACK_PRR_SENDALOT:		/*  URL:prr_sendalot */
19520 	case TCP_RACK_MIN_TO:			/*  URL:min_to */
19521 	case TCP_RACK_EARLY_SEG:		/*  URL:early_seg */
19522 	case TCP_RACK_REORD_THRESH:		/*  URL:reord_thresh */
19523 	case TCP_RACK_REORD_FADE:		/*  URL:reord_fade */
19524 	case TCP_RACK_TLP_THRESH:		/*  URL:tlp_thresh */
19525 	case TCP_RACK_PKT_DELAY:		/*  URL:pkt_delay */
19526 	case TCP_RACK_TLP_USE:			/*  URL:tlp_use */
19527 	case TCP_BBR_RACK_RTT_USE:		/*  URL:rttuse */
19528 	case TCP_BBR_USE_RACK_RR:		/*  URL:rackrr */
19529 	case TCP_RACK_DO_DETECTION:		/*  URL:detect */
19530 	case TCP_NO_PRR:			/*  URL:noprr */
19531 	case TCP_TIMELY_DYN_ADJ:      		/*  URL:dynamic */
19532 	case TCP_DATA_AFTER_CLOSE:		/*  no URL */
19533 	case TCP_RACK_NONRXT_CFG_RATE:		/*  URL:nonrxtcr */
19534 	case TCP_SHARED_CWND_ENABLE:		/*  URL:scwnd */
19535 	case TCP_RACK_MBUF_QUEUE:		/*  URL:mqueue */
19536 	case TCP_RACK_NO_PUSH_AT_MAX:		/*  URL:npush */
19537 	case TCP_RACK_PACE_TO_FILL:		/*  URL:fillcw */
19538 	case TCP_SHARED_CWND_TIME_LIMIT:	/*  URL:lscwnd */
19539 	case TCP_RACK_PROFILE:			/*  URL:profile */
19540 	case TCP_USE_CMP_ACKS:			/*  URL:cmpack */
19541 	case TCP_RACK_ABC_VAL:			/*  URL:labc */
19542 	case TCP_REC_ABC_VAL:			/*  URL:reclabc */
19543 	case TCP_RACK_MEASURE_CNT:		/*  URL:measurecnt */
19544 	case TCP_DEFER_OPTIONS:			/*  URL:defer */
19545 	case TCP_RACK_PACING_BETA:		/*  URL:pacing_beta */
19546 	case TCP_RACK_PACING_BETA_ECN:		/*  URL:pacing_beta_ecn */
19547 	case TCP_RACK_TIMER_SLOP:		/*  URL:timer_slop */
19548 		break;
19549 	default:
19550 		/* Filter off all unknown options to the base stack */
19551 		return (tcp_default_ctloutput(so, sopt, inp, tp));
19552 		break;
19553 	}
19554 	INP_WUNLOCK(inp);
19555 	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
19556 		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
19557 		/*
19558 		 * We truncate it down to 32 bits for the socket-option trace; this
19559 		 * means rates > 34Gbps won't show right, but that's probably ok.
19560 		 */
19561 		optval = (uint32_t)loptval;
19562 	} else {
19563 		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
19564 		/* Save it in 64 bit form too */
19565 		loptval = optval;
19566 	}
19567 	if (error)
19568 		return (error);
19569 	INP_WLOCK(inp);
19570 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
19571 		INP_WUNLOCK(inp);
19572 		return (ECONNRESET);
19573 	}
19574 	if (tp->t_fb != &__tcp_rack) {
19575 		INP_WUNLOCK(inp);
19576 		return (ENOPROTOOPT);
19577 	}
19578 	if (rack->defer_options && (rack->gp_ready == 0) &&
19579 	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
19580 	    (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
19581 	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
19582 	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
19583 		/* Options are being deferred */
19584 		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
19585 			INP_WUNLOCK(inp);
19586 			return (0);
19587 		} else {
19588 			/* No memory to defer, fail */
19589 			INP_WUNLOCK(inp);
19590 			return (ENOMEM);
19591 		}
19592 	}
19593 	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
19594 	INP_WUNLOCK(inp);
19595 	return (error);
19596 }
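
/*
 * Example (editor's sketch, not part of the original source): of the
 * options above, only TCP_PACING_RATE_CAP is copied in as 64 bits, so
 * a caller passes a uint64_t for it and a plain int for all others:
 *
 *	uint64_t cap = 12500000;	(bytes per second)
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_PACING_RATE_CAP, &cap, sizeof(cap));
 */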
19597 
19598 static void
19599 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
19600 {
19601 
19602 	INP_WLOCK_ASSERT(tp->t_inpcb);
19603 	bzero(ti, sizeof(*ti));
19604 
19605 	ti->tcpi_state = tp->t_state;
19606 	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
19607 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
19608 	if (tp->t_flags & TF_SACK_PERMIT)
19609 		ti->tcpi_options |= TCPI_OPT_SACK;
19610 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
19611 		ti->tcpi_options |= TCPI_OPT_WSCALE;
19612 		ti->tcpi_snd_wscale = tp->snd_scale;
19613 		ti->tcpi_rcv_wscale = tp->rcv_scale;
19614 	}
19615 	if (tp->t_flags2 & TF2_ECN_PERMIT)
19616 		ti->tcpi_options |= TCPI_OPT_ECN;
19617 	if (tp->t_flags & TF_FASTOPEN)
19618 		ti->tcpi_options |= TCPI_OPT_TFO;
19619 	/* t_rcvtime is still kept in ticks */
19620 	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
19621 	/* Since we hold everything in precise useconds this is easy */
19622 	ti->tcpi_rtt = tp->t_srtt;
19623 	ti->tcpi_rttvar = tp->t_rttvar;
19624 	ti->tcpi_rto = tp->t_rxtcur;
19625 	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
19626 	ti->tcpi_snd_cwnd = tp->snd_cwnd;
19627 	/*
19628 	 * FreeBSD-specific extension fields for tcp_info.
19629 	 */
19630 	ti->tcpi_rcv_space = tp->rcv_wnd;
19631 	ti->tcpi_rcv_nxt = tp->rcv_nxt;
19632 	ti->tcpi_snd_wnd = tp->snd_wnd;
19633 	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
19634 	ti->tcpi_snd_nxt = tp->snd_nxt;
19635 	ti->tcpi_snd_mss = tp->t_maxseg;
19636 	ti->tcpi_rcv_mss = tp->t_maxseg;
19637 	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
19638 	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
19639 	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
19640 #ifdef NETFLIX_STATS
19641 	ti->tcpi_total_tlp = tp->t_sndtlppack;
19642 	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
19643 	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
19644 #endif
19645 #ifdef TCP_OFFLOAD
19646 	if (tp->t_flags & TF_TOE) {
19647 		ti->tcpi_options |= TCPI_OPT_TOE;
19648 		tcp_offload_tcp_info(tp, ti);
19649 	}
19650 #endif
19651 }
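
/*
 * Example (editor's sketch, not part of the original source): this
 * snapshot reaches userland through the standard TCP_INFO socket
 * option:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt %u us cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */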
19652 
19653 static int
19654 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
19655     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19656 {
19657 	int32_t error, optval;
19658 	uint64_t val, loptval;
19659 	struct	tcp_info ti;
19660 	/*
19661 	 * Because all our options are either boolean or an int, we can just
19662 	 * pull everything into optval and then unlock and copy. If we ever
19663 	 * add an option that is not an int, then this will have quite an
19664 	 * impact on this routine.
19665 	 */
19666 	error = 0;
19667 	switch (sopt->sopt_name) {
19668 	case TCP_INFO:
19669 		/* First get the info filled */
19670 		rack_fill_info(tp, &ti);
19671 		/* Fix up the rtt related fields if needed */
19672 		INP_WUNLOCK(inp);
19673 		error = sooptcopyout(sopt, &ti, sizeof ti);
19674 		return (error);
19675 	/*
19676 	 * Beta is the congestion control value for NewReno that influences how
19677 	 * much of a backoff happens when loss is detected. It is normally set
19678 	 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
19679 	 * when you exit recovery.
19680 	 */
19681 	case TCP_RACK_PACING_BETA:
19682 		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19683 			error = EINVAL;
19684 		else if (rack->rc_pacing_cc_set == 0)
19685 			optval = rack->r_ctl.rc_saved_beta.beta;
19686 		else {
19687 			/*
19688 			 * Reach out into the CC data and report back what
19689 			 * I have previously set. Yeah it looks hackish but
19690 			 * we don't want to report the saved values.
19691 			 */
19692 			if (tp->ccv->cc_data)
19693 				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
19694 			else
19695 				error = EINVAL;
19696 		}
19697 		break;
19698 		/*
19699 		 * Beta_ecn is the congestion control value for NewReno that influences how
19700 		 * much of a backoff happens when an ECN mark is detected. It is normally set
19701 		 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
19702 		 * you exit recovery. Note that classic ECN has a beta of 50, it is only
19703 		 * ABE ECN that uses this "less" value, but we do too with pacing :)
19704 		 */
19705 
19706 	case TCP_RACK_PACING_BETA_ECN:
19707 		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19708 			error = EINVAL;
19709 		else if (rack->rc_pacing_cc_set == 0)
19710 			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
19711 		else {
19712 			/*
19713 			 * Reach out into the CC data and report back what
19714 			 * I have previously set. Yeah it looks hackish but
19715 			 * we don't want to report the saved values.
19716 			 */
19717 			if (tp->ccv->cc_data)
19718 				optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
19719 			else
19720 				error = EINVAL;
19721 		}
19722 		break;
19723 	case TCP_FAST_RSM_HACK:
19724 		optval = rack->fast_rsm_hack;
19725 		break;
19726 	case TCP_DEFER_OPTIONS:
19727 		optval = rack->defer_options;
19728 		break;
19729 	case TCP_RACK_MEASURE_CNT:
19730 		optval = rack->r_ctl.req_measurements;
19731 		break;
19732 	case TCP_REC_ABC_VAL:
19733 		optval = rack->r_use_labc_for_rec;
19734 		break;
19735 	case TCP_RACK_ABC_VAL:
19736 		optval = rack->rc_labc;
19737 		break;
19738 	case TCP_HDWR_UP_ONLY:
19739 		optval = rack->r_up_only;
19740 		break;
19741 	case TCP_PACING_RATE_CAP:
19742 		loptval = rack->r_ctl.bw_rate_cap;
19743 		break;
19744 	case TCP_RACK_PROFILE:
19745 		/* You cannot retrieve a profile, it's write only */
19746 		error = EINVAL;
19747 		break;
19748 	case TCP_USE_CMP_ACKS:
19749 		optval = rack->r_use_cmp_ack;
19750 		break;
19751 	case TCP_RACK_PACE_TO_FILL:
19752 		optval = rack->rc_pace_to_cwnd;
19753 		if (optval && rack->r_fill_less_agg)
19754 			optval++;
19755 		break;
19756 	case TCP_RACK_NO_PUSH_AT_MAX:
19757 		optval = rack->r_ctl.rc_no_push_at_mrtt;
19758 		break;
19759 	case TCP_SHARED_CWND_ENABLE:
19760 		optval = rack->rack_enable_scwnd;
19761 		break;
19762 	case TCP_RACK_NONRXT_CFG_RATE:
19763 		optval = rack->rack_rec_nonrxt_use_cr;
19764 		break;
19765 	case TCP_NO_PRR:
19766 		if (rack->rack_no_prr == 1)
19767 			optval = 1;
19768 		else if (rack->no_prr_addback == 1)
19769 			optval = 2;
19770 		else
19771 			optval = 0;
19772 		break;
19773 	case TCP_RACK_DO_DETECTION:
19774 		optval = rack->do_detection;
19775 		break;
19776 	case TCP_RACK_MBUF_QUEUE:
19777 		/* Now do we use the LRO mbuf-queue feature */
19778 		optval = rack->r_mbuf_queue;
19779 		break;
19780 	case TCP_TIMELY_DYN_ADJ:
19781 		optval = rack->rc_gp_dyn_mul;
19782 		break;
19783 	case TCP_BBR_IWINTSO:
19784 		optval = rack->rc_init_win;
19785 		break;
19786 	case TCP_RACK_TLP_REDUCE:
19787 		/* RACK TLP cwnd reduction (bool) */
19788 		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
19789 		break;
19790 	case TCP_BBR_RACK_INIT_RATE:
19791 		val = rack->r_ctl.init_rate;
19792 		/* convert to kbits per sec */
19793 		val *= 8;
19794 		val /= 1000;
19795 		optval = (uint32_t)val;
19796 		break;
19797 	case TCP_RACK_FORCE_MSEG:
19798 		optval = rack->rc_force_max_seg;
19799 		break;
19800 	case TCP_RACK_PACE_MAX_SEG:
19801 		/* Max segments in a pace */
19802 		optval = rack->rc_user_set_max_segs;
19803 		break;
19804 	case TCP_RACK_PACE_ALWAYS:
19805 		/* Use the always pace method */
19806 		optval = rack->rc_always_pace;
19807 		break;
19808 	case TCP_RACK_PRR_SENDALOT:
19809 		/* Allow PRR to send more than one seg */
19810 		optval = rack->r_ctl.rc_prr_sendalot;
19811 		break;
19812 	case TCP_RACK_MIN_TO:
19813 		/* Minimum time between rack t-o's in ms */
19814 		optval = rack->r_ctl.rc_min_to;
19815 		break;
19816 	case TCP_RACK_EARLY_SEG:
19817 		/* If early recovery max segments */
19818 		optval = rack->r_ctl.rc_early_recovery_segs;
19819 		break;
19820 	case TCP_RACK_REORD_THRESH:
19821 		/* RACK reorder threshold (shift amount) */
19822 		optval = rack->r_ctl.rc_reorder_shift;
19823 		break;
19824 	case TCP_RACK_REORD_FADE:
19825 		/* Does reordering fade after ms time */
19826 		optval = rack->r_ctl.rc_reorder_fade;
19827 		break;
19828 	case TCP_BBR_USE_RACK_RR:
19829 		/* Do we use the rack cheat for rxt */
19830 		optval = rack->use_rack_rr;
19831 		break;
19832 	case TCP_RACK_RR_CONF:
19833 		optval = rack->r_rr_config;
19834 		break;
19835 	case TCP_HDWR_RATE_CAP:
19836 		optval = rack->r_rack_hw_rate_caps;
19837 		break;
19838 	case TCP_BBR_HDWR_PACE:
19839 		optval = rack->rack_hdw_pace_ena;
19840 		break;
19841 	case TCP_RACK_TLP_THRESH:
19842 		/* RACK TLP threshold i.e. srtt+(srtt/N) */
19843 		optval = rack->r_ctl.rc_tlp_threshold;
19844 		break;
19845 	case TCP_RACK_PKT_DELAY:
19846 		/* RACK added ms i.e. rack-rtt + reord + N */
19847 		optval = rack->r_ctl.rc_pkt_delay;
19848 		break;
19849 	case TCP_RACK_TLP_USE:
19850 		optval = rack->rack_tlp_threshold_use;
19851 		break;
19852 	case TCP_RACK_PACE_RATE_CA:
19853 		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
19854 		break;
19855 	case TCP_RACK_PACE_RATE_SS:
19856 		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
19857 		break;
19858 	case TCP_RACK_PACE_RATE_REC:
19859 		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
19860 		break;
19861 	case TCP_RACK_GP_INCREASE_SS:
19862 		optval = rack->r_ctl.rack_per_of_gp_ss;
19863 		break;
19864 	case TCP_RACK_GP_INCREASE_CA:
19865 		optval = rack->r_ctl.rack_per_of_gp_ca;
19866 		break;
19867 	case TCP_BBR_RACK_RTT_USE:
19868 		optval = rack->r_ctl.rc_rate_sample_method;
19869 		break;
19870 	case TCP_DELACK:
19871 		optval = tp->t_delayed_ack;
19872 		break;
19873 	case TCP_DATA_AFTER_CLOSE:
19874 		optval = rack->rc_allow_data_af_clo;
19875 		break;
19876 	case TCP_SHARED_CWND_TIME_LIMIT:
19877 		optval = rack->r_limit_scw;
19878 		break;
19879 	case TCP_RACK_TIMER_SLOP:
19880 		optval = rack->r_ctl.timer_slop;
19881 		break;
19882 	default:
19883 		return (tcp_default_ctloutput(so, sopt, inp, tp));
19884 		break;
19885 	}
19886 	INP_WUNLOCK(inp);
19887 	if (error == 0) {
19888 		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
19889 			error = sooptcopyout(sopt, &loptval, sizeof loptval);
19890 		else
19891 			error = sooptcopyout(sopt, &optval, sizeof optval);
19892 	}
19893 	return (error);
19894 }
19895 
19896 static int
19897 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
19898 {
19899 	int32_t error = EINVAL;
19900 	struct tcp_rack *rack;
19901 
19902 	rack = (struct tcp_rack *)tp->t_fb_ptr;
19903 	if (rack == NULL) {
19904 		/* Huh? */
19905 		goto out;
19906 	}
19907 	if (sopt->sopt_dir == SOPT_SET) {
19908 		return (rack_set_sockopt(so, sopt, inp, tp, rack));
19909 	} else if (sopt->sopt_dir == SOPT_GET) {
19910 		return (rack_get_sockopt(so, sopt, inp, tp, rack));
19911 	}
19912 out:
19913 	INP_WUNLOCK(inp);
19914 	return (error);
19915 }
19916 
19917 static const char *rack_stack_names[] = {
19918 	__XSTRING(STACKNAME),
19919 #ifdef STACKALIAS
19920 	__XSTRING(STACKALIAS),
19921 #endif
19922 };
19923 
19924 static int
19925 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
19926 {
19927 	memset(mem, 0, size);
19928 	return (0);
19929 }
19930 
19931 static void
19932 rack_dtor(void *mem, int32_t size, void *arg)
19933 {
19934 
19935 }
19936 
19937 static bool rack_mod_inited = false;
19938 
19939 static int
19940 tcp_addrack(module_t mod, int32_t type, void *data)
19941 {
19942 	int32_t err = 0;
19943 	int num_stacks;
19944 
19945 	switch (type) {
19946 	case MOD_LOAD:
19947 		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
19948 		    sizeof(struct rack_sendmap),
19949 		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
19950 
19951 		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
19952 		    sizeof(struct tcp_rack),
19953 		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
19954 
19955 		sysctl_ctx_init(&rack_sysctl_ctx);
19956 		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
19957 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
19958 		    OID_AUTO,
19959 #ifdef STACKALIAS
19960 		    __XSTRING(STACKALIAS),
19961 #else
19962 		    __XSTRING(STACKNAME),
19963 #endif
19964 		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
19965 		    "");
19966 		if (rack_sysctl_root == NULL) {
19967 			printf("Failed to add sysctl node\n");
19968 			err = EFAULT;
19969 			goto free_uma;
19970 		}
19971 		rack_init_sysctls();
19972 		num_stacks = nitems(rack_stack_names);
19973 		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
19974 		    rack_stack_names, &num_stacks);
19975 		if (err) {
19976 			printf("Failed to register %s stack name for "
19977 			    "%s module\n", rack_stack_names[num_stacks],
19978 			    __XSTRING(MODNAME));
19979 			sysctl_ctx_free(&rack_sysctl_ctx);
19980 free_uma:
19981 			uma_zdestroy(rack_zone);
19982 			uma_zdestroy(rack_pcb_zone);
19983 			rack_counter_destroy();
19984 			printf("Failed to register rack module -- err:%d\n", err);
19985 			return (err);
19986 		}
19987 		tcp_lro_reg_mbufq();
19988 		rack_mod_inited = true;
19989 		break;
19990 	case MOD_QUIESCE:
19991 		err = deregister_tcp_functions(&__tcp_rack, true, false);
19992 		break;
19993 	case MOD_UNLOAD:
19994 		err = deregister_tcp_functions(&__tcp_rack, false, true);
19995 		if (err == EBUSY)
19996 			break;
19997 		if (rack_mod_inited) {
19998 			uma_zdestroy(rack_zone);
19999 			uma_zdestroy(rack_pcb_zone);
20000 			sysctl_ctx_free(&rack_sysctl_ctx);
20001 			rack_counter_destroy();
20002 			rack_mod_inited = false;
20003 		}
20004 		tcp_lro_dereg_mbufq();
20005 		err = 0;
20006 		break;
20007 	default:
20008 		return (EOPNOTSUPP);
20009 	}
20010 	return (err);
20011 }
20012 
20013 static moduledata_t tcp_rack = {
20014 	.name = __XSTRING(MODNAME),
20015 	.evhand = tcp_addrack,
20016 	.priv = 0
20017 };
20018 
20019 MODULE_VERSION(MODNAME, 1);
20020 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
20021 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
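
/*
 * Example (editor's sketch, not part of the original source): loading
 * the module and making rack the system-wide default stack from the
 * shell:
 *
 *	# kldload tcp_rack
 *	# sysctl net.inet.tcp.functions_available
 *	# sysctl net.inet.tcp.functions_default=rack
 */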
20022