xref: /freebsd/sys/netinet/tcp_stacks/rack.c (revision 036d2e814bf0f5d88ffb4b24c159320894541757)
/*-
 * Copyright (c) 2016-2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef NETFLIX_STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#endif
#include <sys/refcount.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <vm/uma.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving, which slowly drops
 *    the congestion window so that the ack clock can
 *    be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named), which
 *    stops us using the number of dup acks and instead
 *    uses time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *    of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overridden with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
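
/*
 * Illustrative sketch only, kept out of the build: the decomposition
 * described above boils down to "one handler per TCP state".  The
 * handler names below are the real per-state functions declared later
 * in this file, but the table itself is hypothetical -- the stack
 * actually caches the current handler in a per-connection pointer
 * that rack_set_state() updates as the connection changes state.
 */
#if 0
static int (*rack_state_handler[TCP_NSTATES])(struct mbuf *, struct tcphdr *,
    struct socket *, struct tcpcb *, struct tcpopt *, int32_t, int32_t,
    uint32_t, int32_t, int32_t) = {
	[TCPS_SYN_SENT]		= rack_do_syn_sent,
	[TCPS_SYN_RECEIVED]	= rack_do_syn_recv,
	[TCPS_ESTABLISHED]	= rack_do_established,
	[TCPS_CLOSE_WAIT]	= rack_do_close_wait,
	[TCPS_FIN_WAIT_1]	= rack_do_fin_wait_1,
	[TCPS_CLOSING]		= rack_do_closing,
	[TCPS_LAST_ACK]		= rack_do_lastack,
	[TCPS_FIN_WAIT_2]	= rack_do_fin_wait_2,
};
#endif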
static int32_t rack_tlp_thresh = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000;	/* 0 == never fade; default
						 * 60,000 ms (60 seconds) */
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;

static int32_t rack_pkt_delay = 1;
static int32_t rack_min_pace_time = 0;
static int32_t rack_early_recovery = 1;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1;	/* Number of ms minimum timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t use_rack_cheat = 1;
static int32_t rack_persist_min = 250;	/* 250ms */
static int32_t rack_persist_max = 1000;	/* 1 second */
static int32_t rack_sack_not_required = 0;	/* set to one to allow non-SACK connections to use rack */
static int32_t rack_hw_tls_max_seg = 0; /* 0 means use hw-tls single segment */

/* Sack attack detection thresholds and such */
static int32_t tcp_force_detection = 0;

#ifdef NETFLIX_EXP_DETECTION
static int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
static int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
static int32_t tcp_restoral_thresh = 650;	/* 65 % (sack:2:ack -5%) */
static int32_t tcp_attack_on_turns_on_logging = 0;
static int32_t tcp_map_minimum = 500;
#endif
static int32_t tcp_sad_decay_val = 800;
static int32_t tcp_sad_pacing_interval = 2000;
static int32_t tcp_sad_low_pps = 100;


/*
 * Currently regular TCP has an rto_min of 30ms; with the RTO
 * doubling on each of up to 12 retransmits, the total wait is
 * (2^0 + 2^1 + ... + 2^11) * 30ms = 4095 * 30ms = 122.850
 * seconds before a connection is killed.
 */
static int32_t rack_tlp_min = 10;
static int32_t rack_rto_min = 30;	/* 30ms, same as main FreeBSD */
static int32_t rack_rto_max = 4000;	/* 4 seconds */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 200;	/* 200ms */
static int32_t rack_slot_reduction = 4;
static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_use_proportional_reduce = 0;
static int32_t rack_proportional_rate = 10;
static int32_t rack_tlp_max_resend = 2;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_use_sack_filter = 1;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
static int32_t rack_per_of_gp = 50;
static int32_t rack_tcp_map_entries_limit = 1500;
static int32_t rack_tcp_map_split_limit = 256;


/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;

/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_tlp_does_nada;

/* Counters for HW TLS */
counter_u64_t rack_tls_rwnd;
counter_u64_t rack_tls_cwnd;
counter_u64_t rack_tls_app;
counter_u64_t rack_tls_other;
counter_u64_t rack_tls_filled;
counter_u64_t rack_tls_rxt;
counter_u64_t rack_tls_tlp;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
    uint32_t type);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
    uint32_t t, uint32_t cts);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
    uint8_t pass, struct rack_sendmap *hintrsm);
static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int num);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt);
static void
tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);

int32_t rack_clear_counter = 0;


static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_badfr);
		counter_u64_zero(rack_badfr_bytes);
		counter_u64_zero(rack_rtm_prr_retran);
		counter_u64_zero(rack_rtm_prr_newdata);
		counter_u64_zero(rack_timestamp_mismatch);
		counter_u64_zero(rack_reorder_seen);
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_tlp_retran_fail);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_to_arm_rack);
		counter_u64_zero(rack_to_arm_tlp);
		counter_u64_zero(rack_paced_segments);
		counter_u64_zero(rack_calc_zero);
		counter_u64_zero(rack_calc_nonzero);
		counter_u64_zero(rack_unpaced_segments);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_per_timer_hole);
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		counter_u64_zero(rack_find_high);
		counter_u64_zero(rack_tls_rwnd);
		counter_u64_zero(rack_tls_cwnd);
		counter_u64_zero(rack_tls_app);
		counter_u64_zero(rack_tls_other);
		counter_u64_zero(rack_tls_filled);
		counter_u64_zero(rack_tls_rxt);
		counter_u64_zero(rack_tls_tlp);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_used_tlpmethod);
		counter_u64_zero(rack_used_tlpmethod2);
		counter_u64_zero(rack_enter_tlp_calc);
		counter_u64_zero(rack_progress_drops);
		counter_u64_zero(rack_tlp_does_nada);
		counter_u64_zero(rack_collapsed_win);

	}
	rack_clear_counter = 0;
	return (0);
}
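
/*
 * Usage example (assuming this stack's sysctl tree is rooted at
 * net.inet.tcp.rack; the exact path depends on how rack_sysctl_root
 * is attached elsewhere in this file):
 *
 *	# sysctl net.inet.tcp.rack.clear=1
 *
 * Writing 1 zeroes every counter listed above; any other value is a
 * no-op, and the handler always resets rack_clear_counter to 0.
 */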


static void
rack_init_sysctls(void)
{
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;

	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling: 0=high, 1=low");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hw_tlsmax", CTLFLAG_RW,
	    &rack_hw_tls_max_seg, 0,
	    "Do we have a multiplier of TLS records we can send as a max (0=1 TLS record)?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "data_after_close", CTLFLAG_RW,
	    &rack_ignore_data_after_close, 0,
	    "Do we hold off sending a RST until all pending data is ack'd");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "cheat_rxt", CTLFLAG_RW,
	    &use_rack_cheat, 1,
	    "Do we use the rxt cheat for rack?");

	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "persmin", CTLFLAG_RW,
	    &rack_persist_min, 250,
	    "What is the minimum time in milliseconds between persists");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "persmax", CTLFLAG_RW,
	    &rack_persist_max, 1000,
	    "What is the largest delay in milliseconds between persists");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "no_sack_needed", CTLFLAG_RW,
	    &rack_sack_not_required, 0,
	    "Do we allow rack to run on connections not supporting SACK?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we use for TLP time calc: 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "gp_percentage", CTLFLAG_RW,
	    &rack_per_of_gp, 50,
	    "Do we pace to percentage of goodput (0=old method)?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "min_pace_time", CTLFLAG_RW,
	    &rack_min_pace_time, 0,
	    "Should we enforce a minimum pace time of 1ms");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
	    &rack_verbose_logging, 0,
	    "Should RACK black box logging be verbose");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "sackfiltering", CTLFLAG_RW,
	    &rack_use_sack_filter, 1,
	    "Do we use sack filtering?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 200,
	    "Delayed ack time (200ms)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10,
	    "TLP minimum timeout per the specification (10ms)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 1,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 0,
	    "Minimum RTO in ms -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 0,
	    "Maximum RTO in ms -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_retry", CTLFLAG_RW,
	    &rack_tlp_max_resend, 2,
	    "How many times does TLP retry a single segment or multiple with no ACK");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
	    &rack_use_proportional_reduce, 0,
	    "Should we proportionally reduce cwnd based on the number of losses");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "recovery_prop", CTLFLAG_RW,
	    &rack_proportional_rate, 10,
	    "What percent reduction per loss");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retran should we enter recovery?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_reduces", CTLFLAG_RW,
	    &rack_slot_reduction, 4,
	    "When setting a slot should we reduce by the divisor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_every_seg", CTLFLAG_RW,
	    &rack_pace_every_seg, 0,
	    "Should we use the original pacing mechanism that did not pace much?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "hptsi_seg_max", CTLFLAG_RW,
	    &rack_hptsi_segments, 40,
	    "Should we pace out only a limited size of segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "prr_sendalot", CTLFLAG_RW,
	    &rack_send_a_lot_in_prr, 1,
	    "Send a lot in prr");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1,
	    "Minimum rack timeout in milliseconds");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "earlyrecovery", CTLFLAG_RW,
	    &rack_early_recovery, 1,
	    "Do we do early recovery with rack");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 0,
	    "Does reorder detection fade, if so how many ms (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1,
	    "Extra RACK time (in ms) besides reordering thresh");

	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW, 0,
	    "Rack Counters");
	rack_badfr = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "badfr", CTLFLAG_RD,
	    &rack_badfr, "Total number of bad FRs");
	rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "badfr_bytes", CTLFLAG_RD,
	    &rack_badfr_bytes, "Total bytes of bad FRs");
	rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "prrsndret", CTLFLAG_RD,
	    &rack_rtm_prr_retran,
	    "Total number of prr based retransmits");
	rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "prrsndnew", CTLFLAG_RD,
	    &rack_rtm_prr_newdata,
	    "Total number of prr based new transmits");
	rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tsnf", CTLFLAG_RD,
	    &rack_timestamp_mismatch,
	    "Total number of times we could not find the reported timestamp");
	rack_find_high = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "findhigh", CTLFLAG_RD,
	    &rack_find_high,
	    "Total number of FINs causing find-high");
	rack_reorder_seen = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "reordering", CTLFLAG_RD,
	    &rack_reorder_seen,
	    "Total number of times we added delay due to reordering");
	rack_tlp_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
	    &rack_tlp_tot,
	    "Total number of tail loss probe expirations");
	rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_new", CTLFLAG_RD,
	    &rack_tlp_newdata,
	    "Total number of tail loss probes sending new data");

	rack_tlp_retran = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_retran", CTLFLAG_RD,
	    &rack_tlp_retran,
	    "Total number of tail loss probes sending retransmitted data");
	rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
	    &rack_tlp_retran_bytes,
	    "Total bytes of tail loss probes sending retransmitted data");
	rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
	    &rack_tlp_retran_fail,
	    "Total number of tail loss probes sending retransmitted data that failed (wait for t3)");
	rack_to_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
	    &rack_to_tot,
	    "Total number of times the rack timeout expired");
	rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "arm_rack", CTLFLAG_RD,
	    &rack_to_arm_rack,
	    "Total number of times the rack timer was armed");
	rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "arm_tlp", CTLFLAG_RD,
	    &rack_to_arm_tlp,
	    "Total number of times the tlp timer was armed");

	rack_calc_zero = counter_u64_alloc(M_WAITOK);
	rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "calc_zero", CTLFLAG_RD,
	    &rack_calc_zero,
	    "Total number of times pacing time worked out to zero");
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "calc_nonzero", CTLFLAG_RD,
	    &rack_calc_nonzero,
	    "Total number of times pacing time worked out to non-zero");
	rack_paced_segments = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "paced", CTLFLAG_RD,
	    &rack_paced_segments,
	    "Total number of times a segment send caused hptsi");
	rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "unpaced", CTLFLAG_RD,
	    &rack_unpaced_segments,
	    "Total number of times a segment did not cause hptsi");
	rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
	    &rack_saw_enobuf,
	    "Total number of times a send saw ENOBUFS");
	rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
	    &rack_saw_enetunreach,
	    "Total number of times a send saw ENETUNREACH");
	rack_to_alloc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allocs", CTLFLAG_RD,
	    &rack_to_alloc,
	    "Total allocations of tracking structures");
	rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allochard", CTLFLAG_RD,
	    &rack_to_alloc_hard,
	    "Total allocations done with sleeping the hard way");
	rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allocemerg", CTLFLAG_RD,
	    &rack_to_alloc_emerg,
	    "Total allocations done from emergency cache");
	rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "alloc_limited", CTLFLAG_RD,
	    &rack_to_alloc_limited,
	    "Total allocations dropped due to limit");
	rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
	    &rack_alloc_limited_conns,
	    "Connections with allocations dropped due to limit");
	rack_split_limited = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "split_limited", CTLFLAG_RD,
	    &rack_split_limited,
	    "Split allocations dropped due to limit");
	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_long", CTLFLAG_RD,
	    &rack_sack_proc_all,
	    "Total times we had to walk the whole list for sack processing");

	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_restart", CTLFLAG_RD,
	    &rack_sack_proc_restart,
	    "Total times we had to walk the whole list due to a restart");
	rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_short", CTLFLAG_RD,
	    &rack_sack_proc_short,
	    "Total times we took the shortcut for sack processing");
	rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
	    &rack_enter_tlp_calc,
	    "Total times we called calc-tlp");
	rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
	    &rack_used_tlpmethod,
	    "Total number of times we hit TLP method 1");
	rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
	    &rack_used_tlpmethod2,
	    "Total number of times we hit TLP method 2");
	/* Sack Attacker detection stuff */
	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW, 0,
	    "Rack Sack Attack Counters and Controls");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
	    &rack_highest_sack_thresh_seen, 0,
	    "Highest sack to ack ratio seen");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
	    &rack_highest_move_thresh_seen, 0,
	    "Highest move to non-move ratio seen");
	rack_ack_total = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "acktotal", CTLFLAG_RD,
	    &rack_ack_total,
	    "Total number of ACKs");

	rack_express_sack = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
	    &rack_express_sack,
	    "Total number of express SACKs");
	rack_sack_total = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "sacktotal", CTLFLAG_RD,
	    &rack_sack_total,
	    "Total number of SACKs");
	rack_move_none = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "move_none", CTLFLAG_RD,
	    &rack_move_none,
	    "Total number of SACK index reuses of positions under threshold");
	rack_move_some = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "move_some", CTLFLAG_RD,
	    &rack_move_some,
	    "Total number of SACK index reuses of positions over threshold");
	rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "attacks", CTLFLAG_RD,
	    &rack_sack_attacks_detected,
	    "Total number of SACK attackers that had sack disabled");
	rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "reversed", CTLFLAG_RD,
	    &rack_sack_attacks_reversed,
	    "Total number of SACK attackers that were later determined to be false positives");
	rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "nextmerge", CTLFLAG_RD,
	    &rack_sack_used_next_merge,
	    "Total number of times we used the next merge");
	rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "prevmerge", CTLFLAG_RD,
	    &rack_sack_used_prev_merge,
	    "Total number of times we used the prev merge");
	rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "skipacked", CTLFLAG_RD,
	    &rack_sack_skipped_acked,
	    "Total number of times we skipped previously-sacked data");
	rack_sack_splits = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "ofsplit", CTLFLAG_RD,
	    &rack_sack_splits,
	    "Total number of times we did the old-fashioned tree split");
	rack_progress_drops = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "prog_drops", CTLFLAG_RD,
	    &rack_progress_drops,
	    "Total number of progress drops");
	rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
	    &rack_input_idle_reduces,
	    "Total number of idle reductions on input");
	rack_collapsed_win = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "collapsed_win", CTLFLAG_RD,
	    &rack_collapsed_win,
	    "Total number of collapsed windows");
	rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_nada", CTLFLAG_RD,
	    &rack_tlp_does_nada,
	    "Total number of nada tlp calls");

	rack_tls_rwnd = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_rwnd", CTLFLAG_RD,
	    &rack_tls_rwnd,
	    "Total hdwr tls rwnd limited");

	rack_tls_cwnd = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_cwnd", CTLFLAG_RD,
	    &rack_tls_cwnd,
	    "Total hdwr tls cwnd limited");

	rack_tls_app = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_app", CTLFLAG_RD,
	    &rack_tls_app,
	    "Total hdwr tls app limited");

	rack_tls_other = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_other", CTLFLAG_RD,
	    &rack_tls_other,
	    "Total hdwr tls other limited");

	rack_tls_filled = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_filled", CTLFLAG_RD,
	    &rack_tls_filled,
	    "Total hdwr tls filled");

	rack_tls_rxt = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_rxt", CTLFLAG_RD,
	    &rack_tls_rxt,
	    "Total hdwr rxt");

	rack_tls_tlp = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tls_tlp", CTLFLAG_RD,
	    &rack_tls_tlp,
	    "Total hdwr tls tlp");
	rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "timer_hole", CTLFLAG_RD,
	    &rack_per_timer_hole,
	    "Total persist starts in a timer hole");

	COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "outsize", CTLFLAG_RD,
	    rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
	COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "opts", CTLFLAG_RD,
	    rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
	SYSCTL_ADD_PROC(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
}
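
/*
 * Example of exercising the knobs registered above from userland
 * (the net.inet.tcp.rack root is an assumption here, not something
 * this function establishes):
 *
 *	# sysctl net.inet.tcp.rack.minrto=30	# tune a CTLFLAG_RW knob
 *	# sysctl net.inet.tcp.rack.stats	# dump the read-only counters
 */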

static __inline int
rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
{
	if (SEQ_GEQ(b->r_start, a->r_start) &&
	    SEQ_LT(b->r_start, a->r_end)) {
		/*
		 * The entry b is within the
		 * block a. i.e.:
		 * a --   |-------------|
		 * b --   |----|
		 * <or>
		 * b --       |------|
		 * <or>
		 * b --       |-----------|
		 */
		return (0);
	} else if (SEQ_GEQ(b->r_start, a->r_end)) {
		/*
		 * b begins at or after the end of a,
		 * so a is said to be smaller than b.
		 * i.e.:
		 * a --   |------|
		 * b --          |--------|
		 * or
		 * b --              |-----|
		 */
		return (1);
	}
	/*
	 * What's left is where a is
	 * larger than b. i.e.:
	 * a --         |-------|
	 * b --  |---|
	 * or even possibly
	 * b --   |--------------|
	 */
	return (-1);
}

RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
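
/*
 * Illustrative sketch only, kept out of the build: because rb_map_cmp()
 * reports any overlap as equality, a single sequence number can be
 * looked up with a degenerate one-byte probe, and RB_FIND() hands back
 * the rack_sendmap whose [r_start, r_end) range contains it.  The
 * helper name and the "map" tree head are hypothetical.
 */
#if 0
static struct rack_sendmap *
rack_map_lookup(struct rack_rb_tree_head *map, tcp_seq seq)
{
	struct rack_sendmap probe;

	probe.r_start = seq;
	probe.r_end = seq + 1;
	return (RB_FIND(rack_rb_tree_head, map, &probe));
}
#endif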

static inline int32_t
rack_progress_timeout_check(struct tcpcb *tp)
{
	if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) {
		if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) {
			/*
			 * There is an assumption that the caller
			 * will drop the connection so we will
			 * increment the counters here.
			 */
			struct tcp_rack *rack;
			rack = (struct tcp_rack *)tp->t_fb_ptr;
			counter_u64_add(rack_progress_drops, 1);
#ifdef NETFLIX_STATS
			TCPSTAT_INC(tcps_progdrops);
#endif
			rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__);
			return (1);
		}
	}
	return (0);
}



static void
rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = tsused;
		log.u_bbr.flex2 = thresh;
		log.u_bbr.flex3 = rsm->r_flags;
		log.u_bbr.flex4 = rsm->r_dupack;
		log.u_bbr.flex5 = rsm->r_start;
		log.u_bbr.flex6 = rsm->r_end;
		log.u_bbr.flex8 = mod;
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_SETTINGS_CHG, 0,
		    0, &log, false, &tv);
	}
}



static void
rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
		log.u_bbr.flex2 = to;
		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex4 = slot;
		log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
		log.u_bbr.flex7 = rack->rc_in_persist;
		log.u_bbr.flex8 = which;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TIMERSTAR, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int num)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex8 = to_num;
		log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
		log.u_bbr.flex2 = rack->rc_rack_rtt;
		log.u_bbr.flex3 = num;
		log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_RTO, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
    uint32_t o_srtt, uint32_t o_var)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = t;
		log.u_bbr.flex2 = o_srtt;
		log.u_bbr.flex3 = o_var;
		log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
		log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
		log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
		log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot;
		log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_BBRRTT, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
{
	/*
	 * Log the rtt sample we are
	 * applying to the srtt algorithm in
	 * useconds.
	 */
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		/* Convert our ms to microseconds */
		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = rtt * 1000;
		log.u_bbr.flex2 = rack->r_ctl.ack_count;
		log.u_bbr.flex3 = rack->r_ctl.sack_count;
		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
		log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
		log.u_bbr.flex8 = rack->sack_attack_disable;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    TCP_LOG_RTT, 0,
		    0, &log, false, &tv);
	}
}


static inline void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
{
	if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = line;
		log.u_bbr.flex2 = tick;
		log.u_bbr.flex3 = tp->t_maxunacktime;
		log.u_bbr.flex4 = tp->t_acktime;
		log.u_bbr.flex8 = event;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_PROGRESS, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = slot;
		log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
		log.u_bbr.flex8 = rack->rc_in_persist;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_BBRSND, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = did_out;
		log.u_bbr.flex2 = nxt_pkt;
		log.u_bbr.flex3 = way_out;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
		log.u_bbr.flex7 = rack->r_wanted_output;
		log.u_bbr.flex8 = rack->rc_in_persist;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_DOSEG_DONE, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod, int32_t orig_len, int frm)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		uint32_t cts;

		memset(&log, 0, sizeof(log));
		cts = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
		log.u_bbr.flex4 = len;
		log.u_bbr.flex5 = orig_len;
		log.u_bbr.flex6 = rack->r_ctl.rc_sacked;
		log.u_bbr.flex7 = mod;
		log.u_bbr.flex8 = frm;
		/* Reuse the timestamp sampled above rather than sampling twice. */
		log.u_bbr.timeStamp = cts;
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_HDWR_TLS, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = slot;
		log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex7 = hpts_calling;
		log.u_bbr.flex8 = rack->rc_in_persist;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_JUSTRET, 0,
		    tlen, &log, false, &tv);
	}
}

static void
rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = line;
		log.u_bbr.flex2 = 0;
		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex4 = 0;
		log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
		log.u_bbr.flex8 = hpts_removed;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TIMERCANC, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = timers;
		log.u_bbr.flex2 = ret;
		log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex5 = cts;
		log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_TO_PROCESS, 0,
		    0, &log, false, &tv);
	}
}

static void
rack_log_to_prr(struct tcp_rack *rack, int frm)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
		log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
		log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
		log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
		log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
		log.u_bbr.flex8 = frm;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_BBRUPD, 0,
		    0, &log, false, &tv);
	}
}

#ifdef NETFLIX_EXP_DETECTION
static void
rack_log_sad(struct tcp_rack *rack, int event)
{
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
1487 		struct timeval tv;
1488 
1489 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1490 		log.u_bbr.flex1 = rack->r_ctl.sack_count;
1491 		log.u_bbr.flex2 = rack->r_ctl.ack_count;
1492 		log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
1493 		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
1494 		log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
1495 		log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
1496 		log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
1497 		log.u_bbr.lt_epoch = (tcp_force_detection << 8);
1498 		log.u_bbr.lt_epoch |= rack->do_detection;
1499 		log.u_bbr.applimited = tcp_map_minimum;
1500 		log.u_bbr.flex7 = rack->sack_attack_disable;
1501 		log.u_bbr.flex8 = event;
1502 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1503 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
1504 		log.u_bbr.delivered = tcp_sad_decay_val;
1505 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
1506 		    &rack->rc_inp->inp_socket->so_rcv,
1507 		    &rack->rc_inp->inp_socket->so_snd,
1508 		    TCP_SAD_DETECTION, 0,
1509 		    0, &log, false, &tv);
1510 	}
1511 }
1512 #endif
1513 
1514 static void
1515 rack_counter_destroy(void)
1516 {
1517 	counter_u64_free(rack_badfr);
1518 	counter_u64_free(rack_badfr_bytes);
1519 	counter_u64_free(rack_rtm_prr_retran);
1520 	counter_u64_free(rack_rtm_prr_newdata);
1521 	counter_u64_free(rack_timestamp_mismatch);
1522 	counter_u64_free(rack_reorder_seen);
1523 	counter_u64_free(rack_tlp_tot);
1524 	counter_u64_free(rack_tlp_newdata);
1525 	counter_u64_free(rack_tlp_retran);
1526 	counter_u64_free(rack_tlp_retran_bytes);
1527 	counter_u64_free(rack_tlp_retran_fail);
1528 	counter_u64_free(rack_to_tot);
1529 	counter_u64_free(rack_to_arm_rack);
1530 	counter_u64_free(rack_to_arm_tlp);
1531 	counter_u64_free(rack_paced_segments);
1532 	counter_u64_free(rack_unpaced_segments);
1533 	counter_u64_free(rack_saw_enobuf);
1534 	counter_u64_free(rack_saw_enetunreach);
1535 	counter_u64_free(rack_to_alloc_hard);
1536 	counter_u64_free(rack_to_alloc_emerg);
1537 	counter_u64_free(rack_sack_proc_all);
1538 	counter_u64_free(rack_sack_proc_short);
1539 	counter_u64_free(rack_sack_proc_restart);
1540 	counter_u64_free(rack_to_alloc);
1541 	counter_u64_free(rack_to_alloc_limited);
1542 	counter_u64_free(rack_alloc_limited_conns);
1543 	counter_u64_free(rack_split_limited);
1544 	counter_u64_free(rack_find_high);
1545 	counter_u64_free(rack_enter_tlp_calc);
1546 	counter_u64_free(rack_used_tlpmethod);
1547 	counter_u64_free(rack_used_tlpmethod2);
1548 	counter_u64_free(rack_progress_drops);
1549 	counter_u64_free(rack_input_idle_reduces);
1550 	counter_u64_free(rack_collapsed_win);
1551 	counter_u64_free(rack_tlp_does_nada);
1552 	COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
1553 	COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
1554 }
1555 
1556 static struct rack_sendmap *
1557 rack_alloc(struct tcp_rack *rack)
1558 {
1559 	struct rack_sendmap *rsm;
1560 
1561 	rsm = uma_zalloc(rack_zone, M_NOWAIT);
1562 	if (rsm) {
1563 		rack->r_ctl.rc_num_maps_alloced++;
1564 		counter_u64_add(rack_to_alloc, 1);
1565 		return (rsm);
1566 	}
1567 	if (rack->rc_free_cnt) {
1568 		counter_u64_add(rack_to_alloc_emerg, 1);
1569 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
1570 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
1571 		rack->rc_free_cnt--;
1572 		return (rsm);
1573 	}
1574 	return (NULL);
1575 }
1576 
1577 static struct rack_sendmap *
1578 rack_alloc_full_limit(struct tcp_rack *rack)
1579 {
1580 	if ((rack_tcp_map_entries_limit > 0) &&
1581 	    (rack->do_detection == 0) &&
1582 	    (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) {
1583 		counter_u64_add(rack_to_alloc_limited, 1);
1584 		if (!rack->alloc_limit_reported) {
1585 			rack->alloc_limit_reported = 1;
1586 			counter_u64_add(rack_alloc_limited_conns, 1);
1587 		}
1588 		return (NULL);
1589 	}
1590 	return (rack_alloc(rack));
1591 }
1592 
1593 /* wrapper to allocate a sendmap entry, subject to a specific limit */
1594 static struct rack_sendmap *
1595 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
1596 {
1597 	struct rack_sendmap *rsm;
1598 
1599 	if (limit_type) {
1600 		/* currently there is only one limit type */
1601 		if (rack_tcp_map_split_limit > 0 &&
1602 		    (rack->do_detection == 0) &&
1603 		    rack->r_ctl.rc_num_split_allocs >= rack_tcp_map_split_limit) {
1604 			counter_u64_add(rack_split_limited, 1);
1605 			if (!rack->alloc_limit_reported) {
1606 				rack->alloc_limit_reported = 1;
1607 				counter_u64_add(rack_alloc_limited_conns, 1);
1608 			}
1609 			return (NULL);
1610 		}
1611 	}
1612 
1613 	/* allocate and mark in the limit type, if set */
1614 	rsm = rack_alloc(rack);
1615 	if (rsm != NULL && limit_type) {
1616 		rsm->r_limit_type = limit_type;
1617 		rack->r_ctl.rc_num_split_allocs++;
1618 	}
1619 	return (rsm);
1620 }
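
/*
 * Illustrative note (sketch values, not the actual defaults): if
 * rack_tcp_map_split_limit were 256, a split request arriving with
 * rc_num_split_allocs already at 256 returns NULL and bumps
 * rack_split_limited, unless SACK-attack detection owns the accounting
 * (do_detection set), in which case the limit is not applied.
 */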
1621 
1622 static void
1623 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
1624 {
1625 	if (rsm->r_limit_type) {
1626 		/* currently there is only one limit type */
1627 		rack->r_ctl.rc_num_split_allocs--;
1628 	}
1629 	if (rack->r_ctl.rc_tlpsend == rsm)
1630 		rack->r_ctl.rc_tlpsend = NULL;
1631 	if (rack->r_ctl.rc_sacklast == rsm)
1632 		rack->r_ctl.rc_sacklast = NULL;
1633 	if (rack->rc_free_cnt < rack_free_cache) {
1634 		memset(rsm, 0, sizeof(struct rack_sendmap));
1635 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
1636 		rsm->r_limit_type = 0;
1637 		rack->rc_free_cnt++;
1638 		return;
1639 	}
1640 	rack->r_ctl.rc_num_maps_alloced--;
1641 	uma_zfree(rack_zone, rsm);
1642 }
1643 
1644 /*
1645  * CC wrapper hook functions
1646  */
1647 static void
1648 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
1649     uint16_t type, int32_t recovery)
1650 {
1651 #ifdef NETFLIX_STATS
1652 	int32_t gput;
1653 #endif
1654 
1655 	INP_WLOCK_ASSERT(tp->t_inpcb);
1656 	tp->ccv->nsegs = nsegs;
1657 	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
1658 	if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
1659 		uint32_t max;
1660 
1661 		max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
1662 		if (tp->ccv->bytes_this_ack > max) {
1663 			tp->ccv->bytes_this_ack = max;
1664 		}
1665 	}
1666 	if (tp->snd_cwnd <= tp->snd_wnd)
1667 		tp->ccv->flags |= CCF_CWND_LIMITED;
1668 	else
1669 		tp->ccv->flags &= ~CCF_CWND_LIMITED;
1670 
1671 	if (type == CC_ACK) {
1672 #ifdef NETFLIX_STATS
1673 		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
1674 		    ((int32_t) tp->snd_cwnd) - tp->snd_wnd);
1675 		if ((tp->t_flags & TF_GPUTINPROG) &&
1676 		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
1677 			gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) /
1678 			    max(1, tcp_ts_getticks() - tp->gput_ts);
1679 			/* We store it in bytes per ms (or kbytes per sec) */
1680 			rack->r_ctl.rc_gp_history[rack->r_ctl.rc_gp_hist_idx] = gput / 8;
1681 			rack->r_ctl.rc_gp_hist_idx++;
1682 			if (rack->r_ctl.rc_gp_hist_idx >= RACK_GP_HIST)
1683 				rack->r_ctl.rc_gp_hist_filled = 1;
1684 			rack->r_ctl.rc_gp_hist_idx %= RACK_GP_HIST;
1685 			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
1686 			    gput);
1687 			/*
1688 			 * XXXLAS: This is a temporary hack, and should be
1689 			 * chained off VOI_TCP_GPUT when stats(9) grows an
1690 			 * API to deal with chained VOIs.
1691 			 */
1692 			if (tp->t_stats_gput_prev > 0)
1693 				stats_voi_update_abs_s32(tp->t_stats,
1694 				    VOI_TCP_GPUT_ND,
1695 				    ((gput - tp->t_stats_gput_prev) * 100) /
1696 				    tp->t_stats_gput_prev);
1697 			tp->t_flags &= ~TF_GPUTINPROG;
1698 			tp->t_stats_gput_prev = gput;
1699 
1700 			if (tp->t_maxpeakrate) {
1701 				/*
1702 				 * We update t_peakrate_thr. This gives us roughly
1703 				 * one update per round trip time.
1704 				 */
1705 				tcp_update_peakrate_thr(tp);
1706 			}
1707 		}
1708 #endif
1709 		if (tp->snd_cwnd > tp->snd_ssthresh) {
1710 			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
1711 			    nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
1712 			if (tp->t_bytes_acked >= tp->snd_cwnd) {
1713 				tp->t_bytes_acked -= tp->snd_cwnd;
1714 				tp->ccv->flags |= CCF_ABC_SENTAWND;
1715 			}
1716 		} else {
1717 			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
1718 			tp->t_bytes_acked = 0;
1719 		}
1720 	}
1721 	if (CC_ALGO(tp)->ack_received != NULL) {
1722 		/* XXXLAS: Find a way to live without this */
1723 		tp->ccv->curack = th->th_ack;
1724 		CC_ALGO(tp)->ack_received(tp->ccv, type);
1725 	}
1726 #ifdef NETFLIX_STATS
1727 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
1728 #endif
1729 	if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) {
1730 		rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd;
1731 	}
1732 	/* we enforce max peak rate if it is set. */
1733 	if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) {
1734 		tp->snd_cwnd = tp->t_peakrate_thr;
1735 	}
1736 }
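
/*
 * Worked example of the ABC clamp above (illustrative numbers): with
 * maxseg = 1448, V_tcp_abc_l_var = 2 and one ack (nsegs = 1) newly
 * covering 10000 bytes while cwnd > ssthresh, t_bytes_acked grows by
 * min(10000, 1 * 2 * 1448) = 2896 bytes, so cwnd growth in congestion
 * avoidance stays bounded per ack.
 */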
1737 
1738 static void
1739 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
1740 {
1741 	struct tcp_rack *rack;
1742 
1743 	rack = (struct tcp_rack *)tp->t_fb_ptr;
1744 	INP_WLOCK_ASSERT(tp->t_inpcb);
1745 	if (rack->r_ctl.rc_prr_sndcnt > 0)
1746 		rack->r_wanted_output++;
1747 }
1748 
1749 static void
1750 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
1751 {
1752 	struct tcp_rack *rack;
1753 
1754 	INP_WLOCK_ASSERT(tp->t_inpcb);
1755 	rack = (struct tcp_rack *)tp->t_fb_ptr;
1756 	if (CC_ALGO(tp)->post_recovery != NULL) {
1757 		tp->ccv->curack = th->th_ack;
1758 		CC_ALGO(tp)->post_recovery(tp->ccv);
1759 	}
1760 	/*
1761 	 * Here we can in theory adjust cwnd to be based on the number of
1762 	 * losses in the window (rack->r_ctl.rc_loss_count). This is done
1763 	 * based on the rack_use_proportional flag.
1764 	 */
1765 	if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) {
1766 		int32_t reduce;
1767 
1768 		reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate);
1769 		if (reduce > 50) {
1770 			reduce = 50;
1771 		}
1772 		tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100);
1773 	} else {
1774 		if (tp->snd_cwnd > tp->snd_ssthresh) {
1775 			/* Drop us down to the ssthresh (1/2 cwnd at loss) */
1776 			tp->snd_cwnd = tp->snd_ssthresh;
1777 		}
1778 	}
1779 	if (rack->r_ctl.rc_prr_sndcnt > 0) {
1780 		/* Suck the next prr cnt back into cwnd */
1781 		tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
1782 		rack->r_ctl.rc_prr_sndcnt = 0;
1783 		rack_log_to_prr(rack, 1);
1784 	}
1785 	tp->snd_recover = tp->snd_una;
1786 	EXIT_RECOVERY(tp->t_flags);
1789 }
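
/*
 * Worked example of the proportional reduction above (illustrative
 * numbers): with rc_loss_count = 3 and rc_prop_rate = 10, reduce is
 * 30 and cwnd is cut by 30%. With rc_loss_count = 10 the product
 * would be 100 but is capped at 50, so cwnd is never cut by more
 * than half when exiting recovery this way.
 */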
1790 
1791 static void
1792 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
1793 {
1794 	struct tcp_rack *rack;
1795 
1796 	INP_WLOCK_ASSERT(tp->t_inpcb);
1797 
1798 	rack = (struct tcp_rack *)tp->t_fb_ptr;
1799 	switch (type) {
1800 	case CC_NDUPACK:
1801 		tp->t_flags &= ~TF_WASFRECOVERY;
1802 		tp->t_flags &= ~TF_WASCRECOVERY;
1803 		if (!IN_FASTRECOVERY(tp->t_flags)) {
1804 			rack->r_ctl.rc_tlp_rtx_out = 0;
1805 			rack->r_ctl.rc_prr_delivered = 0;
1806 			rack->r_ctl.rc_prr_out = 0;
1807 			rack->r_ctl.rc_loss_count = 0;
1808 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
1809 			rack_log_to_prr(rack, 2);
1810 			rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
1811 			tp->snd_recover = tp->snd_max;
1812 			if (tp->t_flags & TF_ECN_PERMIT)
1813 				tp->t_flags |= TF_ECN_SND_CWR;
1814 		}
1815 		break;
1816 	case CC_ECN:
1817 		if (!IN_CONGRECOVERY(tp->t_flags)) {
1818 			TCPSTAT_INC(tcps_ecn_rcwnd);
1819 			tp->snd_recover = tp->snd_max;
1820 			if (tp->t_flags & TF_ECN_PERMIT)
1821 				tp->t_flags |= TF_ECN_SND_CWR;
1822 		}
1823 		break;
1824 	case CC_RTO:
1825 		tp->t_dupacks = 0;
1826 		tp->t_bytes_acked = 0;
1827 		EXIT_RECOVERY(tp->t_flags);
1828 		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1829 		    ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
1830 		tp->snd_cwnd = ctf_fixed_maxseg(tp);
1831 		break;
1832 	case CC_RTO_ERR:
1833 		TCPSTAT_INC(tcps_sndrexmitbad);
1834 		/* RTO was unnecessary, so reset everything. */
1835 		tp->snd_cwnd = tp->snd_cwnd_prev;
1836 		tp->snd_ssthresh = tp->snd_ssthresh_prev;
1837 		tp->snd_recover = tp->snd_recover_prev;
1838 		if (tp->t_flags & TF_WASFRECOVERY) {
1839 			ENTER_FASTRECOVERY(tp->t_flags);
1840 			tp->t_flags &= ~TF_WASFRECOVERY;
1841 		}
1842 		if (tp->t_flags & TF_WASCRECOVERY) {
1843 			ENTER_CONGRECOVERY(tp->t_flags);
1844 			tp->t_flags &= ~TF_WASCRECOVERY;
1845 		}
1846 		tp->snd_nxt = tp->snd_max;
1847 		tp->t_badrxtwin = 0;
1848 		break;
1849 	}
1850 
1851 	if (CC_ALGO(tp)->cong_signal != NULL) {
1852 		if (th != NULL)
1853 			tp->ccv->curack = th->th_ack;
1854 		CC_ALGO(tp)->cong_signal(tp->ccv, type);
1855 	}
1856 }
1857 
1860 static inline void
1861 rack_cc_after_idle(struct tcpcb *tp)
1862 {
1863 	uint32_t i_cwnd;
1864 
1865 	INP_WLOCK_ASSERT(tp->t_inpcb);
1866 
1867 #ifdef NETFLIX_STATS
1868 	TCPSTAT_INC(tcps_idle_restarts);
1869 	if (tp->t_state == TCPS_ESTABLISHED)
1870 		TCPSTAT_INC(tcps_idle_estrestarts);
1871 #endif
1872 	if (CC_ALGO(tp)->after_idle != NULL)
1873 		CC_ALGO(tp)->after_idle(tp->ccv);
1874 
1875 	if (tp->snd_cwnd == 1)
1876 		i_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
1877 	else
1878 		i_cwnd = tcp_compute_initwnd(tcp_maxseg(tp));
1879 
1880 	/*
1881 	 * Being idle is no different than the initial window. If the cc
1882 	 * clamps it down below the initial window, raise it to the
1883 	 * initial window.
1884 	 */
1885 	if (tp->snd_cwnd < i_cwnd) {
1886 		tp->snd_cwnd = i_cwnd;
1887 	}
1888 }
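
/*
 * Illustrative example (sketch values): after an idle period with
 * maxseg = 1460 and a 10-segment initial window rule in effect,
 * tcp_compute_initwnd() yields about 14600 bytes, so a cwnd the cc
 * module had decayed to 4380 is raised back to 14600 before we
 * resume sending.
 */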
1889 
1891 /*
1892  * Indicate whether this ack should be delayed.  We can delay the ack if
1893  * following conditions are met:
1894  *	- There is no delayed ack timer in progress.
1895  *	- Our last ack wasn't a 0-sized window. We never want to delay
1896  *	  the ack that opens up a 0-sized window.
1897  *	- LRO wasn't used for this segment. We make sure by checking that the
1898  *	  segment size is not larger than the MSS.
1899  *	- Delayed acks are enabled or this is a half-synchronized T/TCP
1900  *	  connection.
1901  */
1902 #define DELAY_ACK(tp, tlen)			 \
1903 	(((tp->t_flags & TF_RXWIN0SENT) == 0) && \
1904 	((tp->t_flags & TF_DELACK) == 0) && 	 \
1905 	(tlen <= tp->t_maxseg) &&		 \
1906 	(tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
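
/*
 * Example evaluation of DELAY_ACK (illustrative): a 1448-byte
 * in-order segment with delayed acks enabled, no delayed-ack timer
 * pending (TF_DELACK clear) and no zero-window advertisement
 * outstanding (TF_RXWIN0SENT clear) may have its ack delayed; a
 * 2896-byte LRO-aggregated segment (tlen > t_maxseg) gets an
 * immediate ack instead.
 */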
1907 
1908 static struct rack_sendmap *
1909 rack_find_lowest_rsm(struct tcp_rack *rack)
1910 {
1911 	struct rack_sendmap *rsm;
1912 
1913 	/*
1914 	 * Walk the time-order transmitted list looking for an rsm that is
1915 	 * not acked. This will be the one that was sent the longest time
1916 	 * ago that is still outstanding.
1917 	 */
1918 	TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
1919 		if (rsm->r_flags & RACK_ACKED) {
1920 			continue;
1921 		}
1922 		goto finish;
1923 	}
1924 finish:
1925 	return (rsm);
1926 }
1927 
1928 static struct rack_sendmap *
1929 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
1930 {
1931 	struct rack_sendmap *prsm;
1932 
1933 	/*
1934 	 * Walk the sequence order list backward until we arrive at the
1935 	 * highest seq not acked. In theory, when this is called, rsm
1936 	 * should already be the last segment (but it was not).
1937 	 */
1938 	counter_u64_add(rack_find_high, 1);
1939 	prsm = rsm;
1940 	RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
1941 		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
1942 			continue;
1943 		}
1944 		return (prsm);
1945 	}
1946 	return (NULL);
1947 }
1948 
1950 static uint32_t
1951 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
1952 {
1953 	int32_t lro;
1954 	uint32_t thresh;
1955 
1956 	/*
1957 	 * lro is the flag we use to determine if we have seen reordering.
1958 	 * If it gets set we have seen reordering. The reorder logic works
1959 	 * in one of two ways:
1960 	 *
1961 	 * If reorder-fade is configured, then we track the last time we saw
1962 	 * re-ordering occur. If we reach the point where enough time has
1963 	 * passed we no longer consider reordering to be occurring.
1964 	 *
1965 	 * Or if reorder-fade is 0, then once we see reordering we consider
1966 	 * the connection to always be subject to reordering and just set
1967 	 * lro to 1.
1968 	 *
1969 	 * In the end if lro is non-zero we add the extra time for
1970 	 * reordering in.
1971 	 */
1972 	if (srtt == 0)
1973 		srtt = 1;
1974 	if (rack->r_ctl.rc_reorder_ts) {
1975 		if (rack->r_ctl.rc_reorder_fade) {
1976 			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
1977 				lro = cts - rack->r_ctl.rc_reorder_ts;
1978 				if (lro == 0) {
1979 					/*
1980 					 * No time has passed since the last
1981 					 * reorder; mark it as reordering.
1982 					 */
1983 					lro = 1;
1984 				}
1985 			} else {
1986 				/* Negative time? */
1987 				lro = 0;
1988 			}
1989 			if (lro > rack->r_ctl.rc_reorder_fade) {
1990 				/* Turn off reordering seen too */
1991 				rack->r_ctl.rc_reorder_ts = 0;
1992 				lro = 0;
1993 			}
1994 		} else {
1995 			/* Reordering does not fade */
1996 			lro = 1;
1997 		}
1998 	} else {
1999 		lro = 0;
2000 	}
2001 	thresh = srtt + rack->r_ctl.rc_pkt_delay;
2002 	if (lro) {
2003 		/* If the reorder shift is not set, you get 1/4 rtt */
2004 		if (rack->r_ctl.rc_reorder_shift)
2005 			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
2006 		else
2007 			thresh += (srtt >> 2);
2008 	} else {
2009 		thresh += 1;
2010 	}
2011 	/* We don't let the rack timeout be above a RTO */
2012 	if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
2013 		thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
2014 	}
2015 	/* And we don't want it above the RTO max either */
2016 	if (thresh > rack_rto_max) {
2017 		thresh = rack_rto_max;
2018 	}
2019 	return (thresh);
2020 }
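
/*
 * Worked example (illustrative numbers, in ms): with srtt = 40,
 * rc_pkt_delay = 1 and reordering seen with rc_reorder_shift = 2,
 * thresh = 40 + 1 + (40 >> 2) = 51; with no reordering seen it is
 * 40 + 1 + 1 = 42. Either way the result is clamped to the current
 * RXT value and to rack_rto_max.
 */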
2021 
2022 static uint32_t
2023 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
2024 		     struct rack_sendmap *rsm, uint32_t srtt)
2025 {
2026 	struct rack_sendmap *prsm;
2027 	uint32_t thresh, len;
2028 	int maxseg;
2029 
2030 	if (srtt == 0)
2031 		srtt = 1;
2032 	if (rack->r_ctl.rc_tlp_threshold)
2033 		thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
2034 	else
2035 		thresh = (srtt * 2);
2036 
2037 	/* Get the previous sent packet, if any  */
2038 	maxseg = ctf_fixed_maxseg(tp);
2039 	counter_u64_add(rack_enter_tlp_calc, 1);
2040 	len = rsm->r_end - rsm->r_start;
2041 	if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
2042 		/* Exactly like the ID */
2043 		if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) {
2044 			uint32_t alt_thresh;
2045 			/*
2046 			 * Compensate for delayed-ack with the d-ack time.
2047 			 */
2048 			counter_u64_add(rack_used_tlpmethod, 1);
2049 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
2050 			if (alt_thresh > thresh)
2051 				thresh = alt_thresh;
2052 		}
2053 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
2054 		/* 2.1 behavior */
2055 		prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
2056 		if (prsm && (len <= maxseg)) {
2057 			/*
2058 			 * Two packets outstanding, thresh should be (2*srtt) +
2059 			 * possible inter-packet delay (if any).
2060 			 */
2061 			uint32_t inter_gap = 0;
2062 			int idx, nidx;
2063 
2064 			counter_u64_add(rack_used_tlpmethod, 1);
2065 			idx = rsm->r_rtr_cnt - 1;
2066 			nidx = prsm->r_rtr_cnt - 1;
2067 			if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], prsm->r_tim_lastsent[nidx])) {
2068 				/* Yes it was sent later (or at the same time) */
2069 				inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
2070 			}
2071 			thresh += inter_gap;
2072 		} else 	if (len <= maxseg) {
2073 			/*
2074 			 * Possibly compensate for delayed-ack.
2075 			 */
2076 			uint32_t alt_thresh;
2077 
2078 			counter_u64_add(rack_used_tlpmethod2, 1);
2079 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
2080 			if (alt_thresh > thresh)
2081 				thresh = alt_thresh;
2082 		}
2083 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
2084 		/* 2.2 behavior */
2085 		if (len <= maxseg) {
2086 			uint32_t alt_thresh;
2087 			/*
2088 			 * Compensate for delayed-ack with the d-ack time.
2089 			 */
2090 			counter_u64_add(rack_used_tlpmethod, 1);
2091 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
2092 			if (alt_thresh > thresh)
2093 				thresh = alt_thresh;
2094 		}
2095 	}
2096  	/* Not above an RTO */
2097 	if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
2098 		thresh = TICKS_2_MSEC(tp->t_rxtcur);
2099 	}
2100 	/* Not above a RTO max */
2101 	if (thresh > rack_rto_max) {
2102 		thresh = rack_rto_max;
2103 	}
2104 	/* Apply user supplied min TLP */
2105 	if (thresh < rack_tlp_min) {
2106 		thresh = rack_tlp_min;
2107 	}
2108 	return (thresh);
2109 }
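
/*
 * Worked example (illustrative numbers, in ms): with srtt = 40 and
 * rc_tlp_threshold = 2 the base thresh is 40 + 40/2 = 60. With only
 * one MSS outstanding, the delayed-ack compensated alternative is
 * 40 + 20 + rack_delayed_ack_time; at a 200 ms d-ack time that is
 * 260 and wins. The result is still clamped below the RXT value and
 * rack_rto_max, and raised to at least rack_tlp_min.
 */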
2110 
2111 static uint32_t
2112 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
2113 {
2114 	/*
2115 	 * We want the rack_rtt which is the
2116 	 * last rtt we measured. However if that
2117 	 * does not exist we fall back to the srtt (which
2118 	 * we probably will never do) and then as a last
2119 	 * resort we use RACK_INITIAL_RTO if no srtt is
2120 	 * yet set.
2121 	 */
2122 	if (rack->rc_rack_rtt)
2123 		return(rack->rc_rack_rtt);
2124 	else if (tp->t_srtt == 0)
2125 		return(RACK_INITIAL_RTO);
2126 	return (TICKS_2_MSEC(tp->t_srtt >> TCP_RTT_SHIFT));
2127 }
2128 
2129 static struct rack_sendmap *
2130 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
2131 {
2132 	/*
2133 	 * Check to see that we don't need to fall into recovery. We will
2134 	 * need to do so if our oldest transmit is past the time we should
2135 	 * have had an ack.
2136 	 */
2137 	struct tcp_rack *rack;
2138 	struct rack_sendmap *rsm;
2139 	int32_t idx;
2140 	uint32_t srtt, thresh;
2141 
2142 	rack = (struct tcp_rack *)tp->t_fb_ptr;
2143 	if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
2144 		return (NULL);
2145 	}
2146 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2147 	if (rsm == NULL)
2148 		return (NULL);
2149 
2150 	if (rsm->r_flags & RACK_ACKED) {
2151 		rsm = rack_find_lowest_rsm(rack);
2152 		if (rsm == NULL)
2153 			return (NULL);
2154 	}
2155 	idx = rsm->r_rtr_cnt - 1;
2156 	srtt = rack_grab_rtt(tp, rack);
2157 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
2158 	if (tsused < rsm->r_tim_lastsent[idx]) {
2159 		return (NULL);
2160 	}
2161 	if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
2162 		return (NULL);
2163 	}
2164 	/* Ok if we reach here we are over-due */
2165 	rack->r_ctl.rc_rsm_start = rsm->r_start;
2166 	rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
2167 	rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
2168 	rack_cong_signal(tp, NULL, CC_NDUPACK);
2169 	return (rsm);
2170 }
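
/*
 * Worked example (illustrative numbers): with the oldest un-acked
 * rsm last sent at time 1000 and a computed thresh of 51, a call
 * with tsused = 1060 is over-due (60 >= 51), so we signal CC_NDUPACK
 * and hand back the rsm; with tsused = 1040 we return NULL and no
 * recovery is entered.
 */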
2171 
2172 static uint32_t
2173 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
2174 {
2175 	int32_t t;
2176 	int32_t tt;
2177 	uint32_t ret_val;
2178 
2179 	t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
2180 	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
2181 	    rack_persist_min, rack_persist_max);
2182 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
2183 		tp->t_rxtshift++;
2184 	rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
2185 	ret_val = (uint32_t)tt;
2186 	return (ret_val);
2187 }
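
/*
 * Worked example (illustrative numbers): with srtt = 100 ms and
 * rttvar = 50 ms, t is about 100 + 4 * 50 = 300 ms. On the third
 * probe (t_rxtshift = 2, tcp_backoff[2] = 4) tt becomes 1200 ms,
 * which TCPT_RANGESET() then clamps into the
 * [rack_persist_min, rack_persist_max] window.
 */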
2188 
2189 static uint32_t
2190 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
2191 {
2192 	/*
2193 	 * Start the FR timer, we do this based on getting the first one in
2194 	 * the rc_tmap. Note that if it is NULL we must stop the timer. In
2195 	 * all events we need to stop the running timer (if it is running)
2196 	 * before starting the new one.
2197 	 */
2198 	uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
2199 	uint32_t srtt_cur;
2200 	int32_t idx;
2201 	int32_t is_tlp_timer = 0;
2202 	struct rack_sendmap *rsm;
2203 
2204 	if (rack->t_timers_stopped) {
2205 		/* All timers have been stopped none are to run */
2206 		return (0);
2207 	}
2208 	if (rack->rc_in_persist) {
2209 		/* We can't start any timer in persists */
2210 		return (rack_get_persists_timer_val(tp, rack));
2211 	}
2212 	if ((tp->t_state < TCPS_ESTABLISHED) ||
2213 	    ((tp->t_flags & TF_SACK_PERMIT) == 0))
2214 		goto activate_rxt;
2215 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2216 	if ((rsm == NULL) || sup_rack) {
2217 		/* Nothing on the send map */
2218 activate_rxt:
2219 		time_since_sent = 0;
2220 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2221 		if (rsm) {
2222 			idx = rsm->r_rtr_cnt - 1;
2223 			if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
2224 				tstmp_touse = rsm->r_tim_lastsent[idx];
2225 			else
2226 				tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
2227 			if (TSTMP_GT(cts, tstmp_touse))
2228 			    time_since_sent = cts - tstmp_touse;
2229 		}
2230 		if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
2231 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
2232 			to = TICKS_2_MSEC(tp->t_rxtcur);
2233 			if (to > time_since_sent)
2234 				to -= time_since_sent;
2235 			else
2236 				to = rack->r_ctl.rc_min_to;
2237 			if (to == 0)
2238 				to = 1;
2239 			return (to);
2240 		}
2241 		return (0);
2242 	}
2243 	if (rsm->r_flags & RACK_ACKED) {
2244 		rsm = rack_find_lowest_rsm(rack);
2245 		if (rsm == NULL) {
2246 			/* No lowest? */
2247 			goto activate_rxt;
2248 		}
2249 	}
2250 	if (rack->sack_attack_disable) {
2251 		/*
2252 		 * We don't want to do
2253 		 * any TLP's if you are an attacker.
2254 		 * Though if you are doing what
2255 		 * is expected you may still have
2256 		 * SACK-PASSED marks.
2257 		 */
2258 		goto activate_rxt;
2259 	}
2261 	if (rsm->r_flags & RACK_SACK_PASSED) {
2262 		if ((tp->t_flags & TF_SENTFIN) &&
2263 		    ((tp->snd_max - tp->snd_una) == 1) &&
2264 		    (rsm->r_flags & RACK_HAS_FIN)) {
2265 			/*
2266 			 * We don't start a rack timer if all we have is a
2267 			 * FIN outstanding.
2268 			 */
2269 			goto activate_rxt;
2270 		}
2271 		if ((rack->use_rack_cheat == 0) &&
2272 		    (IN_RECOVERY(tp->t_flags)) &&
2273 		     (rack->r_ctl.rc_prr_sndcnt  < ctf_fixed_maxseg(tp))) {
2274 			/*
2275 			 * We are not cheating, we are in recovery, and
2276 			 * we have not yet received enough acks to get
2277 			 * our next retransmission out.
2278 			 *
2279 			 * Note that classified attackers do not
2280 			 * get to use the rack-cheat.
2281 			 */
2282 			goto activate_tlp;
2283 		}
2284 		srtt = rack_grab_rtt(tp, rack);
2285 		thresh = rack_calc_thresh_rack(rack, srtt, cts);
2286 		idx = rsm->r_rtr_cnt - 1;
2287 		exp = rsm->r_tim_lastsent[idx] + thresh;
2288 		if (SEQ_GEQ(exp, cts)) {
2289 			to = exp - cts;
2290 			if (to < rack->r_ctl.rc_min_to) {
2291 				to = rack->r_ctl.rc_min_to;
2292 			}
2293 		} else {
2294 			to = rack->r_ctl.rc_min_to;
2295 		}
2296 	} else {
2297 		/* Ok we need to do a TLP not RACK */
2298 activate_tlp:
2299 		if ((rack->rc_tlp_in_progress != 0) ||
2300 		    (rack->r_ctl.rc_tlp_rtx_out != 0)) {
2301 			/*
2302 			 * The previous send was a TLP, or a TLP retransmit
2303 			 * is in progress.
2304 			 */
2305 			goto activate_rxt;
2306 		}
2307 		rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
2308 		if (rsm == NULL) {
2309 			/* We found no rsm to TLP with. */
2310 			goto activate_rxt;
2311 		}
2312 		if (rsm->r_flags & RACK_HAS_FIN) {
2313 			/* If it's a FIN we don't do TLP */
2314 			rsm = NULL;
2315 			goto activate_rxt;
2316 		}
2317 		idx = rsm->r_rtr_cnt - 1;
2318 		time_since_sent = 0;
2319 		if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
2320 			tstmp_touse = rsm->r_tim_lastsent[idx];
2321 		else
2322 			tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
2323 		if (TSTMP_GT(cts, tstmp_touse))
2324 		    time_since_sent = cts - tstmp_touse;
2325 		is_tlp_timer = 1;
2326 		if (tp->t_srtt) {
2327 			srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2328 			srtt = TICKS_2_MSEC(srtt_cur);
2329 		} else
2330 			srtt = RACK_INITIAL_RTO;
2331 		thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
2332 		if (thresh > time_since_sent)
2333 			to = thresh - time_since_sent;
2334 		else
2335 			to = rack->r_ctl.rc_min_to;
2336 		if (to > TCPTV_REXMTMAX) {
2337 			/*
2338 			 * If the TLP time works out to be larger than the
2339 			 * max RTO, let's not do TLP; just do the RTO.
2340 			 */
2341 			goto activate_rxt;
2342 		}
2343 		if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) {
2344 			/*
2345 			 * The tail is no longer the last one I did a probe
2346 			 * on
2347 			 */
2348 			rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2349 			rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2350 		}
2351 	}
2352 	if (is_tlp_timer == 0) {
2353 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
2354 	} else {
2355 		if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) ||
2356 		    (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2357 			/*
2358 			 * We have exceeded how many times we can retransmit
2359 			 * the current TLP; switch to the RTO timer.
2360 			 */
2361 			goto activate_rxt;
2362 		} else {
2363 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
2364 		}
2365 	}
2366 	if (to == 0)
2367 		to = 1;
2368 	return (to);
2369 }
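
/*
 * Summary example of the selection above (illustrative numbers): for
 * a SACK-passed segment last sent 10 ms ago with a rack threshold of
 * 51 ms, the RACK timer is armed for 41 ms. If instead the tail
 * segment qualifies for a probe, the TLP timer is armed for
 * thresh - time_since_sent. When neither applies we fall back to the
 * RXT timer derived from t_rxtcur.
 */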
2370 
2371 static void
2372 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2373 {
2374 	if (rack->rc_in_persist == 0) {
2375 		rack->r_ctl.rc_went_idle_time = cts;
2376 		rack_timer_cancel(tp, rack, cts, __LINE__);
2377 		tp->t_rxtshift = 0;
2378 		rack->rc_in_persist = 1;
2379 	}
2380 }
2381 
2382 static void
2383 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack)
2384 {
2385 	if (rack->rc_inp->inp_in_hpts)  {
2386 		tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
2387 		rack->r_ctl.rc_hpts_flags  = 0;
2388 	}
2389 	rack->rc_in_persist = 0;
2390 	rack->r_ctl.rc_went_idle_time = 0;
2391 	tp->t_flags &= ~TF_FORCEDATA;
2392 	tp->t_rxtshift = 0;
2393 }
2394 
2395 static void
2396 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
2397       int32_t slot, uint32_t tot_len_this_send, int sup_rack)
2398 {
2399 	struct inpcb *inp;
2400 	uint32_t delayed_ack = 0;
2401 	uint32_t hpts_timeout;
2402 	uint8_t stopped;
2403 	uint32_t left = 0;
2404 
2405 	inp = tp->t_inpcb;
2406 	if (inp->inp_in_hpts) {
2407 		/* A previous call is already set up */
2408 		return;
2409 	}
2410 	if ((tp->t_state == TCPS_CLOSED) ||
2411 	    (tp->t_state == TCPS_LISTEN)) {
2412 		return;
2413 	}
2414 	stopped = rack->rc_tmr_stopped;
2415 	if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
2416 		left = rack->r_ctl.rc_timer_exp - cts;
2417 	}
2418 	rack->tlp_timer_up = 0;
2419 	rack->r_ctl.rc_timer_exp = 0;
2420 	if (rack->rc_inp->inp_in_hpts == 0) {
2421 		rack->r_ctl.rc_hpts_flags = 0;
2422 	}
2423 	if (slot) {
2424 		/* We are hptsi too */
2425 		rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
2426 	} else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
2427 		/*
2428 		 * We are still left on the hpts; when the timeout
2429 		 * fires it will be for output.
2430 		 */
2431 		if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts))
2432 			slot = rack->r_ctl.rc_last_output_to - cts;
2433 		else
2434 			slot = 1;
2435 	}
2436 	hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
2437 	if (rack->sack_attack_disable &&
2438 	    (slot < USEC_TO_MSEC(tcp_sad_pacing_interval))) {
2439 		/*
2440 		 * We have a potential attacker on
2441 		 * the line. We have possibly some
2442 		 * (or no) pacing time set. We want to
2443 		 * slow down the processing of sacks by some
2444 		 * amount (if it is an attacker). Set the default
2445 		 * slot for attackers in place (unless the original
2446 		 * interval is longer). It is stored in
2447 		 * micro-seconds, so let's convert to msecs.
2448 		 */
2449 		slot = USEC_TO_MSEC(tcp_sad_pacing_interval);
2450 	}
2451 	if (tp->t_flags & TF_DELACK) {
2452 		delayed_ack = TICKS_2_MSEC(tcp_delacktime);
2453 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
2454 	}
2455 	if (delayed_ack && ((hpts_timeout == 0) ||
2456 			    (delayed_ack < hpts_timeout)))
2457 		hpts_timeout = delayed_ack;
2458 	else
2459 		rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2460 	/*
2461 	 * If no timers are going to run and we will fall off the hptsi
2462 	 * wheel, we resort to a keep-alive timer if it is configured.
2463 	 */
2464 	if ((hpts_timeout == 0) &&
2465 	    (slot == 0)) {
2466 		if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2467 		    (tp->t_state <= TCPS_CLOSING)) {
2468 			/*
2469 			 * Ok we have no timer (persists, rack, tlp, rxt or
2470 			 * del-ack), we don't have segments being paced. So
2471 			 * all that is left is the keepalive timer.
2472 			 */
2473 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2474 				/* Get the established keep-alive time */
2475 				hpts_timeout = TP_KEEPIDLE(tp);
2476 			} else {
2477 				/* Get the initial setup keep-alive time */
2478 				hpts_timeout = TP_KEEPINIT(tp);
2479 			}
2480 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
2481 		}
2482 	}
2483 	if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
2484 	    (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
2485 		/*
2486 		 * RACK, TLP, persists and RXT timers are all restartable
2487 		 * based on input actions, i.e. we received a packet (ack
2488 		 * or sack) and that changed things (rwnd, snd_una, etc.).
2489 		 * Thus we can restart them with a new value. For
2490 		 * keep-alive and delayed-ack we keep track of what was
2491 		 * left and restart the timer with the smaller value.
2492 		 */
2493 		if (left < hpts_timeout)
2494 			hpts_timeout = left;
2495 	}
2496 	if (hpts_timeout) {
2497 		/*
2498 		 * Hack alert for now we can't time-out over 2,147,483
2499 		 * seconds (a bit more than 596 hours), which is probably ok
2500 		 * :).
2501 		 */
2502 		if (hpts_timeout > 0x7ffffffe)
2503 			hpts_timeout = 0x7ffffffe;
2504 		rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
2505 	}
2506 	if (slot) {
2507 		rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
2508 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)
2509 			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
2510 		else
2511 			inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
2512 		rack->r_ctl.rc_last_output_to = cts + slot;
2513 		if ((hpts_timeout == 0) || (hpts_timeout > slot)) {
2514 			if (rack->rc_inp->inp_in_hpts == 0)
2515 				tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot));
2516 			rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
2517 		} else {
2518 			/*
2519 			 * Arrange for the hpts to kick back in after the
2520 			 * t-o if the t-o does not cause a send.
2521 			 */
2522 			if (rack->rc_inp->inp_in_hpts == 0)
2523 				tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2524 			rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2525 		}
2526 	} else if (hpts_timeout) {
2527 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)  {
2528 			/* For a rack timer, don't wake us */
2529 			rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
2530 			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
2531 		} else {
2532 			/* All other timers wake us up */
2533 			rack->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
2534 			inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
2535 		}
2536 		if (rack->rc_inp->inp_in_hpts == 0)
2537 			tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2538 		rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2539 	} else {
2540 		/* No timer starting */
2541 #ifdef INVARIANTS
2542 		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
2543 			panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
2544 			    tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
2545 		}
2546 #endif
2547 	}
2548 	rack->rc_tmr_stopped = 0;
2549 	if (slot)
2550 		rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts);
2551 }
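
/*
 * Illustrative interaction of slot and hpts_timeout (sketch values):
 * with a 10 ms pacing slot and a 40 ms computed timer, we insert
 * into the hpts wheel for the 10 ms send and leave rc_timer_exp set
 * 40 ms out; with no pacing slot and only a 200 ms delayed-ack
 * pending, the wheel entry is made for the 200 ms timeout instead.
 */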
2552 
2553 /*
2554  * RACK Timer, here we simply do logging and housekeeping.
2555  * The normal rack_output() function will call the
2556  * appropriate thing to check if we need to do a RACK retransmit.
2557  * We return 1, saying don't proceed with rack_output, only
2558  * when all timers have been stopped (destroyed PCB?).
2559  */
2560 static int
2561 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2562 {
2563 	/*
2564 	 * This timer simply provides an internal trigger to send out data.
2565 	 * The check_recovery_mode call will see if there are needed
2566 	 * retransmissions, if so we will enter fast-recovery. The output
2567 	 * call may or may not do the same thing depending on sysctl
2568 	 * settings.
2569 	 */
2570 	struct rack_sendmap *rsm;
2571 	int32_t recovery, ll;
2572 
2573 	if (tp->t_timers->tt_flags & TT_STOPPED) {
2574 		return (1);
2575 	}
2576 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2577 		/* Its not time yet */
2578 		return (0);
2579 	}
2580 	recovery = IN_RECOVERY(tp->t_flags);
2581 	counter_u64_add(rack_to_tot, 1);
2582 	if (rack->r_state && (rack->r_state != tp->t_state))
2583 		rack_set_state(tp, rack);
2584 	rsm = rack_check_recovery_mode(tp, cts);
2585 	if (rsm)
2586 		ll = rsm->r_end - rsm->r_start;
2587 	else
2588 		ll = 0;
2589 	rack_log_to_event(rack, RACK_TO_FRM_RACK, ll);
2590 	if (rsm) {
2591 		uint32_t rtt;
2592 
2593 		rtt = rack->rc_rack_rtt;
2594 		if (rtt == 0)
2595 			rtt = 1;
2596 		if ((recovery == 0) &&
2597 		    (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
2598 			/*
2599 			 * The rack-timeout that enters us into recovery
2600 			 * will force out one MSS and set us up so that we
2601 			 * can do one more send in 2*rtt (transitioning the
2602 			 * rack timeout into a rack-tlp).
2603 			 */
2604 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
2605 			rack_log_to_prr(rack, 3);
2606 		} else if ((rack->r_ctl.rc_prr_sndcnt < (rsm->r_end - rsm->r_start)) &&
2607 			   rack->use_rack_cheat) {
2608 			/*
2609 			 * When a rack timer goes, if the rack cheat is
2610 			 * on, arrange it so we can send a full segment.
2611 			 */
2612 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
2613 			rack_log_to_prr(rack, 4);
2614 		}
2615 	} else {
2616 		/* This is a case that should happen rarely if ever */
2617 		counter_u64_add(rack_tlp_does_nada, 1);
2618 #ifdef TCP_BLACKBOX
2619 		tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2620 #endif
2621 		rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2622 	}
2623 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
2624 	return (0);
2625 }
2626 
2627 static __inline void
2628 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
2629 	       struct rack_sendmap *rsm, uint32_t start)
2630 {
2631 	int idx;
2632 
2633 	nrsm->r_start = start;
2634 	nrsm->r_end = rsm->r_end;
2635 	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
2636 	nrsm->r_flags = rsm->r_flags;
2637 	nrsm->r_dupack = rsm->r_dupack;
2638 	nrsm->r_rtr_bytes = 0;
2639 	rsm->r_end = nrsm->r_start;
2640 	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
2641 		nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
2642 	}
2643 }
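
/*
 * Example of a split (illustrative sequence numbers): cloning an rsm
 * covering [1000, 4000) at start = 2000 leaves the original covering
 * [1000, 2000) and the new nrsm covering [2000, 4000), with the
 * retransmit count and send times duplicated so both halves keep the
 * full timing history.
 */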
2644 
2645 static struct rack_sendmap *
2646 rack_merge_rsm(struct tcp_rack *rack,
2647 	       struct rack_sendmap *l_rsm,
2648 	       struct rack_sendmap *r_rsm)
2649 {
2650 	/*
2651 	 * We are merging two ack'd RSM's,
2652 	 * the l_rsm is on the left (lower seq
2653 	 * values) and the r_rsm is on the right
2654 	 * (higher seq value). The simplest way
2655 	 * to merge these is to move the right
2656 	 * one into the left. I don't think there
2657 	 * is any reason we need to try to find
2658 	 * the oldest (or last oldest retransmitted).
2659 	 */
2660 	struct rack_sendmap *rm;
2661 
2662 	l_rsm->r_end = r_rsm->r_end;
2663 	if (l_rsm->r_dupack < r_rsm->r_dupack)
2664 		l_rsm->r_dupack = r_rsm->r_dupack;
2665 	if (r_rsm->r_rtr_bytes)
2666 		l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
2667 	if (r_rsm->r_in_tmap) {
2668 		/* This really should not happen */
2669 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
2670 		r_rsm->r_in_tmap = 0;
2671 	}
2672 	/* Now the flags */
2673 	if (r_rsm->r_flags & RACK_HAS_FIN)
2674 		l_rsm->r_flags |= RACK_HAS_FIN;
2675 	if (r_rsm->r_flags & RACK_TLP)
2676 		l_rsm->r_flags |= RACK_TLP;
2677 	if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
2678 		l_rsm->r_flags |= RACK_RWND_COLLAPSED;
2679 	rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
2680 #ifdef INVARIANTS
2681 	if (rm != r_rsm) {
2682 		panic("removing head in rack:%p rsm:%p rm:%p",
2683 		      rack, r_rsm, rm);
2684 	}
2685 #endif
2686 	if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
2687 		/* Transfer the split limit to the map we free */
2688 		r_rsm->r_limit_type = l_rsm->r_limit_type;
2689 		l_rsm->r_limit_type = 0;
2690 	}
2691 	rack_free(rack, r_rsm);
2692 	return(l_rsm);
2693 }
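
/*
 * Example of a merge (illustrative sequence numbers): merging acked
 * neighbors [1000, 2000) and [2000, 3000) leaves the left rsm
 * covering [1000, 3000); the right rsm's dupack count, retransmitted
 * bytes and FIN/TLP/collapsed flags are folded into the left one
 * before the right is freed.
 */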
2694 
2695 /*
2696  * TLP Timer, here we simply setup what segment we want to
2697  * have the TLP expire on, the normal rack_output() will then
2698  * send it out.
2699  *
2700  * We return 1, saying don't proceed with rack_output only
2701  * when all timers have been stopped (destroyed PCB?).
2702  */
2703 static int
2704 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2705 {
2706 	/*
2707 	 * Tail Loss Probe.
2708 	 */
2709 	struct rack_sendmap *rsm = NULL;
2710 	struct rack_sendmap *insret;
2711 	struct socket *so;
2712 	uint32_t amm, old_prr_snd = 0;
2713 	uint32_t out, avail;
2714 	int collapsed_win = 0;
2715 
2716 	if (tp->t_timers->tt_flags & TT_STOPPED) {
2717 		return (1);
2718 	}
2719 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2720 		/* Its not time yet */
2721 		return (0);
2722 	}
2723 	if (rack_progress_timeout_check(tp)) {
2724 		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
2725 		return (1);
2726 	}
2727 	/*
2728 	 * A TLP timer has expired. We have been idle for 2 rtts. So we now
2729 	 * need to figure out how to force a full MSS segment out.
2730 	 */
2731 	rack_log_to_event(rack, RACK_TO_FRM_TLP, 0);
2732 	counter_u64_add(rack_tlp_tot, 1);
2733 	if (rack->r_state && (rack->r_state != tp->t_state))
2734 		rack_set_state(tp, rack);
2735 	so = tp->t_inpcb->inp_socket;
2736 #ifdef KERN_TLS
2737 	if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
2738 		/*
2739 		 * For hardware TLS we do *not* want to send
2740 		 * new data, lets instead just do a retransmission.
2741 		 */
2742 		goto need_retran;
2743 	}
2744 #endif
2745 	avail = sbavail(&so->so_snd);
2746 	out = tp->snd_max - tp->snd_una;
2747 	rack->tlp_timer_up = 1;
2748 	if (out > tp->snd_wnd) {
2749 		/* special case, we need a retransmission */
2750 		collapsed_win = 1;
2751 		goto need_retran;
2752 	}
2753 	/*
2754 	 * If we are in recovery we can jazz out a segment if new data is
2755 	 * present simply by setting rc_prr_sndcnt to a segment.
2756 	 */
2757 	if ((avail > out) &&
2758 	    ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) {
2759 		/* New data is available */
2760 		amm = avail - out;
2761 		if (amm > ctf_fixed_maxseg(tp)) {
2762 			amm = ctf_fixed_maxseg(tp);
2763 		} else if ((amm < ctf_fixed_maxseg(tp)) && ((tp->t_flags & TF_NODELAY) == 0)) {
2764 			/* not enough to fill a MTU and no-delay is off */
2765 			goto need_retran;
2766 		}
2767 		if (IN_RECOVERY(tp->t_flags)) {
2768 			/* Unlikely */
2769 			old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
2770 			if (out + amm <= tp->snd_wnd) {
2771 				rack->r_ctl.rc_prr_sndcnt = amm;
2772 				rack_log_to_prr(rack, 4);
2773 			} else
2774 				goto need_retran;
2775 		} else {
2776 			/* Set the send-new override */
2777 			if (out + amm <= tp->snd_wnd)
2778 				rack->r_ctl.rc_tlp_new_data = amm;
2779 			else
2780 				goto need_retran;
2781 		}
2782 		rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2783 		rack->r_ctl.rc_last_tlp_seq = tp->snd_max;
2784 		rack->r_ctl.rc_tlpsend = NULL;
2785 		counter_u64_add(rack_tlp_newdata, 1);
2786 		goto send;
2787 	}
2788 need_retran:
2789 	/*
2790 	 * Ok we need to arrange the last un-acked segment to be re-sent, or
2791 	 * optionally the first un-acked segment.
2792 	 */
2793 	if (collapsed_win == 0) {
2794 		if (rack_always_send_oldest)
2795 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2796 		else {
2797 			rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
2798 			if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
2799 				rsm = rack_find_high_nonack(rack, rsm);
2800 			}
2801 		}
2802 		if (rsm == NULL) {
2803 			counter_u64_add(rack_tlp_does_nada, 1);
2804 #ifdef TCP_BLACKBOX
2805 			tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2806 #endif
2807 			goto out;
2808 		}
2809 	} else {
2810 		/*
2811 		 * We must find the last segment
2812 		 * that was acceptable to the client.
2813 		 */
2814 		RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
2815 			if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
2816 				/* Found one */
2817 				break;
2818 			}
2819 		}
2820 		if (rsm == NULL) {
2821 			/* None? if so send the first */
2822 			rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
2823 			if (rsm == NULL) {
2824 				counter_u64_add(rack_tlp_does_nada, 1);
2825 #ifdef TCP_BLACKBOX
2826 				tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2827 #endif
2828 				goto out;
2829 			}
2830 		}
2831 	}
2832 	if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
2833 		/*
2834 		 * We need to split this, the last segment, in two.
2835 		 */
2836 		struct rack_sendmap *nrsm;
2837 
2839 		nrsm = rack_alloc_full_limit(rack);
2840 		if (nrsm == NULL) {
2841 			/*
2842 			 * No memory to split, we will just exit and punt
2843 			 * off to the RXT timer.
2844 			 */
2845 			counter_u64_add(rack_tlp_does_nada, 1);
2846 			goto out;
2847 		}
2848 		rack_clone_rsm(rack, nrsm, rsm,
2849 			       (rsm->r_end - ctf_fixed_maxseg(tp)));
2850 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
2851 #ifdef INVARIANTS
2852 		if (insret != NULL) {
2853 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
2854 			      nrsm, insret, rack, rsm);
2855 		}
2856 #endif
2857 		if (rsm->r_in_tmap) {
2858 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
2859 			nrsm->r_in_tmap = 1;
2860 		}
2861 		rsm->r_flags &= (~RACK_HAS_FIN);
2862 		rsm = nrsm;
2863 	}
2864 	rack->r_ctl.rc_tlpsend = rsm;
2865 	rack->r_ctl.rc_tlp_rtx_out = 1;
2866 	if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) {
2867 		rack->r_ctl.rc_tlp_seg_send_cnt++;
2868 		tp->t_rxtshift++;
2869 	} else {
2870 		rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2871 		rack->r_ctl.rc_tlp_seg_send_cnt = 1;
2872 	}
2873 send:
2874 	rack->r_ctl.rc_tlp_send_cnt++;
2875 	if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) {
2876 		/*
2877 		 * Can't [re]transmit a segment more than the maximum
2878 		 * number of times without hearing from the peer. We need
2879 		 * the retransmit timer to take over.
2880 		 */
2881 	restore:
2882 		rack->r_ctl.rc_tlpsend = NULL;
2883 		if (rsm)
2884 			rsm->r_flags &= ~RACK_TLP;
2885 		rack->r_ctl.rc_prr_sndcnt = old_prr_snd;
2886 		rack_log_to_prr(rack, 5);
2887 		counter_u64_add(rack_tlp_retran_fail, 1);
2888 		goto out;
2889 	} else if (rsm) {
2890 		rsm->r_flags |= RACK_TLP;
2891 	}
2892 	if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) &&
2893 	    (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2894 		/*
2895 		 * We don't want to send a single segment more than the max
2896 		 * either.
2897 		 */
2898 		goto restore;
2899 	}
2900 	rack->r_timer_override = 1;
2901 	rack->r_tlp_running = 1;
2902 	rack->rc_tlp_in_progress = 1;
2903 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2904 	return (0);
2905 out:
2906 	rack->tlp_timer_up = 0;
2907 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2908 	return (0);
2909 }
2910 
2911 /*
2912  * Delayed ack timer, here we simply need to set the
2913  * TF_ACKNOW flag and clear the TF_DELACK flag. From there
2914  * the output routine will send the ack out.
2915  *
2916  * We only return 1, saying don't proceed, if all timers
2917  * are stopped (destroyed PCB?).
2918  */
2919 static int
2920 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2921 {
2922 	if (tp->t_timers->tt_flags & TT_STOPPED) {
2923 		return (1);
2924 	}
2925 	rack_log_to_event(rack, RACK_TO_FRM_DELACK, 0);
2926 	tp->t_flags &= ~TF_DELACK;
2927 	tp->t_flags |= TF_ACKNOW;
2928 	TCPSTAT_INC(tcps_delack);
2929 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2930 	return (0);
2931 }
2932 
2933 /*
2934  * Persists timer, here we simply need to set up the
2935  * TF_FORCEDATA flag so the output routine will send
2936  * the one byte probe.
2937  *
2938  * We only return 1, saying don't proceed, if all timers
2939  * are stopped (destroyed PCB?).
2940  */
2941 static int
2942 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2943 {
2944 	struct tcptemp *t_template;
2945 	struct inpcb *inp;
2946 	int32_t retval = 1;
2947 
2948 	inp = tp->t_inpcb;
2949 
2950 	if (tp->t_timers->tt_flags & TT_STOPPED) {
2951 		return (1);
2952 	}
2953 	if (rack->rc_in_persist == 0)
2954 		return (0);
2955 	if (rack_progress_timeout_check(tp)) {
2956 		tcp_set_inp_to_drop(inp, ETIMEDOUT);
2957 		return (1);
2958 	}
2959 	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
2960 	/*
2961 	 * Persistence timer into zero window. Force a byte to be output, if
2962 	 * possible.
2963 	 */
2964 	TCPSTAT_INC(tcps_persisttimeo);
2965 	/*
2966 	 * Hack: if the peer is dead/unreachable, we do not time out if the
2967 	 * window is closed.  After a full backoff, drop the connection if
2968 	 * the idle time (no responses to probes) reaches the maximum
2969 	 * backoff that we would use if retransmitting.
2970 	 */
2971 	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
2972 	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
2973 	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
2974 		TCPSTAT_INC(tcps_persistdrop);
2975 		retval = 1;
2976 		tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2977 		goto out;
2978 	}
2979 	if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
2980 	    tp->snd_una == tp->snd_max)
2981 		rack_exit_persist(tp, rack);
2982 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
2983 	/*
2984 	 * If the user has closed the socket then drop a persisting
2985 	 * connection after a much reduced timeout.
2986 	 */
2987 	if (tp->t_state > TCPS_CLOSE_WAIT &&
2988 	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
2989 		retval = 1;
2990 		TCPSTAT_INC(tcps_persistdrop);
2991 		tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2992 		goto out;
2993 	}
2994 	t_template = tcpip_maketemplate(rack->rc_inp);
2995 	if (t_template) {
2996 		tcp_respond(tp, t_template->tt_ipgen,
2997 			    &t_template->tt_t, (struct mbuf *)NULL,
2998 			    tp->rcv_nxt, tp->snd_una - 1, 0);
2999 		/* This sends an ack */
3000 		if (tp->t_flags & TF_DELACK)
3001 			tp->t_flags &= ~TF_DELACK;
3002 		free(t_template, M_TEMP);
3003 	}
3004 	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
3005 		tp->t_rxtshift++;
3006 out:
3007 	rack_log_to_event(rack, RACK_TO_FRM_PERSIST, 0);
3008 	rack_start_hpts_timer(rack, tp, cts,
3009 			      0, 0, 0);
3010 	return (retval);
3011 }
3012 
3013 /*
3014  * If a keepalive goes off, we had no other timers
3015  * happening. We always return 1 here since this
3016  * routine either drops the connection or sends
3017  * out a probe segment via tcp_respond().
3018  */
3019 static int
3020 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
3021 {
3022 	struct tcptemp *t_template;
3023 	struct inpcb *inp;
3024 
3025 	if (tp->t_timers->tt_flags & TT_STOPPED) {
3026 		return (1);
3027 	}
3028 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
3029 	inp = tp->t_inpcb;
3030 	rack_log_to_event(rack, RACK_TO_FRM_KEEP, 0);
3031 	/*
3032 	 * Keep-alive timer went off; send something or drop connection if
3033 	 * idle for too long.
3034 	 */
3035 	TCPSTAT_INC(tcps_keeptimeo);
3036 	if (tp->t_state < TCPS_ESTABLISHED)
3037 		goto dropit;
3038 	if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
3039 	    tp->t_state <= TCPS_CLOSING) {
3040 		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
3041 			goto dropit;
3042 		/*
3043 		 * Send a packet designed to force a response if the peer is
3044 		 * up and reachable: either an ACK if the connection is
3045 		 * still alive, or an RST if the peer has closed the
3046 		 * connection due to timeout or reboot. Using sequence
3047 		 * number tp->snd_una-1 causes the transmitted zero-length
3048 		 * segment to lie outside the receive window; by the
3049 		 * protocol spec, this requires the correspondent TCP to
3050 		 * respond.
3051 		 */
3052 		TCPSTAT_INC(tcps_keepprobe);
3053 		t_template = tcpip_maketemplate(inp);
3054 		if (t_template) {
3055 			tcp_respond(tp, t_template->tt_ipgen,
3056 			    &t_template->tt_t, (struct mbuf *)NULL,
3057 			    tp->rcv_nxt, tp->snd_una - 1, 0);
3058 			free(t_template, M_TEMP);
3059 		}
3060 	}
3061 	rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
3062 	return (1);
3063 dropit:
3064 	TCPSTAT_INC(tcps_keepdrops);
3065 	tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
3066 	return (1);
3067 }
3068 
3069 /*
3070  * Retransmit helper function, clears up all the ack
3071  * flags and takes care of important bookkeeping.
3072  */
3073 static void
3074 rack_remxt_tmr(struct tcpcb *tp)
3075 {
3076 	/*
3077 	 * The retransmit timer went off, all sack'd blocks must be
3078 	 * un-acked.
3079 	 */
3080 	struct rack_sendmap *rsm, *trsm = NULL;
3081 	struct tcp_rack *rack;
3082 	int32_t cnt = 0;
3083 
3084 	rack = (struct tcp_rack *)tp->t_fb_ptr;
3085 	rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
3086 	rack_log_to_event(rack, RACK_TO_FRM_TMR, 0);
3087 	if (rack->r_state && (rack->r_state != tp->t_state))
3088 		rack_set_state(tp, rack);
3089 	/*
3090 	 * Ideally we would like to be able to
3091 	 * mark SACK-PASS on anything not acked here.
3092 	 * However, if we do that we would burst out
3093 	 * all that data 1ms apart. This would be unwise,
3094 	 * so for now we will just let the normal rxt timer
3095 	 * and tlp timer take care of it.
3096 	 */
3097 	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
3098 		if (rsm->r_flags & RACK_ACKED) {
3099 			cnt++;
3100 			rsm->r_dupack = 0;
3101 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
3102 			if (rsm->r_in_tmap == 0) {
3103 				/* We must re-add it to the tmap */
3104 				if (trsm == NULL) {
3105 					TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3106 				} else {
3107 					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
3108 				}
3109 				rsm->r_in_tmap = 1;
3110 			}
3111 		}
3112 		trsm = rsm;
3113 		rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
3114 	}
3115 	/* Clear the count (we just un-acked them) */
3116 	rack->r_ctl.rc_sacked = 0;
3117 	/* Clear the tlp rtx mark */
3118 	rack->r_ctl.rc_tlp_rtx_out = 0;
3119 	rack->r_ctl.rc_tlp_seg_send_cnt = 0;
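	/*
	 * Point the resend at the tree minimum, i.e. the lowest
	 * outstanding sequence number; that is what goes out first.
	 */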
3120 	rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3121 	rack->r_ctl.rc_prr_sndcnt = 0;
3122 	rack_log_to_prr(rack, 6);
3123 	rack->r_timer_override = 1;
3124 }
3125 
3126 /*
3127  * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
3128  * we will set up to retransmit the lowest outstanding sequence number.
3129  */
3130 static int
3131 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
3132 {
3133 	int32_t rexmt;
3134 	struct inpcb *inp;
3135 	int32_t retval = 0;
3136 
3137 	inp = tp->t_inpcb;
3138 	if (tp->t_timers->tt_flags & TT_STOPPED) {
3139 		return (1);
3140 	}
3141 	if (rack_progress_timeout_check(tp)) {
3142 		tcp_set_inp_to_drop(inp, ETIMEDOUT);
3143 		return (1);
3144 	}
3145 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
3146 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
3147 	    (tp->snd_una == tp->snd_max)) {
3148 		/* Nothing outstanding .. nothing to do */
3149 		return (0);
3150 	}
3151 	/*
3152 	 * Retransmission timer went off.  Message has not been acked within
3153 	 * retransmit interval.  Back off to a longer retransmit interval
3154 	 * and retransmit one segment.
3155 	 */
3156 	rack_remxt_tmr(tp);
3157 	if ((rack->r_ctl.rc_resend == NULL) ||
3158 	    ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
3159 		/*
3160 		 * If the rwnd collapsed on
3161 		 * the one we are retransmitting
3162 		 * it does not count against the
3163 		 * rxt count.
3164 		 */
3165 		tp->t_rxtshift++;
3166 	}
3167 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
3168 		tp->t_rxtshift = TCP_MAXRXTSHIFT;
3169 		TCPSTAT_INC(tcps_timeoutdrop);
3170 		retval = 1;
3171 		tcp_set_inp_to_drop(rack->rc_inp,
3172 		    (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
3173 		goto out;
3174 	}
3175 	if (tp->t_state == TCPS_SYN_SENT) {
3176 		/*
3177 		 * If the SYN was retransmitted, indicate CWND to be limited
3178 		 * to 1 segment in cc_conn_init().
3179 		 */
3180 		tp->snd_cwnd = 1;
3181 	} else if (tp->t_rxtshift == 1) {
3182 		/*
3183 		 * first retransmit; record ssthresh and cwnd so they can be
3184 		 * recovered if this turns out to be a "bad" retransmit. A
3185 		 * retransmit is considered "bad" if an ACK for this segment
3186 		 * is received within RTT/2 interval; the assumption here is
3187 		 * that the ACK was already in flight.  See "On Estimating
3188 		 * End-to-End Network Path Properties" by Allman and Paxson
3189 		 * for more details.
3190 		 */
3191 		tp->snd_cwnd_prev = tp->snd_cwnd;
3192 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
3193 		tp->snd_recover_prev = tp->snd_recover;
3194 		if (IN_FASTRECOVERY(tp->t_flags))
3195 			tp->t_flags |= TF_WASFRECOVERY;
3196 		else
3197 			tp->t_flags &= ~TF_WASFRECOVERY;
3198 		if (IN_CONGRECOVERY(tp->t_flags))
3199 			tp->t_flags |= TF_WASCRECOVERY;
3200 		else
3201 			tp->t_flags &= ~TF_WASCRECOVERY;
3202 		tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
3203 		tp->t_flags |= TF_PREVVALID;
3204 	} else
3205 		tp->t_flags &= ~TF_PREVVALID;
3206 	TCPSTAT_INC(tcps_rexmttimeo);
3207 	if ((tp->t_state == TCPS_SYN_SENT) ||
3208 	    (tp->t_state == TCPS_SYN_RECEIVED))
3209 		rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]);
3210 	else
3211 		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
3212 	TCPT_RANGESET(tp->t_rxtcur, rexmt,
3213 	   max(MSEC_2_TICKS(rack_rto_min), rexmt),
3214 	   MSEC_2_TICKS(rack_rto_max));
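	/*
	 * An illustrative sketch of the backoff above, assuming the
	 * stock doubling tcp_backoff[] table: with a base RTO of 400ms
	 * and t_rxtshift of 3, the raw rexmt would be 400ms * 8 =
	 * 3200ms, which TCPT_RANGESET() then clamps into the
	 * [rack_rto_min, rack_rto_max] bounds.
	 */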
3215 	/*
3216 	 * We enter the path for PLMTUD if the connection is established
3217 	 * or in FIN_WAIT_1 state; the reason for the latter is that if
3218 	 * the amount of data we send is very small, we could send it in
3219 	 * a couple of packets and proceed straight to FIN. In that case
3220 	 * we would never catch the ESTABLISHED state.
3221 	 */
3222 	if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED))
3223 	    || (tp->t_state == TCPS_FIN_WAIT_1))) {
3224 #ifdef INET6
3225 		int32_t isipv6;
3226 #endif
3227 
3228 		/*
3229 		 * The idea here is that each stage of the mtu probe
3230 		 * (usually 1448 -> 1188 -> 524) should be given 2 chances
3231 		 * to recover before we clamp down further.
3232 		 * 'tp->t_rxtshift % 2 == 0' takes care of that.
3233 		 */
3234 		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
3235 		    (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
3236 		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
3237 		    tp->t_rxtshift % 2 == 0)) {
3238 			/*
3239 			 * Enter the Path MTU Black-hole Detection mechanism:
3240 			 * - Disable Path MTU Discovery (IP "DF" bit).
3241 			 * - Reduce the MTU to a lower value than what we
3242 			 *   negotiated with the peer.
3243 			 */
3244 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
3245 				/* Record that we may have found a black hole. */
3246 				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
3247 				/* Keep track of previous MSS. */
3248 				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
3249 			}
3250 
3251 			/*
3252 			 * Reduce the MSS to blackhole value or to the
3253 			 * default in an attempt to retransmit.
3254 			 */
3255 #ifdef INET6
3256 			isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
3257 			if (isipv6 &&
3258 			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
3259 				/* Use the sysctl tuneable blackhole MSS. */
3260 				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
3261 				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
3262 			} else if (isipv6) {
3263 				/* Use the default MSS. */
3264 				tp->t_maxseg = V_tcp_v6mssdflt;
3265 				/*
3266 				 * Disable Path MTU Discovery when we switch
3267 				 * to minmss.
3268 				 */
3269 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
3270 				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
3271 			}
3272 #endif
3273 #if defined(INET6) && defined(INET)
3274 			else
3275 #endif
3276 #ifdef INET
3277 			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
3278 				/* Use the sysctl tuneable blackhole MSS. */
3279 				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
3280 				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
3281 			} else {
3282 				/* Use the default MSS. */
3283 				tp->t_maxseg = V_tcp_mssdflt;
3284 				/*
3285 				 * Disable Path MTU Discovery when we switch
3286 				 * to minmss.
3287 				 */
3288 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
3289 				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
3290 			}
3291 #endif
3292 		} else {
3293 			/*
3294 			 * If further retransmissions are still unsuccessful
3295 			 * with a lowered MTU, maybe this isn't a blackhole
3296 			 * and we restore the previous MSS and blackhole
3297 			 * detection flags. The limit '6' is determined by
3298 			 * giving each probe stage (1448, 1188, 524) 2
3299 			 * chances to recover.
3300 			 */
3301 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
3302 			    (tp->t_rxtshift >= 6)) {
3303 				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
3304 				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
3305 				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
3306 				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
3307 			}
3308 		}
3309 	}
3310 	/*
3311 	 * If we backed off this far, our srtt estimate is probably bogus.
3312 	 * Clobber it so we'll take the next rtt measurement as our srtt;
3313 	 * move the current srtt into rttvar to keep the current retransmit
3314 	 * times until then.
3315 	 */
3316 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
3317 #ifdef INET6
3318 		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
3319 			in6_losing(tp->t_inpcb);
3320 		else
3321 #endif
3322 			in_losing(tp->t_inpcb);
3323 		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
3324 		tp->t_srtt = 0;
3325 	}
3326 	if (rack_use_sack_filter)
3327 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
3328 	tp->snd_recover = tp->snd_max;
3329 	tp->t_flags |= TF_ACKNOW;
3330 	tp->t_rtttime = 0;
3331 	rack_cong_signal(tp, NULL, CC_RTO);
3332 out:
3333 	return (retval);
3334 }
3335 
3336 static int
3337 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
3338 {
3339 	int32_t ret = 0;
3340 	int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
3341 
3342 	if (timers == 0) {
3343 		return (0);
3344 	}
3345 	if (tp->t_state == TCPS_LISTEN) {
3346 		/* no timers on listen sockets */
3347 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
3348 			return (0);
3349 		return (1);
3350 	}
3351 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
3352 		uint32_t left;
3353 
3354 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
3355 			ret = -1;
3356 			rack_log_to_processing(rack, cts, ret, 0);
3357 			return (0);
3358 		}
3359 		if (hpts_calling == 0) {
3360 			ret = -2;
3361 			rack_log_to_processing(rack, cts, ret, 0);
3362 			return (0);
3363 		}
3364 		/*
3365 		 * Ok, our timer went off early and we are not pacing output;
3366 		 * it's a false alarm, go back to sleep.
3367 		 */
3368 		ret = -3;
3369 		left = rack->r_ctl.rc_timer_exp - cts;
3370 		tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
3371 		rack_log_to_processing(rack, cts, ret, left);
3372 		rack->rc_last_pto_set = 0;
3373 		return (1);
3374 	}
3375 	rack->rc_tmr_stopped = 0;
3376 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
3377 	if (timers & PACE_TMR_DELACK) {
3378 		ret = rack_timeout_delack(tp, rack, cts);
3379 	} else if (timers & PACE_TMR_RACK) {
3380 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
3381 		ret = rack_timeout_rack(tp, rack, cts);
3382 	} else if (timers & PACE_TMR_TLP) {
3383 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
3384 		ret = rack_timeout_tlp(tp, rack, cts);
3385 	} else if (timers & PACE_TMR_RXT) {
3386 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
3387 		ret = rack_timeout_rxt(tp, rack, cts);
3388 	} else if (timers & PACE_TMR_PERSIT) {
3389 		ret = rack_timeout_persist(tp, rack, cts);
3390 	} else if (timers & PACE_TMR_KEEP) {
3391 		ret = rack_timeout_keepalive(tp, rack, cts);
3392 	}
3393 	rack_log_to_processing(rack, cts, ret, timers);
3394 	return (ret);
3395 }
3396 
3397 static void
3398 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
3399 {
3400 	uint8_t hpts_removed = 0;
3401 
3402 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
3403 	    TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
3404 		tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3405 		hpts_removed = 1;
3406 	}
3407 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
3408 		rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
3409 		if (rack->rc_inp->inp_in_hpts &&
3410 		    ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
3411 			/*
3412 			 * Canceling timers when we have no output being
3413 			 * paced. We must also remove ourselves from the
3414 			 * hpts.
3415 			 */
3416 			tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3417 			hpts_removed = 1;
3418 		}
3419 		rack_log_to_cancel(rack, hpts_removed, line);
3420 		rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
3421 	}
3422 }
3423 
3424 static void
3425 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
3426 {
3427 	return;
3428 }
3429 
3430 static int
3431 rack_stopall(struct tcpcb *tp)
3432 {
3433 	struct tcp_rack *rack;
3434 	rack = (struct tcp_rack *)tp->t_fb_ptr;
3435 	rack->t_timers_stopped = 1;
3436 	return (0);
3437 }
3438 
3439 static void
3440 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
3441 {
3442 	return;
3443 }
3444 
3445 static int
3446 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
3447 {
3448 	return (0);
3449 }
3450 
3451 static void
3452 rack_stop_all_timers(struct tcpcb *tp)
3453 {
3454 	struct tcp_rack *rack;
3455 
3456 	/*
3457 	 * Assure no timers are running.
3458 	 */
3459 	if (tcp_timer_active(tp, TT_PERSIST)) {
3460 		/* We are entering persist state, set the flag appropriately */
3461 		rack = (struct tcp_rack *)tp->t_fb_ptr;
3462 		rack->rc_in_persist = 1;
3463 	}
3464 	tcp_timer_suspend(tp, TT_PERSIST);
3465 	tcp_timer_suspend(tp, TT_REXMT);
3466 	tcp_timer_suspend(tp, TT_KEEP);
3467 	tcp_timer_suspend(tp, TT_DELACK);
3468 }
3469 
3470 static void
3471 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
3472     struct rack_sendmap *rsm, uint32_t ts)
3473 {
3474 	int32_t idx;
3475 
3476 	rsm->r_rtr_cnt++;
3477 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
3478 	rsm->r_dupack = 0;
3479 	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
3480 		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
3481 		rsm->r_flags |= RACK_OVERMAX;
3482 	}
3483 	if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) {
3484 		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
3485 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
3486 	}
3487 	idx = rsm->r_rtr_cnt - 1;
3488 	rsm->r_tim_lastsent[idx] = ts;
3489 	if (rsm->r_flags & RACK_ACKED) {
3490 		/* Probably MTU discovery messing with us */
3491 		rsm->r_flags &= ~RACK_ACKED;
3492 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
3493 	}
3494 	if (rsm->r_in_tmap) {
3495 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3496 		rsm->r_in_tmap = 0;
3497 	}
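	/* (Re-)insert at the tail so the tmap stays ordered by time of last send. */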
3498 	TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3499 	rsm->r_in_tmap = 1;
3500 	if (rsm->r_flags & RACK_SACK_PASSED) {
3501 		/* We have retransmitted due to the SACK pass */
3502 		rsm->r_flags &= ~RACK_SACK_PASSED;
3503 		rsm->r_flags |= RACK_WAS_SACKPASS;
3504 	}
3505 }
3506 
3507 
3508 static uint32_t
3509 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
3510     struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp)
3511 {
3512 	/*
3513 	 * We (re-)transmitted starting at rsm->r_start for some length
3514 	 * (possibly ending before r_end).
3515 	 */
3516 	struct rack_sendmap *nrsm, *insret;
3517 	uint32_t c_end;
3518 	int32_t len;
3519 
3520 	len = *lenp;
3521 	c_end = rsm->r_start + len;
3522 	if (SEQ_GEQ(c_end, rsm->r_end)) {
3523 		/*
3524 		 * We retransmitted the whole piece, or more than the
3525 		 * whole piece, slopping over into the next rsm.
3526 		 */
3527 		rack_update_rsm(tp, rack, rsm, ts);
3528 		if (c_end == rsm->r_end) {
3529 			*lenp = 0;
3530 			return (0);
3531 		} else {
3532 			int32_t act_len;
3533 
3534 			/* Hangs over the end; return what's left */
3535 			act_len = rsm->r_end - rsm->r_start;
3536 			*lenp = (len - act_len);
3537 			return (rsm->r_end);
3538 		}
3539 		/* We don't get out of this block. */
3540 	}
3541 	/*
3542 	 * Here we retransmitted less than the whole thing which means we
3543 	 * have to split this into what was transmitted and what was not.
3544 	 */
3545 	nrsm = rack_alloc_full_limit(rack);
3546 	if (nrsm == NULL) {
3547 		/*
3548 		 * We can't get memory, so let's not proceed.
3549 		 */
3550 		*lenp = 0;
3551 		return (0);
3552 	}
3553 	/*
3554 	 * So here we are going to take the original rsm and make it what we
3555 	 * retransmitted. nrsm will be the tail portion we did not
3556 	 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
3557 	 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
3558 	 * 1, 6 and the new piece will be 6, 11.
3559 	 */
3560 	rack_clone_rsm(rack, nrsm, rsm, c_end);
3561 	nrsm->r_dupack = 0;
3562 	rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
3563 	insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
3564 #ifdef INVARIANTS
3565 	if (insret != NULL) {
3566 		panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
3567 		      nrsm, insret, rack, rsm);
3568 	}
3569 #endif
3570 	if (rsm->r_in_tmap) {
3571 		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3572 		nrsm->r_in_tmap = 1;
3573 	}
3574 	rsm->r_flags &= (~RACK_HAS_FIN);
3575 	rack_update_rsm(tp, rack, rsm, ts);
3576 	*lenp = 0;
3577 	return (0);
3578 }
3579 
3580 
3581 static void
3582 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
3583     uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
3584     uint8_t pass, struct rack_sendmap *hintrsm)
3585 {
3586 	struct tcp_rack *rack;
3587 	struct rack_sendmap *rsm, *nrsm, *insret, fe;
3588 	register uint32_t snd_max, snd_una;
3589 
3590 	/*
3591 	 * Add to the RACK log of packets in flight or retransmitted. If
3592 	 * there is a TS option we will use the TS echoed, if not we will
3593 	 * grab a TS.
3594 	 *
3595 	 * Retransmissions will increment the count and move the ts to its
3596 	 * proper place. Note that if options do not include TS's then we
3597  * won't be able to effectively use the ACK for an RTT on a retransmission.
3598 	 *
3599  * Notes about r_start and r_end. Let's consider a send starting at
3600 	 * sequence 1 for 10 bytes. In such an example the r_start would be
3601 	 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
3602 	 * This means that r_end is actually the first sequence for the next
3603 	 * slot (11).
3604 	 *
3605 	 */
3606 	/*
3607 	 * If err is set what do we do XXXrrs? should we not add the thing?
3608 	 * -- i.e. return if err != 0 or should we pretend we sent it? --
3609 	 * i.e. proceed with add ** do this for now.
3610 	 */
3611 	INP_WLOCK_ASSERT(tp->t_inpcb);
3612 	if (err)
3613 		/*
3614 		 * We don't log errors -- we could but snd_max does not
3615 		 * advance in this case either.
3616 		 */
3617 		return;
3618 
3619 	if (th_flags & TH_RST) {
3620 		/*
3621 		 * We don't log resets and we return immediately from
3622 		 * sending
3623 		 */
3624 		return;
3625 	}
3626 	rack = (struct tcp_rack *)tp->t_fb_ptr;
3627 	snd_una = tp->snd_una;
3628 	if (SEQ_LEQ((seq_out + len), snd_una)) {
3629 		/* Are we sending an old segment to induce an ack (keep-alive)? */
3630 		return;
3631 	}
3632 	if (SEQ_LT(seq_out, snd_una)) {
3633 		/* huh? should we panic? */
3634 		uint32_t end;
3635 
3636 		end = seq_out + len;
3637 		seq_out = snd_una;
3638 		if (SEQ_GEQ(end, seq_out))
3639 			len = end - seq_out;
3640 		else
3641 			len = 0;
3642 	}
3643 	snd_max = tp->snd_max;
3644 	if (th_flags & (TH_SYN | TH_FIN)) {
3645 		/*
3646 		 * The call to rack_log_output is made before bumping
3647 		 * snd_max. This means we can record one extra byte on a SYN
3648 		 * or FIN if seq_out is adding more on and a FIN is present
3649 		 * (and we are not resending).
3650 		 */
3651 		if (th_flags & TH_SYN)
3652 			len++;
3653 		if (th_flags & TH_FIN)
3654 			len++;
3655 		if (SEQ_LT(snd_max, tp->snd_nxt)) {
3656 			/*
3657 			 * The add/update has not been done for the FIN/SYN
3658 			 * yet.
3659 			 */
3660 			snd_max = tp->snd_nxt;
3661 		}
3662 	}
3663 	if (len == 0) {
3664 		/* We don't log zero window probes */
3665 		return;
3666 	}
3667 	rack->r_ctl.rc_time_last_sent = ts;
3668 	if (IN_RECOVERY(tp->t_flags)) {
3669 		rack->r_ctl.rc_prr_out += len;
3670 	}
3671 	/* First question is it a retransmission or new? */
3672 	if (seq_out == snd_max) {
3673 		/* It's new */
3674 again:
3675 		rsm = rack_alloc(rack);
3676 		if (rsm == NULL) {
3677 			/*
3678 			 * Hmm out of memory and the tcb got destroyed while
3679 			 * we tried to wait.
3680 			 */
3681 			return;
3682 		}
3683 		if (th_flags & TH_FIN) {
3684 			rsm->r_flags = RACK_HAS_FIN;
3685 		} else {
3686 			rsm->r_flags = 0;
3687 		}
3688 		rsm->r_tim_lastsent[0] = ts;
3689 		rsm->r_rtr_cnt = 1;
3690 		rsm->r_rtr_bytes = 0;
3691 		if (th_flags & TH_SYN) {
3692 			/* The data space is one beyond snd_una */
3693 			rsm->r_start = seq_out + 1;
3694 			rsm->r_end = rsm->r_start + (len - 1);
3695 		} else {
3696 			/* Normal case */
3697 			rsm->r_start = seq_out;
3698 			rsm->r_end = rsm->r_start + len;
3699 		}
3700 		rsm->r_dupack = 0;
3701 		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
3702 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
3703 #ifdef INVARIANTS
3704 		if (insret != NULL) {
3705 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
3706 			      nrsm, insret, rack, rsm);
3707 		}
3708 #endif
3709 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3710 		rsm->r_in_tmap = 1;
3711 		return;
3712 	}
3713 	/*
3714 	 * If we reach here it's a retransmission and we need to find it.
3715 	 */
3716 	memset(&fe, 0, sizeof(fe));
3717 more:
3718 	if (hintrsm && (hintrsm->r_start == seq_out)) {
3719 		rsm = hintrsm;
3720 		hintrsm = NULL;
3721 	} else {
3722 		/* No hints sorry */
3723 		rsm = NULL;
3724 	}
3725 	if ((rsm) && (rsm->r_start == seq_out)) {
3726 		seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3727 		if (len == 0) {
3728 			return;
3729 		} else {
3730 			goto more;
3731 		}
3732 	}
3733 	/* Ok, it was not the last pointer; go through it the hard way. */
3734 refind:
3735 	fe.r_start = seq_out;
3736 	rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3737 	if (rsm) {
3738 		if (rsm->r_start == seq_out) {
3739 			seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3740 			if (len == 0) {
3741 				return;
3742 			} else {
3743 				goto refind;
3744 			}
3745 		}
3746 		if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
3747 			/* Transmitted within this piece */
3748 			/*
3749 			 * Ok we must split off the front and then let the
3750 			 * update do the rest
3751 			 */
3752 			nrsm = rack_alloc_full_limit(rack);
3753 			if (nrsm == NULL) {
3754 				rack_update_rsm(tp, rack, rsm, ts);
3755 				return;
3756 			}
3757 			/*
3758 			 * copy rsm to nrsm and then trim the front of rsm
3759 			 * to not include this part.
3760 			 */
3761 			rack_clone_rsm(rack, nrsm, rsm, seq_out);
3762 			insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
3763 #ifdef INVARIANTS
3764 			if (insret != NULL) {
3765 				panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
3766 				      nrsm, insret, rack, rsm);
3767 			}
3768 #endif
3769 			if (rsm->r_in_tmap) {
3770 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3771 				nrsm->r_in_tmap = 1;
3772 			}
3773 			rsm->r_flags &= (~RACK_HAS_FIN);
3774 			seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
3775 			if (len == 0) {
3776 				return;
3777 			}
3778 		}
3779 	}
3780 	/*
3781 	 * Hmm, not found in the map; did they retransmit both old data
3782 	 * and on into the new?
3783 	 */
3784 	if (seq_out == tp->snd_max) {
3785 		goto again;
3786 	} else if (SEQ_LT(seq_out, tp->snd_max)) {
3787 #ifdef INVARIANTS
3788 		printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
3789 		    seq_out, len, tp->snd_una, tp->snd_max);
3790 		printf("Starting Dump of all rack entries\n");
3791 		RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
3792 			printf("rsm:%p start:%u end:%u\n",
3793 			    rsm, rsm->r_start, rsm->r_end);
3794 		}
3795 		printf("Dump complete\n");
3796 		panic("seq_out not found rack:%p tp:%p",
3797 		    rack, tp);
3798 #endif
3799 	} else {
3800 #ifdef INVARIANTS
3801 		/*
3802 		 * Hmm beyond sndmax? (only if we are using the new rtt-pack
3803 		 * flag)
3804 		 */
3805 		panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
3806 		    seq_out, len, tp->snd_max, tp);
3807 #endif
3808 	}
3809 }
3810 
3811 /*
3812  * Record one of the RTT updates from an ack into
3813  * our sample structure.
3814  */
3815 static void
3816 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt)
3817 {
3818 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3819 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
3820 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
3821 	}
3822 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3823 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
3824 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
3825 	}
3826 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
3827 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
3828 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
3829 }
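/*
 * Callers feed in each RTT sample taken from a single ack;
 * tcp_rack_xmit_timer_commit() below then folds the selected
 * sample (lowest, highest or average, per rc_rate_sample_method)
 * into srtt/rttvar.
 */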
3830 
3831 /*
3832  * Collect new round-trip time estimate
3833  * and update averages and current timeout.
3834  */
3835 static void
3836 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
3837 {
3838 	int32_t delta;
3839 	uint32_t o_srtt, o_var;
3840 	int32_t rtt;
3841 
3842 	if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
3843 		/* No valid sample */
3844 		return;
3845 	if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
3846 		/* We are to use the lowest RTT seen in a single ack */
3847 		rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
3848 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
3849 		/* We are to use the highest RTT seen in a single ack */
3850 		rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
3851 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
3852 		/* We are to use the average RTT seen in a single ack */
3853 		rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
3854 				(uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
3855 	} else {
3856 #ifdef INVARIANTS
3857 		panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
3858 #endif
3859 		return;
3860 	}
3861 	if (rtt == 0)
3862 		rtt = 1;
3863 	rack_log_rtt_sample(rack, rtt);
3864 	o_srtt = tp->t_srtt;
3865 	o_var = tp->t_rttvar;
3866 	rack = (struct tcp_rack *)tp->t_fb_ptr;
3867 	if (tp->t_srtt != 0) {
3868 		/*
3869 		 * srtt is stored as fixed point with 5 bits after the
3870 		 * binary point (i.e., scaled by 32).  The following magic is
3871 		 * equivalent to the smoothing algorithm in rfc793 with an
3872 		 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
3873 		 * Adjust rtt to origin 0.
3874 		 */
3875 		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3876 		    - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3877 
3878 		tp->t_srtt += delta;
3879 		if (tp->t_srtt <= 0)
3880 			tp->t_srtt = 1;
3881 
3882 		/*
3883 		 * We accumulate a smoothed rtt variance (actually, a
3884 		 * smoothed mean difference), then set the retransmit timer
3885 		 * to smoothed rtt + 4 times the smoothed variance. rttvar
3886 		 * is stored as fixed point with 4 bits after the binary
3887 		 * point (scaled by 16).  The following is equivalent to
3888 		 * rfc793 smoothing with an alpha of .75 (rttvar =
3889 		 * rttvar*3/4 + |delta| / 4).  This replaces rfc793's
3890 		 * wired-in beta.
3891 		 */
3892 		if (delta < 0)
3893 			delta = -delta;
3894 		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3895 		tp->t_rttvar += delta;
3896 		if (tp->t_rttvar <= 0)
3897 			tp->t_rttvar = 1;
3898 		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3899 			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3900 	} else {
3901 		/*
3902 		 * No rtt measurement yet - use the unsmoothed rtt. Set the
3903 		 * variance to half the rtt (so our first retransmit happens
3904 		 * at 3*rtt).
3905 		 */
3906 		tp->t_srtt = rtt << TCP_RTT_SHIFT;
3907 		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3908 		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3909 	}
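	/*
	 * A worked example of the smoothing above (a sketch, using the
	 * shift values as currently defined: srtt scaled by 32, rttvar
	 * by 16, TCP_DELTA_SHIFT of 2): with t_srtt = 3200 (100 ticks)
	 * and a new rtt of 132 ticks, delta = (131 << 2) - (3200 >> 3)
	 * = 124, so t_srtt becomes 3324, i.e. roughly 104 ticks -- the
	 * expected 7/8 * 100 + 1/8 * 132.
	 */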
3910 	TCPSTAT_INC(tcps_rttupdated);
3911 	rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var);
3912 	tp->t_rttupdated++;
3913 #ifdef NETFLIX_STATS
3914 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
3915 #endif
3916 	tp->t_rxtshift = 0;
3917 
3918 	/*
3919 	 * The retransmit should happen at rtt + 4 * rttvar. Because of the
3920 	 * way we do the smoothing, srtt and rttvar will each average +1/2
3921 	 * tick of bias.  When we compute the retransmit timer, we want 1/2
3922 	 * tick of rounding and 1 extra tick because of +-1/2 tick
3923 	 * uncertainty in the firing of the timer.  The bias will give us
3924 	 * exactly the 1.5 tick we need.  But, because the bias is
3925 	 * statistical, we have to test that we don't drop below the minimum
3926 	 * feasible timer (which is 2 ticks).
3927 	 */
3928 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3929 	   max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
3930 	tp->t_softerror = 0;
3931 }
3932 
3933 static void
3934 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
3935     uint32_t t, uint32_t cts)
3936 {
3937 	/*
3938 	 * For this RSM, we acknowledged the data from a previous
3939 	 * transmission, not the last one we made. This means we did a false
3940 	 * retransmit.
3941 	 */
3942 	struct tcp_rack *rack;
3943 
3944 	if (rsm->r_flags & RACK_HAS_FIN) {
3945 		 * The FIN is often sent multiple times when we have
3946 		 * everything outstanding ack'd. We ignore this case
3947 		 * since it's over now.
3948 		 * since its over now.
3949 		 */
3950 		return;
3951 	}
3952 	if (rsm->r_flags & RACK_TLP) {
3953 		/*
3954 		 * We expect TLP's to have this occur.
3955 		 */
3956 		return;
3957 	}
3958 	rack = (struct tcp_rack *)tp->t_fb_ptr;
3959 	/* should we undo cc changes and exit recovery? */
3960 	if (IN_RECOVERY(tp->t_flags)) {
3961 		if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
3962 			/*
3963 			 * Undo what we ratcheted down and exit recovery if
3964 			 * possible
3965 			 */
3966 			EXIT_RECOVERY(tp->t_flags);
3967 			tp->snd_recover = tp->snd_una;
3968 			if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
3969 				tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
3970 			if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
3971 				tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
3972 		}
3973 	}
3974 	if (rsm->r_flags & RACK_WAS_SACKPASS) {
3975 		/*
3976 		 * We retransmitted based on a sack and the earlier
3977 		 * retransmission ack'd it - re-ordering is occurring.
3978 		 */
3979 		counter_u64_add(rack_reorder_seen, 1);
3980 		rack->r_ctl.rc_reorder_ts = cts;
3981 	}
3982 	counter_u64_add(rack_badfr, 1);
3983 	counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
3984 }
3985 
3986 
3987 static int
3988 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
3989     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type)
3990 {
3991 	int32_t i;
3992 	uint32_t t;
3993 
3994 	if (rsm->r_flags & RACK_ACKED)
3995 		/* Already done */
3996 		return (0);
3997 
3998 
3999 	if ((rsm->r_rtr_cnt == 1) ||
4000 	    ((ack_type == CUM_ACKED) &&
4001 	    (to->to_flags & TOF_TS) &&
4002 	    (to->to_tsecr) &&
4003 	    (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr))
4004 	    ) {
4005 		/*
4006 		 * We will only find a matching timestamp if it's cum-acked.
4007 		 * But if there is only one (re)transmission it's for-sure matching
4008 		 * :-)
4009 		 */
4010 		t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
4011 		if ((int)t <= 0)
4012 			t = 1;
4013 		if (!tp->t_rttlow || tp->t_rttlow > t)
4014 			tp->t_rttlow = t;
4015 		if (!rack->r_ctl.rc_rack_min_rtt ||
4016 		    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
4017 			rack->r_ctl.rc_rack_min_rtt = t;
4018 			if (rack->r_ctl.rc_rack_min_rtt == 0) {
4019 				rack->r_ctl.rc_rack_min_rtt = 1;
4020 			}
4021 		}
4022 		tcp_rack_xmit_timer(rack, t + 1);
4023 		if ((rsm->r_flags & RACK_TLP) &&
4024 		    (!IN_RECOVERY(tp->t_flags))) {
4025 			/* Segment was a TLP and our retrans matched */
4026 			if (rack->r_ctl.rc_tlp_cwnd_reduce) {
4027 				rack->r_ctl.rc_rsm_start = tp->snd_max;
4028 				rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4029 				rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4030 				rack_cong_signal(tp, NULL, CC_NDUPACK);
4031 				/*
4032 				 * When we enter recovery we need to assure
4033 				 * we send one packet.
4034 				 */
4035 				rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4036 				rack_log_to_prr(rack, 7);
4037 			}
4038 		}
4039 		if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
4040 			/* New more recent rack_tmit_time */
4041 			rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
4042 			rack->rc_rack_rtt = t;
4043 		}
4044 		return (1);
4045 	}
4046 	/*
4047 	 * We clear the soft/rxtshift since we got an ack.
4048 	 * There is no assurance we will call the commit() function
4049 	 * so we need to clear these to avoid incorrect handling.
4050 	 */
4051 	tp->t_rxtshift = 0;
4052 	tp->t_softerror = 0;
4053 	if ((to->to_flags & TOF_TS) &&
4054 	    (ack_type == CUM_ACKED) &&
4055 	    (to->to_tsecr) &&
4056 	    ((rsm->r_flags & RACK_OVERMAX) == 0)) {
4057 		/*
4058 		 * Now which timestamp does it match? In this block the ACK
4059 		 * must be coming from a previous transmission.
4060 		 */
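		/*
		 * A hypothetical example: if this segment was sent at
		 * ts 100, 200 and 300 and the echoed tsecr is 200, the
		 * RTT is taken from the second transmission; and since
		 * a later transmission exists, rack_earlier_retran()
		 * below records that we did a false retransmit.
		 */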
4061 		for (i = 0; i < rsm->r_rtr_cnt; i++) {
4062 			if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
4063 				t = cts - rsm->r_tim_lastsent[i];
4064 				if ((int)t <= 0)
4065 					t = 1;
4066 				if ((i + 1) < rsm->r_rtr_cnt) {
4067 					/* Likely */
4068 					rack_earlier_retran(tp, rsm, t, cts);
4069 				}
4070 				if (!tp->t_rttlow || tp->t_rttlow > t)
4071 					tp->t_rttlow = t;
4072 				if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
4073 					rack->r_ctl.rc_rack_min_rtt = t;
4074 					if (rack->r_ctl.rc_rack_min_rtt == 0) {
4075 						rack->r_ctl.rc_rack_min_rtt = 1;
4076 					}
4077 				}
4078 				/*
4079 				 * Note the following calls to
4080 				 * tcp_rack_xmit_timer() are being commented
4081 				 * out for now. They give us no more accuracy
4082 				 * and often lead to a wrong choice. We have
4083 				 * enough samples that have not been
4084 				 * retransmitted. I leave the commented out
4085 				 * code in here in case in the future we
4086 				 * decide to add it back (though I can't foresee
4087 				 * doing that). That way we will easily see
4088 				 * where they need to be placed.
4089 				 */
4090 				if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
4091 				    rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
4092 					/* New more recent rack_tmit_time */
4093 					rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
4094 					rack->rc_rack_rtt = t;
4095 				}
4096 				return (1);
4097 			}
4098 		}
4099 		goto ts_not_found;
4100 	} else {
4101 		/*
4102 		 * Ok, it's a SACK block that we retransmitted, or a Windows
4103 		 * machine without timestamps. We can tell nothing from the
4104 		 * timestamp, since it's not there or it reflects the time the
4105 		 * peer last received a segment that moved its cum-ack point forward.
4106 		 */
4107 ts_not_found:
4108 		i = rsm->r_rtr_cnt - 1;
4109 		t = cts - rsm->r_tim_lastsent[i];
4110 		if ((int)t <= 0)
4111 			t = 1;
4112 		if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
4113 			/*
4114 			 * We retransmitted and the ack came back in less
4115 			 * than the smallest rtt we have observed. We most
4116 			 * likely did an improper retransmit as outlined in
4117 			 * 4.2 Step 3 point 2 in the rack-draft.
4118 			 */
4119 			i = rsm->r_rtr_cnt - 2;
4120 			t = cts - rsm->r_tim_lastsent[i];
4121 			rack_earlier_retran(tp, rsm, t, cts);
4122 		} else if (rack->r_ctl.rc_rack_min_rtt) {
4123 			/*
4124 			 * We retransmitted it and the retransmit did the
4125 			 * job.
4126 			 */
4127 			if (!rack->r_ctl.rc_rack_min_rtt ||
4128 			    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
4129 				rack->r_ctl.rc_rack_min_rtt = t;
4130 				if (rack->r_ctl.rc_rack_min_rtt == 0) {
4131 					rack->r_ctl.rc_rack_min_rtt = 1;
4132 				}
4133 			}
4134 			if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
4135 				/* New more recent rack_tmit_time */
4136 				rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
4137 				rack->rc_rack_rtt = t;
4138 			}
4139 			return (1);
4140 		}
4141 	}
4142 	return (0);
4143 }
4144 
4145 /*
4146  * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
4147  */
4148 static void
4149 rack_log_sack_passed(struct tcpcb *tp,
4150     struct tcp_rack *rack, struct rack_sendmap *rsm)
4151 {
4152 	struct rack_sendmap *nrsm;
4153 
4154 	nrsm = rsm;
4155 	TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
4156 	    rack_head, r_tnext) {
4157 		if (nrsm == rsm) {
4158 			/* Skip the original segment; it is acked */
4159 			continue;
4160 		}
4161 		if (nrsm->r_flags & RACK_ACKED) {
4162 			/*
4163 			 * Skip ack'd segments, though we
4164 			 * should not see these, since tmap
4165 			 * should not have ack'd segments.
4166 			 */
4167 			continue;
4168 		}
4169 		if (nrsm->r_flags & RACK_SACK_PASSED) {
4170 			/*
4171 			 * We found one that is already marked
4172 			 * passed, we have been here before and
4173 			 * so all others below this are marked.
4174 			 */
4175 			break;
4176 		}
4177 		nrsm->r_flags |= RACK_SACK_PASSED;
4178 		nrsm->r_flags &= ~RACK_WAS_SACKPASS;
4179 	}
4180 }
4181 
4182 static uint32_t
4183 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
4184 		   struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
4185 {
4186 	uint32_t start, end, changed = 0;
4187 	struct rack_sendmap stack_map;
4188 	struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
4189 	int32_t used_ref = 1;
4190 	int moved = 0;
4191 
4192 	start = sack->start;
4193 	end = sack->end;
4194 	rsm = *prsm;
4195 	memset(&fe, 0, sizeof(fe));
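	/* fe is only a stack-local lookup key; just r_start gets filled in. */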
4196 do_rest_ofb:
4197 	if ((rsm == NULL) ||
4198 	    (SEQ_LT(end, rsm->r_start)) ||
4199 	    (SEQ_GEQ(start, rsm->r_end)) ||
4200 	    (SEQ_LT(start, rsm->r_start))) {
4201 		/*
4202 		 * We are not in the right spot,
4203 		 * find the correct spot in the tree.
4204 		 */
4205 		used_ref = 0;
4206 		fe.r_start = start;
4207 		rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4208 		moved++;
4209 	}
4210 	if (rsm == NULL) {
4211 		/* TSNH (this should not happen) */
4212 		goto out;
4213 	}
4214 	/* Ok we have an ACK for some piece of this rsm */
4215 	if (rsm->r_start != start) {
4216 		if ((rsm->r_flags & RACK_ACKED) == 0) {
4217 			/**
4218 			 * Need to split this in two pieces the before and after,
4219 			 * the before remains in the map, the after must be
4220 			 * added. In other words we have:
4221 			 * rsm        |--------------|
4222 			 * sackblk        |------->
4223 			 * rsm will become
4224 			 *     rsm    |---|
4225 			 * and nrsm will be  the sacked piece
4226 			 *     nrsm       |----------|
4227 			 *
4228 			 * But before we start down that path let's
4229 			 * see if the sack spans over on top of
4230 			 * the next guy and it is already sacked.
4231 			 */
4232 			next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4233 			if (next && (next->r_flags & RACK_ACKED) &&
4234 			    SEQ_GEQ(end, next->r_start)) {
4235 				/**
4236 				 * So the next one is already acked, and
4237 				 * we can thus by hookery use our stack_map
4238 				 * to reflect the piece being sacked and
4239 				 * then adjust the two tree entries moving
4240 				 * the start and ends around. So we start like:
4241 				 *  rsm     |------------|             (not-acked)
4242 				 *  next                 |-----------| (acked)
4243 				 *  sackblk        |-------->
4244 				 *  We want to end like so:
4245 				 *  rsm     |------|                   (not-acked)
4246 				 *  next           |-----------------| (acked)
4247 				 *  nrsm           |-----|
4248 				 * Where nrsm is a temporary stack piece we
4249 				 * use to update all the gizmos.
4250 				 */
4251 				/* Copy up our fudge block */
4252 				nrsm = &stack_map;
4253 				memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
4254 				/* Now adjust our tree blocks */
4255 				rsm->r_end = start;
4256 				next->r_start = start;
4257 				/* Clear out the dup ack count of the remainder */
4258 				rsm->r_dupack = 0;
4259 				rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
4260 				/* Now let's make sure our fudge block is right */
4261 				nrsm->r_start = start;
4262 				/* Now let's update all the stats and such */
4263 				rack_update_rtt(tp, rack, nrsm, to, cts, SACKED);
4264 				changed += (nrsm->r_end - nrsm->r_start);
4265 				rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
4266 				if (nrsm->r_flags & RACK_SACK_PASSED) {
4267 					counter_u64_add(rack_reorder_seen, 1);
4268 					rack->r_ctl.rc_reorder_ts = cts;
4269 				}
4270 				/*
4271 				 * Now we want to go up from rsm (the
4272 				 * one left un-acked) to the next one
4273 				 * in the tmap. We do this so when
4274 				 * we walk backwards we include marking
4275 				 * sack-passed on rsm (The one passed in
4276 				 * is skipped since it is generally called
4277 				 * on something sacked before removing it
4278 				 * from the tmap).
4279 				 */
4280 				if (rsm->r_in_tmap) {
4281 					nrsm = TAILQ_NEXT(rsm, r_tnext);
4282 					/*
4283 					 * Now that we have the next
4284 					 * one walk backwards from there.
4285 					 */
4286 					if (nrsm && nrsm->r_in_tmap)
4287 						rack_log_sack_passed(tp, rack, nrsm);
4288 				}
4289 				/* Now are we done? */
4290 				if (SEQ_LT(end, next->r_end) ||
4291 				    (end == next->r_end)) {
4292 					/* Done with block */
4293 					goto out;
4294 				}
4295 				counter_u64_add(rack_sack_used_next_merge, 1);
4296 				/* Position for the next block */
4297 				start = next->r_end;
4298 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
4299 				if (rsm == NULL)
4300 					goto out;
4301 			} else {
4302 				/**
4303 				 * We can't use any hookery here, so we
4304 				 * need to split the map. We enter like
4305 				 * so:
4306 				 *  rsm      |--------|
4307 				 *  sackblk       |----->
4308 				 * We will add the new block nrsm and
4309 				 * that will be the new portion, and then
4310 				 * fall through after resetting rsm. So we
4311 				 * split and look like this:
4312 				 *  rsm      |----|
4313 				 *  sackblk       |----->
4314 				 *  nrsm          |---|
4315 				 * We then fall through resetting
4316 				 * rsm to nrsm, so the next block
4317 				 * picks it up.
4318 				 */
4319 				nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
4320 				if (nrsm == NULL) {
4321 					/*
4322 					 * failed XXXrrs what can we do but lose the sack
4323 					 * info?
4324 					 */
4325 					goto out;
4326 				}
4327 				counter_u64_add(rack_sack_splits, 1);
4328 				rack_clone_rsm(rack, nrsm, rsm, start);
4329 				insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
4330 #ifdef INVARIANTS
4331 				if (insret != NULL) {
4332 					panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
4333 					      nrsm, insret, rack, rsm);
4334 				}
4335 #endif
4336 				if (rsm->r_in_tmap) {
4337 					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4338 					nrsm->r_in_tmap = 1;
4339 				}
4340 				rsm->r_flags &= (~RACK_HAS_FIN);
4341 				/* Position us to point to the new nrsm that starts the sack blk */
4342 				rsm = nrsm;
4343 			}
4344 		} else {
4345 			/* Already sacked this piece */
4346 			counter_u64_add(rack_sack_skipped_acked, 1);
4347 			moved++;
4348 			if (end == rsm->r_end) {
4349 				/* Done with block */
4350 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4351 				goto out;
4352 			} else if (SEQ_LT(end, rsm->r_end)) {
4353 				/* A partial sack to an already sacked block */
4354 				moved++;
4355 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4356 				goto out;
4357 			} else {
4358 				/*
4359 				 * The end goes beyond this guy
4360 				 * reposition the start to the
4361 				 * next block.
4362 				 */
4363 				start = rsm->r_end;
4364 				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4365 				if (rsm == NULL)
4366 					goto out;
4367 			}
4368 		}
4369 	}
4370 	if (SEQ_GEQ(end, rsm->r_end)) {
4371 		/**
4372 		 * The end of this block is either beyond this guy or right
4373 		 * at this guy. I.e.:
4374 		 *  rsm ---                 |-----|
4375 		 *  end                     |-----|
4376 		 *  <or>
4377 		 *  end                     |---------|
4378 		 */
4379 		if (rsm->r_flags & RACK_TLP)
4380 			rack->r_ctl.rc_tlp_rtx_out = 0;
4381 		if ((rsm->r_flags & RACK_ACKED) == 0) {
4382 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4383 			changed += (rsm->r_end - rsm->r_start);
4384 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4385 			if (rsm->r_in_tmap) /* should be true */
4386 				rack_log_sack_passed(tp, rack, rsm);
4387 			/* Is reordering occurring? */
4388 			if (rsm->r_flags & RACK_SACK_PASSED) {
4389 				rsm->r_flags &= ~RACK_SACK_PASSED;
4390 				counter_u64_add(rack_reorder_seen, 1);
4391 				rack->r_ctl.rc_reorder_ts = cts;
4392 			}
4393 			rsm->r_flags |= RACK_ACKED;
4394 			rsm->r_flags &= ~RACK_TLP;
4395 			if (rsm->r_in_tmap) {
4396 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4397 				rsm->r_in_tmap = 0;
4398 			}
4399 		} else {
4400 			counter_u64_add(rack_sack_skipped_acked, 1);
4401 			moved++;
4402 		}
4403 		if (end == rsm->r_end) {
4404 		/* This block only - done, set up for the next */
4405 			goto out;
4406 		}
4407 		/*
4408 		 * There is more not covered by this rsm; move on
4409 		 * to the next block in the RB tree.
4410 		 */
4411 		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4412 		start = rsm->r_end;
4413 		rsm = nrsm;
4414 		if (rsm == NULL)
4415 			goto out;
4416 		goto do_rest_ofb;
4417 	}
4418 	/**
4419 	 * The end of this sack block is smaller than
4420 	 * our rsm i.e.:
4421 	 *  rsm ---                 |-----|
4422 	 *  end                     |--|
4423 	 */
4424 	if ((rsm->r_flags & RACK_ACKED) == 0) {
4425 		prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4426 		if (prev && (prev->r_flags & RACK_ACKED)) {
4427 			/**
4428 			 * Goal, we want the right remainder of rsm to shrink
4429 			 * in place and span from (rsm->r_start = end) to rsm->r_end.
4430 			 * We want to expand prev to go all the way
4431 			 * to prev->r_end <- end.
4432 			 * so in the tree we have before:
4433 			 *   prev     |--------|         (acked)
4434 			 *   rsm               |-------| (non-acked)
4435 			 *   sackblk           |-|
4436 			 * We churn it so we end up with
4437 			 *   prev     |----------|       (acked)
4438 			 *   rsm                 |-----| (non-acked)
4439 			 *   nrsm              |-| (temporary)
4440 			 */
4441 			nrsm = &stack_map;
4442 			memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
4443 			prev->r_end = end;
4444 			rsm->r_start = end;
4445 			/*
4446 			 * Now adjust nrsm (stack copy) to be the small
4447 			 * piece that was "sacked".
4448 			 */
4449 			nrsm->r_end = end;
4450 			rsm->r_dupack = 0;
4451 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
4452 			/*
4453 			 * Now nrsm is our new little piece
4454 			 * that is acked (which was merged
4455 			 * to prev). Update the rtt and changed
4456 			 * based on that. Also check for reordering.
4457 			 */
4458 			rack_update_rtt(tp, rack, nrsm, to, cts, SACKED);
4459 			changed += (nrsm->r_end - nrsm->r_start);
4460 			rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
4461 			if (nrsm->r_flags & RACK_SACK_PASSED) {
4462 				counter_u64_add(rack_reorder_seen, 1);
4463 				rack->r_ctl.rc_reorder_ts = cts;
4464 			}
4465 			rsm = prev;
4466 			counter_u64_add(rack_sack_used_prev_merge, 1);
4467 		} else {
4468 			/**
4469 			 * This is the case where our previous
4470 			 * block is not acked either, so we must
4471 			 * split the block in two.
4472 			 */
4473 			nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
4474 			if (nrsm == NULL) {
4475 				/* failed rrs what can we do but lose the sack info? */
4476 				goto out;
4477 			}
4478 			/**
4479 			 * In this case nrsm becomes
4480 			 * nrsm->r_start = end;
4481 			 * nrsm->r_end = rsm->r_end;
4482 			 * which is un-acked.
4483 			 * <and>
4484 			 * rsm->r_end = nrsm->r_start;
4485 			 * i.e. the remaining un-acked
4486 			 * piece is left on the left
4487 			 * hand side.
4488 			 *
4489 			 * So we start like this
4490 			 * rsm      |----------| (not acked)
4491 			 * sackblk  |---|
4492 			 * build it so we have
4493 			 * rsm      |---|         (acked)
4494 			 * nrsm         |------|  (not acked)
4495 			 */
4496 			counter_u64_add(rack_sack_splits, 1);
4497 			rack_clone_rsm(rack, nrsm, rsm, end);
4498 			rsm->r_flags &= (~RACK_HAS_FIN);
4499 			insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
4500 #ifdef INVARIANTS
4501 			if (insret != NULL) {
4502 				panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
4503 				      nrsm, insret, rack, rsm);
4504 			}
4505 #endif
4506 			if (rsm->r_in_tmap) {
4507 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4508 				nrsm->r_in_tmap = 1;
4509 			}
4510 			nrsm->r_dupack = 0;
4511 			rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
4512 			if (rsm->r_flags & RACK_TLP)
4513 				rack->r_ctl.rc_tlp_rtx_out = 0;
4514 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4515 			changed += (rsm->r_end - rsm->r_start);
4516 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4517 			if (rsm->r_in_tmap) /* should be true */
4518 				rack_log_sack_passed(tp, rack, rsm);
4519 			/* Is reordering occurring? */
4520 			if (rsm->r_flags & RACK_SACK_PASSED) {
4521 				rsm->r_flags &= ~RACK_SACK_PASSED;
4522 				counter_u64_add(rack_reorder_seen, 1);
4523 				rack->r_ctl.rc_reorder_ts = cts;
4524 			}
4525 			rsm->r_flags |= RACK_ACKED;
4526 			rsm->r_flags &= ~RACK_TLP;
4527 			if (rsm->r_in_tmap) {
4528 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4529 				rsm->r_in_tmap = 0;
4530 			}
4531 		}
4532 	} else if (start != end) {
4533 		/*
4534 		 * The block was already acked.
4535 		 */
4536 		counter_u64_add(rack_sack_skipped_acked, 1);
4537 		moved++;
4538 	}
4539 out:
4540 	if (rsm && (rsm->r_flags & RACK_ACKED)) {
4541 		/*
4542 		 * Now can we merge where we worked
4543 		 * with either the previous or
4544 		 * next block?
4545 		 */
4546 		next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4547 		while (next) {
4548 			if (next->r_flags & RACK_ACKED) {
4549 				/* yep this and next can be merged */
4550 				rsm = rack_merge_rsm(rack, rsm, next);
4551 				next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4552 			} else
4553 				break;
4554 		}
4555 		/* Now what about the previous? */
4556 		prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4557 		while (prev) {
4558 			if (prev->r_flags & RACK_ACKED) {
4559 				/* yep the previous and this can be merged */
4560 				rsm = rack_merge_rsm(rack, prev, rsm);
4561 				prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4562 			} else
4563 				break;
4564 		}
4565 	}
4566 	if (used_ref == 0) {
4567 		counter_u64_add(rack_sack_proc_all, 1);
4568 	} else {
4569 		counter_u64_add(rack_sack_proc_short, 1);
4570 	}
4571 	/* Save off the next one for quick reference. */
4572 	if (rsm)
4573 		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4574 	else
4575 		nrsm = NULL;
4576 	*prsm = rack->r_ctl.rc_sacklast = nrsm;
4577 	/* Pass back the moved. */
4578 	*moved_two = moved;
4579 	return (changed);
4580 }
4581 
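/*
 * The peer has reneged: data we had marked SACKed is no longer
 * covered, so strip the SACK-related flags and rebuild those
 * entries into the transmit map as un-acked.
 */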
4582 static inline void
4583 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
4584 {
4585 	struct rack_sendmap *tmap;
4586 
4587 	tmap = NULL;
4588 	while (rsm && (rsm->r_flags & RACK_ACKED)) {
4589 		/* It's no longer sacked; mark it so */
4590 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4591 #ifdef INVARIANTS
4592 		if (rsm->r_in_tmap) {
4593 			panic("rack:%p rsm:%p flags:0x%x in tmap?",
4594 			      rack, rsm, rsm->r_flags);
4595 		}
4596 #endif
4597 		rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
4598 		/* Rebuild it into our tmap */
4599 		if (tmap == NULL) {
4600 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4601 			tmap = rsm;
4602 		} else {
4603 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
4604 			tmap = rsm;
4605 		}
4606 		tmap->r_in_tmap = 1;
4607 		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4608 	}
4609 	/*
4610 	 * Now let's possibly clear the sack filter so we start
4611 	 * recognizing sacks that cover this area.
4612 	 */
4613 	if (rack_use_sack_filter)
4614 		sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
4615 
4616 }
4617 
4618 static void
4619 rack_do_decay(struct tcp_rack *rack)
4620 {
4621 	struct timeval res;
4622 
4623 #define	timersub(tvp, uvp, vvp)						\
4624 	do {								\
4625 		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
4626 		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
4627 		if ((vvp)->tv_usec < 0) {				\
4628 			(vvp)->tv_sec--;				\
4629 			(vvp)->tv_usec += 1000000;			\
4630 		}							\
4631 	} while (0)
4632 
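	/* res = elapsed time since the last decay pass (rc_last_ack - rc_last_time_decay). */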
4633 	timersub(&rack->r_ctl.rc_last_ack, &rack->r_ctl.rc_last_time_decay, &res);
4634 #undef timersub
4635 
4636 	rack->r_ctl.input_pkt++;
4637 	if ((rack->rc_in_persist) ||
4638 	    (res.tv_sec >= 1) ||
4639 	    (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
4640 		/*
4641 		 * Check for decay when we are not under a SACK attack;
4642 		 * we want all the SAD detection metrics to decay by
4643 		 * 1/4 for each second (or more) that has passed.
4644 		 */
4645 		uint32_t pkt_delta;
4646 
4647 		pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
4648 		/* Update our saved tracking values */
4649 		rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
4650 		rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack;
4651 		/* Now do we escape without decay? */
4652 		if (rack->rc_in_persist ||
4653 		    (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
4654 		    (pkt_delta < tcp_sad_low_pps)){
4655 			/*
4656 			 * We don't decay idle connections
4657 			 * or ones that have a low input pps.
4658 			 */
4659 			return;
4660 		}
4661 		/* Decay the counters */
4662 		rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
4663 							tcp_sad_decay_val);
4664 		rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
4665 							 tcp_sad_decay_val);
4666 		rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
4667 							       tcp_sad_decay_val);
4668 		rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
4669 								tcp_sad_decay_val);
4670 	}
4671 }
4672 
4673 static void
4674 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
4675 {
4676 	uint32_t changed, entered_recovery = 0;
4677 	struct tcp_rack *rack;
4678 	struct rack_sendmap *rsm, *rm;
4679 	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
4680 	register uint32_t th_ack;
4681 	int32_t i, j, k, num_sack_blks = 0;
4682 	uint32_t cts, acked, ack_point, sack_changed = 0;
4683 	int loop_start = 0, moved_two = 0;
4684 
4685 	INP_WLOCK_ASSERT(tp->t_inpcb);
4686 	if (th->th_flags & TH_RST) {
4687 		/* We don't log resets */
4688 		return;
4689 	}
4690 	rack = (struct tcp_rack *)tp->t_fb_ptr;
4691 	cts = tcp_ts_getticks();
4692 	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
4693 	changed = 0;
4694 	th_ack = th->th_ack;
4695 	if (rack->sack_attack_disable == 0)
4696 		rack_do_decay(rack);
4697 	if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
4698 		/*
4699 		 * You only get credit for
4700 		 * MSS and greater (and you get extra
4701 		 * credit for larger cum-ack moves).
4702 		 */
4703 		int ac;
4704 
4705 		ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
4706 		rack->r_ctl.ack_count += ac;
4707 		counter_u64_add(rack_ack_total, ac);
4708 	}
4709 	if (rack->r_ctl.ack_count > 0xfff00000) {
4710 		/*
4711 		 * reduce the number to keep us under
4712 		 * a uint32_t.
4713 		 */
4714 		rack->r_ctl.ack_count /= 2;
4715 		rack->r_ctl.sack_count /= 2;
4716 	}
4717 	if (SEQ_GT(th_ack, tp->snd_una)) {
4718 		rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
4719 		tp->t_acktime = ticks;
4720 	}
4721 	if (rsm && SEQ_GT(th_ack, rsm->r_start))
4722 		changed = th_ack - rsm->r_start;
4723 	if (changed) {
4724 		/*
4725 		 * The ACK point is advancing to th_ack; we must drop off
4726 		 * the packets in the rack log and calculate any eligible
4727 		 * RTT's.
4728 		 */
4729 		rack->r_wanted_output++;
4730 	more:
4731 		rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
4732 		if (rsm == NULL) {
4733 			if ((th_ack - 1) == tp->iss) {
4734 				/*
4735 				 * For the SYN incoming case we will not
4736 				 * have called tcp_output for the sending of
4737 				 * the SYN, so there will be no map. All
4738 				 * other cases should probably be a panic.
4739 				 */
4740 				goto proc_sack;
4741 			}
4742 			if (tp->t_flags & TF_SENTFIN) {
4743 			/* if we sent a FIN we will not have a map */
4744 				goto proc_sack;
4745 			}
4746 #ifdef INVARIANTS
4747 			panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
4748 			      tp,
4749 			      th, tp->t_state, rack,
4750 			      tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
4751 #endif
4752 			goto proc_sack;
4753 		}
4754 		if (SEQ_LT(th_ack, rsm->r_start)) {
4755 			/* Huh, the map is missing this */
4756 #ifdef INVARIANTS
4757 			printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
4758 			       rsm->r_start,
4759 			       th_ack, tp->t_state, rack->r_state);
4760 #endif
4761 			goto proc_sack;
4762 		}
4763 		rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED);
4764 		/* Now do we consume the whole thing? */
4765 		if (SEQ_GEQ(th_ack, rsm->r_end)) {
4766 			/* It's all consumed. */
4767 			uint32_t left;
4768 
4769 			rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4770 			rsm->r_rtr_bytes = 0;
4771 			if (rsm->r_flags & RACK_TLP)
4772 				rack->r_ctl.rc_tlp_rtx_out = 0;
4773 			rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
4774 #ifdef INVARIANTS
4775 			if (rm != rsm) {
4776 				panic("removing head in rack:%p rsm:%p rm:%p",
4777 				      rack, rsm, rm);
4778 			}
4779 #endif
4780 			if (rsm->r_in_tmap) {
4781 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4782 				rsm->r_in_tmap = 0;
4783 			}
4784 			if (rsm->r_flags & RACK_ACKED) {
4785 				/*
4786 				 * It was acked on the scoreboard -- remove
4787 				 * it from total
4788 				 */
4789 				rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4790 			} else if (rsm->r_flags & RACK_SACK_PASSED) {
4791 				/*
4792 				 * There are segments ACKED on the
4793 				 * scoreboard further up. We are seeing
4794 				 * reordering.
4795 				 */
4796 				rsm->r_flags &= ~RACK_SACK_PASSED;
4797 				counter_u64_add(rack_reorder_seen, 1);
4798 				rsm->r_flags |= RACK_ACKED;
4799 				rack->r_ctl.rc_reorder_ts = cts;
4800 			}
4801 			left = th_ack - rsm->r_end;
4802 			if (rsm->r_rtr_cnt > 1) {
4803 				/*
4804 				 * Technically we should make r_rtr_cnt be
4805 				 * monotonically increasing and just mod it to
4806 				 * the timestamp it is replacing.. that way
4807 				 * we would have the last 3 retransmits. Now
4808 				 * rc_loss_count will be wrong if we
4809 				 * retransmit something more than 2 times in
4810 				 * recovery :(
4811 				 */
4812 				rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1);
4813 			}
4814 			/* Free back to zone */
4815 			rack_free(rack, rsm);
4816 			if (left) {
4817 				goto more;
4818 			}
4819 			goto proc_sack;
4820 		}
4821 		if (rsm->r_flags & RACK_ACKED) {
4822 			/*
4823 			 * It was acked on the scoreboard -- remove it from
4824 			 * total for the part being cum-acked.
4825 			 */
4826 			rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
4827 		}
4828 		/*
4829 		 * Clear the dup ack count for
4830 		 * the piece that remains.
4831 		 */
4832 		rsm->r_dupack = 0;
4833 		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
4834 		if (rsm->r_rtr_bytes) {
4835 			/*
4836 			 * It was retransmitted; adjust the
4837 			 * sack holes for what was acked.
4838 			 */
4839 			int ack_am;
4840 
4841 			ack_am = (th_ack - rsm->r_start);
4842 			if (ack_am >= rsm->r_rtr_bytes) {
4843 				rack->r_ctl.rc_holes_rxt -= ack_am;
4844 				rsm->r_rtr_bytes -= ack_am;
4845 			}
4846 		}
4847 		/* Update where the piece starts */
4848 		rsm->r_start = th_ack;
4849 	}
4850 proc_sack:
4851 	/* Check for reneging */
4852 	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
4853 	if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
4854 		/*
4855 		 * The peer has moved snd_una up to
4856 		 * the edge of this send, i.e. one
4857 		 * that it had previously acked. The only
4858 		 * way that can be true is if the peer threw
4859 		 * away data (space issues) that it had
4860 		 * previously sacked (else it would have
4861 		 * given us snd_una up to rsm->r_end).
4862 		 * We need to undo the acked markings here.
4863 		 *
4864 		 * Note we have to look to make sure th_ack is
4865 		 * our rsm->r_start in case we get an old ack
4866 		 * where th_ack is behind snd_una.
4867 		 */
4868 		rack_peer_reneges(rack, rsm, th->th_ack);
4869 	}
4870 	if ((to->to_flags & TOF_SACK) == 0) {
4871 		/* We are done, nothing left */
4872 		goto out;
4873 	}
4874 	/* Sack block processing */
4875 	if (SEQ_GT(th_ack, tp->snd_una))
4876 		ack_point = th_ack;
4877 	else
4878 		ack_point = tp->snd_una;
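	/*
	 * ack_point is the highest cumulative ack we can trust;
	 * only blocks lying strictly above it and at or below
	 * snd_max are accepted into sack_blocks[] below.
	 */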
4879 	for (i = 0; i < to->to_nsacks; i++) {
4880 		bcopy((to->to_sacks + i * TCPOLEN_SACK),
4881 		      &sack, sizeof(sack));
4882 		sack.start = ntohl(sack.start);
4883 		sack.end = ntohl(sack.end);
4884 		if (SEQ_GT(sack.end, sack.start) &&
4885 		    SEQ_GT(sack.start, ack_point) &&
4886 		    SEQ_LT(sack.start, tp->snd_max) &&
4887 		    SEQ_GT(sack.end, ack_point) &&
4888 		    SEQ_LEQ(sack.end, tp->snd_max)) {
4889 			sack_blocks[num_sack_blks] = sack;
4890 			num_sack_blks++;
4891 #ifdef NETFLIX_STATS
4892 		} else if (SEQ_LEQ(sack.start, th_ack) &&
4893 			   SEQ_LEQ(sack.end, th_ack)) {
4894 			/*
4895 			 * It's a D-SACK block.
4896 			 */
4897 			tcp_record_dsack(sack.start, sack.end);
4898 #endif
4899 		}
4900 
4901 	}
4902 	/*
4903 	 * Sort the SACK blocks so we can update the rack scoreboard with
4904 	 * just one pass.
4905 	 */
4906 	if (rack_use_sack_filter) {
4907 		num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
4908 						 num_sack_blks, th->th_ack);
4909 		ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
4910 	}
4911 	if (num_sack_blks == 0)  {
4912 		/* Nothing to sack (DSACKs?) */
4913 		goto out_with_totals;
4914 	}
4915 	if (num_sack_blks < 2) {
4916 		/* Only one, we don't need to sort */
4917 		goto do_sack_work;
4918 	}
4919 	/* Sort the sacks */
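	/*
	 * A simple O(n^2) exchange sort by block end is fine here,
	 * since there are at most TCP_MAX_SACK + 1 blocks.
	 */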
4920 	for (i = 0; i < num_sack_blks; i++) {
4921 		for (j = i + 1; j < num_sack_blks; j++) {
4922 			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
4923 				sack = sack_blocks[i];
4924 				sack_blocks[i] = sack_blocks[j];
4925 				sack_blocks[j] = sack;
4926 			}
4927 		}
4928 	}
4929 	/*
4930 	 * Now are any of the sack block ends the same (yes, some
4931 	 * implementations send these)?
4932 	 */
4933 again:
4934 	if (num_sack_blks == 0)
4935 		goto out_with_totals;
4936 	if (num_sack_blks > 1) {
4937 		for (i = 0; i < num_sack_blks; i++) {
4938 			for (j = i + 1; j < num_sack_blks; j++) {
4939 				if (sack_blocks[i].end == sack_blocks[j].end) {
4940 					/*
4941 					 * Ok these two have the same end, we
4942 					 * want the smallest end and then
4943 					 * throw away the larger and start
4944 					 * again.
4945 					 */
4946 					if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
4947 						/*
4948 						 * The second block covers
4949 						 * more area, use that.
4950 						 */
4951 						sack_blocks[i].start = sack_blocks[j].start;
4952 					}
4953 					/*
4954 					 * Now collapse out the dup-sack and
4955 					 * lower the count
4956 					 */
4957 					for (k = (j + 1); k < num_sack_blks; k++) {
4958 						sack_blocks[j].start = sack_blocks[k].start;
4959 						sack_blocks[j].end = sack_blocks[k].end;
4960 						j++;
4961 					}
4962 					num_sack_blks--;
4963 					goto again;
4964 				}
4965 			}
4966 		}
4967 	}
4968 do_sack_work:
4969 	/*
4970 	 * First let's look to see if
4971 	 * we have retransmitted and
4972 	 * can use the next entry in the transmit map?
4973 	 */
4974 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
4975 	if (rsm &&
4976 	    SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
4977 	    SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
4978 		/*
4979 		 * We probably did the FR and the next
4980 		 * SACK coming in continues it as we would expect.
4981 		 */
4982 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
4983 		if (acked) {
4984 			rack->r_wanted_output++;
4985 			changed += acked;
4986 			sack_changed += acked;
4987 		}
4988 		if (num_sack_blks == 1) {
4989 			/*
4990 			 * This is what we would expect from
4991 			 * a normal implementation to happen
4992 			 * after we have retransmitted the FR,
4993 			 * i.e. the sack-filter pushes down
4994 			 * to 1 block and the next to be retransmitted
4995 			 * is the sequence in the sack block (as more
4996 			 * are acked). Count this as ACK'd data to boost
4997 			 * up the chances of recovering any false positives.
4998 			 */
4999 			rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
5000 			counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
5001 			counter_u64_add(rack_express_sack, 1);
5002 			if (rack->r_ctl.ack_count > 0xfff00000) {
5003 				/*
5004 				 * Halve the counters to keep them
5005 				 * from overflowing a uint32_t.
5006 				 */
5007 				rack->r_ctl.ack_count /= 2;
5008 				rack->r_ctl.sack_count /= 2;
5009 			}
5010 			goto out_with_totals;
5011 		} else {
5012 			/*
5013 			 * Start the loop through the
5014 			 * rest of the blocks, past the first block.
5015 			 */
5016 			moved_two = 0;
5017 			loop_start = 1;
5018 		}
5019 	}
5020 	/* It's a sack of some sort */
5021 	rack->r_ctl.sack_count++;
5022 	if (rack->r_ctl.sack_count > 0xfff00000) {
5023 		/*
5024 		 * Halve the counters to keep them
5025 		 * from overflowing a uint32_t.
5026 		 */
5027 		rack->r_ctl.ack_count /= 2;
5028 		rack->r_ctl.sack_count /= 2;
5029 	}
5030 	counter_u64_add(rack_sack_total, 1);
5031 	if (rack->sack_attack_disable) {
5032 		/* An attacker disablement is in place */
5033 		if (num_sack_blks > 1) {
5034 			rack->r_ctl.sack_count += (num_sack_blks - 1);
5035 			rack->r_ctl.sack_moved_extra++;
5036 			counter_u64_add(rack_move_some, 1);
5037 			if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
5038 				rack->r_ctl.sack_moved_extra /= 2;
5039 				rack->r_ctl.sack_noextra_move /= 2;
5040 			}
5041 		}
5042 		goto out;
5043 	}
5044 	rsm = rack->r_ctl.rc_sacklast;
5045 	for (i = loop_start; i < num_sack_blks; i++) {
5046 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
5047 		if (acked) {
5048 			rack->r_wanted_output++;
5049 			changed += acked;
5050 			sack_changed += acked;
5051 		}
5052 		if (moved_two) {
5053 			/*
5054 			 * If we did not get a SACK for at least an MSS and
5055 			 * had to move at all, or if we moved more than our
5056 			 * threshold, it counts against the "extra" move.
5057 			 */
5058 			rack->r_ctl.sack_moved_extra += moved_two;
5059 			counter_u64_add(rack_move_some, 1);
5060 		} else {
5061 			/*
5062 			 * else we did not have to move
5063 			 * any more than we would expect.
5064 			 */
5065 			rack->r_ctl.sack_noextra_move++;
5066 			counter_u64_add(rack_move_none, 1);
5067 		}
5068 		if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
5069 			/*
5070 			 * If the SACK was not a full MSS then
5071 			 * we add to sack_count the number of
5072 			 * MSS's (or possibly more than
5073 			 * an MSS if it's a TSO send) we had to skip by.
5074 			 */
5075 			rack->r_ctl.sack_count += moved_two;
5076 			counter_u64_add(rack_sack_total, moved_two);
5077 		}
5078 		/*
5079 		 * Now we need to set up for the next
5080 		 * round. First we make sure we won't
5081 		 * exceed the size of our uint32_t on
5082 		 * the various counts, and then clear out
5083 		 * moved_two.
5084 		 */
5085 		if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
5086 		    (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
5087 			rack->r_ctl.sack_moved_extra /= 2;
5088 			rack->r_ctl.sack_noextra_move /= 2;
5089 		}
5090 		if (rack->r_ctl.sack_count > 0xfff00000) {
5091 			rack->r_ctl.ack_count /= 2;
5092 			rack->r_ctl.sack_count /= 2;
5093 		}
5094 		moved_two = 0;
5095 	}
5096 out_with_totals:
5097 	if (num_sack_blks > 1) {
5098 		/*
5099 		 * You get an extra stroke if
5100 		 * you have more than one sack-blk, this
5101 		 * could be where we are skipping forward
5102 		 * and the sack-filter is still working, or
5103 		 * it could be an attacker constantly
5104 		 * moving us.
5105 		 */
5106 		rack->r_ctl.sack_moved_extra++;
5107 		counter_u64_add(rack_move_some, 1);
5108 	}
5109 out:
5110 #ifdef NETFLIX_EXP_DETECTION
5111 	if ((rack->do_detection || tcp_force_detection) &&
5112 	    tcp_sack_to_ack_thresh &&
5113 	    tcp_sack_to_move_thresh &&
5114 	    ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
5115 		/*
5116 		 * We have thresholds set to find
5117 		 * possible attackers and disable sack.
5118 		 * Check them.
5119 		 */
5120 		uint64_t ackratio, moveratio, movetotal;
5121 
5122 		/* Log detecting */
5123 		rack_log_sad(rack, 1);
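		/*
		 * Both ratios below are scaled by 1000:
		 * ackratio  = sack_count / ack_count,
		 * moveratio = sack_moved_extra /
		 *             (sack_moved_extra + sack_noextra_move).
		 * An attack is only declared when both exceed
		 * their sysctl thresholds.
		 */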
5124 		ackratio = (uint64_t)(rack->r_ctl.sack_count);
5125 		ackratio *= (uint64_t)(1000);
5126 		if (rack->r_ctl.ack_count)
5127 			ackratio /= (uint64_t)(rack->r_ctl.ack_count);
5128 		else {
5129 			/* We really should not hit here */
5130 			ackratio = 1000;
5131 		}
5132 		if ((rack->sack_attack_disable  == 0) &&
5133 		    (ackratio > rack_highest_sack_thresh_seen))
5134 			rack_highest_sack_thresh_seen = (uint32_t)ackratio;
5135 		movetotal = rack->r_ctl.sack_moved_extra;
5136 		movetotal += rack->r_ctl.sack_noextra_move;
5137 		moveratio = rack->r_ctl.sack_moved_extra;
5138 		moveratio *= (uint64_t)1000;
5139 		if (movetotal)
5140 			moveratio /= movetotal;
5141 		else {
5142 			/* No moves, that's pretty good */
5143 			moveratio = 0;
5144 		}
5145 		if ((rack->sack_attack_disable == 0) &&
5146 		    (moveratio > rack_highest_move_thresh_seen))
5147 			rack_highest_move_thresh_seen = (uint32_t)moveratio;
5148 		if (rack->sack_attack_disable == 0) {
5149 			if ((ackratio > tcp_sack_to_ack_thresh) &&
5150 			    (moveratio > tcp_sack_to_move_thresh)) {
5151 				/* Disable sack processing */
5152 				rack->sack_attack_disable = 1;
5153 				if (rack->r_rep_attack == 0) {
5154 					rack->r_rep_attack = 1;
5155 					counter_u64_add(rack_sack_attacks_detected, 1);
5156 				}
5157 				if (tcp_attack_on_turns_on_logging) {
5158 					/*
5159 					 * Turn on logging, used for debugging
5160 					 * false positives.
5161 					 */
5162 					rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
5163 				}
5164 				/* Clamp the cwnd at flight size */
5165 				rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
5166 				rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
5167 				rack_log_sad(rack, 2);
5168 			}
5169 		} else {
5170 			/* We are sack-disabled check for false positives */
5171 			if ((ackratio <= tcp_restoral_thresh) ||
5172 			    (rack->r_ctl.rc_num_maps_alloced  < tcp_map_minimum)) {
5173 				rack->sack_attack_disable  = 0;
5174 				rack_log_sad(rack, 3);
5175 				/* Restart counting */
5176 				rack->r_ctl.sack_count = 0;
5177 				rack->r_ctl.sack_moved_extra = 0;
5178 				rack->r_ctl.sack_noextra_move = 1;
5179 				rack->r_ctl.ack_count = max(1,
5180 				      (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp)));
5181 
5182 				if (rack->r_rep_reverse == 0) {
5183 					rack->r_rep_reverse = 1;
5184 					counter_u64_add(rack_sack_attacks_reversed, 1);
5185 				}
5186 				/* Restore the cwnd */
5187 				if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
5188 					rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
5189 			}
5190 		}
5191 	}
5192 #endif
5193 	if (changed) {
5194 		/* Something changed cancel the rack timer */
5195 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5196 	}
5197 	if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) {
5198 		/*
5199 		 * Ok we have a high probability that we need to go into
5200 		 * recovery since we have data sack'd
5201 		 */
5202 		struct rack_sendmap *rsm;
5203 		uint32_t tsused;
5204 
5205 		tsused = tcp_ts_getticks();
5206 		rsm = tcp_rack_output(tp, rack, tsused);
5207 		if (rsm) {
5208 			/* Enter recovery */
5209 			rack->r_ctl.rc_rsm_start = rsm->r_start;
5210 			rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
5211 			rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
5212 			entered_recovery = 1;
5213 			rack_cong_signal(tp, NULL, CC_NDUPACK);
5214 			/*
5215 			 * When we enter recovery we need to assure we send
5216 			 * one packet.
5217 			 */
5218 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5219 			rack_log_to_prr(rack, 8);
5220 			rack->r_timer_override = 1;
5221 		}
5222 	}
5223 	if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) {
5224 		/* Deal with changed and PRR here (in recovery only) */
5225 		uint32_t pipe, snd_una;
5226 
5227 		rack->r_ctl.rc_prr_delivered += changed;
5228 		/* Compute prr_sndcnt */
5229 		if (SEQ_GT(tp->snd_una, th_ack)) {
5230 			snd_una = tp->snd_una;
5231 		} else {
5232 			snd_una = th_ack;
5233 		}
5234 		pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
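		/*
		 * This mirrors Proportional Rate Reduction (RFC 6937):
		 * while pipe > ssthresh, sndcnt is the delivered count
		 * scaled by ssthresh/RecoverFS, less what we have
		 * already sent (rc_prr_out); once pipe reaches
		 * ssthresh, the branch below instead bounds sending
		 * in a slow-start-like fashion.
		 */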
5235 		if (pipe > tp->snd_ssthresh) {
5236 			long sndcnt;
5237 
5238 			sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
5239 			if (rack->r_ctl.rc_prr_recovery_fs > 0)
5240 				sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
5241 			else {
5242 				rack->r_ctl.rc_prr_sndcnt = 0;
5243 				rack_log_to_prr(rack, 9);
5244 				sndcnt = 0;
5245 			}
5246 			sndcnt++;
5247 			if (sndcnt > (long)rack->r_ctl.rc_prr_out)
5248 				sndcnt -= rack->r_ctl.rc_prr_out;
5249 			else
5250 				sndcnt = 0;
5251 			rack->r_ctl.rc_prr_sndcnt = sndcnt;
5252 			rack_log_to_prr(rack, 10);
5253 		} else {
5254 			uint32_t limit;
5255 
5256 			if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
5257 				limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
5258 			else
5259 				limit = 0;
5260 			if (changed > limit)
5261 				limit = changed;
5262 			limit += ctf_fixed_maxseg(tp);
5263 			if (tp->snd_ssthresh > pipe) {
5264 				rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
5265 				rack_log_to_prr(rack, 11);
5266 			} else {
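				/*
				 * pipe == ssthresh here (the outer test
				 * excluded pipe > ssthresh), so ssthresh -
				 * pipe is zero and sndcnt stays at 0.
				 */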
5267 				rack->r_ctl.rc_prr_sndcnt = min(0, limit);
5268 				rack_log_to_prr(rack, 12);
5269 			}
5270 		}
5271 		if (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) {
5272 			rack->r_timer_override = 1;
5273 		}
5274 	}
5275 }
5276 
5277 static void
5278 rack_strike_dupack(struct tcp_rack *rack)
5279 {
5280 	struct rack_sendmap *rsm;
5281 
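	/*
	 * A duplicate ack is counted against the oldest outstanding
	 * send, the head of the transmit-order map; once it reaches
	 * DUP_ACK_THRESHOLD we ask for output so it can be resent.
	 */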
5282 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5283 	if (rsm && (rsm->r_dupack < 0xff)) {
5284 		rsm->r_dupack++;
5285 		if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
5286 			rack->r_wanted_output = 1;
5287 			rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
5288 		} else {
5289 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
5290 		}
5291 	}
5292 }
5293 
5294 /*
5295  * Return value of 1, we do not need to call rack_process_data().
5296  * return value of 0, rack_process_data can be called.
5297  * For ret_val, if it's 0 the TCP is locked, if it's non-zero
5298  * its unlocked and probably unsafe to touch the TCB.
5299  */
5300 static int
5301 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
5302     struct tcpcb *tp, struct tcpopt *to,
5303     uint32_t tiwin, int32_t tlen,
5304     int32_t * ofia, int32_t thflags, int32_t * ret_val)
5305 {
5306 	int32_t ourfinisacked = 0;
5307 	int32_t nsegs, acked_amount;
5308 	int32_t acked;
5309 	struct mbuf *mfree;
5310 	struct tcp_rack *rack;
5311 	int32_t recovery = 0;
5312 
5313 	rack = (struct tcp_rack *)tp->t_fb_ptr;
5314 	if (SEQ_GT(th->th_ack, tp->snd_max)) {
5315 		ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
5316 		rack->r_wanted_output++;
5317 		return (1);
5318 	}
5319 	if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
5320 		if (rack->rc_in_persist)
5321 			tp->t_rxtshift = 0;
5322 		if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd))
5323 			rack_strike_dupack(rack);
5324 		rack_log_ack(tp, to, th);
5325 	}
5326 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
5327 		/*
5328 		 * Old ack, behind (or duplicate to) the last one rcv'd
5329 		 * Note: Should mark that reordering is occurring! We should
5330 		 * also look for sack blocks arriving, e.g. ack 1, 4-4 then
5331 		 * ack 1, 3-3, 4-4 would be reordering; as would ack 1,
5332 		 * 3-3 <no retran and> ack 3.
5333 		 */
5334 		return (0);
5335 	}
5336 	/*
5337 	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
5338 	 * something we sent.
5339 	 */
5340 	if (tp->t_flags & TF_NEEDSYN) {
5341 		/*
5342 		 * T/TCP: Connection was half-synchronized, and our SYN has
5343 		 * been ACK'd (so connection is now fully synchronized).  Go
5344 		 * to non-starred state, increment snd_una for ACK of SYN,
5345 		 * and check if we can do window scaling.
5346 		 */
5347 		tp->t_flags &= ~TF_NEEDSYN;
5348 		tp->snd_una++;
5349 		/* Do window scaling? */
5350 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5351 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5352 			tp->rcv_scale = tp->request_r_scale;
5353 			/* Send window already scaled. */
5354 		}
5355 	}
5356 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
5357 	INP_WLOCK_ASSERT(tp->t_inpcb);
5358 
5359 	acked = BYTES_THIS_ACK(tp, th);
5360 	TCPSTAT_ADD(tcps_rcvackpack, nsegs);
5361 	TCPSTAT_ADD(tcps_rcvackbyte, acked);
5362 
5363 	/*
5364 	 * If we just performed our first retransmit, and the ACK arrives
5365 	 * within our recovery window, then it was a mistake to do the
5366 	 * retransmit in the first place.  Recover our original cwnd and
5367 	 * ssthresh, and proceed to transmit where we left off.
5368 	 */
5369 	if (tp->t_flags & TF_PREVVALID) {
5370 		tp->t_flags &= ~TF_PREVVALID;
5371 		if (tp->t_rxtshift == 1 &&
5372 		    (int)(ticks - tp->t_badrxtwin) < 0)
5373 			rack_cong_signal(tp, th, CC_RTO_ERR);
5374 	}
5375 	/*
5376 	 * If we have a timestamp reply, update smoothed round trip time. If
5377 	 * no timestamp is present but transmit timer is running and timed
5378 	 * sequence number was acked, update smoothed round trip time. Since
5379 	 * we now have an rtt measurement, cancel the timer backoff (cf.,
5380 	 * Phil Karn's retransmit alg.). Recompute the initial retransmit
5381 	 * timer.
5382 	 *
5383 	 * Some boxes send broken timestamp replies during the SYN+ACK
5384 	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
5385 	 * and blow up the retransmit timer.
5386 	 */
5387 	/*
5388 	 * If all outstanding data is acked, stop retransmit timer and
5389 	 * remember to restart (more output or persist). If there is more
5390 	 * data to be acked, restart retransmit timer, using current
5391 	 * (possibly backed-off) value.
5392 	 */
5393 	if (th->th_ack == tp->snd_max) {
5394 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5395 		rack->r_wanted_output++;
5396 	}
5397 	if (acked == 0) {
5398 		if (ofia)
5399 			*ofia = ourfinisacked;
5400 		return (0);
5401 	}
5402 	if (rack->r_ctl.rc_early_recovery) {
5403 		if (IN_RECOVERY(tp->t_flags)) {
5404 			if (SEQ_LT(th->th_ack, tp->snd_recover) &&
5405 			    (SEQ_LT(th->th_ack, tp->snd_max))) {
5406 				tcp_rack_partialack(tp, th);
5407 			} else {
5408 				rack_post_recovery(tp, th);
5409 				recovery = 1;
5410 			}
5411 		}
5412 	}
5413 	/*
5414 	 * Let the congestion control algorithm update congestion control
5415 	 * related information. This typically means increasing the
5416 	 * congestion window.
5417 	 */
5418 	rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
5419 	SOCKBUF_LOCK(&so->so_snd);
5420 	acked_amount = min(acked, (int)sbavail(&so->so_snd));
5421 	tp->snd_wnd -= acked_amount;
5422 	mfree = sbcut_locked(&so->so_snd, acked_amount);
5423 	if ((sbused(&so->so_snd) == 0) &&
5424 	    (acked > acked_amount) &&
5425 	    (tp->t_state >= TCPS_FIN_WAIT_1)) {
5426 		ourfinisacked = 1;
5427 	}
5428 	/* NB: sowwakeup_locked() does an implicit unlock. */
5429 	sowwakeup_locked(so);
5430 	m_freem(mfree);
5431 	if (rack->r_ctl.rc_early_recovery == 0) {
5432 		if (IN_RECOVERY(tp->t_flags)) {
5433 			if (SEQ_LT(th->th_ack, tp->snd_recover) &&
5434 			    (SEQ_LT(th->th_ack, tp->snd_max))) {
5435 				tcp_rack_partialack(tp, th);
5436 			} else {
5437 				rack_post_recovery(tp, th);
5438 			}
5439 		}
5440 	}
5441 	tp->snd_una = th->th_ack;
5442 	if (SEQ_GT(tp->snd_una, tp->snd_recover))
5443 		tp->snd_recover = tp->snd_una;
5444 
5445 	if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
5446 		tp->snd_nxt = tp->snd_una;
5447 	}
5448 	if (tp->snd_una == tp->snd_max) {
5449 		/* Nothing left outstanding */
5450 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
5451 		if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
5452 			tp->t_acktime = 0;
5453 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5454 		/* Set need output so persist might get set */
5455 		rack->r_wanted_output++;
5456 		if (rack_use_sack_filter)
5457 			sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5458 		if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
5459 		    (sbavail(&so->so_snd) == 0) &&
5460 		    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
5461 			/*
5462 			 * The socket was gone and the
5463 			 * peer sent data, time to
5464 			 * reset him.
5465 			 */
5466 			*ret_val = 1;
5467 			tp = tcp_close(tp);
5468 			ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
5469 			return (1);
5470 		}
5471 	}
5472 	if (ofia)
5473 		*ofia = ourfinisacked;
5474 	return (0);
5475 }
5476 
5477 static void
5478 rack_collapsed_window(struct tcp_rack *rack)
5479 {
5480 	/*
5481 	 * Now we must walk the
5482 	 * send map and divide the
5483 	 * ones left stranded. These
5484 	 * guys can't cause us to abort
5485 	 * the connection and are really
5486 	 * "unsent". However, if a buggy
5487 	 * client actually did keep some
5488 	 * of the data, i.e. collapsed the win
5489 	 * and refused to ack, and then opened
5490 	 * the win and acked that data, we would
5491 	 * get into an ack war; so the simpler
5492 	 * method of just pretending we
5493 	 * did not send those segments
5494 	 * won't work.
5495 	 */
5496 	struct rack_sendmap *rsm, *nrsm, fe, *insret;
5497 	tcp_seq max_seq;
5498 	uint32_t maxseg;
5499 
5500 	max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
5501 	maxseg = ctf_fixed_maxseg(rack->rc_tp);
5502 	memset(&fe, 0, sizeof(fe));
5503 	fe.r_start = max_seq;
5504 	/* Find the first seq past or at maxseq */
5505 	rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
5506 	if (rsm == NULL) {
5507 		/* Nothing to do, strange */
5508 		rack->rc_has_collapsed = 0;
5509 		return;
5510 	}
5511 	/*
5512 	 * Now do we need to split at
5513 	 * the collapse point?
5514 	 */
5515 	if (SEQ_GT(max_seq, rsm->r_start)) {
5516 		nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
5517 		if (nrsm == NULL) {
5518 			/* We can't get a rsm, mark all? */
5519 			nrsm = rsm;
5520 			goto no_split;
5521 		}
5522 		/* Clone it */
5523 		rack_clone_rsm(rack, nrsm, rsm, max_seq);
5524 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
5525 #ifdef INVARIANTS
5526 		if (insret != NULL) {
5527 			panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
5528 			      nrsm, insret, rack, rsm);
5529 		}
5530 #endif
5531 		if (rsm->r_in_tmap) {
5532 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
5533 			nrsm->r_in_tmap = 1;
5534 		}
5535 		/*
5536 		 * Set the new RSM as the
5537 		 * collapsed starting point
5538 		 */
5539 		rsm = nrsm;
5540 	}
5541 no_split:
5542 	counter_u64_add(rack_collapsed_win, 1);
5543 	RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
5544 		nrsm->r_flags |= RACK_RWND_COLLAPSED;
5545 		rack->rc_has_collapsed = 1;
5546 	}
5547 }
5548 
5549 static void
5550 rack_un_collapse_window(struct tcp_rack *rack)
5551 {
5552 	struct rack_sendmap *rsm;
5553 
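	/*
	 * Collapsed entries run from the collapse point to the tail
	 * of the tree, so walk backwards clearing the flag and stop
	 * at the first entry that was never marked.
	 */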
5554 	RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
5555 		if (rsm->r_flags & RACK_RWND_COLLAPSED)
5556 			rsm->r_flags &= ~RACK_RWND_COLLAPSED;
5557 		else
5558 			break;
5559 	}
5560 	rack->rc_has_collapsed = 0;
5561 }
5562 
5563 /*
5564  * Return value of 1, the TCB is unlocked and most
5565  * likely gone, return value of 0, the TCP is still
5566  * locked.
5567  */
5568 static int
5569 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
5570     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
5571     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5572 {
5573 	/*
5574 	 * Update window information. Don't look at window if no ACK: TAC's
5575 	 * send garbage on first SYN.
5576 	 */
5577 	int32_t nsegs;
5578 	int32_t tfo_syn;
5579 	struct tcp_rack *rack;
5580 
5581 	rack = (struct tcp_rack *)tp->t_fb_ptr;
5582 	INP_WLOCK_ASSERT(tp->t_inpcb);
5583 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
5584 	if ((thflags & TH_ACK) &&
5585 	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
5586 	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
5587 	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
5588 		/* keep track of pure window updates */
5589 		if (tlen == 0 &&
5590 		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
5591 			TCPSTAT_INC(tcps_rcvwinupd);
5592 		tp->snd_wnd = tiwin;
5593 		tp->snd_wl1 = th->th_seq;
5594 		tp->snd_wl2 = th->th_ack;
5595 		if (tp->snd_wnd > tp->max_sndwnd)
5596 			tp->max_sndwnd = tp->snd_wnd;
5597 		rack->r_wanted_output++;
5598 	} else if (thflags & TH_ACK) {
5599 		if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
5600 			tp->snd_wnd = tiwin;
5601 			tp->snd_wl1 = th->th_seq;
5602 			tp->snd_wl2 = th->th_ack;
5603 		}
5604 	}
5605 	if (tp->snd_wnd < ctf_outstanding(tp))
5606 		/* The peer collapsed the window */
5607 		rack_collapsed_window(rack);
5608 	else if (rack->rc_has_collapsed)
5609 		rack_un_collapse_window(rack);
5610 	/* Was persist timer active and now we have window space? */
5611 	if ((rack->rc_in_persist != 0) &&
5612 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
5613 				rack->r_ctl.rc_pace_min_segs))) {
5614 		rack_exit_persist(tp, rack);
5615 		tp->snd_nxt = tp->snd_max;
5616 		/* Make sure we output to start the timer */
5617 		rack->r_wanted_output++;
5618 	}
5619 	/* Do we enter persists? */
5620 	if ((rack->rc_in_persist == 0) &&
5621 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
5622 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
5623 	    (tp->snd_max == tp->snd_una) &&
5624 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
5625 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
5626 		/*
5627 		 * Here the rwnd is less than
5628 		 * the pacing size, we are established,
5629 		 * nothing is outstanding, and there is
5630 		 * data to send. Enter persists.
5631 		 */
5632 		tp->snd_nxt = tp->snd_una;
5633 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
5634 	}
5635 	if (tp->t_flags2 & TF2_DROP_AF_DATA) {
5636 		m_freem(m);
5637 		return (0);
5638 	}
5639 	/*
5640 	 * Process segments with URG.
5641 	 */
5642 	if ((thflags & TH_URG) && th->th_urp &&
5643 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5644 		/*
5645 		 * This is a kludge, but if we receive and accept random
5646 		 * urgent pointers, we'll crash in soreceive.  It's hard to
5647 		 * imagine someone actually wanting to send this much urgent
5648 		 * data.
5649 		 */
5650 		SOCKBUF_LOCK(&so->so_rcv);
5651 		if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
5652 			th->th_urp = 0;	/* XXX */
5653 			thflags &= ~TH_URG;	/* XXX */
5654 			SOCKBUF_UNLOCK(&so->so_rcv);	/* XXX */
5655 			goto dodata;	/* XXX */
5656 		}
5657 		/*
5658 		 * If this segment advances the known urgent pointer, then
5659 		 * mark the data stream.  This should not happen in
5660 		 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a
5661 		 * FIN has been received from the remote side. In these
5662 		 * states we ignore the URG.
5663 		 *
5664 		 * According to RFC961 (Assigned Protocols), the urgent
5665 		 * pointer points to the last octet of urgent data.  We
5666 		 * continue, however, to consider it to indicate the first
5667 		 * octet of data past the urgent section as the original
5668 		 * spec states (in one of two places).
5669 		 */
5670 		if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
5671 			tp->rcv_up = th->th_seq + th->th_urp;
5672 			so->so_oobmark = sbavail(&so->so_rcv) +
5673 			    (tp->rcv_up - tp->rcv_nxt) - 1;
5674 			if (so->so_oobmark == 0)
5675 				so->so_rcv.sb_state |= SBS_RCVATMARK;
5676 			sohasoutofband(so);
5677 			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
5678 		}
5679 		SOCKBUF_UNLOCK(&so->so_rcv);
5680 		/*
5681 		 * Remove out of band data so doesn't get presented to user.
5682 		 * This can happen independent of advancing the URG pointer,
5683 		 * but if two URG's are pending at once, some out-of-band
5684 		 * data may creep in... ick.
5685 		 */
5686 		if (th->th_urp <= (uint32_t) tlen &&
5687 		    !(so->so_options & SO_OOBINLINE)) {
5688 			/* hdr drop is delayed */
5689 			tcp_pulloutofband(so, th, m, drop_hdrlen);
5690 		}
5691 	} else {
5692 		/*
5693 		 * If no out of band data is expected, pull receive urgent
5694 		 * pointer along with the receive window.
5695 		 */
5696 		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
5697 			tp->rcv_up = tp->rcv_nxt;
5698 	}
5699 dodata:				/* XXX */
5700 	INP_WLOCK_ASSERT(tp->t_inpcb);
5701 
5702 	/*
5703 	 * Process the segment text, merging it into the TCP sequencing
5704 	 * queue, and arranging for acknowledgment of receipt if necessary.
5705 	 * This process logically involves adjusting tp->rcv_wnd as data is
5706 	 * presented to the user (this happens in tcp_usrreq.c, case
5707 	 * PRU_RCVD).  If a FIN has already been received on this connection
5708 	 * then we just ignore the text.
5709 	 */
5710 	tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
5711 		   IS_FASTOPEN(tp->t_flags));
5712 	if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
5713 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5714 		tcp_seq save_start = th->th_seq;
5715 		tcp_seq save_rnxt  = tp->rcv_nxt;
5716 		int     save_tlen  = tlen;
5717 
5718 		m_adj(m, drop_hdrlen);	/* delayed header drop */
5719 		/*
5720 		 * Insert segment which includes th into TCP reassembly
5721 		 * queue with control block tp.  Set thflags to whether
5722 		 * reassembly now includes a segment with FIN.  This handles
5723 		 * the common case inline (segment is the next to be
5724 		 * received on an established connection, and the queue is
5725 		 * empty), avoiding linkage into and removal from the queue
5726 		 * and repetition of various conversions. Set DELACK for
5727 		 * segments received in order, but ack immediately when
5728 		 * segments are out of order (so fast retransmit can work).
5729 		 */
5730 		if (th->th_seq == tp->rcv_nxt &&
5731 		    SEGQ_EMPTY(tp) &&
5732 		    (TCPS_HAVEESTABLISHED(tp->t_state) ||
5733 		    tfo_syn)) {
5734 #ifdef NETFLIX_SB_LIMITS
5735 			u_int mcnt, appended;
5736 
5737 			if (so->so_rcv.sb_shlim) {
5738 				mcnt = m_memcnt(m);
5739 				appended = 0;
5740 				if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
5741 				    CFO_NOSLEEP, NULL) == false) {
5742 					counter_u64_add(tcp_sb_shlim_fails, 1);
5743 					m_freem(m);
5744 					return (0);
5745 				}
5746 			}
5747 #endif
5748 			if (DELAY_ACK(tp, tlen) || tfo_syn) {
5749 				rack_timer_cancel(tp, rack,
5750 				    rack->r_ctl.rc_rcvtime, __LINE__);
5751 				tp->t_flags |= TF_DELACK;
5752 			} else {
5753 				rack->r_wanted_output++;
5754 				tp->t_flags |= TF_ACKNOW;
5755 			}
5756 			tp->rcv_nxt += tlen;
5757 			thflags = th->th_flags & TH_FIN;
5758 			TCPSTAT_ADD(tcps_rcvpack, nsegs);
5759 			TCPSTAT_ADD(tcps_rcvbyte, tlen);
5760 			SOCKBUF_LOCK(&so->so_rcv);
5761 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5762 				m_freem(m);
5763 			} else
5764 #ifdef NETFLIX_SB_LIMITS
5765 				appended =
5766 #endif
5767 					sbappendstream_locked(&so->so_rcv, m, 0);
5768 			/* NB: sorwakeup_locked() does an implicit unlock. */
5769 			sorwakeup_locked(so);
5770 #ifdef NETFLIX_SB_LIMITS
5771 			if (so->so_rcv.sb_shlim && appended != mcnt)
5772 				counter_fo_release(so->so_rcv.sb_shlim,
5773 				    mcnt - appended);
5774 #endif
5775 		} else {
5776 			/*
5777 			 * XXX: Due to the header drop above "th" is
5778 			 * theoretically invalid by now.  Fortunately
5779 			 * m_adj() doesn't actually free any mbufs when
5780 			 * trimming from the head.
5781 			 */
5782 			tcp_seq temp = save_start;
5783 			thflags = tcp_reass(tp, th, &temp, &tlen, m);
5784 			tp->t_flags |= TF_ACKNOW;
5785 		}
5786 		if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) {
5787 			if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
5788 				/*
5789 				 * DSACK actually handled in the fastpath
5790 				 * above.
5791 				 */
5792 				tcp_update_sack_list(tp, save_start,
5793 				    save_start + save_tlen);
5794 			} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
5795 				if ((tp->rcv_numsacks >= 1) &&
5796 				    (tp->sackblks[0].end == save_start)) {
5797 					/*
5798 					 * Partial overlap, recorded at todrop
5799 					 * above.
5800 					 */
5801 					tcp_update_sack_list(tp,
5802 					    tp->sackblks[0].start,
5803 					    tp->sackblks[0].end);
5804 				} else {
5805 					tcp_update_dsack_list(tp, save_start,
5806 					    save_start + save_tlen);
5807 				}
5808 			} else if (tlen >= save_tlen) {
5809 				/* Update of sackblks. */
5810 				tcp_update_dsack_list(tp, save_start,
5811 				    save_start + save_tlen);
5812 			} else if (tlen > 0) {
5813 				tcp_update_dsack_list(tp, save_start,
5814 				    save_start + tlen);
5815 			}
5816 		}
5817 	} else {
5818 		m_freem(m);
5819 		thflags &= ~TH_FIN;
5820 	}
5821 
5822 	/*
5823 	 * If FIN is received ACK the FIN and let the user know that the
5824 	 * connection is closing.
5825 	 */
5826 	if (thflags & TH_FIN) {
5827 		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
5828 			socantrcvmore(so);
5829 			/*
5830 			 * If connection is half-synchronized (i.e. NEEDSYN
5831 			 * flag on) then delay ACK, so it may be piggybacked
5832 			 * when SYN is sent. Otherwise, since we received a
5833 			 * FIN then no more input can be expected, send ACK
5834 			 * now.
5835 			 */
5836 			if (tp->t_flags & TF_NEEDSYN) {
5837 				rack_timer_cancel(tp, rack,
5838 				    rack->r_ctl.rc_rcvtime, __LINE__);
5839 				tp->t_flags |= TF_DELACK;
5840 			} else {
5841 				tp->t_flags |= TF_ACKNOW;
5842 			}
5843 			tp->rcv_nxt++;
5844 		}
5845 		switch (tp->t_state) {
5846 
5847 			/*
5848 			 * In SYN_RECEIVED and ESTABLISHED STATES enter the
5849 			 * CLOSE_WAIT state.
5850 			 */
5851 		case TCPS_SYN_RECEIVED:
5852 			tp->t_starttime = ticks;
5853 			/* FALLTHROUGH */
5854 		case TCPS_ESTABLISHED:
5855 			rack_timer_cancel(tp, rack,
5856 			    rack->r_ctl.rc_rcvtime, __LINE__);
5857 			tcp_state_change(tp, TCPS_CLOSE_WAIT);
5858 			break;
5859 
5860 			/*
5861 			 * If still in FIN_WAIT_1 STATE FIN has not been
5862 			 * acked so enter the CLOSING state.
5863 			 */
5864 		case TCPS_FIN_WAIT_1:
5865 			rack_timer_cancel(tp, rack,
5866 			    rack->r_ctl.rc_rcvtime, __LINE__);
5867 			tcp_state_change(tp, TCPS_CLOSING);
5868 			break;
5869 
5870 			/*
5871 			 * In FIN_WAIT_2 state enter the TIME_WAIT state,
5872 			 * starting the time-wait timer, turning off the
5873 			 * other standard timers.
5874 			 */
5875 		case TCPS_FIN_WAIT_2:
5876 			rack_timer_cancel(tp, rack,
5877 			    rack->r_ctl.rc_rcvtime, __LINE__);
5878 			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5879 			tcp_twstart(tp);
5880 			return (1);
5881 		}
5882 	}
5883 	/*
5884 	 * Return any desired output.
5885 	 */
5886 	if ((tp->t_flags & TF_ACKNOW) ||
5887 	    (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
5888 		rack->r_wanted_output++;
5889 	}
5890 	INP_WLOCK_ASSERT(tp->t_inpcb);
5891 	return (0);
5892 }
5893 
5894 /*
5895  * Here nothing is really faster, it's just that we
5896  * have broken out the fast-data path also just like
5897  * the fast-ack.
5898  */
5899 static int
5900 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
5901     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5902     uint32_t tiwin, int32_t nxt_pkt)
5903 {
5904 	int32_t nsegs;
5905 	int32_t newsize = 0;	/* automatic sockbuf scaling */
5906 	struct tcp_rack *rack;
5907 #ifdef NETFLIX_SB_LIMITS
5908 	u_int mcnt, appended;
5909 #endif
5910 #ifdef TCPDEBUG
5911 	/*
5912 	 * The size of tcp_saveipgen must be the size of the max ip header,
5913 	 * now IPv6.
5914 	 */
5915 	u_char tcp_saveipgen[IP6_HDR_LEN];
5916 	struct tcphdr tcp_savetcp;
5917 	short ostate = 0;
5918 
5919 #endif
5920 	/*
5921 	 * If last ACK falls within this segment's sequence numbers, record
5922 	 * the timestamp. NOTE that the test is modified according to the
5923 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
5924 	 */
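	/*
	 * The checks below qualify a segment for the fast data path:
	 * it must be in sequence, with nothing being retransmitted,
	 * no window change, no pending SYN/FIN, no PAWS failure, it
	 * must not ack new data, and it must fit in the receive
	 * buffer. Any miss returns 0 so the caller takes the slow path.
	 */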
5925 	if (__predict_false(th->th_seq != tp->rcv_nxt)) {
5926 		return (0);
5927 	}
5928 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
5929 		return (0);
5930 	}
5931 	if (tiwin && tiwin != tp->snd_wnd) {
5932 		return (0);
5933 	}
5934 	if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
5935 		return (0);
5936 	}
5937 	if (__predict_false((to->to_flags & TOF_TS) &&
5938 	    (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
5939 		return (0);
5940 	}
5941 	if (__predict_false((th->th_ack != tp->snd_una))) {
5942 		return (0);
5943 	}
5944 	if (__predict_false(tlen > sbspace(&so->so_rcv))) {
5945 		return (0);
5946 	}
5947 	if ((to->to_flags & TOF_TS) != 0 &&
5948 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
5949 		tp->ts_recent_age = tcp_ts_getticks();
5950 		tp->ts_recent = to->to_tsval;
5951 	}
5952 	rack = (struct tcp_rack *)tp->t_fb_ptr;
5953 	/*
5954 	 * This is a pure, in-sequence data packet with nothing on the
5955 	 * reassembly queue and we have enough buffer space to take it.
5956 	 */
5957 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
5958 
5959 #ifdef NETFLIX_SB_LIMITS
5960 	if (so->so_rcv.sb_shlim) {
5961 		mcnt = m_memcnt(m);
5962 		appended = 0;
5963 		if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
5964 		    CFO_NOSLEEP, NULL) == false) {
5965 			counter_u64_add(tcp_sb_shlim_fails, 1);
5966 			m_freem(m);
5967 			return (1);
5968 		}
5969 	}
5970 #endif
5971 	/* Clean receiver SACK report if present */
5972 	if (tp->rcv_numsacks)
5973 		tcp_clean_sackreport(tp);
5974 	TCPSTAT_INC(tcps_preddat);
5975 	tp->rcv_nxt += tlen;
5976 	/*
5977 	 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
5978 	 */
5979 	tp->snd_wl1 = th->th_seq;
5980 	/*
5981 	 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
5982 	 */
5983 	tp->rcv_up = tp->rcv_nxt;
5984 	TCPSTAT_ADD(tcps_rcvpack, nsegs);
5985 	TCPSTAT_ADD(tcps_rcvbyte, tlen);
5986 #ifdef TCPDEBUG
5987 	if (so->so_options & SO_DEBUG)
5988 		tcp_trace(TA_INPUT, ostate, tp,
5989 		    (void *)tcp_saveipgen, &tcp_savetcp, 0);
5990 #endif
5991 	newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
5992 
5993 	/* Add data to socket buffer. */
5994 	SOCKBUF_LOCK(&so->so_rcv);
5995 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5996 		m_freem(m);
5997 	} else {
5998 		/*
5999 		 * Set new socket buffer size. Give up when limit is
6000 		 * reached.
6001 		 */
6002 		if (newsize)
6003 			if (!sbreserve_locked(&so->so_rcv,
6004 			    newsize, so, NULL))
6005 				so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
6006 		m_adj(m, drop_hdrlen);	/* delayed header drop */
6007 #ifdef NETFLIX_SB_LIMITS
6008 		appended =
6009 #endif
6010 			sbappendstream_locked(&so->so_rcv, m, 0);
6011 		ctf_calc_rwin(so, tp);
6012 	}
6013 	/* NB: sorwakeup_locked() does an implicit unlock. */
6014 	sorwakeup_locked(so);
6015 #ifdef NETFLIX_SB_LIMITS
6016 	if (so->so_rcv.sb_shlim && mcnt != appended)
6017 		counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
6018 #endif
6019 	if (DELAY_ACK(tp, tlen)) {
6020 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
6021 		tp->t_flags |= TF_DELACK;
6022 	} else {
6023 		tp->t_flags |= TF_ACKNOW;
6024 		rack->r_wanted_output++;
6025 	}
6026 	if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter)
6027 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6028 	return (1);
6029 }
6030 
6031 /*
6032  * This subfunction is used to try to highly optimize the
6033  * fast path. We again allow window updates that are
6034  * in sequence to remain in the fast-path. We also add
6035  * in the __predict's to attempt to help the compiler.
6036  * Note that if we return a 0, then we can *not* process
6037  * it and the caller should push the packet into the
6038  * slow-path.
6039  */
6040 static int
6041 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
6042     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6043     uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
6044 {
6045 	int32_t acked;
6046 	int32_t nsegs;
6047 
6048 #ifdef TCPDEBUG
6049 	/*
6050 	 * The size of tcp_saveipgen must be the size of the max ip header,
6051 	 * now IPv6.
6052 	 */
6053 	u_char tcp_saveipgen[IP6_HDR_LEN];
6054 	struct tcphdr tcp_savetcp;
6055 	short ostate = 0;
6056 
6057 #endif
6058 	struct tcp_rack *rack;
6059 
6060 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
6061 		/* Old ack, behind (or duplicate to) the last one rcv'd */
6062 		return (0);
6063 	}
6064 	if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
6065 		/* Above what we have sent? */
6066 		return (0);
6067 	}
6068 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
6069 		/* We are retransmitting */
6070 		return (0);
6071 	}
6072 	if (__predict_false(tiwin == 0)) {
6073 		/* zero window */
6074 		return (0);
6075 	}
6076 	if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
6077 		/* We need a SYN or a FIN, unlikely.. */
6078 		return (0);
6079 	}
6080 	if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
6081 		/* Timestamp is behind .. old ack with seq wrap? */
6082 		return (0);
6083 	}
6084 	if (__predict_false(IN_RECOVERY(tp->t_flags))) {
6085 		/* Still recovering */
6086 		return (0);
6087 	}
6088 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6089 	if (rack->r_ctl.rc_sacked) {
6090 		/* We have sack holes on our scoreboard */
6091 		return (0);
6092 	}
6093 	/* Ok if we reach here, we can process a fast-ack */
6094 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
6095 	rack_log_ack(tp, to, th);
6096 	/*
6097 	 * We made progress, clear the tlp
6098 	 * out flag so we could start a TLP
6099 	 * again.
6100 	 */
6101 	rack->r_ctl.rc_tlp_rtx_out = 0;
6102 	/* Did the window get updated? */
6103 	if (tiwin != tp->snd_wnd) {
6104 		tp->snd_wnd = tiwin;
6105 		tp->snd_wl1 = th->th_seq;
6106 		if (tp->snd_wnd > tp->max_sndwnd)
6107 			tp->max_sndwnd = tp->snd_wnd;
6108 	}
6109 	/* Do we exit persists? */
6110 	if ((rack->rc_in_persist != 0) &&
6111 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
6112 			       rack->r_ctl.rc_pace_min_segs))) {
6113 		rack_exit_persist(tp, rack);
6114 	}
6115 	/* Do we enter persists? */
6116 	if ((rack->rc_in_persist == 0) &&
6117 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
6118 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
6119 	    (tp->snd_max == tp->snd_una) &&
6120 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
6121 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
6122 		/*
6123 		 * Here the rwnd is less than
6124 		 * the pacing size, we are established,
6125 		 * nothing is outstanding, and there is
6126 		 * data to send. Enter persists.
6127 		 */
6128 		tp->snd_nxt = tp->snd_una;
6129 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
6130 	}
6131 	/*
6132 	 * If last ACK falls within this segment's sequence numbers, record
6133 	 * the timestamp. NOTE that the test is modified according to the
6134 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
6135 	 */
6136 	if ((to->to_flags & TOF_TS) != 0 &&
6137 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
6138 		tp->ts_recent_age = tcp_ts_getticks();
6139 		tp->ts_recent = to->to_tsval;
6140 	}
6141 	/*
6142 	 * This is a pure ack for outstanding data.
6143 	 */
6144 	TCPSTAT_INC(tcps_predack);
6145 
6146 	/*
6147 	 * "bad retransmit" recovery.
6148 	 */
6149 	if (tp->t_flags & TF_PREVVALID) {
6150 		tp->t_flags &= ~TF_PREVVALID;
6151 		if (tp->t_rxtshift == 1 &&
6152 		    (int)(ticks - tp->t_badrxtwin) < 0)
6153 			rack_cong_signal(tp, th, CC_RTO_ERR);
6154 	}
6155 	/*
6156 	 * Recalculate the transmit timer / rtt.
6157 	 *
6158 	 * Some boxes send broken timestamp replies during the SYN+ACK
6159 	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
6160 	 * and blow up the retransmit timer.
6161 	 */
6162 	acked = BYTES_THIS_ACK(tp, th);
6163 
6164 #ifdef TCP_HHOOK
6165 	/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
6166 	hhook_run_tcp_est_in(tp, th, to);
6167 #endif
6168 
6169 	TCPSTAT_ADD(tcps_rcvackpack, nsegs);
6170 	TCPSTAT_ADD(tcps_rcvackbyte, acked);
6171 	sbdrop(&so->so_snd, acked);
6172 	/*
6173 	 * Let the congestion control algorithm update congestion control
6174 	 * related information. This typically means increasing the
6175 	 * congestion window.
6176 	 */
6177 	rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
6178 
6179 	tp->snd_una = th->th_ack;
6180 	if (tp->snd_wnd < ctf_outstanding(tp)) {
6181 		/* The peer collapsed the window */
6182 		rack_collapsed_window(rack);
6183 	} else if (rack->rc_has_collapsed)
6184 		rack_un_collapse_window(rack);
6185 
6186 	/*
6187 	 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
6188 	 */
6189 	tp->snd_wl2 = th->th_ack;
6190 	tp->t_dupacks = 0;
6191 	m_freem(m);
6192 	/* ND6_HINT(tp);	 *//* Some progress has been made. */
6193 
6194 	/*
6195 	 * If all outstanding data are acked, stop retransmit timer,
6196 	 * otherwise restart timer using current (possibly backed-off)
6197 	 * value. If process is waiting for space, wakeup/selwakeup/signal.
6198 	 * If data are ready to send, let tcp_output decide between more
6199 	 * output or persist.
6200 	 */
6201 #ifdef TCPDEBUG
6202 	if (so->so_options & SO_DEBUG)
6203 		tcp_trace(TA_INPUT, ostate, tp,
6204 		    (void *)tcp_saveipgen,
6205 		    &tcp_savetcp, 0);
6206 #endif
6207 	if (tp->snd_una == tp->snd_max) {
6208 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
6209 		if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
6210 			tp->t_acktime = 0;
6211 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
6212 	}
6213 	/* Wake up the socket if we have room to write more */
6214 	sowwakeup(so);
6215 	if (sbavail(&so->so_snd)) {
6216 		rack->r_wanted_output++;
6217 	}
6218 	return (1);
6219 }
6220 
6221 /*
6222  * Return value of 1, the TCB is unlocked and most
6223  * likely gone, return value of 0, the TCP is still
6224  * locked.
6225  */
6226 static int
6227 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
6228     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6229     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6230 {
6231 	int32_t ret_val = 0;
6232 	int32_t todrop;
6233 	int32_t ourfinisacked = 0;
6234 	struct tcp_rack *rack;
6235 
6236 	ctf_calc_rwin(so, tp);
6237 	/*
6238 	 * If the state is SYN_SENT: if seg contains an ACK, but not for our
6239 	 * SYN, drop the input. if seg contains a RST, then drop the
6240 	 * connection. if seg does not contain SYN, then drop it. Otherwise
6241 	 * this is an acceptable SYN segment initialize tp->rcv_nxt and
6242 	 * tp->irs if seg contains ack then advance tp->snd_una if seg
6243 	 * contains an ECE and ECN support is enabled, the stream is ECN
6244 	 * capable. if SYN has been acked change to ESTABLISHED else
6245 	 * SYN_RCVD state arrange for segment to be acked (eventually)
6246 	 * continue processing rest of data/controls, beginning with URG
6247 	 */
6248 	if ((thflags & TH_ACK) &&
6249 	    (SEQ_LEQ(th->th_ack, tp->iss) ||
6250 	    SEQ_GT(th->th_ack, tp->snd_max))) {
6251 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6252 		return (1);
6253 	}
6254 	if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
6255 		TCP_PROBE5(connect__refused, NULL, tp,
6256 		    mtod(m, const char *), tp, th);
6257 		tp = tcp_drop(tp, ECONNREFUSED);
6258 		ctf_do_drop(m, tp);
6259 		return (1);
6260 	}
6261 	if (thflags & TH_RST) {
6262 		ctf_do_drop(m, tp);
6263 		return (1);
6264 	}
6265 	if (!(thflags & TH_SYN)) {
6266 		ctf_do_drop(m, tp);
6267 		return (1);
6268 	}
6269 	tp->irs = th->th_seq;
6270 	tcp_rcvseqinit(tp);
6271 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6272 	if (thflags & TH_ACK) {
6273 		int tfo_partial = 0;
6274 
6275 		TCPSTAT_INC(tcps_connects);
6276 		soisconnected(so);
6277 #ifdef MAC
6278 		mac_socketpeer_set_from_mbuf(m, so);
6279 #endif
6280 		/* Do window scaling on this connection? */
6281 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
6282 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
6283 			tp->rcv_scale = tp->request_r_scale;
6284 		}
6285 		tp->rcv_adv += min(tp->rcv_wnd,
6286 		    TCP_MAXWIN << tp->rcv_scale);
6287 		/*
6288 		 * If not all the data that was sent in the TFO SYN
6289 		 * has been acked, resend the remainder right away.
6290 		 */
6291 		if (IS_FASTOPEN(tp->t_flags) &&
6292 		    (tp->snd_una != tp->snd_max)) {
6293 			tp->snd_nxt = th->th_ack;
6294 			tfo_partial = 1;
6295 		}
6296 		/*
6297 		 * If there's data, delay ACK; if there's also a FIN, ACKNOW
6298 		 * will be turned on later.
6299 		 */
6300 		if (DELAY_ACK(tp, tlen) && tlen != 0 && (tfo_partial == 0)) {
6301 			rack_timer_cancel(tp, rack,
6302 					  rack->r_ctl.rc_rcvtime, __LINE__);
6303 			tp->t_flags |= TF_DELACK;
6304 		} else {
6305 			rack->r_wanted_output++;
6306 			tp->t_flags |= TF_ACKNOW;
6307 		}
6308 
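		/*
		 * A SYN|ACK carrying ECE without CWR is the RFC 3168
		 * handshake reply telling us the peer is ECN-capable.
		 */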
6309 		if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
6310 		    V_tcp_do_ecn) {
6311 			tp->t_flags |= TF_ECN_PERMIT;
6312 			TCPSTAT_INC(tcps_ecn_shs);
6313 		}
6314 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
6315 			/*
6316 			 * We advance snd_una for the
6317 			 * fast open case. If th_ack is
6318 			 * acknowledging data beyond
6319 			 * snd_una we can't just call
6320 			 * ack-processing since the
6321 			 * data stream in our send-map
6322 			 * will start at snd_una + 1 (one
6323 			 * beyond the SYN). If it's just
6324 			 * equal we don't need to do that
6325 			 * and there is no send_map.
6326 			 */
6327 			tp->snd_una++;
6328 		}
6329 		/*
6330 		 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
6331 		 * SYN_SENT  --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
6332 		 */
6333 		tp->t_starttime = ticks;
6334 		if (tp->t_flags & TF_NEEDFIN) {
6335 			tcp_state_change(tp, TCPS_FIN_WAIT_1);
6336 			tp->t_flags &= ~TF_NEEDFIN;
6337 			thflags &= ~TH_SYN;
6338 		} else {
6339 			tcp_state_change(tp, TCPS_ESTABLISHED);
6340 			TCP_PROBE5(connect__established, NULL, tp,
6341 			    mtod(m, const char *), tp, th);
6342 			cc_conn_init(tp);
6343 		}
6344 	} else {
6345 		/*
6346 		 * Received initial SYN in SYN-SENT[*] state => simultaneous
6347 		 * open.  If segment contains CC option and there is a
6348 		 * cached CC, apply TAO test. If it succeeds, connection is *
6349 		 * half-synchronized. Otherwise, do 3-way handshake:
6350 		 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
6351 		 * there was no CC option, clear cached CC value.
6352 		 */
6353 		tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
6354 		tcp_state_change(tp, TCPS_SYN_RECEIVED);
6355 	}
6356 	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6357 	INP_WLOCK_ASSERT(tp->t_inpcb);
6358 	/*
6359 	 * Advance th->th_seq to correspond to first data byte. If data,
6360 	 * trim to stay within window, dropping FIN if necessary.
6361 	 */
6362 	th->th_seq++;
6363 	if (tlen > tp->rcv_wnd) {
6364 		todrop = tlen - tp->rcv_wnd;
6365 		m_adj(m, -todrop);
6366 		tlen = tp->rcv_wnd;
6367 		thflags &= ~TH_FIN;
6368 		TCPSTAT_INC(tcps_rcvpackafterwin);
6369 		TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
6370 	}
6371 	tp->snd_wl1 = th->th_seq - 1;
6372 	tp->rcv_up = th->th_seq;
6373 	/*
6374 	 * Client side of transaction: already sent SYN and data. If the
6375 	 * remote host used T/TCP to validate the SYN, our data will be
6376 	 * ACK'd; if so, enter normal data segment processing in the middle
6377 	 * of step 5, ack processing. Otherwise, goto step 6.
6378 	 */
6379 	if (thflags & TH_ACK) {
6380 		/* For syn-sent we need to possibly update the rtt */
6381 		if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
6382 			uint32_t t;
6383 
6384 			t = tcp_ts_getticks() - to->to_tsecr;
6385 			if (!tp->t_rttlow || tp->t_rttlow > t)
6386 				tp->t_rttlow = t;
6387 			tcp_rack_xmit_timer(rack, t + 1);
6388 			tcp_rack_xmit_timer_commit(rack, tp);
6389 		}
6390 		if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
6391 			return (ret_val);
6392 		/* We may have changed to FIN_WAIT_1 above */
6393 		if (tp->t_state == TCPS_FIN_WAIT_1) {
6394 			/*
6395 			 * In FIN_WAIT_1 STATE, in addition to the processing
6396 			 * for the ESTABLISHED state, if our FIN is now
6397 			 * acknowledged then enter FIN_WAIT_2.
6398 			 */
6399 			if (ourfinisacked) {
6400 				/*
6401 				 * If we can't receive any more data, then
6402 				 * closing user can proceed. Starting the
6403 				 * timer is contrary to the specification,
6404 				 * but if we don't get a FIN we'll hang
6405 				 * forever.
6406 				 *
6407 				 * XXXjl: we should release the tp also, and
6408 				 * use a compressed state.
6409 				 */
6410 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6411 					soisdisconnected(so);
6412 					tcp_timer_activate(tp, TT_2MSL,
6413 					    (tcp_fast_finwait2_recycle ?
6414 					    tcp_finwait2_timeout :
6415 					    TP_MAXIDLE(tp)));
6416 				}
6417 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
6418 			}
6419 		}
6420 	}
6421 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6422 	   tiwin, thflags, nxt_pkt));
6423 }
6424 
6425 /*
6426  * Return value of 1, the TCB is unlocked and most
6427  * likely gone, return value of 0, the TCP is still
6428  * locked.
6429  */
6430 static int
6431 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
6432     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6433     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6434 {
6435 	struct tcp_rack *rack;
6436 	int32_t ret_val = 0;
6437 	int32_t ourfinisacked = 0;
6438 
6439 	ctf_calc_rwin(so, tp);
6440 	if ((thflags & TH_ACK) &&
6441 	    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
6442 	    SEQ_GT(th->th_ack, tp->snd_max))) {
6443 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6444 		return (1);
6445 	}
6446 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6447 	if (IS_FASTOPEN(tp->t_flags)) {
6448 		/*
6449 		 * When a TFO connection is in SYN_RECEIVED, the
6450 		 * only valid packets are the initial SYN, a
6451 		 * retransmit/copy of the initial SYN (possibly with
6452 		 * a subset of the original data), a valid ACK, a
6453 		 * FIN, or a RST.
6454 		 */
6455 		if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
6456 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6457 			return (1);
6458 		} else if (thflags & TH_SYN) {
6459 			/* non-initial SYN is ignored */
6460 			if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
6461 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
6462 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
6463 				ctf_do_drop(m, NULL);
6464 				return (0);
6465 			}
6466 		} else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
6467 			ctf_do_drop(m, NULL);
6468 			return (0);
6469 		}
6470 	}
6471 	if ((thflags & TH_RST) ||
6472 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
6473 		return (ctf_process_rst(m, th, so, tp));
6474 	/*
6475 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6476 	 * it's less than ts_recent, drop it.
6477 	 */
6478 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6479 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6480 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
6481 			return (ret_val);
6482 	}
6483 	/*
6484 	 * In the SYN-RECEIVED state, validate that the packet belongs to
6485 	 * this connection before trimming the data to fit the receive
6486 	 * window.  Check the sequence number versus IRS since we know the
6487 	 * sequence numbers haven't wrapped.  This is a partial fix for the
6488 	 * "LAND" DoS attack.
6489 	 */
6490 	if (SEQ_LT(th->th_seq, tp->irs)) {
6491 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6492 		return (1);
6493 	}
6494 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6495 		return (ret_val);
6496 	}
6497 	/*
6498 	 * If last ACK falls within this segment's sequence numbers, record
6499 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
6500 	 * from the latest proposal of the tcplw@cray.com list (Braden
6501 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
6502 	 * with our earlier PAWS tests, so this check should be solely
6503 	 * predicated on the sequence space of this segment. 3) That we
6504 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6505 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6506 	 * SEG.Len. This modified check allows us to overcome RFC1323's
6507 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6508 	 * p.869. In such cases, we can still calculate the RTT correctly
6509 	 * when RCV.NXT == Last.ACK.Sent.
6510 	 */
6511 	if ((to->to_flags & TOF_TS) != 0 &&
6512 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6513 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6514 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6515 		tp->ts_recent_age = tcp_ts_getticks();
6516 		tp->ts_recent = to->to_tsval;
6517 	}
6518 	tp->snd_wnd = tiwin;
6519 	/*
6520 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6521 	 * is on (half-synchronized state), then queue data for later
6522 	 * processing; else drop segment and return.
6523 	 */
6524 	if ((thflags & TH_ACK) == 0) {
6525 		if (IS_FASTOPEN(tp->t_flags)) {
6526 			cc_conn_init(tp);
6527 		}
6528 		return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6529 		    tiwin, thflags, nxt_pkt));
6530 	}
6531 	TCPSTAT_INC(tcps_connects);
6532 	soisconnected(so);
6533 	/* Do window scaling? */
6534 	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
6535 	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
6536 		tp->rcv_scale = tp->request_r_scale;
6537 	}
6538 	/*
6539 	 * Make transitions: SYN-RECEIVED  -> ESTABLISHED SYN-RECEIVED* ->
6540 	 * FIN-WAIT-1
6541 	 */
6542 	tp->t_starttime = ticks;
6543 	if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
6544 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
6545 		tp->t_tfo_pending = NULL;
6546 
6547 		/*
6548 		 * Account for the ACK of our SYN prior to
6549 		 * regular ACK processing below.
6550 		 */
6551 		tp->snd_una++;
6552 	}
6553 	if (tp->t_flags & TF_NEEDFIN) {
6554 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
6555 		tp->t_flags &= ~TF_NEEDFIN;
6556 	} else {
6557 		tcp_state_change(tp, TCPS_ESTABLISHED);
6558 		TCP_PROBE5(accept__established, NULL, tp,
6559 		    mtod(m, const char *), tp, th);
6560 		/*
6561 		 * TFO connections call cc_conn_init() during SYN
6562 		 * processing.  Calling it again here for such connections
6563 		 * is not harmless as it would undo the snd_cwnd reduction
6564 		 * that occurs when a TFO SYN|ACK is retransmitted.
6565 		 */
6566 		if (!IS_FASTOPEN(tp->t_flags))
6567 			cc_conn_init(tp);
6568 	}
6569 	/*
6570 	 * If segment contains data or ACK, will call tcp_reass() later; if
6571 	 * not, do so now to pass queued data to user.
6572 	 */
6573 	if (tlen == 0 && (thflags & TH_FIN) == 0)
6574 		(void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
6575 		    (struct mbuf *)0);
6576 	tp->snd_wl1 = th->th_seq - 1;
6577 	/* For syn-recv we need to possibly update the rtt */
6578 	if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
6579 		uint32_t t;
6580 
6581 		t = tcp_ts_getticks() - to->to_tsecr;
6582 		if (!tp->t_rttlow || tp->t_rttlow > t)
6583 			tp->t_rttlow = t;
6584 		tcp_rack_xmit_timer(rack, t + 1);
6585 		tcp_rack_xmit_timer_commit(rack, tp);
6586 	}
6587 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6588 		return (ret_val);
6589 	}
6590 	if (tp->t_state == TCPS_FIN_WAIT_1) {
6591 		/* We could have gone to FIN_WAIT_1 (or EST) above */
6592 		/*
6593 		 * In FIN_WAIT_1 STATE, in addition to the processing for the
6594 		 * ESTABLISHED state, if our FIN is now acknowledged then
6595 		 * enter FIN_WAIT_2.
6596 		 */
6597 		if (ourfinisacked) {
6598 			/*
6599 			 * If we can't receive any more data, then closing
6600 			 * user can proceed. Starting the timer is contrary
6601 			 * to the specification, but if we don't get a FIN
6602 			 * we'll hang forever.
6603 			 *
6604 			 * XXXjl: we should release the tp also, and use a
6605 			 * compressed state.
6606 			 */
6607 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6608 				soisdisconnected(so);
6609 				tcp_timer_activate(tp, TT_2MSL,
6610 				    (tcp_fast_finwait2_recycle ?
6611 				    tcp_finwait2_timeout :
6612 				    TP_MAXIDLE(tp)));
6613 			}
6614 			tcp_state_change(tp, TCPS_FIN_WAIT_2);
6615 		}
6616 	}
6617 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6618 	    tiwin, thflags, nxt_pkt));
6619 }
6620 
6621 /*
6622  * Return value of 1, the TCB is unlocked and most
6623  * likely gone, return value of 0, the TCP is still
6624  * locked.
6625  */
6626 static int
6627 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
6628     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6629     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6630 {
6631 	int32_t ret_val = 0;
6632 
6633 	/*
6634 	 * Header prediction: check for the two common cases of a
6635 	 * uni-directional data xfer.  If the packet has no control flags,
6636 	 * is in-sequence, the window didn't change and we're not
6637 	 * retransmitting, it's a candidate.  If the length is zero and the
6638 	 * ack moved forward, we're the sender side of the xfer.  Just free
6639 	 * the data acked & wake any higher level process that was blocked
6640 	 * waiting for space.  If the length is non-zero and the ack didn't
6641 	 * move, we're the receiver side.  If we're getting packets in-order
6642 	 * (the reassembly queue is empty), add the data to the socket
6643 	 * buffer and note that we need a delayed ack. Make sure that the
6644 	 * hidden state-flags are also off. Since we check for
6645 	 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
6646 	 */
6647 	if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
6648 	    __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) &&
6649 	    __predict_true(SEGQ_EMPTY(tp)) &&
6650 	    __predict_true(th->th_seq == tp->rcv_nxt)) {
6651 		struct tcp_rack *rack;
6652 
6653 		rack = (struct tcp_rack *)tp->t_fb_ptr;
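		/*
		 * A zero-length segment is a pure ACK (we are the
		 * sending side); otherwise it is in-order new data
		 * (we are the receiving side).
		 */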
6654 		if (tlen == 0) {
6655 			if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
6656 			    tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
6657 				return (0);
6658 			}
6659 		} else {
6660 			if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
6661 			    tiwin, nxt_pkt)) {
6662 				return (0);
6663 			}
6664 		}
6665 	}
6666 	ctf_calc_rwin(so, tp);
6667 
6668 	if ((thflags & TH_RST) ||
6669 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
6670 		return (ctf_process_rst(m, th, so, tp));
6671 
6672 	/*
6673 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6674 	 * synchronized state.
6675 	 */
6676 	if (thflags & TH_SYN) {
6677 		ctf_challenge_ack(m, th, tp, &ret_val);
6678 		return (ret_val);
6679 	}
6680 	/*
6681 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6682 	 * it's less than ts_recent, drop it.
6683 	 */
6684 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6685 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6686 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
6687 			return (ret_val);
6688 	}
6689 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6690 		return (ret_val);
6691 	}
6692 	/*
6693 	 * If last ACK falls within this segment's sequence numbers, record
6694 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
6695 	 * from the latest proposal of the tcplw@cray.com list (Braden
6696 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
6697 	 * with our earlier PAWS tests, so this check should be solely
6698 	 * predicated on the sequence space of this segment. 3) That we
6699 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6700 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6701 	 * SEG.Len. This modified check allows us to overcome RFC1323's
6702 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6703 	 * p.869. In such cases, we can still calculate the RTT correctly
6704 	 * when RCV.NXT == Last.ACK.Sent.
6705 	 */
6706 	if ((to->to_flags & TOF_TS) != 0 &&
6707 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6708 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6709 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6710 		tp->ts_recent_age = tcp_ts_getticks();
6711 		tp->ts_recent = to->to_tsval;
6712 	}
6713 	/*
6714 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6715 	 * is on (half-synchronized state), then queue data for later
6716 	 * processing; else drop segment and return.
6717 	 */
6718 	if ((thflags & TH_ACK) == 0) {
6719 		if (tp->t_flags & TF_NEEDSYN) {
6721 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6722 			    tiwin, thflags, nxt_pkt));
6724 		} else if (tp->t_flags & TF_ACKNOW) {
6725 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6726 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
6727 			return (ret_val);
6728 		} else {
6729 			ctf_do_drop(m, NULL);
6730 			return (0);
6731 		}
6732 	}
6733 	/*
6734 	 * Ack processing.
6735 	 */
6736 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
6737 		return (ret_val);
6738 	}
6739 	if (sbavail(&so->so_snd)) {
6740 		if (rack_progress_timeout_check(tp)) {
6741 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6742 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6743 			return (1);
6744 		}
6745 	}
6746 	/* State changes only happen in rack_process_data() */
6747 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6748 	    tiwin, thflags, nxt_pkt));
6749 }
6750 
6751 /*
6752  * Return value of 1, the TCB is unlocked and most
6753  * likely gone, return value of 0, the TCP is still
6754  * locked.
6755  */
6756 static int
6757 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
6758     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6759     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6760 {
6761 	int32_t ret_val = 0;
6762 
6763 	ctf_calc_rwin(so, tp);
6764 	if ((thflags & TH_RST) ||
6765 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
6766 		return (ctf_process_rst(m, th, so, tp));
6767 	/*
6768 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6769 	 * synchronized state.
6770 	 */
6771 	if (thflags & TH_SYN) {
6772 		ctf_challenge_ack(m, th, tp, &ret_val);
6773 		return (ret_val);
6774 	}
6775 	/*
6776 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6777 	 * it's less than ts_recent, drop it.
6778 	 */
6779 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6780 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6781 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
6782 			return (ret_val);
6783 	}
6784 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6785 		return (ret_val);
6786 	}
6787 	/*
6788 	 * If last ACK falls within this segment's sequence numbers, record
6789 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
6790 	 * from the latest proposal of the tcplw@cray.com list (Braden
6791 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
6792 	 * with our earlier PAWS tests, so this check should be solely
6793 	 * predicated on the sequence space of this segment. 3) That we
6794 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6795 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6796 	 * SEG.Len. This modified check allows us to overcome RFC1323's
6797 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6798 	 * p.869. In such cases, we can still calculate the RTT correctly
6799 	 * when RCV.NXT == Last.ACK.Sent.
6800 	 */
6801 	if ((to->to_flags & TOF_TS) != 0 &&
6802 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6803 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6804 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6805 		tp->ts_recent_age = tcp_ts_getticks();
6806 		tp->ts_recent = to->to_tsval;
6807 	}
6808 	/*
6809 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6810 	 * is on (half-synchronized state), then queue data for later
6811 	 * processing; else drop segment and return.
6812 	 */
6813 	if ((thflags & TH_ACK) == 0) {
6814 		if (tp->t_flags & TF_NEEDSYN) {
6815 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6816 			    tiwin, thflags, nxt_pkt));
6818 		} else if (tp->t_flags & TF_ACKNOW) {
6819 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6820 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
6821 			return (ret_val);
6822 		} else {
6823 			ctf_do_drop(m, NULL);
6824 			return (0);
6825 		}
6826 	}
6827 	/*
6828 	 * Ack processing.
6829 	 */
6830 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
6831 		return (ret_val);
6832 	}
6833 	if (sbavail(&so->so_snd)) {
6834 		if (rack_progress_timeout_check(tp)) {
6835 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6836 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6837 			return (1);
6838 		}
6839 	}
6840 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6841 	    tiwin, thflags, nxt_pkt));
6842 }
6843 
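/*
 * Handle data arriving on a connection after the user has closed.
 * Returns 1 if the connection was torn down (the tcb is gone), or 0
 * if the data is to be ignored pending a follow-up reset.
 */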
6844 static int
6845 rack_check_data_after_close(struct mbuf *m,
6846     struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
6847 {
6848 	struct tcp_rack *rack;
6849 
6850 	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6851 	rack = (struct tcp_rack *)tp->t_fb_ptr;
6852 	if (rack->rc_allow_data_af_clo == 0) {
6853 	close_now:
6854 		tp = tcp_close(tp);
6855 		TCPSTAT_INC(tcps_rcvafterclose);
6856 		ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
6857 		return (1);
6858 	}
6859 	if (sbavail(&so->so_snd) == 0)
6860 		goto close_now;
6861 	/* OK, we allow data that is ignored, expecting a follow-up reset */
6862 	tp->rcv_nxt = th->th_seq + *tlen;
6863 	tp->t_flags2 |= TF2_DROP_AF_DATA;
6864 	rack->r_wanted_output = 1;
6865 	*tlen = 0;
6866 	return (0);
6867 }
6868 
6869 /*
6870  * Return value of 1, the TCB is unlocked and most
6871  * likely gone, return value of 0, the TCP is still
6872  * locked.
6873  */
6874 static int
6875 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
6876     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6877     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6878 {
6879 	int32_t ret_val = 0;
6880 	int32_t ourfinisacked = 0;
6881 
6882 	ctf_calc_rwin(so, tp);
6883 
6884 	if ((thflags & TH_RST) ||
6885 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
6886 		return (ctf_process_rst(m, th, so, tp));
6887 	/*
6888 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6889 	 * synchronized state.
6890 	 */
6891 	if (thflags & TH_SYN) {
6892 		ctf_challenge_ack(m, th, tp, &ret_val);
6893 		return (ret_val);
6894 	}
6895 	/*
6896 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6897 	 * it's less than ts_recent, drop it.
6898 	 */
6899 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6900 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6901 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
6902 			return (ret_val);
6903 	}
6904 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
6905 		return (ret_val);
6906 	}
6907 	/*
6908 	 * If new data are received on a connection after the user processes
6909 	 * are gone, then RST the other end.
6910 	 */
6911 	if ((so->so_state & SS_NOFDREF) && tlen) {
6912 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
6913 			return (1);
6914 	}
6915 	/*
6916 	 * If last ACK falls within this segment's sequence numbers, record
6917 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
6918 	 * from the latest proposal of the tcplw@cray.com list (Braden
6919 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
6920 	 * with our earlier PAWS tests, so this check should be solely
6921 	 * predicated on the sequence space of this segment. 3) That we
6922 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6923 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6924 	 * SEG.Len. This modified check allows us to overcome RFC1323's
6925 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6926 	 * p.869. In such cases, we can still calculate the RTT correctly
6927 	 * when RCV.NXT == Last.ACK.Sent.
6928 	 */
6929 	if ((to->to_flags & TOF_TS) != 0 &&
6930 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6931 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6932 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6933 		tp->ts_recent_age = tcp_ts_getticks();
6934 		tp->ts_recent = to->to_tsval;
6935 	}
6936 	/*
6937 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6938 	 * is on (half-synchronized state), then queue data for later
6939 	 * processing; else drop segment and return.
6940 	 */
6941 	if ((thflags & TH_ACK) == 0) {
6942 		if (tp->t_flags & TF_NEEDSYN) {
6943 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6944 			    tiwin, thflags, nxt_pkt));
6945 		} else if (tp->t_flags & TF_ACKNOW) {
6946 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
6947 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
6948 			return (ret_val);
6949 		} else {
6950 			ctf_do_drop(m, NULL);
6951 			return (0);
6952 		}
6953 	}
6954 	/*
6955 	 * Ack processing.
6956 	 */
6957 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6958 		return (ret_val);
6959 	}
6960 	if (ourfinisacked) {
6961 		/*
6962 		 * If we can't receive any more data, then closing user can
6963 		 * proceed. Starting the timer is contrary to the
6964 		 * specification, but if we don't get a FIN we'll hang
6965 		 * forever.
6966 		 *
6967 		 * XXXjl: we should release the tp also, and use a
6968 		 * compressed state.
6969 		 */
6970 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6971 			soisdisconnected(so);
6972 			tcp_timer_activate(tp, TT_2MSL,
6973 			    (tcp_fast_finwait2_recycle ?
6974 			    tcp_finwait2_timeout :
6975 			    TP_MAXIDLE(tp)));
6976 		}
6977 		tcp_state_change(tp, TCPS_FIN_WAIT_2);
6978 	}
6979 	if (sbavail(&so->so_snd)) {
6980 		if (rack_progress_timeout_check(tp)) {
6981 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6982 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
6983 			return (1);
6984 		}
6985 	}
6986 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6987 	    tiwin, thflags, nxt_pkt));
6988 }
6989 
6990 /*
6991  * Return value of 1, the TCB is unlocked and most
6992  * likely gone, return value of 0, the TCP is still
6993  * locked.
6994  */
6995 static int
6996 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
6997     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6998     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6999 {
7000 	int32_t ret_val = 0;
7001 	int32_t ourfinisacked = 0;
7002 
7003 	ctf_calc_rwin(so, tp);
7004 
7005 	if ((thflags & TH_RST) ||
7006 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
7007 		return (ctf_process_rst(m, th, so, tp));
7008 	/*
7009 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
7010 	 * synchronized state.
7011 	 */
7012 	if (thflags & TH_SYN) {
7013 		ctf_challenge_ack(m, th, tp, &ret_val);
7014 		return (ret_val);
7015 	}
7016 	/*
7017 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
7018 	 * it's less than ts_recent, drop it.
7019 	 */
7020 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
7021 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
7022 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
7023 			return (ret_val);
7024 	}
7025 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
7026 		return (ret_val);
7027 	}
7028 	/*
7029 	 * If new data are received on a connection after the user processes
7030 	 * are gone, then RST the other end.
7031 	 */
7032 	if ((so->so_state & SS_NOFDREF) && tlen) {
7033 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
7034 			return (1);
7035 	}
7036 	/*
7037 	 * If last ACK falls within this segment's sequence numbers, record
7038 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
7039 	 * from the latest proposal of the tcplw@cray.com list (Braden
7040 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
7041 	 * with our earlier PAWS tests, so this check should be solely
7042 	 * predicated on the sequence space of this segment. 3) That we
7043 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
7044 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
7045 	 * SEG.Len. This modified check allows us to overcome RFC1323's
7046 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
7047 	 * p.869. In such cases, we can still calculate the RTT correctly
7048 	 * when RCV.NXT == Last.ACK.Sent.
7049 	 */
7050 	if ((to->to_flags & TOF_TS) != 0 &&
7051 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
7052 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
7053 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
7054 		tp->ts_recent_age = tcp_ts_getticks();
7055 		tp->ts_recent = to->to_tsval;
7056 	}
7057 	/*
7058 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
7059 	 * is on (half-synchronized state), then queue data for later
7060 	 * processing; else drop segment and return.
7061 	 */
7062 	if ((thflags & TH_ACK) == 0) {
7063 		if (tp->t_flags & TF_NEEDSYN) {
7064 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7065 			    tiwin, thflags, nxt_pkt));
7066 		} else if (tp->t_flags & TF_ACKNOW) {
7067 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
7068 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
7069 			return (ret_val);
7070 		} else {
7071 			ctf_do_drop(m, NULL);
7072 			return (0);
7073 		}
7074 	}
7075 	/*
7076 	 * Ack processing.
7077 	 */
7078 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
7079 		return (ret_val);
7080 	}
7081 	if (ourfinisacked) {
7082 		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
7083 		tcp_twstart(tp);
7084 		m_freem(m);
7085 		return (1);
7086 	}
7087 	if (sbavail(&so->so_snd)) {
7088 		if (rack_progress_timeout_check(tp)) {
7089 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
7090 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
7091 			return (1);
7092 		}
7093 	}
7094 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7095 	    tiwin, thflags, nxt_pkt));
7096 }
7097 
7098 /*
7099  * Return value of 1, the TCB is unlocked and most
7100  * likely gone, return value of 0, the TCP is still
7101  * locked.
7102  */
7103 static int
7104 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
7105     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
7106     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
7107 {
7108 	int32_t ret_val = 0;
7109 	int32_t ourfinisacked = 0;
7110 
7111 	ctf_calc_rwin(so, tp);
7112 
7113 	if ((thflags & TH_RST) ||
7114 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
7115 		return (ctf_process_rst(m, th, so, tp));
7116 	/*
7117 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
7118 	 * synchronized state.
7119 	 */
7120 	if (thflags & TH_SYN) {
7121 		ctf_challenge_ack(m, th, tp, &ret_val);
7122 		return (ret_val);
7123 	}
7124 	/*
7125 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
7126 	 * it's less than ts_recent, drop it.
7127 	 */
7128 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
7129 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
7130 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
7131 			return (ret_val);
7132 	}
7133 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
7134 		return (ret_val);
7135 	}
7136 	/*
7137 	 * If new data are received on a connection after the user processes
7138 	 * are gone, then RST the other end.
7139 	 */
7140 	if ((so->so_state & SS_NOFDREF) && tlen) {
7141 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
7142 			return (1);
7143 	}
7144 	/*
7145 	 * If last ACK falls within this segment's sequence numbers, record
7146 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
7147 	 * from the latest proposal of the tcplw@cray.com list (Braden
7148 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
7149 	 * with our earlier PAWS tests, so this check should be solely
7150 	 * predicated on the sequence space of this segment. 3) That we
7151 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
7152 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
7153 	 * SEG.Len. This modified check allows us to overcome RFC1323's
7154 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
7155 	 * p.869. In such cases, we can still calculate the RTT correctly
7156 	 * when RCV.NXT == Last.ACK.Sent.
7157 	 */
7158 	if ((to->to_flags & TOF_TS) != 0 &&
7159 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
7160 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
7161 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
7162 		tp->ts_recent_age = tcp_ts_getticks();
7163 		tp->ts_recent = to->to_tsval;
7164 	}
7165 	/*
7166 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
7167 	 * is on (half-synchronized state), then queue data for later
7168 	 * processing; else drop segment and return.
7169 	 */
7170 	if ((thflags & TH_ACK) == 0) {
7171 		if (tp->t_flags & TF_NEEDSYN) {
7172 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7173 			    tiwin, thflags, nxt_pkt));
7174 		} else if (tp->t_flags & TF_ACKNOW) {
7175 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
7176 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
7177 			return (ret_val);
7178 		} else {
7179 			ctf_do_drop(m, NULL);
7180 			return (0);
7181 		}
7182 	}
7183 	/*
7184 	 * case TCPS_LAST_ACK: Ack processing.
7185 	 */
7186 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
7187 		return (ret_val);
7188 	}
7189 	if (ourfinisacked) {
7190 		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
7191 		tp = tcp_close(tp);
7192 		ctf_do_drop(m, tp);
7193 		return (1);
7194 	}
7195 	if (sbavail(&so->so_snd)) {
7196 		if (rack_progress_timeout_check(tp)) {
7197 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
7198 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
7199 			return (1);
7200 		}
7201 	}
7202 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7203 	    tiwin, thflags, nxt_pkt));
7204 }
7205 
7206 
7207 /*
7208  * Return value of 1, the TCB is unlocked and most
7209  * likely gone, return value of 0, the TCP is still
7210  * locked.
7211  */
7212 static int
7213 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
7214     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
7215     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
7216 {
7217 	int32_t ret_val = 0;
7218 	int32_t ourfinisacked = 0;
7219 
7220 	ctf_calc_rwin(so, tp);
7221 
7223 	if ((thflags & TH_RST) ||
7224 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
7225 		return (ctf_process_rst(m, th, so, tp));
7226 	/*
7227 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
7228 	 * synchronized state.
7229 	 */
7230 	if (thflags & TH_SYN) {
7231 		ctf_challenge_ack(m, th, tp, &ret_val);
7232 		return (ret_val);
7233 	}
7234 	/*
7235 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
7236 	 * it's less than ts_recent, drop it.
7237 	 */
7238 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
7239 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
7240 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
7241 			return (ret_val);
7242 	}
7243 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
7244 		return (ret_val);
7245 	}
7246 	/*
7247 	 * If new data are received on a connection after the user processes
7248 	 * are gone, then RST the other end.
7249 	 */
7250 	if ((so->so_state & SS_NOFDREF) &&
7251 	    tlen) {
7252 		if (rack_check_data_after_close(m, tp, &tlen, th, so))
7253 			return (1);
7254 	}
7255 	/*
7256 	 * If last ACK falls within this segment's sequence numbers, record
7257 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
7258 	 * from the latest proposal of the tcplw@cray.com list (Braden
7259 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
7260 	 * with our earlier PAWS tests, so this check should be solely
7261 	 * predicated on the sequence space of this segment. 3) That we
7262 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
7263 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
7264 	 * SEG.Len. This modified check allows us to overcome RFC1323's
7265 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
7266 	 * p.869. In such cases, we can still calculate the RTT correctly
7267 	 * when RCV.NXT == Last.ACK.Sent.
7268 	 */
7269 	if ((to->to_flags & TOF_TS) != 0 &&
7270 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
7271 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
7272 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
7273 		tp->ts_recent_age = tcp_ts_getticks();
7274 		tp->ts_recent = to->to_tsval;
7275 	}
7276 	/*
7277 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
7278 	 * is on (half-synchronized state), then queue data for later
7279 	 * processing; else drop segment and return.
7280 	 */
7281 	if ((thflags & TH_ACK) == 0) {
7282 		if (tp->t_flags & TF_NEEDSYN) {
7283 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7284 			    tiwin, thflags, nxt_pkt));
7285 		} else if (tp->t_flags & TF_ACKNOW) {
7286 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
7287 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
7288 			return (ret_val);
7289 		} else {
7290 			ctf_do_drop(m, NULL);
7291 			return (0);
7292 		}
7293 	}
7294 	/*
7295 	 * Ack processing.
7296 	 */
7297 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
7298 		return (ret_val);
7299 	}
7300 	if (sbavail(&so->so_snd)) {
7301 		if (rack_progress_timeout_check(tp)) {
7302 			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
7303 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
7304 			return (1);
7305 		}
7306 	}
7307 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
7308 	    tiwin, thflags, nxt_pkt));
7309 }
7310 
7311 
7312 static void inline
7313 rack_clear_rate_sample(struct tcp_rack *rack)
7314 {
7315 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
7316 	rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
7317 	rack->r_ctl.rack_rs.rs_rtt_tot = 0;
7318 }
7319 
7320 static void
7321 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack)
7322 {
7323 	uint32_t tls_seg = 0;
7324 
7325 #ifdef KERN_TLS
7326 	if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
7327 		tls_seg = ctf_get_opt_tls_size(rack->rc_inp->inp_socket, rack->rc_tp->snd_wnd);
7328 		rack->r_ctl.rc_pace_min_segs = tls_seg;
7329 	} else
7330 #endif
7331 		rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
7332 	rack->r_ctl.rc_pace_max_segs = ctf_fixed_maxseg(tp) * rack->rc_pace_max_segs;
7333 	if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES)
7334 		rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
7335 #ifdef KERN_TLS
7336 	if (tls_seg != 0) {
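	/*
	 * Convert the byte-based pacing limit into a count of
	 * whole TLS records, cap it at rack_hw_tls_max_seg
	 * records, and then convert back to bytes.
	 */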
7337 		if (rack_hw_tls_max_seg > 1) {
7338 			rack->r_ctl.rc_pace_max_segs /= tls_seg;
7339 			if (rack_hw_tls_max_seg < rack->r_ctl.rc_pace_max_segs)
7340 				rack->r_ctl.rc_pace_max_segs = rack_hw_tls_max_seg;
7341 		} else {
7342 			rack->r_ctl.rc_pace_max_segs = 1;
7343 		}
7344 		if (rack->r_ctl.rc_pace_max_segs == 0)
7345 			rack->r_ctl.rc_pace_max_segs = 1;
7346 		rack->r_ctl.rc_pace_max_segs *= tls_seg;
7347 	}
7348 #endif
7349 	rack_log_type_hrdwtso(tp, rack, tls_seg, rack->rc_inp->inp_socket->so_snd.sb_flags, 0, 2);
7350 }
7351 
7352 static int
7353 rack_init(struct tcpcb *tp)
7354 {
7355 	struct tcp_rack *rack = NULL;
7356 	struct rack_sendmap *insret;
7357 
7358 	tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
7359 	if (tp->t_fb_ptr == NULL) {
7360 		/*
7361 		 * We need to allocate memory but can't block to do so. The
7362 		 * INP and INP_INFO locks are held and can be recursed (this
7363 		 * happens during setup), so a scheme to drop the locks fails. :(
7364 		 *
7365 		 */
7366 		return (ENOMEM);
7367 	}
7368 	memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
7369 
7370 	rack = (struct tcp_rack *)tp->t_fb_ptr;
7371 	RB_INIT(&rack->r_ctl.rc_mtree);
7372 	TAILQ_INIT(&rack->r_ctl.rc_free);
7373 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
7374 	rack->rc_tp = tp;
7375 	if (tp->t_inpcb) {
7376 		rack->rc_inp = tp->t_inpcb;
7377 	}
7378 	tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
7379 	/* Probably not needed but let's be sure */
7380 	rack_clear_rate_sample(rack);
7381 	rack->r_cpu = 0;
7382 	rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
7383 	rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
7384 	rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
7385 	rack->rc_pace_reduce = rack_slot_reduction;
7386 	if (use_rack_cheat)
7387 		rack->use_rack_cheat = 1;
7388 	if (V_tcp_delack_enabled)
7389 		tp->t_delayed_ack = 1;
7390 	else
7391 		tp->t_delayed_ack = 0;
7392 	rack->rc_pace_max_segs = rack_hptsi_segments;
7393 	rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
7394 	rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
7395 	rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
7396 	rack->r_enforce_min_pace = rack_min_pace_time;
7397 	rack->r_ctl.rc_prop_rate = rack_proportional_rate;
7398 	rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
7399 	rack->r_ctl.rc_early_recovery = rack_early_recovery;
7400 	rack->rc_always_pace = rack_pace_every_seg;
7401 	rack_set_pace_segments(tp, rack);
7402 	rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
7403 	rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
7404 	rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
7405 	rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
7406 	rack->r_ctl.rc_min_to = rack_min_to;
7407 	rack->rack_per_of_gp = rack_per_of_gp;
7408 	microuptime(&rack->r_ctl.rc_last_ack);
7409 	rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack;
7410 	rack->r_ctl.rc_tlp_rxt_last_time = tcp_ts_getticks();
7411 	/* Do we force sack-attack detection on? */
7412 	if (tcp_force_detection)
7413 		rack->do_detection = 1;
7414 	else
7415 		rack->do_detection = 0;
7416 	if (tp->snd_una != tp->snd_max) {
7417 		/* Create a send map for the current outstanding data */
7418 		struct rack_sendmap *rsm;
7419 
7420 		rsm = rack_alloc(rack);
7421 		if (rsm == NULL) {
7422 			uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
7423 			tp->t_fb_ptr = NULL;
7424 			return (ENOMEM);
7425 		}
7426 		rsm->r_flags = RACK_OVERMAX;
7427 		rsm->r_tim_lastsent[0] = rack->r_ctl.rc_tlp_rxt_last_time;
7428 		rsm->r_rtr_cnt = 1;
7429 		rsm->r_rtr_bytes = 0;
7430 		rsm->r_start = tp->snd_una;
7431 		rsm->r_end = tp->snd_max;
7432 		rsm->r_dupack = 0;
7433 		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7434 #ifdef INVARIANTS
7435 		if (insret != NULL) {
7436 			panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
7437 			      insret, rack, rsm);
7438 		}
7439 #endif
7440 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7441 		rsm->r_in_tmap = 1;
7442 	}
7443 	rack_stop_all_timers(tp);
7444 	rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
7445 	return (0);
7446 }
7447 
7448 static int
7449 rack_handoff_ok(struct tcpcb *tp)
7450 {
7451 	if ((tp->t_state == TCPS_CLOSED) ||
7452 	    (tp->t_state == TCPS_LISTEN)) {
7453 		/* Sure, no problem, though it may not stick */
7454 		return (0);
7455 	}
7456 	if ((tp->t_state == TCPS_SYN_SENT) ||
7457 	    (tp->t_state == TCPS_SYN_RECEIVED)) {
7458 		/*
7459 		 * We really don't know; you have to get to ESTAB or beyond
7460 		 * to tell.
7461 		 */
7462 		return (EAGAIN);
7463 	}
7464 	if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
7465 		return (0);
7466 	}
7467 	/*
7468 	 * If we reach here we don't do SACK on this connection so we can
7469 	 * never do rack.
7470 	 */
7471 	return (EINVAL);
7472 }
7473 
7474 static void
7475 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
7476 {
7477 	if (tp->t_fb_ptr) {
7478 		struct tcp_rack *rack;
7479 		struct rack_sendmap *rsm, *nrsm, *rm;
7480 		if (tp->t_inpcb) {
7481 			tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
7482 			tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
7483 		}
7484 		rack = (struct tcp_rack *)tp->t_fb_ptr;
7485 #ifdef TCP_BLACKBOX
7486 		tcp_log_flowend(tp);
7487 #endif
7488 		RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
7489 			rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7490 #ifdef INVARIANTS
7491 			if (rm != rsm) {
7492 				panic("At fini, rack:%p rsm:%p rm:%p",
7493 				      rack, rsm, rm);
7494 			}
7495 #endif
7496 			uma_zfree(rack_zone, rsm);
7497 		}
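		/* Now drain the cache of free rsm entries as well. */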
7498 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
7499 		while (rsm) {
7500 			TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
7501 			uma_zfree(rack_zone, rsm);
7502 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
7503 		}
7504 		rack->rc_free_cnt = 0;
7505 		uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
7506 		tp->t_fb_ptr = NULL;
7507 	}
7508 	/* Make sure snd_nxt is correctly set */
7509 	tp->snd_nxt = tp->snd_max;
7510 }
7511 
7512 
7513 static void
7514 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
7515 {
7516 	switch (tp->t_state) {
7517 	case TCPS_SYN_SENT:
7518 		rack->r_state = TCPS_SYN_SENT;
7519 		rack->r_substate = rack_do_syn_sent;
7520 		break;
7521 	case TCPS_SYN_RECEIVED:
7522 		rack->r_state = TCPS_SYN_RECEIVED;
7523 		rack->r_substate = rack_do_syn_recv;
7524 		break;
7525 	case TCPS_ESTABLISHED:
7526 		rack_set_pace_segments(tp, rack);
7527 		rack->r_state = TCPS_ESTABLISHED;
7528 		rack->r_substate = rack_do_established;
7529 		break;
7530 	case TCPS_CLOSE_WAIT:
7531 		rack->r_state = TCPS_CLOSE_WAIT;
7532 		rack->r_substate = rack_do_close_wait;
7533 		break;
7534 	case TCPS_FIN_WAIT_1:
7535 		rack->r_state = TCPS_FIN_WAIT_1;
7536 		rack->r_substate = rack_do_fin_wait_1;
7537 		break;
7538 	case TCPS_CLOSING:
7539 		rack->r_state = TCPS_CLOSING;
7540 		rack->r_substate = rack_do_closing;
7541 		break;
7542 	case TCPS_LAST_ACK:
7543 		rack->r_state = TCPS_LAST_ACK;
7544 		rack->r_substate = rack_do_lastack;
7545 		break;
7546 	case TCPS_FIN_WAIT_2:
7547 		rack->r_state = TCPS_FIN_WAIT_2;
7548 		rack->r_substate = rack_do_fin_wait_2;
7549 		break;
7550 	case TCPS_LISTEN:
7551 	case TCPS_CLOSED:
7552 	case TCPS_TIME_WAIT:
7553 	default:
7554 		break;
7555 	};
7556 }
7557 
7558 
7559 static void
7560 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
7561 {
7562 	/*
7563 	 * We received an ack, and then did not
7564 	 * call send, or were bounced out because the
7565 	 * hpts was running. Now a timer is up as well; is
7566 	 * it the right timer?
7567 	 */
7568 	struct rack_sendmap *rsm;
7569 	int tmr_up;
7570 
7571 	tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
7572 	if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
7573 		return;
7574 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7575 	if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
7576 	    (tmr_up == PACE_TMR_RXT)) {
7577 		/* Should be an RXT */
7578 		return;
7579 	}
7580 	if (rsm == NULL) {
7581 		/* Nothing outstanding? */
7582 		if (tp->t_flags & TF_DELACK) {
7583 			if (tmr_up == PACE_TMR_DELACK)
7584 				/* We are supposed to have delayed ack up and we do */
7585 				return;
7586 		} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
7587 			/*
7588 			 * If we hit ENOBUFS, then we would expect the possibility
7589 			 * of nothing outstanding and the RXT up (and the hptsi timer).
7590 			 */
7591 			return;
7592 		} else if (((tcp_always_keepalive ||
7593 			     rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
7594 			    (tp->t_state <= TCPS_CLOSING)) &&
7595 			   (tmr_up == PACE_TMR_KEEP) &&
7596 			   (tp->snd_max == tp->snd_una)) {
7597 			/* We should have keep alive up and we do */
7598 			return;
7599 		}
7600 	}
7601 	if (SEQ_GT(tp->snd_max, tp->snd_una) &&
7602 		   ((tmr_up == PACE_TMR_TLP) ||
7603 		    (tmr_up == PACE_TMR_RACK) ||
7604 		    (tmr_up == PACE_TMR_RXT))) {
7605 		/*
7606 		 * Either a Rack, TLP or RXT timer is fine if we
7607 		 * have outstanding data.
7608 		 */
7609 		return;
7610 	} else if (tmr_up == PACE_TMR_DELACK) {
7611 		/*
7612 		 * If the delayed ack was going to go off
7613 		 * before the rtx/tlp/rack timer was going to
7614 		 * expire, then that would be the timer in control.
7615 		 * Note we don't check the time here, trusting that the
7616 		 * code is correct.
7617 		 */
7618 		return;
7619 	}
7620 	/*
7621 	 * Ok the timer originally started is not what we want now.
7622 	 * We will force the hpts to be stopped if any, and restart
7623 	 * with the slot set to what was in the saved slot.
7624 	 */
7625 	rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
7626 	rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
7627 }
7628 
7629 static int
7630 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
7631     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
7632     int32_t nxt_pkt, struct timeval *tv)
7633 {
7634 	int32_t thflags, retval, did_out = 0;
7635 	int32_t way_out = 0;
7636 	uint32_t cts;
7637 	uint32_t tiwin;
7638 	struct tcpopt to;
7639 	struct tcp_rack *rack;
7640 	struct rack_sendmap *rsm;
7641 	int32_t prev_state = 0;
7642 
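	/*
	 * If LRO recorded a hardware arrival timestamp (in
	 * nanoseconds), convert it into the timeval used below.
	 */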
7643 	if (m->m_flags & M_TSTMP_LRO) {
7644 		tv->tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
7645 		tv->tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
7646 	}
7647 	cts = tcp_tv_to_mssectick(tv);
7648 	rack = (struct tcp_rack *)tp->t_fb_ptr;
7649 
7650 	kern_prefetch(rack, &prev_state);
7651 	prev_state = 0;
7652 	thflags = th->th_flags;
7653 	/*
7654 	 * If this is either a state-changing packet or current state isn't
7655 	 * established, we require a read lock on tcbinfo.  Otherwise, we
7656 	 * allow the tcbinfo to be either locked or unlocked, as the
7657 	 * caller may have unnecessarily acquired a lock due to a race.
7658 	 */
7659 	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
7660 	    tp->t_state != TCPS_ESTABLISHED) {
7661 		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
7662 	}
7663 	INP_WLOCK_ASSERT(tp->t_inpcb);
7664 	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
7665 	    __func__));
7666 	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
7667 	    __func__));
7668 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
7669 		union tcp_log_stackspecific log;
7670 		struct timeval tv;
7671 
7672 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
7673 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
7674 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
7675 		log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
7676 		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
7677 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
7678 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
7679 		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
7680 		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
7681 		    tlen, &log, true, &tv);
7682 	}
7683 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
7684 		way_out = 4;
7685 		retval = 0;
7686 		goto done_with_input;
7687 	}
7688 	/*
7689 	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
7690 	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
7691 	 */
7692 	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
7693 	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
7694 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
7695 		return(1);
7696 	}
7697 	/*
7698 	 * Segment received on connection. Reset idle time and keep-alive
7699 	 * timer. XXX: This should be done after segment validation to
7700 	 * ignore broken/spoofed segs.
7701 	 */
7702 	if (tp->t_idle_reduce &&
7703 	     (tp->snd_max == tp->snd_una) &&
7704 	     ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
7705 		counter_u64_add(rack_input_idle_reduces, 1);
7706 		rack_cc_after_idle(tp);
7707 	}
7708 	tp->t_rcvtime = ticks;
7709 
7710 	/*
7711 	 * Unscale the window into a 32-bit value. For the SYN_SENT state
7712 	 * the scale is zero.
7713 	 */
7714 	tiwin = th->th_win << tp->snd_scale;
7715 #ifdef NETFLIX_STATS
7716 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
7717 #endif
7718 	if (tiwin > rack->r_ctl.rc_high_rwnd)
7719 		rack->r_ctl.rc_high_rwnd = tiwin;
7720 	/*
7721 	 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
7722 	 * this to occur after we've validated the segment.
7723 	 */
7724 	if (tp->t_flags & TF_ECN_PERMIT) {
7725 		if (thflags & TH_CWR)
7726 			tp->t_flags &= ~TF_ECN_SND_ECE;
7727 		switch (iptos & IPTOS_ECN_MASK) {
7728 		case IPTOS_ECN_CE:
7729 			tp->t_flags |= TF_ECN_SND_ECE;
7730 			TCPSTAT_INC(tcps_ecn_ce);
7731 			break;
7732 		case IPTOS_ECN_ECT0:
7733 			TCPSTAT_INC(tcps_ecn_ect0);
7734 			break;
7735 		case IPTOS_ECN_ECT1:
7736 			TCPSTAT_INC(tcps_ecn_ect1);
7737 			break;
7738 		}
7739 		/* Congestion experienced. */
7740 		if (thflags & TH_ECE) {
7741 			rack_cong_signal(tp, th, CC_ECN);
7742 		}
7743 	}
7744 	/*
7745 	 * Parse options on any incoming segment.
7746 	 */
7747 	tcp_dooptions(&to, (u_char *)(th + 1),
7748 	    (th->th_off << 2) - sizeof(struct tcphdr),
7749 	    (thflags & TH_SYN) ? TO_SYN : 0);
7750 
7751 	/*
7752 	 * If echoed timestamp is later than the current time, fall back to
7753 	 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
7754 	 * were used when this connection was established.
7755 	 */
7756 	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
7757 		to.to_tsecr -= tp->ts_offset;
7758 		if (TSTMP_GT(to.to_tsecr, cts))
7759 			to.to_tsecr = 0;
7760 	}
7761 	/*
7762 	 * If it's the first time in, we need to take care of options and
7763 	 * verify we can do SACK for rack!
7764 	 */
7765 	if (rack->r_state == 0) {
7766 		/* Should be init'd by rack_init() */
7767 		KASSERT(rack->rc_inp != NULL,
7768 		    ("%s: rack->rc_inp unexpectedly NULL", __func__));
7769 		if (rack->rc_inp == NULL) {
7770 			rack->rc_inp = tp->t_inpcb;
7771 		}
7772 
7773 		/*
7774 		 * Process options only when we get SYN/ACK back. The SYN
7775 		 * case for incoming connections is handled in tcp_syncache.
7776 		 * According to RFC1323 the window field in a SYN (i.e., a
7777 		 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
7778 		 * this is traditional behavior, may need to be cleaned up.
7779 		 */
7780 		rack->r_cpu = inp_to_cpuid(tp->t_inpcb);
7781 		if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
7782 			if ((to.to_flags & TOF_SCALE) &&
7783 			    (tp->t_flags & TF_REQ_SCALE)) {
7784 				tp->t_flags |= TF_RCVD_SCALE;
7785 				tp->snd_scale = to.to_wscale;
7786 			}
7787 			/*
7788 			 * Initial send window.  It will be updated with the
7789 			 * next incoming segment to the scaled value.
7790 			 */
7791 			tp->snd_wnd = th->th_win;
7792 			if (to.to_flags & TOF_TS) {
7793 				tp->t_flags |= TF_RCVD_TSTMP;
7794 				tp->ts_recent = to.to_tsval;
7795 				tp->ts_recent_age = cts;
7796 			}
7797 			if (to.to_flags & TOF_MSS)
7798 				tcp_mss(tp, to.to_mss);
7799 			if ((tp->t_flags & TF_SACK_PERMIT) &&
7800 			    (to.to_flags & TOF_SACKPERM) == 0)
7801 				tp->t_flags &= ~TF_SACK_PERMIT;
7802 			if (IS_FASTOPEN(tp->t_flags)) {
7803 				if (to.to_flags & TOF_FASTOPEN) {
7804 					uint16_t mss;
7805 
7806 					if (to.to_flags & TOF_MSS)
7807 						mss = to.to_mss;
7808 					else
7809 						if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
7810 							mss = TCP6_MSS;
7811 						else
7812 							mss = TCP_MSS;
7813 					tcp_fastopen_update_cache(tp, mss,
7814 					    to.to_tfo_len, to.to_tfo_cookie);
7815 				} else
7816 					tcp_fastopen_disable_path(tp);
7817 			}
7818 		}
7819 		/*
7820 		 * At this point we are at the initial call. Here we decide
7821 		 * if we are doing RACK or not. We do this by seeing if
7822 		 * TF_SACK_PERMIT is set, if not rack is *not* possible and
7823 		 * we switch to the default code.
7824 		 */
7825 		if ((tp->t_flags & TF_SACK_PERMIT) == 0) {
7826 			tcp_switch_back_to_default(tp);
7827 			(*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
7828 			    tlen, iptos);
7829 			return (1);
7830 		}
7831 		/* Set the flag */
7832 		rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
7833 		tcp_set_hpts(tp->t_inpcb);
7834 		sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
7835 	}
7836 	/*
7837 	 * This is the one exception case where we set the rack state
7838 	 * always. All other times (timers etc) we must have a rack-state
7839 	 * set (so we assure we have done the checks above for SACK).
7840 	 */
7841 	memcpy(&rack->r_ctl.rc_last_ack, tv, sizeof(struct timeval));
7842 	rack->r_ctl.rc_rcvtime = cts;
7843 	if (rack->r_state != tp->t_state)
7844 		rack_set_state(tp, rack);
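	/*
	 * If this ack advances snd_una, prefetch the first send-map
	 * entry, since ack processing will start with it.
	 */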
7845 	if (SEQ_GT(th->th_ack, tp->snd_una) &&
7846 	    (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
7847 		kern_prefetch(rsm, &prev_state);
7848 	prev_state = rack->r_state;
7849 	rack->r_ctl.rc_tlp_send_cnt = 0;
7850 	rack_clear_rate_sample(rack);
7851 	retval = (*rack->r_substate) (m, th, so,
7852 	    tp, &to, drop_hdrlen,
7853 	    tlen, tiwin, thflags, nxt_pkt);
7854 #ifdef INVARIANTS
7855 	if ((retval == 0) &&
7856 	    (tp->t_inpcb == NULL)) {
7857 		panic("retval:%d tp:%p t_inpcb:NULL state:%d",
7858 		    retval, tp, prev_state);
7859 	}
7860 #endif
7861 	if (retval == 0) {
7862 		/*
7863 		 * If retval is 1 the tcb is unlocked and most likely the tp
7864 		 * is gone.
7865 		 */
7866 		INP_WLOCK_ASSERT(tp->t_inpcb);
7867 		if (rack->set_pacing_done_a_iw == 0) {
7868 			/* How much has been acked? */
7869 			if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
7870 				/* We have enough to set in the pacing segment size */
7871 				rack->set_pacing_done_a_iw = 1;
7872 				rack_set_pace_segments(tp, rack);
7873 			}
7874 		}
7875 		tcp_rack_xmit_timer_commit(rack, tp);
7876 		if ((nxt_pkt == 0) || (IN_RECOVERY(tp->t_flags))) {
7877 			if (rack->r_wanted_output != 0) {
7878 				did_out = 1;
7879 				(void)tp->t_fb->tfb_tcp_output(tp);
7880 			}
7881 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
7882 		}
7883 		if ((nxt_pkt == 0) &&
7884 		    ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
7885 		    (SEQ_GT(tp->snd_max, tp->snd_una) ||
7886 		     (tp->t_flags & TF_DELACK) ||
7887 		     ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
7888 		      (tp->t_state <= TCPS_CLOSING)))) {
7889 			/* We could not send (probably in the hpts but stopped the timer earlier)? */
7890 			if ((tp->snd_max == tp->snd_una) &&
7891 			    ((tp->t_flags & TF_DELACK) == 0) &&
7892 			    (rack->rc_inp->inp_in_hpts) &&
7893 			    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
7894 				/* keep alive not needed if we are still awaiting hptsi output */
7895 				;
7896 			} else {
7897 				if (rack->rc_inp->inp_in_hpts) {
7898 					tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7899 					counter_u64_add(rack_per_timer_hole, 1);
7900 				}
7901 				rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
7902 			}
7903 			way_out = 1;
7904 		} else if (nxt_pkt == 0) {
7905 			/* Do we have the correct timer running? */
7906 			rack_timer_audit(tp, rack, &so->so_snd);
7907 			way_out = 2;
7908 		}
7909 	done_with_input:
7910 		rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
7911 		if (did_out)
7912 			rack->r_wanted_output = 0;
7913 #ifdef INVARIANTS
7914 		if (tp->t_inpcb == NULL) {
7915 			panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
7916 			      did_out,
7917 			      retval, tp, prev_state);
7918 		}
7919 #endif
7920 	}
7921 	return (retval);
7922 }
7923 
7924 void
7925 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
7926     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
7927 {
7928 	struct timeval tv;
7929 
7930 	/* First let's see if we have old packets */
7931 	if (tp->t_in_pkt) {
7932 		if (ctf_do_queued_segments(so, tp, 1)) {
7933 			m_freem(m);
7934 			return;
7935 		}
7936 	}
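	/* Prefer the LRO arrival timestamp when one was recorded. */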
7937 	if (m->m_flags & M_TSTMP_LRO) {
7938 		tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
7939 		tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
7940 	} else {
7941 		/* Should not happen; should we KASSERT instead? */
7942 		tcp_get_usecs(&tv);
7943 	}
7944 	if (rack_do_segment_nounlock(m, th, so, tp,
7945 				    drop_hdrlen, tlen, iptos, 0, &tv) == 0)
7946 		INP_WUNLOCK(tp->t_inpcb);
7947 }
7948 
7949 struct rack_sendmap *
7950 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
7951 {
7952 	struct rack_sendmap *rsm = NULL;
7953 	int32_t idx;
7954 	uint32_t srtt = 0, thresh = 0, ts_low = 0;
7955 
7956 	/* Return the next guy to be re-transmitted */
7957 	if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
7958 		return (NULL);
7959 	}
7960 	if (tp->t_flags & TF_SENTFIN) {
7961 		/* Never retransmit the trailing FIN from here */
7962 		return (NULL);
7963 	}
7964 	/* OK, let's look at this one */
7965 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7966 	if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
7967 		goto check_it;
7968 	}
7969 	rsm = rack_find_lowest_rsm(rack);
7970 	if (rsm == NULL) {
7971 		return (NULL);
7972 	}
7973 check_it:
7974 	if (rsm->r_flags & RACK_ACKED) {
7975 		return (NULL);
7976 	}
7977 	if ((rsm->r_flags & RACK_SACK_PASSED) == 0) {
7978 		/* It's not yet ready */
7979 		return (NULL);
7980 	}
7981 	srtt = rack_grab_rtt(tp, rack);
7982 	idx = rsm->r_rtr_cnt - 1;
7983 	ts_low = rsm->r_tim_lastsent[idx];
7984 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
7985 	if ((tsused == ts_low) ||
7986 	    (TSTMP_LT(tsused, ts_low))) {
7987 		/* No time since sending */
7988 		return (NULL);
7989 	}
7990 	if ((tsused - ts_low) < thresh) {
7991 		/* It has not been long enough yet */
7992 		return (NULL);
7993 	}
7994 	if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
7995 	    ((rsm->r_flags & RACK_SACK_PASSED) &&
7996 	     (rack->sack_attack_disable == 0))) {
7997 		/*
7998 		 * We have passed the dup-ack threshold <or>
7999 		 * a SACK has indicated this is missing.
8000 		 * Note that if you are a declared attacker
8001 		 * it is only the dup-ack threshold that
8002 		 * will cause retransmits.
8003 		 */
8004 		/* log retransmit reason */
8005 		rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
8006 		return (rsm);
8007 	}
8008 	return (NULL);
8009 }
8010 
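/*
 * Compute the pacing delay (slot time, in ms) for a send of the given
 * length: the legacy cwnd/srtt drain-time method, or, when
 * rack_per_of_gp is set and pacing is always on, a rate derived from
 * the recent goodput history raised by the configured percentage.
 */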
8011 static int32_t
8012 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len)
8013 {
8014 	int32_t slot = 0;
8015 
8016 	if ((rack->rack_per_of_gp == 0) ||
8017 	    (rack->rc_always_pace == 0)) {
8018 		/*
8019 		 * We use the most optimistic possible cwnd/srtt for
8020 		 * sending calculations. This will make our
8021 		 * calculation anticipate getting more through
8022 		 * quicker than possible. But that's ok; we don't want
8023 		 * the peer to have a gap in data sending.
8024 		 */
8025 		uint32_t srtt, cwnd, tr_perms = 0;
8026 
8027 old_method:
8028 		if (rack->r_ctl.rc_rack_min_rtt)
8029 			srtt = rack->r_ctl.rc_rack_min_rtt;
8030 		else
8031 			srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
8032 		if (rack->r_ctl.rc_rack_largest_cwnd)
8033 			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
8034 		else
8035 			cwnd = tp->snd_cwnd;
8036 		tr_perms = cwnd / srtt;
8037 		if (tr_perms == 0) {
8038 			tr_perms = ctf_fixed_maxseg(tp);
8039 		}
8040 		/*
8041 		 * Calculate how long this will take to drain, if
8042 		 * the calculation comes out to zero, that's ok; we
8043 		 * will use send_a_lot to possibly spin around for
8044 		 * more increasing tot_len_this_send to the point
8045 		 * that it's going to require a pace, or we hit the
8046 		 * cwnd, in which case we are just waiting for
8047 		 * an ACK.
8048 		 */
8049 		slot = len / tr_perms;
8050 		/* Now do we reduce the time so we don't run dry? */
8051 		if (slot && rack->rc_pace_reduce) {
8052 			int32_t reduce;
8053 
8054 			reduce = (slot / rack->rc_pace_reduce);
8055 			if (reduce < slot) {
8056 				slot -= reduce;
8057 			} else
8058 				slot = 0;
8059 		}
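		/*
		 * Illustration (hypothetical numbers): with a 30000 byte
		 * cwnd and a 10 ms min RTT, tr_perms is 3000 bytes/ms, so
		 * a 12000 byte send gets slot = 12000 / 3000 = 4 ms,
		 * before any rc_pace_reduce adjustment above.
		 */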
8060 	} else {
8061 		int cnt;
8062 		uint64_t bw_est, bw_raise, res, lentim;
8063 
8064 		bw_est = 0;
8065 		for (cnt = 0; cnt < RACK_GP_HIST; cnt++) {
8066 			if ((rack->r_ctl.rc_gp_hist_filled == 0) &&
8067 			    (rack->r_ctl.rc_gp_history[cnt] == 0))
8068 				break;
8069 			bw_est += rack->r_ctl.rc_gp_history[cnt];
8070 		}
8071 		if (bw_est == 0) {
8072 			/*
8073 			 * No way yet to make a b/w estimate
8074 			 * (no goodput est yet).
8075 			 */
8076 			goto old_method;
8077 		}
8078 		/* Convert to bytes per second */
8079 		bw_est *= MSEC_IN_SECOND;
8080 		/*
8081 		 * Now ratchet it up by our percentage. Note
8082 		 * that the minimum you can do is 1 which would
8083 		 * get you 101% of the average last N goodput estimates.
8084 		 * The max you can do is 256 which would yield you
8085 		 * 356% of the last N goodput estimates.
8086 		 */
8087 		bw_raise = bw_est * (uint64_t)rack->rack_per_of_gp;
8088 		bw_est += bw_raise;
8089 		/* average by the number we added */
8090 		bw_est /= cnt;
8091 		/* Now calculate a rate based on this b/w */
8092 		lentim = (uint64_t) len * (uint64_t)MSEC_IN_SECOND;
8093 		res = lentim / bw_est;
8094 		slot = (uint32_t)res;
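		/*
		 * Illustration (hypothetical numbers): with bw_est at
		 * 1,000,000 bytes/sec and len of 5000 bytes, lentim is
		 * 5,000,000 and res = lentim / bw_est = 5, i.e. a 5 ms
		 * pacing slot.
		 */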
8095 	}
8096 	if (rack->r_enforce_min_pace &&
8097 	    (slot == 0)) {
8098 		/* We are enforcing a minimum pace time of 1ms */
8099 		slot = rack->r_enforce_min_pace;
8100 	}
8101 	if (slot)
8102 		counter_u64_add(rack_calc_nonzero, 1);
8103 	else
8104 		counter_u64_add(rack_calc_zero, 1);
8105 	return (slot);
8106 }
8107 
8108 static int
8109 rack_output(struct tcpcb *tp)
8110 {
8111 	struct socket *so;
8112 	uint32_t recwin, sendwin;
8113 	uint32_t sb_offset;
8114 	int32_t len, flags, error = 0;
8115 	struct mbuf *m;
8116 	struct mbuf *mb;
8117 	uint32_t if_hw_tsomaxsegcount = 0;
8118 	uint32_t if_hw_tsomaxsegsize;
8119 	int32_t maxseg;
8120 	long tot_len_this_send = 0;
8121 	struct ip *ip = NULL;
8122 #ifdef TCPDEBUG
8123 	struct ipovly *ipov = NULL;
8124 #endif
8125 	struct udphdr *udp = NULL;
8126 	struct tcp_rack *rack;
8127 	struct tcphdr *th;
8128 	uint8_t pass = 0;
8129 	uint8_t wanted_cookie = 0;
8130 	u_char opt[TCP_MAXOLEN];
8131 	unsigned ipoptlen, optlen, hdrlen, ulen = 0;
8132 	uint32_t rack_seq;
8133 
8134 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
8135 	unsigned ipsec_optlen = 0;
8136 
8137 #endif
8138 	int32_t idle, sendalot;
8139 	int32_t sub_from_prr = 0;
8140 	volatile int32_t sack_rxmit;
8141 	struct rack_sendmap *rsm = NULL;
8142 	int32_t tso, mtu;
8143 	struct tcpopt to;
8144 	int32_t slot = 0;
8145 	int32_t sup_rack = 0;
8146 	uint32_t cts;
8147 	uint8_t hpts_calling, new_data_tlp = 0, doing_tlp = 0;
8148 	int32_t do_a_prefetch;
8149 	int32_t prefetch_rsm = 0;
8150 	int force_tso = 0;
8151 	int32_t orig_len;
8152 	int32_t prefetch_so_done = 0;
8153 	struct tcp_log_buffer *lgb = NULL;
8154 	struct inpcb *inp;
8155 	struct sockbuf *sb;
8156 #ifdef INET6
8157 	struct ip6_hdr *ip6 = NULL;
8158 	int32_t isipv6;
8159 #endif
8160 	uint8_t filled_all = 0;
8161 	bool hw_tls = false;
8162 
8163 	/* setup and take the cache hits here */
8164 	rack = (struct tcp_rack *)tp->t_fb_ptr;
8165 	inp = rack->rc_inp;
8166 	so = inp->inp_socket;
8167 	sb = &so->so_snd;
8168 	kern_prefetch(sb, &do_a_prefetch);
8169 	do_a_prefetch = 1;
8170 
8171 #ifdef KERN_TLS
8172 	hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0;
8173 #endif
8174 
8175 	INP_WLOCK_ASSERT(inp);
8176 #ifdef TCP_OFFLOAD
8177 	if (tp->t_flags & TF_TOE)
8178 		return (tcp_offload_output(tp));
8179 #endif
8180 	maxseg = ctf_fixed_maxseg(tp);
8181 	/*
8182 	 * For TFO connections in SYN_RECEIVED, only allow the initial
8183 	 * SYN|ACK and those sent by the retransmit timer.
8184 	 */
8185 	if (IS_FASTOPEN(tp->t_flags) &&
8186 	    (tp->t_state == TCPS_SYN_RECEIVED) &&
8187 	    SEQ_GT(tp->snd_max, tp->snd_una) &&    /* initial SYN|ACK sent */
8188 	    (rack->r_ctl.rc_resend == NULL))         /* not a retransmit */
8189 		return (0);
8190 #ifdef INET6
8191 	if (rack->r_state) {
8192 		/* Use the cache line loaded if possible */
8193 		isipv6 = rack->r_is_v6;
8194 	} else {
8195 		isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
8196 	}
8197 #endif
8198 	cts = tcp_ts_getticks();
8199 	if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
8200 	    inp->inp_in_hpts) {
8201 		/*
8202 		 * We are on the hpts for some timer but not hptsi output.
8203 		 * Remove from the hpts unconditionally.
8204 		 */
8205 		rack_timer_cancel(tp, rack, cts, __LINE__);
8206 	}
8207 	/* Mark that we have called rack_output(). */
8208 	if ((rack->r_timer_override) ||
8209 	    (tp->t_flags & TF_FORCEDATA) ||
8210 	    (tp->t_state < TCPS_ESTABLISHED)) {
8211 		if (tp->t_inpcb->inp_in_hpts)
8212 			tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
8213 	} else if (tp->t_inpcb->inp_in_hpts) {
8214 		/*
8215 		 * While on the hpts you can't pass, even if ACKNOW is on; we
8216 		 * will send when the hpts fires.
8217 		 */
8218 		counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
8219 		return (0);
8220 	}
8221 	hpts_calling = inp->inp_hpts_calls;
8222 	inp->inp_hpts_calls = 0;
8223 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
8224 		if (rack_process_timers(tp, rack, cts, hpts_calling)) {
8225 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
8226 			return (0);
8227 		}
8228 	}
8229 	rack->r_wanted_output = 0;
8230 	rack->r_timer_override = 0;
8231 	/*
8232 	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
8233 	 * only allow the initial SYN or SYN|ACK and those sent
8234 	 * by the retransmit timer.
8235 	 */
8236 	if (IS_FASTOPEN(tp->t_flags) &&
8237 	    ((tp->t_state == TCPS_SYN_RECEIVED) ||
8238 	     (tp->t_state == TCPS_SYN_SENT)) &&
8239 	    SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
8240 	    (tp->t_rxtshift == 0))              /* not a retransmit */
8241 		return (0);
8242 	/*
8243 	 * Determine length of data that should be transmitted, and flags
8244 	 * that will be used. If there is some data or critical controls
8245 	 * (SYN, RST) to send, then transmit; otherwise, investigate
8246 	 * further.
8247 	 */
8248 	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
8249 	if (tp->t_idle_reduce) {
8250 		if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
8251 			rack_cc_after_idle(tp);
8252 	}
8253 	tp->t_flags &= ~TF_LASTIDLE;
8254 	if (idle) {
8255 		if (tp->t_flags & TF_MORETOCOME) {
8256 			tp->t_flags |= TF_LASTIDLE;
8257 			idle = 0;
8258 		}
8259 	}
8260 again:
8261 	/*
8262 	 * If we've recently taken a timeout, snd_max will be greater than
8263 	 * snd_nxt.  There may be SACK information that allows us to avoid
8264 	 * resending already delivered data.  Adjust snd_nxt accordingly.
8265 	 */
8266 	sendalot = 0;
8267 	cts = tcp_ts_getticks();
8268 	tso = 0;
8269 	mtu = 0;
8270 	sb_offset = tp->snd_max - tp->snd_una;
8271 	sendwin = min(tp->snd_wnd, tp->snd_cwnd);
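	/*
	 * sendwin: what we may send, in bytes; the lesser of the peer's
	 * advertised window and our congestion window.
	 */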
8272 
8273 	flags = tcp_outflags[tp->t_state];
8274 	while (rack->rc_free_cnt < rack_free_cache) {
8275 		rsm = rack_alloc(rack);
8276 		if (rsm == NULL) {
8277 			if (inp->inp_hpts_calls)
8278 				/* Retry in a ms */
8279 				slot = 1;
8280 			goto just_return_nolock;
8281 		}
8282 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
8283 		rack->rc_free_cnt++;
8284 		rsm = NULL;
8285 	}
8286 	if (inp->inp_hpts_calls)
8287 		inp->inp_hpts_calls = 0;
8288 	sack_rxmit = 0;
8289 	len = 0;
8290 	rsm = NULL;
8291 	if (flags & TH_RST) {
8292 		SOCKBUF_LOCK(sb);
8293 		goto send;
8294 	}
8295 	if (rack->r_ctl.rc_tlpsend) {
8296 		/* Tail loss probe */
8297 		long cwin;
8298 		long tlen;
8299 
8300 		doing_tlp = 1;
8301 		/*
8302 		 * Check if we can do a TLP with a RACK'd packet
8303 		 * this can happen if we are not doing the rack
8304 		 * cheat and we skipped to a TLP and it
8305 		 * went off.
8306 		 */
8307 		rsm = tcp_rack_output(tp, rack, cts);
8308 		if (rsm == NULL)
8309 			rsm = rack->r_ctl.rc_tlpsend;
8310 		rack->r_ctl.rc_tlpsend = NULL;
8311 		sack_rxmit = 1;
8312 		tlen = rsm->r_end - rsm->r_start;
8313 		if (tlen > ctf_fixed_maxseg(tp))
8314 			tlen = ctf_fixed_maxseg(tp);
8315 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
8316 		    ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
8317 		    __func__, __LINE__,
8318 		    rsm->r_start, tp->snd_una, tp, rack, rsm));
8319 		sb_offset = rsm->r_start - tp->snd_una;
8320 		cwin = min(tp->snd_wnd, tlen);
8321 		len = cwin;
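		/* The TLP send is clamped to one MSS and the peer's window. */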
8322 	} else if (rack->r_ctl.rc_resend) {
8323 		/* Retransmit timer */
8324 		rsm = rack->r_ctl.rc_resend;
8325 		rack->r_ctl.rc_resend = NULL;
8326 		len = rsm->r_end - rsm->r_start;
8327 		sack_rxmit = 1;
8328 		sendalot = 0;
8329 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
8330 		    ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
8331 		    __func__, __LINE__,
8332 		    rsm->r_start, tp->snd_una, tp, rack, rsm));
8333 		sb_offset = rsm->r_start - tp->snd_una;
8334 		if (len >= ctf_fixed_maxseg(tp)) {
8335 			len = ctf_fixed_maxseg(tp);
8336 		}
8337 	} else if ((rack->rc_in_persist == 0) &&
8338 	    ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
8339 		int maxseg;
8340 
8341 		maxseg = ctf_fixed_maxseg(tp);
8342 		if ((!IN_RECOVERY(tp->t_flags)) &&
8343 		    ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
8344 			/* Enter recovery if not induced by a time-out */
8345 			rack->r_ctl.rc_rsm_start = rsm->r_start;
8346 			rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
8347 			rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
8348 			rack_cong_signal(tp, NULL, CC_NDUPACK);
8349 			/*
8350 			 * When we enter recovery we need to assure we send
8351 			 * one packet.
8352 			 */
8353 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
8354 			rack_log_to_prr(rack, 13);
8355 		}
8356 #ifdef INVARIANTS
8357 		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
8358 			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
8359 			    tp, rack, rsm, rsm->r_start, tp->snd_una);
8360 		}
8361 #endif
8362 		len = rsm->r_end - rsm->r_start;
8363 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
8364 		    ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
8365 		    __func__, __LINE__,
8366 		    rsm->r_start, tp->snd_una, tp, rack, rsm));
8367 		sb_offset = rsm->r_start - tp->snd_una;
8368 		/* Can we send it within the PRR boundary? */
8369 		if ((rack->use_rack_cheat == 0) && (len > rack->r_ctl.rc_prr_sndcnt)) {
8370 			/* It does not fit */
8371 			if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) > len) &&
8372 			    (rack->r_ctl.rc_prr_sndcnt < maxseg)) {
8373 				/*
8374 				 * prr is less than a segment and we
8375 				 * have more acks due in besides
8376 				 * what we need to resend. Let's not send,
8377 				 * to avoid emitting small pieces of
8378 				 * what we need to retransmit.
8379 				 */
8380 				len = 0;
8381 				goto just_return_nolock;
8382 			}
8383 			len = rack->r_ctl.rc_prr_sndcnt;
8384 		}
8385 		sendalot = 0;
8386 		if (len >= maxseg) {
8387 			len = maxseg;
8388 		}
8389 		if (len > 0) {
8390 			sub_from_prr = 1;
8391 			sack_rxmit = 1;
8392 			TCPSTAT_INC(tcps_sack_rexmits);
8393 			TCPSTAT_ADD(tcps_sack_rexmit_bytes,
8394 			    min(len, ctf_fixed_maxseg(tp)));
8395 			counter_u64_add(rack_rtm_prr_retran, 1);
8396 		}
8397 	}
8398 	/*
8399 	 * Enforce a connection sendmap count limit if set
8400 	 * as long as we are not retransmitting.
8401 	 */
8402 	if ((rsm == NULL) &&
8403 	    (rack->do_detection == 0) &&
8404 	    (rack_tcp_map_entries_limit > 0) &&
8405 	    (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) {
8406 		counter_u64_add(rack_to_alloc_limited, 1);
8407 		if (!rack->alloc_limit_reported) {
8408 			rack->alloc_limit_reported = 1;
8409 			counter_u64_add(rack_alloc_limited_conns, 1);
8410 		}
8411 		goto just_return_nolock;
8412 	}
8413 	if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
8414 		/* we are retransmitting the fin */
8415 		len--;
8416 		if (len) {
8417 			/*
8418 			 * When retransmitting data do *not* include the
8419 			 * FIN. This could happen from a TLP probe.
8420 			 */
8421 			flags &= ~TH_FIN;
8422 		}
8423 	}
8424 #ifdef INVARIANTS
8425 	/* For debugging */
8426 	rack->r_ctl.rc_rsm_at_retran = rsm;
8427 #endif
8428 	/*
8429 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
8430 	 * state flags.
8431 	 */
8432 	if (tp->t_flags & TF_NEEDFIN)
8433 		flags |= TH_FIN;
8434 	if (tp->t_flags & TF_NEEDSYN)
8435 		flags |= TH_SYN;
8436 	if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
8437 		void *end_rsm;
8438 		end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
8439 		if (end_rsm)
8440 			kern_prefetch(end_rsm, &prefetch_rsm);
8441 		prefetch_rsm = 1;
8442 	}
8443 	SOCKBUF_LOCK(sb);
8444 	/*
8445 	 * If in persist timeout with window of 0, send 1 byte. Otherwise,
8446 	 * if window is small but nonzero and the timer has expired, we
8447 	 * will send what we can and go to transmit state.
8448 	 */
8449 	if (tp->t_flags & TF_FORCEDATA) {
8450 		if (sendwin == 0) {
8451 			/*
8452 			 * If we still have some data to send, then clear
8453 			 * the FIN bit.  Usually this would happen below
8454 			 * when it realizes that we aren't sending all the
8455 			 * data.  However, if we have exactly 1 byte of
8456 			 * unsent data, then it won't clear the FIN bit
8457 			 * below, and if we are in persist state, we wind up
8458 			 * sending the packet without recording that we sent
8459 			 * the FIN bit.
8460 			 *
8461 			 * We can't just blindly clear the FIN bit, because
8462 			 * if we don't have any more data to send then the
8463 			 * probe will be the FIN itself.
8464 			 */
8465 			if (sb_offset < sbused(sb))
8466 				flags &= ~TH_FIN;
8467 			sendwin = 1;
8468 		} else {
8469 			if ((rack->rc_in_persist != 0) &&
8470 			    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
8471 					       rack->r_ctl.rc_pace_min_segs)))
8472 				rack_exit_persist(tp, rack);
8473 			/*
8474 			 * If we are dropping persist mode then we need to
8475 			 * correct snd_nxt/snd_max and off.
8476 			 */
8477 			tp->snd_nxt = tp->snd_max;
8478 			sb_offset = tp->snd_nxt - tp->snd_una;
8479 		}
8480 	}
8481 	/*
8482 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
8483 	 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
8484 	 * negative length.  This can also occur when TCP opens up its
8485 	 * congestion window while receiving additional duplicate acks after
8486 	 * fast-retransmit because TCP will reset snd_nxt to snd_max after
8487 	 * the fast-retransmit.
8488 	 *
8489 	 * In the normal retransmit-FIN-only case, however, snd_nxt will be
8490 	 * set to snd_una, the sb_offset will be 0, and the length may wind
8491 	 * up 0.
8492 	 *
8493 	 * If sack_rxmit is true we are retransmitting from the scoreboard
8494 	 * in which case len is already set.
8495 	 */
8496 	if (sack_rxmit == 0) {
8497 		uint32_t avail;
8498 
8499 		avail = sbavail(sb);
8500 		if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
8501 			sb_offset = tp->snd_nxt - tp->snd_una;
8502 		else
8503 			sb_offset = 0;
8504 		if (IN_RECOVERY(tp->t_flags) == 0) {
8505 			if (rack->r_ctl.rc_tlp_new_data) {
8506 				/* TLP is forcing out new data */
8507 				if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
8508 					rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
8509 				}
8510 				if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
8511 					len = tp->snd_wnd;
8512 				else
8513 					len = rack->r_ctl.rc_tlp_new_data;
8514 				rack->r_ctl.rc_tlp_new_data = 0;
8515 				new_data_tlp = doing_tlp = 1;
8516 			} else {
8517 				if (sendwin > avail) {
8518 					/* use the available */
8519 					if (avail > sb_offset) {
8520 						len = (int32_t)(avail - sb_offset);
8521 					} else {
8522 						len = 0;
8523 					}
8524 				} else {
8525 					if (sendwin > sb_offset) {
8526 						len = (int32_t)(sendwin - sb_offset);
8527 					} else {
8528 						len = 0;
8529 					}
8530 				}
8531 			}
8532 		} else {
8533 			uint32_t outstanding;
8534 
8535 			/*
8536 			 * We are inside of a SACK recovery episode and are
8537 			 * sending new data, having retransmitted all the
8538 			 * data possible so far in the scoreboard.
8539 			 */
8540 			outstanding = tp->snd_max - tp->snd_una;
8541 			if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
8542 				if (tp->snd_wnd > outstanding) {
8543 					len = tp->snd_wnd - outstanding;
8544 					/* Check to see if we have the data */
8545 					if (((sb_offset + len) > avail) &&
8546 					    (avail > sb_offset))
8547 						len = avail - sb_offset;
8548 					else
8549 						len = 0;
8550 				} else
8551 					len = 0;
8552 			} else if (avail > sb_offset)
8553 				len = avail - sb_offset;
8554 			else
8555 				len = 0;
8556 			if (len > 0) {
8557 				if (len > rack->r_ctl.rc_prr_sndcnt)
8558 					len = rack->r_ctl.rc_prr_sndcnt;
8559 				if (len > 0) {
8560 					sub_from_prr = 1;
8561 					counter_u64_add(rack_rtm_prr_newdata, 1);
8562 				}
8563 			}
8564 			if (len > ctf_fixed_maxseg(tp)) {
8565 				/*
8566 				 * We should never send more than an MSS when
8567 				 * retransmitting or sending new data in prr
8568 				 * mode unless the override flag is on. Most
8569 				 * likely the PRR algorithm is not going to
8570 				 * let us send a lot as well :-)
8571 				 */
8572 				if (rack->r_ctl.rc_prr_sendalot == 0)
8573 					len = ctf_fixed_maxseg(tp);
8574 			} else if (len < ctf_fixed_maxseg(tp)) {
8575 				/*
8576 				 * Do we send any? The idea here is if the
8577 				 * send empties the socket buffer we want to
8578 				 * do it. However, if not, then let's just wait
8579 				 * for our prr_sndcnt to get bigger.
8580 				 */
8581 				long leftinsb;
8582 
8583 				leftinsb = sbavail(sb) - sb_offset;
8584 				if (leftinsb > len) {
8585 					/* This send does not empty the sb */
8586 					len = 0;
8587 				}
8588 			}
8589 		}
8590 	}
8591 	if (prefetch_so_done == 0) {
8592 		kern_prefetch(so, &prefetch_so_done);
8593 		prefetch_so_done = 1;
8594 	}
8595 	/*
8596 	 * Lop off SYN bit if it has already been sent.  However, if this is
8597 	 * SYN-SENT state and if segment contains data and if we don't know
8598 	 * that the foreign host supports TAO, suppress sending the segment.
8599 	 */
8600 	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
8601 	    ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
8602 		if (tp->t_state != TCPS_SYN_RECEIVED)
8603 			flags &= ~TH_SYN;
8604 		/*
8605 		 * When sending additional segments following a TFO SYN|ACK,
8606 		 * do not include the SYN bit.
8607 		 */
8608 		if (IS_FASTOPEN(tp->t_flags) &&
8609 		    (tp->t_state == TCPS_SYN_RECEIVED))
8610 			flags &= ~TH_SYN;
8611 		sb_offset--, len++;
8612 	}
8613 	/*
8614 	 * Be careful not to send data and/or FIN on SYN segments. This
8615 	 * measure is needed to prevent interoperability problems with not
8616 	 * fully conformant TCP implementations.
8617 	 */
8618 	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
8619 		len = 0;
8620 		flags &= ~TH_FIN;
8621 	}
8622 	/*
8623 	 * On TFO sockets, ensure no data is sent in the following cases:
8624 	 *
8625 	 *  - When retransmitting SYN|ACK on a passively-created socket
8626 	 *
8627 	 *  - When retransmitting SYN on an actively created socket
8628 	 *
8629 	 *  - When sending a zero-length cookie (cookie request) on an
8630 	 *    actively created socket
8631 	 *
8632 	 *  - When the socket is in the CLOSED state (RST is being sent)
8633 	 */
8634 	if (IS_FASTOPEN(tp->t_flags) &&
8635 	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
8636 	     ((tp->t_state == TCPS_SYN_SENT) &&
8637 	      (tp->t_tfo_client_cookie_len == 0)) ||
8638 	     (flags & TH_RST))) {
8639 		sack_rxmit = 0;
8640 		len = 0;
8641 	}
8642 	/* Without fast-open there should never be data sent on a SYN */
8643 	if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags)))
8644 		len = 0;
8645 	orig_len = len;
8646 	if (len <= 0) {
8647 		/*
8648 		 * If FIN has been sent but not acked, but we haven't been
8649 		 * called to retransmit, len will be < 0.  Otherwise, window
8650 		 * shrank after we sent into it.  If window shrank to 0,
8651 		 * cancel pending retransmit, pull snd_nxt back to (closed)
8652 		 * window, and set the persist timer if it isn't already
8653 		 * going.  If the window didn't close completely, just wait
8654 		 * for an ACK.
8655 		 *
8656 		 * We also do a general check here to ensure that we will
8657 		 * set the persist timer when we have data to send, but a
8658 		 * 0-byte window. This makes sure the persist timer is set
8659 		 * even if the packet hits one of the "goto send" lines
8660 		 * below.
8661 		 */
8662 		len = 0;
8663 		if ((tp->snd_wnd == 0) &&
8664 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
8665 		    (tp->snd_una == tp->snd_max) &&
8666 		    (sb_offset < (int)sbavail(sb))) {
8667 			tp->snd_nxt = tp->snd_una;
8668 			rack_enter_persist(tp, rack, cts);
8669 		}
8670 	} else if ((rsm == NULL) &&
8671 		   ((doing_tlp == 0) || (new_data_tlp == 1)) &&
8672 		   (len < rack->r_ctl.rc_pace_max_segs)) {
8673 		/*
8674 		 * We are not sending a full segment for
8675 		 * some reason. Should we not send anything (think
8676 		 * sws or persists)?
8677 		 */
8678 		if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
8679 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
8680 		    (len < (int)(sbavail(sb) - sb_offset))) {
8681 			/*
8682 			 * Here the rwnd is less than
8683 			 * the pacing size, this is not a retransmit,
8684 			 * we are established and
8685 			 * the send is not the last in the socket buffer,
8686 			 * so we send nothing and may enter persists.
8687 			 */
8688 			len = 0;
8689 			if (tp->snd_max == tp->snd_una) {
8690 				/*
8691 				 * Nothing out we can
8692 				 * go into persists.
8693 				 */
8694 				rack_enter_persist(tp, rack, cts);
8695 				tp->snd_nxt = tp->snd_una;
8696 			}
8697 		} else if ((tp->snd_cwnd >= max(rack->r_ctl.rc_pace_min_segs, (maxseg * 4))) &&
8698 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) &&
8699 			   (len < (int)(sbavail(sb) - sb_offset)) &&
8700 			   (len < rack->r_ctl.rc_pace_min_segs)) {
8701 			/*
8702 			 * Here we are not retransmitting, and
8703 			 * the cwnd is not so small that we could
8704 			 * not send at least a min size (rxt timer
8705 			 * not having gone off), we have 2 segments or
8706 			 * more already in flight, it's not the tail end
8707 			 * of the socket buffer and the cwnd is blocking
8708 			 * us from sending out a minimum pacing segment size.
8709 			 * Let's not send anything.
8710 			 */
8711 			len = 0;
8712 		} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
8713 			    min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
8714 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) &&
8715 			   (len < (int)(sbavail(sb) - sb_offset)) &&
8716 			   (TCPS_HAVEESTABLISHED(tp->t_state))) {
8717 			/*
8718 			 * Here we have a send window but we have
8719 			 * filled it up and we can't send another pacing segment.
8720 			 * We also have in flight more than 2 segments
8721 			 * and we are not completing the sb i.e. we allow
8722 			 * the last bytes of the sb to go out even if
8723 			 * it's not a full pacing segment.
8724 			 */
8725 			len = 0;
8726 		}
8727 	}
8728 	/* len will be >= 0 after this point. */
8729 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
8730 	tcp_sndbuf_autoscale(tp, so, sendwin);
8731 	/*
8732 	 * Decide if we can use TCP Segmentation Offloading (if supported by
8733 	 * hardware).
8734 	 *
8735 	 * TSO may only be used if we are in a pure bulk sending state.  The
8736 	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
8737 	 * options prevent using TSO.  With TSO the TCP header is the same
8738 	 * (except for the sequence number) for all generated packets.  This
8739 	 * makes it impossible to transmit any options which vary per
8740 	 * generated segment or packet.
8741 	 *
8742 	 * IPv4 handling has a clear separation of ip options and ip header
8743 	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
8744 	 * the right thing below to provide length of just ip options and thus
8745 	 * checking for ipoptlen is enough to decide if ip options are present.
8746 	 */
8747 
8748 #ifdef INET6
8749 	if (isipv6)
8750 		ipoptlen = ip6_optlen(tp->t_inpcb);
8751 	else
8752 #endif
8753 		if (tp->t_inpcb->inp_options)
8754 			ipoptlen = tp->t_inpcb->inp_options->m_len -
8755 			    offsetof(struct ipoption, ipopt_list);
8756 		else
8757 			ipoptlen = 0;
8758 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
8759 	/*
8760 	 * Pre-calculate here as we save another lookup into the darknesses
8761 	 * of IPsec that way and can actually decide if TSO is ok.
8762 	 */
8763 #ifdef INET6
8764 	if (isipv6 && IPSEC_ENABLED(ipv6))
8765 		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
8766 #ifdef INET
8767 	else
8768 #endif
8769 #endif				/* INET6 */
8770 #ifdef INET
8771 	if (IPSEC_ENABLED(ipv4))
8772 		ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
8773 #endif				/* INET */
8774 #endif
8775 
8776 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
8777 	ipoptlen += ipsec_optlen;
8778 #endif
8779 	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > ctf_fixed_maxseg(tp) &&
8780 	    (tp->t_port == 0) &&
8781 	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
8782 	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
8783 	    ipoptlen == 0)
8784 		tso = 1;
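	/*
	 * Note: TSO is declined above when UDP tunneling (t_port), TCP-MD5
	 * signatures, pending SACK blocks to advertise, a SACK
	 * retransmission, or IP options are in play, since those vary per
	 * generated segment.
	 */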
8785 	{
8786 		uint32_t outstanding;
8787 
8788 		outstanding = tp->snd_max - tp->snd_una;
8789 		if (tp->t_flags & TF_SENTFIN) {
8790 			/*
8791 			 * If we sent a fin, snd_max is 1 higher than
8792 			 * snd_una
8793 			 */
8794 			outstanding--;
8795 		}
8796 		if (sack_rxmit) {
8797 			if ((rsm->r_flags & RACK_HAS_FIN) == 0)
8798 				flags &= ~TH_FIN;
8799 		} else {
8800 			if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
8801 			    sbused(sb)))
8802 				flags &= ~TH_FIN;
8803 		}
8804 	}
8805 	recwin = sbspace(&so->so_rcv);
8806 
8807 	/*
8808 	 * Sender silly window avoidance.   We transmit under the following
8809 	 * conditions when len is non-zero:
8810 	 *
8811 	 * - We have a full segment (or more with TSO) - This is the last
8812 	 * buffer in a write()/send() and we are either idle or running
8813 	 * NODELAY - we've timed out (e.g. persist timer) - we have more
8814 	 * than 1/2 the maximum send window's worth of data (receiver may be
8815 	 * limiting the window size) - we need to retransmit
8816 	 */
8817 	if (len) {
8818 		if (len >= ctf_fixed_maxseg(tp)) {
8819 			pass = 1;
8820 			goto send;
8821 		}
8822 		/*
8823 		 * NOTE! on localhost connections an 'ack' from the remote
8824 		 * end may occur synchronously with the output and cause us
8825 		 * to flush a buffer queued with moretocome.  XXX
8826 		 *
8827 		 */
8828 		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
8829 		    (idle || (tp->t_flags & TF_NODELAY)) &&
8830 		    ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) &&
8831 		    (tp->t_flags & TF_NOPUSH) == 0) {
8832 			pass = 2;
8833 			goto send;
8834 		}
8835 		if (tp->t_flags & TF_FORCEDATA) {	/* typ. timeout case */
8836 			pass = 3;
8837 			goto send;
8838 		}
8839 		if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
8840 			goto send;
8841 		}
8842 		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
8843 			pass = 4;
8844 			goto send;
8845 		}
8846 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {	/* retransmit case */
8847 			pass = 5;
8848 			goto send;
8849 		}
8850 		if (sack_rxmit) {
8851 			pass = 6;
8852 			goto send;
8853 		}
8854 	}
8855 	/*
8856 	 * Sending of standalone window updates.
8857 	 *
8858 	 * Window updates are important when we close our window due to a
8859 	 * full socket buffer and are opening it again after the application
8860 	 * reads data from it.  Once the window has opened again and the
8861 	 * remote end starts to send again the ACK clock takes over and
8862 	 * provides the most current window information.
8863 	 *
8864 	 * We must avoid the silly window syndrome whereby every read from
8865 	 * the receive buffer, no matter how small, causes a window update
8866 	 * to be sent.  We also should avoid sending a flurry of window
8867 	 * updates when the socket buffer had queued a lot of data and the
8868 	 * application is doing small reads.
8869 	 *
8870 	 * Prevent a flurry of pointless window updates by only sending an
8871 	 * update when we can increase the advertised window by more than
8872 	 * 1/4th of the socket buffer capacity.  When the buffer is getting
8873 	 * full or is very small be more aggressive and send an update
8874 	 * whenever we can increase by two mss sized segments. In all other
8875 	 * situations the ACK's to new incoming data will carry further
8876 	 * window increases.
8877 	 *
8878 	 * Don't send an independent window update if a delayed ACK is
8879 	 * pending (it will get piggy-backed on it) or the remote side
8880 	 * already has done a half-close and won't send more data.  Skip
8881 	 * this if the connection is in T/TCP half-open state.
8882 	 */
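	/*
	 * Illustration (hypothetical numbers): with a 64 KB receive
	 * buffer, a standalone update goes out once the window can open
	 * by more than 16 KB, or by 2 MSS when the buffer is small or
	 * nearly full, per the checks below.
	 */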
8883 	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
8884 	    !(tp->t_flags & TF_DELACK) &&
8885 	    !TCPS_HAVERCVDFIN(tp->t_state)) {
8886 		/*
8887 		 * "adv" is the amount we could increase the window, taking
8888 		 * into account that we are limited by TCP_MAXWIN <<
8889 		 * tp->rcv_scale.
8890 		 */
8891 		int32_t adv;
8892 		int oldwin;
8893 
8894 		adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
8895 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
8896 			oldwin = (tp->rcv_adv - tp->rcv_nxt);
8897 			adv -= oldwin;
8898 		} else
8899 			oldwin = 0;
8900 
8901 		/*
8902 		 * If the new window size ends up being the same as the old
8903 		 * size when it is scaled, then don't force a window update.
8904 		 */
8905 		if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
8906 			goto dontupdate;
8907 
8908 		if (adv >= (int32_t)(2 * ctf_fixed_maxseg(tp)) &&
8909 		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
8910 		    recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
8911 		     so->so_rcv.sb_hiwat <= 8 * ctf_fixed_maxseg(tp))) {
8912 			pass = 7;
8913 			goto send;
8914 		}
8915 		if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat)
8916 			goto send;
8917 	}
8918 dontupdate:
8919 
8920 	/*
8921 	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
8922 	 * is also a catch-all for the retransmit timer timeout case.
8923 	 */
8924 	if (tp->t_flags & TF_ACKNOW) {
8925 		pass = 8;
8926 		goto send;
8927 	}
8928 	if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
8929 		pass = 9;
8930 		goto send;
8931 	}
8932 	if (SEQ_GT(tp->snd_up, tp->snd_una)) {
8933 		pass = 10;
8934 		goto send;
8935 	}
8936 	/*
8937 	 * If our state indicates that FIN should be sent and we have not
8938 	 * yet done so, then we need to send.
8939 	 */
8940 	if ((flags & TH_FIN) &&
8941 	    (tp->snd_nxt == tp->snd_una)) {
8942 		pass = 11;
8943 		goto send;
8944 	}
8945 	/*
8946 	 * No reason to send a segment, just return.
8947 	 */
8948 just_return:
8949 	SOCKBUF_UNLOCK(sb);
8950 just_return_nolock:
8951 	if (tot_len_this_send == 0)
8952 		counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
8953 	if (slot) {
8954 		/* set the rack tcb into the slot N */
8955 		counter_u64_add(rack_paced_segments, 1);
8956 	} else if (tot_len_this_send) {
8957 		counter_u64_add(rack_unpaced_segments, 1);
8958 	}
8959 	/* Check if we need to go into persists or not */
8960 	if ((rack->rc_in_persist == 0) &&
8961 	    (tp->snd_max == tp->snd_una) &&
8962 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
8963 	    sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
8964 	    (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd) &&
8965 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs))) {
8966 		/* Yes, let's make sure to move to persist before timer-start */
8967 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
8968 	}
8969 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
8970 	rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling);
8971 	tp->t_flags &= ~TF_FORCEDATA;
8972 	return (0);
8973 
8974 send:
8975 	if ((flags & TH_FIN) &&
8976 	    sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
8977 		/*
8978 		 * We do not transmit a FIN
8979 		 * with data outstanding. We
8980 		 * need to make it so all data
8981 		 * is acked first.
8982 		 */
8983 		flags &= ~TH_FIN;
8984 	}
8985 	if (doing_tlp == 0) {
8986 		/*
8987 		 * Data not a TLP, and it's not the rxt firing. If it is the
8988 		 * rxt firing, we want to leave the tlp_in_progress flag on
8989 		 * so we don't send another TLP. It has to be a rack timer
8990 		 * or normal send (response to acked data) to clear the tlp
8991 		 * in progress flag.
8992 		 */
8993 		rack->rc_tlp_in_progress = 0;
8994 	}
8995 	SOCKBUF_LOCK_ASSERT(sb);
8996 	if (len > 0) {
8997 		if (len >= ctf_fixed_maxseg(tp))
8998 			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
8999 		else
9000 			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
9001 	}
9002 	/*
9003 	 * Before ESTABLISHED, force sending of initial options unless TCP
9004 	 * set not to do any options. NOTE: we assume that the IP/TCP header
9005 	 * plus TCP options always fit in a single mbuf, leaving room for a
9006 	 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
9007 	 * + optlen <= MCLBYTES
9008 	 */
9009 	optlen = 0;
9010 #ifdef INET6
9011 	if (isipv6)
9012 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
9013 	else
9014 #endif
9015 		hdrlen = sizeof(struct tcpiphdr);
9016 
9017 	/*
9018 	 * Compute options for segment. We only have to care about SYN and
9019 	 * established connection segments.  Options for SYN-ACK segments
9020 	 * are handled in TCP syncache.
9021 	 */
9022 	to.to_flags = 0;
9023 	if ((tp->t_flags & TF_NOOPT) == 0) {
9024 		/* Maximum segment size. */
9025 		if (flags & TH_SYN) {
9026 			tp->snd_nxt = tp->iss;
9027 			to.to_mss = tcp_mssopt(&inp->inp_inc);
9028 #ifdef NETFLIX_TCPOUDP
9029 			if (tp->t_port)
9030 				to.to_mss -= V_tcp_udp_tunneling_overhead;
9031 #endif
9032 			to.to_flags |= TOF_MSS;
9033 
9034 			/*
9035 			 * On SYN or SYN|ACK transmits on TFO connections,
9036 			 * only include the TFO option if it is not a
9037 			 * retransmit, as the presence of the TFO option may
9038 			 * have caused the original SYN or SYN|ACK to have
9039 			 * been dropped by a middlebox.
9040 			 */
9041 			if (IS_FASTOPEN(tp->t_flags) &&
9042 			    (tp->t_rxtshift == 0)) {
9043 				if (tp->t_state == TCPS_SYN_RECEIVED) {
9044 					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
9045 					to.to_tfo_cookie =
9046 					    (u_int8_t *)&tp->t_tfo_cookie.server;
9047 					to.to_flags |= TOF_FASTOPEN;
9048 					wanted_cookie = 1;
9049 				} else if (tp->t_state == TCPS_SYN_SENT) {
9050 					to.to_tfo_len =
9051 					    tp->t_tfo_client_cookie_len;
9052 					to.to_tfo_cookie =
9053 					    tp->t_tfo_cookie.client;
9054 					to.to_flags |= TOF_FASTOPEN;
9055 					wanted_cookie = 1;
9056 					/*
9057 					 * If we wind up having more data to
9058 					 * send with the SYN than can fit in
9059 					 * one segment, don't send any more
9060 					 * until the SYN|ACK comes back from
9061 					 * the other end.
9062 					 */
9063 					sendalot = 0;
9064 				}
9065 			}
9066 		}
9067 		/* Window scaling. */
9068 		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
9069 			to.to_wscale = tp->request_r_scale;
9070 			to.to_flags |= TOF_SCALE;
9071 		}
9072 		/* Timestamps. */
9073 		if ((tp->t_flags & TF_RCVD_TSTMP) ||
9074 		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
9075 			to.to_tsval = cts + tp->ts_offset;
9076 			to.to_tsecr = tp->ts_recent;
9077 			to.to_flags |= TOF_TS;
9078 		}
9079 		/* Set receive buffer autosizing timestamp. */
9080 		if (tp->rfbuf_ts == 0 &&
9081 		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
9082 			tp->rfbuf_ts = tcp_ts_getticks();
9083 		/* Selective ACK's. */
9084 		if (flags & TH_SYN)
9085 			to.to_flags |= TOF_SACKPERM;
9086 		else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
9087 		    tp->rcv_numsacks > 0) {
9088 			to.to_flags |= TOF_SACK;
9089 			to.to_nsacks = tp->rcv_numsacks;
9090 			to.to_sacks = (u_char *)tp->sackblks;
9091 		}
9092 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
9093 		/* TCP-MD5 (RFC2385). */
9094 		if (tp->t_flags & TF_SIGNATURE)
9095 			to.to_flags |= TOF_SIGNATURE;
9096 #endif				/* TCP_SIGNATURE */
9097 
9098 		/* Processing the options. */
9099 		hdrlen += optlen = tcp_addoptions(&to, opt);
9100 		/*
9101 		 * If we wanted a TFO option to be added, but it was unable
9102 		 * to fit, ensure no data is sent.
9103 		 */
9104 		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
9105 		    !(to.to_flags & TOF_FASTOPEN))
9106 			len = 0;
9107 	}
9108 #ifdef NETFLIX_TCPOUDP
9109 	if (tp->t_port) {
9110 		if (V_tcp_udp_tunneling_port == 0) {
9111 			/* The port was removed?? */
9112 			SOCKBUF_UNLOCK(&so->so_snd);
9113 			return (EHOSTUNREACH);
9114 		}
9115 		hdrlen += sizeof(struct udphdr);
9116 	}
9117 #endif
9118 #ifdef INET6
9119 	if (isipv6)
9120 		ipoptlen = ip6_optlen(tp->t_inpcb);
9121 	else
9122 #endif
9123 	if (tp->t_inpcb->inp_options)
9124 		ipoptlen = tp->t_inpcb->inp_options->m_len -
9125 		    offsetof(struct ipoption, ipopt_list);
9126 	else
9127 		ipoptlen = 0;
9128 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
9129 	ipoptlen += ipsec_optlen;
9130 #endif
9131 
9132 #ifdef KERN_TLS
9133 	/* force TSO so TLS offload can get the MSS */
9134 	if (sb->sb_flags & SB_TLS_IFNET) {
9135 		force_tso = 1;
9136 	}
9137 #endif
9138 	/*
9139 	 * Adjust data length if insertion of options will bump the packet
9140 	 * length beyond the t_maxseg length. Clear the FIN bit because we
9141 	 * cut off the tail of the segment.
9142 	 */
9143 	if (len + optlen + ipoptlen > tp->t_maxseg) {
9144 		if (tso) {
9145 			uint32_t if_hw_tsomax;
9146 			uint32_t moff;
9147 			int32_t max_len;
9148 
9149 			/* extract TSO information */
9150 			if_hw_tsomax = tp->t_tsomax;
9151 			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
9152 			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
9153 			KASSERT(ipoptlen == 0,
9154 			    ("%s: TSO can't do IP options", __func__));
9155 
9156 			/*
9157 			 * Check if we should limit by maximum payload
9158 			 * length:
9159 			 */
9160 			if (if_hw_tsomax != 0) {
9161 				/* compute maximum TSO length */
9162 				max_len = (if_hw_tsomax - hdrlen -
9163 				    max_linkhdr);
9164 				if (max_len <= 0) {
9165 					len = 0;
9166 				} else if (len > max_len) {
9167 					sendalot = 1;
9168 					len = max_len;
9169 				}
9170 			}
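			/*
			 * Illustration (hypothetical numbers): with
			 * if_hw_tsomax of 65536 and hdrlen + max_linkhdr
			 * of 94, max_len is 65442, so a 100000 byte send
			 * is clipped to 65442 and sendalot is set.
			 */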
9171 			/*
9172 			 * Prevent the last segment from being fractional
9173 			 * unless the send sockbuf can be emptied:
9174 			 */
9175 			max_len = (tp->t_maxseg - optlen);
9176 			if (((sb_offset + len) < sbavail(sb)) &&
9177 			    (hw_tls == 0)) {
9178 				moff = len % (u_int)max_len;
9179 				if (moff != 0) {
9180 					len -= moff;
9181 					sendalot = 1;
9182 				}
9183 			}
9184 			/*
9185 			 * In case there are too many small fragments don't
9186 			 * use TSO:
9187 			 */
9188 			if (len <= maxseg) {
9189 				len = max_len;
9190 				sendalot = 1;
9191 				tso = 0;
9192 			}
9193 			/*
9194 			 * Send the FIN in a separate segment after the bulk
9195 			 * sending is done. We don't trust the TSO
9196 			 * implementations to clear the FIN flag on all but
9197 			 * the last segment.
9198 			 */
9199 			if (tp->t_flags & TF_NEEDFIN)
9200 				sendalot = 1;
9201 
9202 		} else {
9203 			if (optlen + ipoptlen >= tp->t_maxseg) {
9204 				/*
9205 				 * Since we don't have enough space to put
9206 				 * the IP header chain and the TCP header in
9207 				 * one packet as required by RFC 7112, don't
9208 				 * send it. Also ensure that at least one
9209 				 * byte of the payload can be put into the
9210 				 * TCP segment.
9211 				 */
9212 				SOCKBUF_UNLOCK(&so->so_snd);
9213 				error = EMSGSIZE;
9214 				sack_rxmit = 0;
9215 				goto out;
9216 			}
9217 			len = tp->t_maxseg - optlen - ipoptlen;
9218 			sendalot = 1;
9219 		}
9220 	} else
9221 		tso = 0;
9222 	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
9223 	    ("%s: len > IP_MAXPACKET", __func__));
9224 #ifdef DIAGNOSTIC
9225 #ifdef INET6
9226 	if (max_linkhdr + hdrlen > MCLBYTES)
9227 #else
9228 	if (max_linkhdr + hdrlen > MHLEN)
9229 #endif
9230 		panic("tcphdr too big");
9231 #endif
9232 
9233 	/*
9234 	 * This KASSERT is here to catch edge cases at a well defined place.
9235 	 * Before, those had triggered (random) panic conditions further
9236 	 * down.
9237 	 */
9238 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
9239 	if ((len == 0) &&
9240 	    (flags & TH_FIN) &&
9241 	    (sbused(sb))) {
9242 		/*
9243 		 * We have outstanding data; don't send a FIN by itself!
9244 		 */
9245 		goto just_return;
9246 	}
9247 	/*
9248 	 * Grab a header mbuf, attaching a copy of data to be transmitted,
9249 	 * and initialize the header from the template for sends on this
9250 	 * connection.
9251 	 */
9252 	if (len) {
9253 		uint32_t max_val;
9254 		uint32_t moff;
9255 
9256 		if (rack->rc_pace_max_segs)
9257 			max_val = rack->rc_pace_max_segs * ctf_fixed_maxseg(tp);
9258 		else
9259 			max_val = len;
9260 		if (rack->r_ctl.rc_pace_max_segs < max_val)
9261 			max_val = rack->r_ctl.rc_pace_max_segs;
9262 		/*
9263 		 * We allow a limit on sending with hptsi.
9264 		 */
9265 		if (len > max_val) {
9266 			len = max_val;
9267 		}
9268 #ifdef INET6
9269 		if (MHLEN < hdrlen + max_linkhdr)
9270 			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
9271 		else
9272 #endif
9273 			m = m_gethdr(M_NOWAIT, MT_DATA);
9274 
9275 		if (m == NULL) {
9276 			SOCKBUF_UNLOCK(sb);
9277 			error = ENOBUFS;
9278 			sack_rxmit = 0;
9279 			goto out;
9280 		}
9281 		m->m_data += max_linkhdr;
9282 		m->m_len = hdrlen;
9283 
9284 		/*
9285 		 * Start the m_copy functions from the closest mbuf to the
9286 		 * sb_offset in the socket buffer chain.
9287 		 */
9288 		mb = sbsndptr_noadv(sb, sb_offset, &moff);
9289 		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
9290 			m_copydata(mb, moff, (int)len,
9291 			    mtod(m, caddr_t)+hdrlen);
9292 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
9293 				sbsndptr_adv(sb, mb, len);
9294 			m->m_len += len;
9295 		} else {
9296 			struct sockbuf *msb;
9297 
9298 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
9299 				msb = NULL;
9300 			else
9301 				msb = sb;
9302 			m->m_next = tcp_m_copym(
9303 #ifdef NETFLIX_COPY_ARGS
9304 				tp,
9305 #endif
9306 				mb, moff, &len,
9307 			    if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
9308 			    ((rsm == NULL) ? hw_tls : 0)
9309 #ifdef NETFLIX_COPY_ARGS
9310 				, &filled_all
9311 #endif
9312 				);
9313 			if (len <= (tp->t_maxseg - optlen)) {
9314 				/*
9315 				 * Must have run out of mbufs for the copy;
9316 				 * shorten it to no longer need tso. Let's
9317 				 * not put on sendalot since we are low on
9318 				 * mbufs.
9319 				 */
9320 				tso = 0;
9321 			}
9322 			if (m->m_next == NULL) {
9323 				SOCKBUF_UNLOCK(sb);
9324 				(void)m_free(m);
9325 				error = ENOBUFS;
9326 				sack_rxmit = 0;
9327 				goto out;
9328 			}
9329 		}
9330 		if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
9331 			TCPSTAT_INC(tcps_sndprobe);
9332 #ifdef NETFLIX_STATS
9333 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
9334 				stats_voi_update_abs_u32(tp->t_stats,
9335 				    VOI_TCP_RETXPB, len);
9336 			else
9337 				stats_voi_update_abs_u64(tp->t_stats,
9338 				    VOI_TCP_TXPB, len);
9339 #endif
9340 		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
9341 			if (rsm && (rsm->r_flags & RACK_TLP)) {
9342 				/*
9343 				 * TLP should not count in retran count, but
9344 				 * in its own bin
9345 				 */
9346 				counter_u64_add(rack_tlp_retran, 1);
9347 				counter_u64_add(rack_tlp_retran_bytes, len);
9348 			} else {
9349 				tp->t_sndrexmitpack++;
9350 				TCPSTAT_INC(tcps_sndrexmitpack);
9351 				TCPSTAT_ADD(tcps_sndrexmitbyte, len);
9352 			}
9353 #ifdef NETFLIX_STATS
9354 			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
9355 			    len);
9356 #endif
9357 		} else {
9358 			TCPSTAT_INC(tcps_sndpack);
9359 			TCPSTAT_ADD(tcps_sndbyte, len);
9360 #ifdef NETFLIX_STATS
9361 			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
9362 			    len);
9363 #endif
9364 		}
9365 		/*
9366 		 * If we're sending everything we've got, set PUSH. (This
9367 		 * will keep happy those implementations which only give
9368 		 * data to the user when a buffer fills or a PUSH comes in.)
9369 		 */
9370 		if (sb_offset + len == sbused(sb) &&
9371 		    sbused(sb) &&
9372 		    !(flags & TH_SYN))
9373 			flags |= TH_PUSH;
9374 
9375 		/*
9376 		 * Are we doing pacing, if so we must calculate the slot. We
9377 		 * only do hptsi in ESTABLISHED and with no RESET being
9378 		 * sent where we have data to send.
9379 		 */
9380 		if (((tp->t_state == TCPS_ESTABLISHED) ||
9381 		    (tp->t_state == TCPS_CLOSE_WAIT) ||
9382 		    ((tp->t_state == TCPS_FIN_WAIT_1) &&
9383 		    ((tp->t_flags & TF_SENTFIN) == 0) &&
9384 		    ((flags & TH_FIN) == 0))) &&
9385 		    ((flags & TH_RST) == 0)) {
9386 			/* Get our pacing rate */
9387 			tot_len_this_send += len;
9388 			slot = rack_get_pacing_delay(rack, tp, tot_len_this_send);
9389 		}
9390 		SOCKBUF_UNLOCK(sb);
9391 	} else {
9392 		SOCKBUF_UNLOCK(sb);
9393 		if (tp->t_flags & TF_ACKNOW)
9394 			TCPSTAT_INC(tcps_sndacks);
9395 		else if (flags & (TH_SYN | TH_FIN | TH_RST))
9396 			TCPSTAT_INC(tcps_sndctrl);
9397 		else if (SEQ_GT(tp->snd_up, tp->snd_una))
9398 			TCPSTAT_INC(tcps_sndurg);
9399 		else
9400 			TCPSTAT_INC(tcps_sndwinup);
9401 
9402 		m = m_gethdr(M_NOWAIT, MT_DATA);
9403 		if (m == NULL) {
9404 			error = ENOBUFS;
9405 			sack_rxmit = 0;
9406 			goto out;
9407 		}
9408 #ifdef INET6
9409 		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
9410 		    MHLEN >= hdrlen) {
9411 			M_ALIGN(m, hdrlen);
9412 		} else
9413 #endif
9414 			m->m_data += max_linkhdr;
9415 		m->m_len = hdrlen;
9416 	}
9417 	SOCKBUF_UNLOCK_ASSERT(sb);
9418 	m->m_pkthdr.rcvif = (struct ifnet *)0;
9419 #ifdef MAC
9420 	mac_inpcb_create_mbuf(inp, m);
9421 #endif
9422 #ifdef INET6
9423 	if (isipv6) {
9424 		ip6 = mtod(m, struct ip6_hdr *);
9425 #ifdef NETFLIX_TCPOUDP
9426 		if (tp->t_port) {
9427 			udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
9428 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
9429 			udp->uh_dport = tp->t_port;
9430 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
9431 			udp->uh_ulen = htons(ulen);
9432 			th = (struct tcphdr *)(udp + 1);
9433 		} else
9434 #endif
9435 			th = (struct tcphdr *)(ip6 + 1);
9436 		tcpip_fillheaders(inp,
9437 #ifdef NETFLIX_TCPOUDP
9438 				  tp->t_port,
9439 #endif
9440 				  ip6, th);
9441 	} else
9442 #endif				/* INET6 */
9443 	{
9444 		ip = mtod(m, struct ip *);
9445 #ifdef TCPDEBUG
9446 		ipov = (struct ipovly *)ip;
9447 #endif
9448 #ifdef NETFLIX_TCPOUDP
9449 		if (tp->t_port) {
9450 			udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
9451 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
9452 			udp->uh_dport = tp->t_port;
9453 			ulen = hdrlen + len - sizeof(struct ip);
9454 			udp->uh_ulen = htons(ulen);
9455 			th = (struct tcphdr *)(udp + 1);
9456 		} else
9457 #endif
9458 			th = (struct tcphdr *)(ip + 1);
9459 		tcpip_fillheaders(inp,
9460 #ifdef NETFLIX_TCPOUDP
9461 				  tp->t_port,
9462 #endif
9463 				  ip, th);
9464 	}
9465 	/*
9466 	 * Fill in fields, remembering maximum advertised window for use in
9467 	 * delaying messages about window sizes. If resending a FIN, be sure
9468 	 * not to use a new sequence number.
9469 	 */
9470 	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
9471 	    tp->snd_nxt == tp->snd_max)
9472 		tp->snd_nxt--;
9473 	/*
9474 	 * If we are starting a connection, send ECN setup SYN packet. If we
9475 	 * are on a retransmit, we may resend those bits a number of times
9476 	 * as per RFC 3168.
9477 	 */
9478 	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
9479 		if (tp->t_rxtshift >= 1) {
9480 			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
9481 				flags |= TH_ECE | TH_CWR;
9482 		} else
9483 			flags |= TH_ECE | TH_CWR;
9484 	}
9485 	if (tp->t_state == TCPS_ESTABLISHED &&
9486 	    (tp->t_flags & TF_ECN_PERMIT)) {
9487 		/*
9488 		 * If the peer has ECN, mark data packets with ECN capable
9489 		 * transmission (ECT). Ignore pure ack packets,
9490 		 * retransmissions and window probes.
9491 		 */
9492 		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
9493 		    !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
9494 #ifdef INET6
9495 			if (isipv6)
9496 				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
9497 			else
9498 #endif
9499 				ip->ip_tos |= IPTOS_ECN_ECT0;
9500 			TCPSTAT_INC(tcps_ecn_ect0);
9501 		}
9502 		/*
9503 		 * Reply with proper ECN notifications.
9504 		 */
9505 		if (tp->t_flags & TF_ECN_SND_CWR) {
9506 			flags |= TH_CWR;
9507 			tp->t_flags &= ~TF_ECN_SND_CWR;
9508 		}
9509 		if (tp->t_flags & TF_ECN_SND_ECE)
9510 			flags |= TH_ECE;
9511 	}
9512 	/*
9513 	 * If we are doing retransmissions, then snd_nxt will not reflect
9514 	 * the first unsent octet.  For ACK only packets, we do not want the
9515 	 * sequence number of the retransmitted packet, we want the sequence
9516 	 * number of the next unsent octet.  So, if there is no data (and no
9517 	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
9518 	 * ti_seq.  But if we are in persist state, snd_max might reflect
9519 	 * one byte beyond the right edge of the window, so use snd_nxt in
9520 	 * that case, since we know we aren't doing a retransmission.
9521 	 * (retransmit and persist are mutually exclusive...)
9522 	 */
9523 	if (sack_rxmit == 0) {
9524 		if (len || (flags & (TH_SYN | TH_FIN)) ||
9525 		    rack->rc_in_persist) {
9526 			th->th_seq = htonl(tp->snd_nxt);
9527 			rack_seq = tp->snd_nxt;
9528 		} else if (flags & TH_RST) {
9529 			/*
9530 			 * For a Reset send the last cum ack in sequence
9531 			 * (this, like any other choice, may still generate a
9532 			 * challenge ack, if an ack-update packet is in
9533 			 * flight).
9534 			 */
9535 			th->th_seq = htonl(tp->snd_una);
9536 			rack_seq = tp->snd_una;
9537 		} else {
9538 			th->th_seq = htonl(tp->snd_max);
9539 			rack_seq = tp->snd_max;
9540 		}
9541 	} else {
9542 		th->th_seq = htonl(rsm->r_start);
9543 		rack_seq = rsm->r_start;
9544 	}
9545 	th->th_ack = htonl(tp->rcv_nxt);
9546 	if (optlen) {
9547 		bcopy(opt, th + 1, optlen);
9548 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
9549 	}
9550 	th->th_flags = flags;
9551 	/*
9552 	 * Calculate receive window.  Don't shrink window, but avoid silly
9553 	 * window syndrome.
9554 	 * If a RST segment is sent, advertise a window of zero.
9555 	 */
9556 	if (flags & TH_RST) {
9557 		recwin = 0;
9558 	} else {
9559 		if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
9560 		    recwin < (long)ctf_fixed_maxseg(tp))
9561 			recwin = 0;
9562 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
9563 		    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
9564 			recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
9565 		if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
9566 			recwin = (long)TCP_MAXWIN << tp->rcv_scale;
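	/*
	 * recwin is now non-shrinking and bounded above by
	 * TCP_MAXWIN << rcv_scale (65535 scaled by the window-scale
	 * factor), matching what we may legally advertise.
	 */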
9567 	}
9568 
9569 	/*
9570 	 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
9571 	 * <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK> case is
9572 	 * handled in syncache.
9573 	 */
9574 	if (flags & TH_SYN)
9575 		th->th_win = htons((u_short)
9576 		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
9577 	else
9578 		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
9579 	/*
9580 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
9581 	 * window.  This may cause the remote transmitter to stall.  This
9582 	 * flag tells soreceive() to disable delayed acknowledgements when
9583 	 * draining the buffer.  This can occur if the receiver is
9584 	 * attempting to read more data than can be buffered prior to
9585 	 * transmitting on the connection.
9586 	 */
9587 	if (th->th_win == 0) {
9588 		tp->t_sndzerowin++;
9589 		tp->t_flags |= TF_RXWIN0SENT;
9590 	} else
9591 		tp->t_flags &= ~TF_RXWIN0SENT;
9592 	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
9593 		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
9594 		th->th_flags |= TH_URG;
9595 	} else
9596 		/*
9597 		 * If no urgent pointer to send, then we pull the urgent
9598 		 * pointer to the left edge of the send window so that it
9599 		 * doesn't drift into the send window on sequence number
9600 		 * wraparound.
9601 		 */
9602 		tp->snd_up = tp->snd_una;	/* drag it along */
9603 
9604 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
9605 	if (to.to_flags & TOF_SIGNATURE) {
9606 		/*
9607 		 * Calculate MD5 signature and put it into the place
9608 		 * determined before.
9609 		 * NOTE: since TCP options buffer doesn't point into
9610 		 * mbuf's data, calculate offset and use it.
9611 		 */
9612 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
9613 		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
9614 			/*
9615 			 * Do not send segment if the calculation of MD5
9616 			 * digest has failed.
9617 			 */
9618 			goto out;
9619 		}
9620 	}
9621 #endif
9622 
9623 	/*
9624 	 * Put TCP length in extended header, and then checksum extended
9625 	 * header and data.
9626 	 */
9627 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
9628 #ifdef INET6
9629 	if (isipv6) {
9630 		/*
9631 		 * ip6_plen need not be filled in now; it will be filled
9632 		 * in by ip6_output().
9633 		 */
9634 		if (tp->t_port) {
9635 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
9636 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
9637 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
9638 			th->th_sum = htons(0);
9639 			UDPSTAT_INC(udps_opackets);
9640 		} else {
9641 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
9642 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
9643 			th->th_sum = in6_cksum_pseudo(ip6,
9644 			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
9645 			    0);
9646 		}
9647 	}
9648 #endif
9649 #if defined(INET6) && defined(INET)
9650 	else
9651 #endif
9652 #ifdef INET
9653 	{
9654 		if (tp->t_port) {
9655 			m->m_pkthdr.csum_flags = CSUM_UDP;
9656 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
9657 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
9658 			   ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
9659 			th->th_sum = htons(0);
9660 			UDPSTAT_INC(udps_opackets);
9661 		} else {
9662 			m->m_pkthdr.csum_flags = CSUM_TCP;
9663 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
9664 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
9665 			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
9666 			    IPPROTO_TCP + len + optlen));
9667 		}
9668 		/* IP version must be set here for ipv4/ipv6 checking later */
9669 		KASSERT(ip->ip_v == IPVERSION,
9670 		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
9671 	}
9672 #endif
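	/*
	 * In both branches above only the pseudo-header checksum is
	 * computed here; csum_flags and csum_data tell the NIC (or a
	 * software fallback later in the output path) where to store
	 * the final one's-complement sum over the payload.  The
	 * csum_data offset is relative to the start of the TCP or UDP
	 * header, as the offsetof() expressions show.
	 */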
9673 	/*
9674 	 * Enable TSO and specify the size of the segments. The TCP pseudo
9675 	 * header checksum is always provided. XXX: Fixme: This is currently
9676 	 * not the case for IPv6.
9677 	 */
9678 	if (tso || force_tso) {
9679 		KASSERT(force_tso || len > tp->t_maxseg - optlen,
9680 		    ("%s: len <= tso_segsz", __func__));
9681 		m->m_pkthdr.csum_flags |= CSUM_TSO;
9682 		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
9683 	}
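	/*
	 * Illustration: with t_maxseg = 1460 and 12 bytes of timestamp
	 * options, tso_segsz = 1448, so a 14480-byte payload handed down
	 * here is cut into ten wire segments, each carrying a copy of
	 * the headers and options built above.
	 */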
9684 	KASSERT(len + hdrlen == m_length(m, NULL),
9685 	    ("%s: mbuf chain different than expected: %d + %u != %u",
9686 	    __func__, len, hdrlen, m_length(m, NULL)));
9687 
9688 #ifdef TCP_HHOOK
9689 	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
9690 	hhook_run_tcp_est_out(tp, th, &to, len, tso);
9691 #endif
9692 #ifdef TCPDEBUG
9693 	/*
9694 	 * Trace.
9695 	 */
9696 	if (so->so_options & SO_DEBUG) {
9697 		u_short save = 0;
9698 
9699 #ifdef INET6
9700 		if (!isipv6)
9701 #endif
9702 		{
9703 			save = ipov->ih_len;
9704 			ipov->ih_len = htons(m->m_pkthdr.len	/* - hdrlen +
9705 			      * (th->th_off << 2) */ );
9706 		}
9707 		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
9708 #ifdef INET6
9709 		if (!isipv6)
9710 #endif
9711 			ipov->ih_len = save;
9712 	}
9713 #endif				/* TCPDEBUG */
9714 
9715 	/* We're getting ready to send; log now. */
9716 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
9717 		union tcp_log_stackspecific log;
9718 		struct timeval tv;
9719 
9720 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
9721 		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
9722 		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
9723 		log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
9724 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
9725 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
9726 		log.u_bbr.flex4 = orig_len;
9727 		if (filled_all)
9728 			log.u_bbr.flex5 = 0x80000000;
9729 		else
9730 			log.u_bbr.flex5 = 0;
9731 		if (rsm || sack_rxmit) {
9732 			log.u_bbr.flex8 = 1;
9733 		} else {
9734 			log.u_bbr.flex8 = 0;
9735 		}
9736 		log.u_bbr.pkts_out = tp->t_maxseg;
9737 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
9738 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
9739 		lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
9740 		    len, &log, false, NULL, NULL, 0, &tv);
9741 	} else
9742 		lgb = NULL;
9743 
9744 	/*
9745 	 * Fill in IP length and desired time to live and send to IP level.
9746 	 * There should be a better way to handle ttl and tos; we could keep
9747 	 * them in the template, but need a way to checksum without them.
9748 	 */
9749 	/*
9750 	 * m->m_pkthdr.len should have been set before the checksum
9751 	 * calculation, because in6_cksum() needs it.
9752 	 */
9753 #ifdef INET6
9754 	if (isipv6) {
9755 		/*
9756 		 * We set the hop limit separately for every segment, since
9757 		 * the user might want to change the value via setsockopt.
9758 		 * Also, the desired default hop limit might be changed via
9759 		 * Neighbor Discovery.
9760 		 */
9761 		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
9762 
9763 		/*
9764 		 * Set the packet size here for the benefit of DTrace
9765 		 * probes. ip6_output() will set it properly; it's supposed
9766 		 * to include the option header lengths as well.
9767 		 */
9768 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
9769 
9770 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
9771 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
9772 		else
9773 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
9774 
9775 		if (tp->t_state == TCPS_SYN_SENT)
9776 			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
9777 
9778 		TCP_PROBE5(send, NULL, tp, ip6, tp, th);
9779 		/* TODO: IPv6 IP6TOS_ECT bit on */
9780 		error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
9781 		    &inp->inp_route6,
9782 		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
9783 		    NULL, NULL, inp);
9784 
9785 		if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL)
9786 			mtu = inp->inp_route6.ro_rt->rt_mtu;
9787 	}
9788 #endif				/* INET6 */
9789 #if defined(INET) && defined(INET6)
9790 	else
9791 #endif
9792 #ifdef INET
9793 	{
9794 		ip->ip_len = htons(m->m_pkthdr.len);
9795 #ifdef INET6
9796 		if (inp->inp_vflag & INP_IPV6PROTO)
9797 			ip->ip_ttl = in6_selecthlim(inp, NULL);
9798 #endif				/* INET6 */
9799 		/*
9800 		 * If we do path MTU discovery, then we set DF on every
9801 		 * packet. This might not be the best thing to do according
9802 		 * to RFC3390 Section 2. However, the tcp hostcache mitigates
9803 		 * the problem so it affects only the first tcp connection
9804 		 * with a host.
9805 		 *
9806 		 * NB: Don't set DF on small MTU/MSS to have a safe
9807 		 * fallback.
9808 		 */
9809 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
9810 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
9811 			if (tp->t_port == 0 || len < V_tcp_minmss) {
9812 				ip->ip_off |= htons(IP_DF);
9813 			}
9814 		} else {
9815 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
9816 		}
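		/*
		 * The net effect: a UDP-tunneled segment (tp->t_port
		 * set) of at least V_tcp_minmss bytes is sent without
		 * DF, so the encapsulating path may still fragment it,
		 * while everything else doing PLPMTUD goes out with DF
		 * set.
		 */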
9817 
9818 		if (tp->t_state == TCPS_SYN_SENT)
9819 			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
9820 
9821 		TCP_PROBE5(send, NULL, tp, ip, tp, th);
9822 
9823 		error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route,
9824 		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
9825 		    inp);
9826 		if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL)
9827 			mtu = inp->inp_route.ro_rt->rt_mtu;
9828 	}
9829 #endif				/* INET */
9830 
9831 out:
9832 	if (lgb) {
9833 		lgb->tlb_errno = error;
9834 		lgb = NULL;
9835 	}
9836 	/*
9837 	 * In transmit state, time the transmission and arrange for the
9838 	 * retransmit.  In persist state, just set snd_max.
9839 	 */
9840 	if (error == 0) {
9841 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
9842 		    (tp->t_flags & TF_SACK_PERMIT) &&
9843 		    tp->rcv_numsacks > 0)
9844 			tcp_clean_dsack_blocks(tp);
9845 		if (len == 0)
9846 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
9847 		else if (len == 1) {
9848 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
9849 		} else if (len > 1) {
9850 			int idx;
9851 
9852 			idx = (len / ctf_fixed_maxseg(tp)) + 3;
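			/*
			 * Worked example (assuming a 1460-byte MSS): a
			 * 4380-byte send yields idx = 4380 / 1460 + 3 = 6,
			 * i.e. sends are bucketed by full-MSS count, offset
			 * past the ack/persist slots handled above.
			 */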
9853 			if (idx >= TCP_MSS_ACCT_ATIMER)
9854 				counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
9855 			else
9856 				counter_u64_add(rack_out_size[idx], 1);
9857 		}
9858 		if (hw_tls && len > 0) {
9859 			if (filled_all) {
9860 				counter_u64_add(rack_tls_filled, 1);
9861 				rack_log_type_hrdwtso(tp, rack, len, 0, orig_len, 1);
9862 			} else {
9863 				if (rsm) {
9864 					counter_u64_add(rack_tls_rxt, 1);
9865 					rack_log_type_hrdwtso(tp, rack, len, 2, orig_len, 1);
9866 				} else if (doing_tlp) {
9867 					counter_u64_add(rack_tls_tlp, 1);
9868 					rack_log_type_hrdwtso(tp, rack, len, 3, orig_len, 1);
9869 				} else if ((ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > sbavail(sb)) {
9870 					counter_u64_add(rack_tls_app, 1);
9871 					rack_log_type_hrdwtso(tp, rack, len, 4, orig_len, 1);
9872 				} else if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) + rack->r_ctl.rc_pace_min_segs) > tp->snd_cwnd) {
9873 					counter_u64_add(rack_tls_cwnd, 1);
9874 					rack_log_type_hrdwtso(tp, rack, len, 5, orig_len, 1);
9875 				} else if ((ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > tp->snd_wnd) {
9876 					counter_u64_add(rack_tls_rwnd, 1);
9877 					rack_log_type_hrdwtso(tp, rack, len, 6, orig_len, 1);
9878 				} else {
9879 					rack_log_type_hrdwtso(tp, rack, len, 7, orig_len, 1);
9880 					counter_u64_add(rack_tls_other, 1);
9881 				}
9882 			}
9883 		}
9884 	}
9885 	if (sub_from_prr && (error == 0)) {
9886 		if (rack->r_ctl.rc_prr_sndcnt >= len)
9887 			rack->r_ctl.rc_prr_sndcnt -= len;
9888 		else
9889 			rack->r_ctl.rc_prr_sndcnt = 0;
9890 	}
9891 	sub_from_prr = 0;
9892 	rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
9893 	    pass, rsm);
9894 	if ((error == 0) &&
9895 	    (len > 0) &&
9896 	    (tp->snd_una == tp->snd_max))
9897 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
9898 	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
9899 	    (rack->rc_in_persist == 0)) {
9900 		tcp_seq startseq = tp->snd_nxt;
9901 
9902 		/*
9903 		 * Advance snd_nxt over sequence space of this segment.
9904 		 */
9905 		if (error)
9906 			/* We don't log or do anything with errors */
9907 			goto nomore;
9908 
9909 		if (flags & (TH_SYN | TH_FIN)) {
9910 			if (flags & TH_SYN)
9911 				tp->snd_nxt++;
9912 			if (flags & TH_FIN) {
9913 				tp->snd_nxt++;
9914 				tp->t_flags |= TF_SENTFIN;
9915 			}
9916 		}
9917 		/* In the ENOBUFS case we do *not* update snd_max */
9918 		if (sack_rxmit)
9919 			goto nomore;
9920 
9921 		tp->snd_nxt += len;
9922 		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
9923 			if (tp->snd_una == tp->snd_max) {
9924 				/*
9925 				 * Record the time, since we just added
9926 				 * data and none was outstanding.
9927 				 */
9928 				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
9929 				tp->t_acktime = ticks;
9930 			}
9931 			tp->snd_max = tp->snd_nxt;
9932 			/*
9933 			 * Time this transmission if not a retransmission and
9934 			 * not currently timing anything.
9935 			 * This is only relevant in case of switching back to
9936 			 * the base stack.
9937 			 */
9938 			if (tp->t_rtttime == 0) {
9939 				tp->t_rtttime = ticks;
9940 				tp->t_rtseq = startseq;
9941 				TCPSTAT_INC(tcps_segstimed);
9942 			}
9943 #ifdef NETFLIX_STATS
9944 			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
9945 				tp->t_flags |= TF_GPUTINPROG;
9946 				tp->gput_seq = startseq;
9947 				tp->gput_ack = startseq +
9948 				    ulmin(sbavail(sb) - sb_offset, sendwin);
9949 				tp->gput_ts = tcp_ts_getticks();
9950 			}
9951 #endif
9952 		}
9953 	} else {
9954 		/*
9955 		 * Persist case: update snd_max, but since we are in
9956 		 * persist mode (no window) we do not update snd_nxt.
9957 		 */
9958 		int32_t xlen = len;
9959 
9960 		if (error)
9961 			goto nomore;
9962 
9963 		if (flags & TH_SYN)
9964 			++xlen;
9965 		if (flags & TH_FIN) {
9966 			++xlen;
9967 			tp->t_flags |= TF_SENTFIN;
9968 		}
9969 		/* In the ENOBUFS case we do *not* update snd_max */
9970 		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
9971 			if (tp->snd_una == tp->snd_max) {
9972 				/*
9973 				 * Record the time, since we just added
9974 				 * data and none was outstanding.
9975 				 */
9976 				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
9977 				tp->t_acktime = ticks;
9978 			}
9979 			tp->snd_max = tp->snd_nxt + len;
9980 		}
9981 	}
9982 nomore:
9983 	if (error) {
9984 		SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
9985 		/*
9986 		 * Failures do not advance the seq counter above. For the
9987 		 * case of ENOBUFS we will fall out and retry in 1ms with
9988 		 * the hpts. Everything else will just have to retransmit
9989 		 * with the timer.
9990 		 *
9991 		 * In any case, we do not want to loop around for another
9992 		 * send without a good reason.
9993 		 */
9994 		sendalot = 0;
9995 		switch (error) {
9996 		case EPERM:
9997 			tp->t_flags &= ~TF_FORCEDATA;
9998 			tp->t_softerror = error;
9999 			return (error);
10000 		case ENOBUFS:
10001 			if (slot == 0) {
10002 				/*
10003 				 * Pace us right away so that we retry in
10004 				 * some time.
10005 				 */
10006 				slot = 1 + rack->rc_enobuf;
10007 				if (rack->rc_enobuf < 255)
10008 					rack->rc_enobuf++;
10009 				if (slot > (rack->rc_rack_rtt / 2)) {
10010 					slot = rack->rc_rack_rtt / 2;
10011 				}
10012 				if (slot < 10)
10013 					slot = 10;
10014 			}
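			/*
			 * The retry slot thus grows by one per
			 * consecutive ENOBUFS (rc_enobuf saturates at
			 * 255), but is clamped to at most half the
			 * measured rack RTT and to at least 10.
			 */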
10015 			counter_u64_add(rack_saw_enobuf, 1);
10016 			error = 0;
10017 			goto enobufs;
10018 		case EMSGSIZE:
10019 			/*
10020 			 * For some reason the interface we used initially
10021 			 * to send segments changed to another one or lowered
10022 			 * its MTU. If TSO was active we either got an
10023 			 * interface without TSO capabilities or TSO was
10024 			 * turned off. If we obtained the MTU from ip_output()
10025 			 * then update it and try again.
10026 			 */
10027 			if (tso)
10028 				tp->t_flags &= ~TF_TSO;
10029 			if (mtu != 0) {
10030 				tcp_mss_update(tp, -1, mtu, NULL, NULL);
10031 				goto again;
10032 			}
10033 			slot = 10;
10034 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
10035 			tp->t_flags &= ~TF_FORCEDATA;
10036 			return (error);
10037 		case ENETUNREACH:
10038 			counter_u64_add(rack_saw_enetunreach, 1);
10039 		case EHOSTDOWN:
10040 		case EHOSTUNREACH:
10041 		case ENETDOWN:
10042 			if (TCPS_HAVERCVDSYN(tp->t_state)) {
10043 				tp->t_softerror = error;
10044 			}
10045 			/* FALLTHROUGH */
10046 		default:
10047 			slot = 10;
10048 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
10049 			tp->t_flags &= ~TF_FORCEDATA;
10050 			return (error);
10051 		}
10052 	} else {
10053 		rack->rc_enobuf = 0;
10054 	}
10055 	TCPSTAT_INC(tcps_sndtotal);
10056 
10057 	/*
10058 	 * Data sent (as far as we can tell). If this advertises a larger
10059 	 * window than any other segment, then remember the size of the
10060 	 * advertised window. Any pending ACK has now been sent.
10061 	 */
10062 	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
10063 		tp->rcv_adv = tp->rcv_nxt + recwin;
10064 	tp->last_ack_sent = tp->rcv_nxt;
10065 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
10066 enobufs:
10067 	rack->r_tlp_running = 0;
10068 	if (flags & TH_RST) {
10069 		/*
10070 		 * We don't send again after sending a RST.
10071 		 */
10072 		slot = 0;
10073 		sendalot = 0;
10074 	}
10075 	if (rsm && (slot == 0)) {
10076 		/*
10077 		 * Possibly a dup-ack retransmission, so let's
10078 		 * ensure we wait at least the minimum rack
10079 		 * time; if it is a rack resend then the rack
10080 		 * timeout will also be set to this.
10081 		 */
10082 		slot = rack->r_ctl.rc_min_to;
10083 	}
10084 	if (slot) {
10085 		/* set the rack tcb into slot N */
10086 		counter_u64_add(rack_paced_segments, 1);
10087 	} else if (sendalot) {
10088 		if (len)
10089 			counter_u64_add(rack_unpaced_segments, 1);
10090 		sack_rxmit = 0;
10091 		tp->t_flags &= ~TF_FORCEDATA;
10092 		goto again;
10093 	} else if (len) {
10094 		counter_u64_add(rack_unpaced_segments, 1);
10095 	}
10096 	tp->t_flags &= ~TF_FORCEDATA;
10097 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
10098 	return (error);
10099 }
10100 
10101 /*
10102  * rack_ctloutput() must drop the inpcb lock before performing copyin on
10103  * socket option arguments.  When it re-acquires the lock after the copy, it
10104  * has to revalidate that the connection is still valid for the socket
10105  * option.
10106  */
10107 static int
10108 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
10109     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
10110 {
10111 	int32_t error = 0, optval;
10112 
10113 	switch (sopt->sopt_name) {
10114 	case TCP_RACK_PROP_RATE:
10115 	case TCP_RACK_PROP:
10116 	case TCP_RACK_TLP_REDUCE:
10117 	case TCP_RACK_EARLY_RECOV:
10118 	case TCP_RACK_PACE_ALWAYS:
10119 	case TCP_DELACK:
10120 	case TCP_RACK_PACE_REDUCE:
10121 	case TCP_RACK_PACE_MAX_SEG:
10122 	case TCP_RACK_PRR_SENDALOT:
10123 	case TCP_RACK_MIN_TO:
10124 	case TCP_RACK_EARLY_SEG:
10125 	case TCP_RACK_REORD_THRESH:
10126 	case TCP_RACK_REORD_FADE:
10127 	case TCP_RACK_TLP_THRESH:
10128 	case TCP_RACK_PKT_DELAY:
10129 	case TCP_RACK_TLP_USE:
10130 	case TCP_RACK_TLP_INC_VAR:
10131 	case TCP_RACK_IDLE_REDUCE_HIGH:
10132 	case TCP_RACK_MIN_PACE:
10133 	case TCP_RACK_GP_INCREASE:
10134 	case TCP_BBR_RACK_RTT_USE:
10135 	case TCP_BBR_USE_RACK_CHEAT:
10136 	case TCP_RACK_DO_DETECTION:
10137 	case TCP_DATA_AFTER_CLOSE:
10138 		break;
10139 	default:
10140 		return (tcp_default_ctloutput(so, sopt, inp, tp));
10142 	}
10143 	INP_WUNLOCK(inp);
10144 	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
10145 	if (error)
10146 		return (error);
10147 	INP_WLOCK(inp);
10148 	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
10149 		INP_WUNLOCK(inp);
10150 		return (ECONNRESET);
10151 	}
10152 	tp = intotcpcb(inp);
10153 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10154 	switch (sopt->sopt_name) {
10155 	case TCP_RACK_DO_DETECTION:
10156 		RACK_OPTS_INC(tcp_rack_no_sack);
10157 		if (optval == 0)
10158 			rack->do_detection = 0;
10159 		else
10160 			rack->do_detection = 1;
10161 		break;
10162 	case TCP_RACK_PROP_RATE:
10163 		if ((optval <= 0) || (optval >= 100)) {
10164 			error = EINVAL;
10165 			break;
10166 		}
10167 		RACK_OPTS_INC(tcp_rack_prop_rate);
10168 		rack->r_ctl.rc_prop_rate = optval;
10169 		break;
10170 	case TCP_RACK_TLP_USE:
10171 		if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
10172 			error = EINVAL;
10173 			break;
10174 		}
10175 		RACK_OPTS_INC(tcp_tlp_use);
10176 		rack->rack_tlp_threshold_use = optval;
10177 		break;
10178 	case TCP_RACK_PROP:
10179 		/* RACK proportional rate reduction (bool) */
10180 		RACK_OPTS_INC(tcp_rack_prop);
10181 		rack->r_ctl.rc_prop_reduce = optval;
10182 		break;
10183 	case TCP_RACK_TLP_REDUCE:
10184 		/* RACK TLP cwnd reduction (bool) */
10185 		RACK_OPTS_INC(tcp_rack_tlp_reduce);
10186 		rack->r_ctl.rc_tlp_cwnd_reduce = optval;
10187 		break;
10188 	case TCP_RACK_EARLY_RECOV:
10189 		/* Should recovery happen early (bool) */
10190 		RACK_OPTS_INC(tcp_rack_early_recov);
10191 		rack->r_ctl.rc_early_recovery = optval;
10192 		break;
10193 	case TCP_RACK_PACE_ALWAYS:
10194 		/* Use the always pace method (bool)  */
10195 		RACK_OPTS_INC(tcp_rack_pace_always);
10196 		if (optval > 0)
10197 			rack->rc_always_pace = 1;
10198 		else
10199 			rack->rc_always_pace = 0;
10200 		break;
10201 	case TCP_RACK_PACE_REDUCE:
10202 		/* RACK Hptsi reduction factor (divisor) */
10203 		RACK_OPTS_INC(tcp_rack_pace_reduce);
10204 		if (optval)
10205 			/* Must be non-zero */
10206 			rack->rc_pace_reduce = optval;
10207 		else
10208 			error = EINVAL;
10209 		break;
10210 	case TCP_RACK_PACE_MAX_SEG:
10211 		/* Max segments in a pace */
10212 		RACK_OPTS_INC(tcp_rack_max_seg);
10213 		rack->rc_pace_max_segs = optval;
10214 		rack_set_pace_segments(tp, rack);
10215 		break;
10216 	case TCP_RACK_PRR_SENDALOT:
10217 		/* Allow PRR to send more than one seg */
10218 		RACK_OPTS_INC(tcp_rack_prr_sendalot);
10219 		rack->r_ctl.rc_prr_sendalot = optval;
10220 		break;
10221 	case TCP_RACK_MIN_TO:
10222 		/* Minimum time between rack t-o's in ms */
10223 		RACK_OPTS_INC(tcp_rack_min_to);
10224 		rack->r_ctl.rc_min_to = optval;
10225 		break;
10226 	case TCP_RACK_EARLY_SEG:
10227 		/* Max segments to send in early recovery */
10228 		RACK_OPTS_INC(tcp_rack_early_seg);
10229 		rack->r_ctl.rc_early_recovery_segs = optval;
10230 		break;
10231 	case TCP_RACK_REORD_THRESH:
10232 		/* RACK reorder threshold (shift amount) */
10233 		RACK_OPTS_INC(tcp_rack_reord_thresh);
10234 		if ((optval > 0) && (optval < 31))
10235 			rack->r_ctl.rc_reorder_shift = optval;
10236 		else
10237 			error = EINVAL;
10238 		break;
10239 	case TCP_RACK_REORD_FADE:
10240 		/* Does reordering fade after ms time */
10241 		RACK_OPTS_INC(tcp_rack_reord_fade);
10242 		rack->r_ctl.rc_reorder_fade = optval;
10243 		break;
10244 	case TCP_RACK_TLP_THRESH:
10245 		/* RACK TLP threshold i.e. srtt+(srtt/N) */
10246 		RACK_OPTS_INC(tcp_rack_tlp_thresh);
10247 		if (optval)
10248 			rack->r_ctl.rc_tlp_threshold = optval;
10249 		else
10250 			error = EINVAL;
10251 		break;
10252 	case TCP_BBR_USE_RACK_CHEAT:
10253 		RACK_OPTS_INC(tcp_rack_cheat);
10254 		if (optval)
10255 			rack->use_rack_cheat = 1;
10256 		else
10257 			rack->use_rack_cheat = 0;
10258 		break;
10259 	case TCP_RACK_PKT_DELAY:
10260 		/* RACK added ms i.e. rack-rtt + reord + N */
10261 		RACK_OPTS_INC(tcp_rack_pkt_delay);
10262 		rack->r_ctl.rc_pkt_delay = optval;
10263 		break;
10264 	case TCP_RACK_TLP_INC_VAR:
10265 		/* Does TLP include rtt variance in t-o */
10266 		error = EINVAL;
10267 		break;
10268 	case TCP_RACK_IDLE_REDUCE_HIGH:
10269 		error = EINVAL;
10270 		break;
10271 	case TCP_DELACK:
10272 		if (optval == 0)
10273 			tp->t_delayed_ack = 0;
10274 		else
10275 			tp->t_delayed_ack = 1;
10276 		if (tp->t_flags & TF_DELACK) {
10277 			tp->t_flags &= ~TF_DELACK;
10278 			tp->t_flags |= TF_ACKNOW;
10279 			rack_output(tp);
10280 		}
10281 		break;
10282 	case TCP_RACK_MIN_PACE:
10283 		RACK_OPTS_INC(tcp_rack_min_pace);
10284 		if (optval > 3)
10285 			rack->r_enforce_min_pace = 3;
10286 		else
10287 			rack->r_enforce_min_pace = optval;
10288 		break;
10289 	case TCP_RACK_GP_INCREASE:
10290 		if ((optval >= 0) &&
10291 		    (optval <= 256))
10292 			rack->rack_per_of_gp = optval;
10293 		else
10294 			error = EINVAL;
10295 
10296 		break;
10297 	case TCP_BBR_RACK_RTT_USE:
10298 		if ((optval != USE_RTT_HIGH) &&
10299 		    (optval != USE_RTT_LOW) &&
10300 		    (optval != USE_RTT_AVG))
10301 			error = EINVAL;
10302 		else
10303 			rack->r_ctl.rc_rate_sample_method = optval;
10304 		break;
10305 	case TCP_DATA_AFTER_CLOSE:
10306 		if (optval)
10307 			rack->rc_allow_data_af_clo = 1;
10308 		else
10309 			rack->rc_allow_data_af_clo = 0;
10310 		break;
10311 	default:
10312 		return (tcp_default_ctloutput(so, sopt, inp, tp));
10314 	}
10315 #ifdef NETFLIX_STATS
10316 	tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
10317 #endif
10318 	INP_WUNLOCK(inp);
10319 	return (error);
10320 }
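
/*
 * A minimal userland sketch of driving one of the options handled
 * above (assuming the TCP_RACK_* constants are visible through
 * <netinet/tcp.h> and this stack is attached to the socket):
 *
 *	int one = 1;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &one, sizeof(one)) == -1)
 *		err(1, "TCP_RACK_PACE_ALWAYS");
 *
 * The unlock/copyin/relock dance above is why every option value must
 * be a plain int: sooptcopyin() runs with the inpcb unlocked.
 */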
10321 
10322 static int
10323 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
10324     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
10325 {
10326 	int32_t error, optval;
10327 
10328 	/*
10329 	 * Because all our options are either boolean or an int, we can just
10330 	 * pull everything into optval and then unlock and copy. If we ever
10331 	 * add an option that is not an int, then this will have quite an
10332 	 * impact on this routine.
10333 	 */
10334 	error = 0;
10335 	switch (sopt->sopt_name) {
10336 	case TCP_RACK_DO_DETECTION:
10337 		optval = rack->do_detection;
10338 		break;
10339 
10340 	case TCP_RACK_PROP_RATE:
10341 		optval = rack->r_ctl.rc_prop_rate;
10342 		break;
10343 	case TCP_RACK_PROP:
10344 		/* RACK proportional rate reduction (bool) */
10345 		optval = rack->r_ctl.rc_prop_reduce;
10346 		break;
10347 	case TCP_RACK_TLP_REDUCE:
10348 		/* RACK TLP cwnd reduction (bool) */
10349 		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
10350 		break;
10351 	case TCP_RACK_EARLY_RECOV:
10352 		/* Should recovery happen early (bool) */
10353 		optval = rack->r_ctl.rc_early_recovery;
10354 		break;
10355 	case TCP_RACK_PACE_REDUCE:
10356 		/* RACK Hptsi reduction factor (divisor) */
10357 		optval = rack->rc_pace_reduce;
10358 		break;
10359 	case TCP_RACK_PACE_MAX_SEG:
10360 		/* Max segments in a pace */
10361 		optval = rack->rc_pace_max_segs;
10362 		break;
10363 	case TCP_RACK_PACE_ALWAYS:
10364 		/* Use the always pace method */
10365 		optval = rack->rc_always_pace;
10366 		break;
10367 	case TCP_RACK_PRR_SENDALOT:
10368 		/* Allow PRR to send more than one seg */
10369 		optval = rack->r_ctl.rc_prr_sendalot;
10370 		break;
10371 	case TCP_RACK_MIN_TO:
10372 		/* Minimum time between rack t-o's in ms */
10373 		optval = rack->r_ctl.rc_min_to;
10374 		break;
10375 	case TCP_RACK_EARLY_SEG:
10376 		/* Max segments to send in early recovery */
10377 		optval = rack->r_ctl.rc_early_recovery_segs;
10378 		break;
10379 	case TCP_RACK_REORD_THRESH:
10380 		/* RACK reorder threshold (shift amount) */
10381 		optval = rack->r_ctl.rc_reorder_shift;
10382 		break;
10383 	case TCP_RACK_REORD_FADE:
10384 		/* Does reordering fade after ms time */
10385 		optval = rack->r_ctl.rc_reorder_fade;
10386 		break;
10387 	case TCP_BBR_USE_RACK_CHEAT:
10388 		/* Do we use the rack cheat for rxt */
10389 		optval = rack->use_rack_cheat;
10390 		break;
10391 	case TCP_RACK_TLP_THRESH:
10392 		/* RACK TLP threshold i.e. srtt+(srtt/N) */
10393 		optval = rack->r_ctl.rc_tlp_threshold;
10394 		break;
10395 	case TCP_RACK_PKT_DELAY:
10396 		/* RACK added ms i.e. rack-rtt + reord + N */
10397 		optval = rack->r_ctl.rc_pkt_delay;
10398 		break;
10399 	case TCP_RACK_TLP_USE:
10400 		optval = rack->rack_tlp_threshold_use;
10401 		break;
10402 	case TCP_RACK_TLP_INC_VAR:
10403 		/* Does TLP include rtt variance in t-o */
10404 		error = EINVAL;
10405 		break;
10406 	case TCP_RACK_IDLE_REDUCE_HIGH:
10407 		error = EINVAL;
10408 		break;
10409 	case TCP_RACK_MIN_PACE:
10410 		optval = rack->r_enforce_min_pace;
10411 		break;
10412 	case TCP_RACK_GP_INCREASE:
10413 		optval = rack->rack_per_of_gp;
10414 		break;
10415 	case TCP_BBR_RACK_RTT_USE:
10416 		optval = rack->r_ctl.rc_rate_sample_method;
10417 		break;
10418 	case TCP_DELACK:
10419 		optval = tp->t_delayed_ack;
10420 		break;
10421 	case TCP_DATA_AFTER_CLOSE:
10422 		optval = rack->rc_allow_data_af_clo;
10423 		break;
10424 	default:
10425 		return (tcp_default_ctloutput(so, sopt, inp, tp));
10427 	}
10428 	INP_WUNLOCK(inp);
10429 	if (error == 0) {
10430 		error = sooptcopyout(sopt, &optval, sizeof optval);
10431 	}
10432 	return (error);
10433 }
10434 
10435 static int
10436 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
10437 {
10438 	int32_t error = EINVAL;
10439 	struct tcp_rack *rack;
10440 
10441 	rack = (struct tcp_rack *)tp->t_fb_ptr;
10442 	if (rack == NULL) {
10443 		/* Huh? */
10444 		goto out;
10445 	}
10446 	if (sopt->sopt_dir == SOPT_SET) {
10447 		return (rack_set_sockopt(so, sopt, inp, tp, rack));
10448 	} else if (sopt->sopt_dir == SOPT_GET) {
10449 		return (rack_get_sockopt(so, sopt, inp, tp, rack));
10450 	}
10451 out:
10452 	INP_WUNLOCK(inp);
10453 	return (error);
10454 }
10455 
10456 
10457 static struct tcp_function_block __tcp_rack = {
10458 	.tfb_tcp_block_name = __XSTRING(STACKNAME),
10459 	.tfb_tcp_output = rack_output,
10460 	.tfb_do_queued_segments = ctf_do_queued_segments,
10461 	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
10462 	.tfb_tcp_do_segment = rack_do_segment,
10463 	.tfb_tcp_ctloutput = rack_ctloutput,
10464 	.tfb_tcp_fb_init = rack_init,
10465 	.tfb_tcp_fb_fini = rack_fini,
10466 	.tfb_tcp_timer_stop_all = rack_stopall,
10467 	.tfb_tcp_timer_activate = rack_timer_activate,
10468 	.tfb_tcp_timer_active = rack_timer_active,
10469 	.tfb_tcp_timer_stop = rack_timer_stop,
10470 	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
10471 	.tfb_tcp_handoff_ok = rack_handoff_ok
10472 };
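
/*
 * A connection can be moved onto this stack from userland via the
 * TCP_FUNCTION_BLK socket option (a sketch, assuming the standard
 * struct tcp_function_set interface):
 *
 *	struct tcp_function_set tfs;
 *
 *	strlcpy(tfs.function_set_name, "rack",
 *	    sizeof(tfs.function_set_name));
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *
 * or made the system-wide default by setting the
 * net.inet.tcp.functions_default sysctl to the stack name.
 */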
10473 
10474 static const char *rack_stack_names[] = {
10475 	__XSTRING(STACKNAME),
10476 #ifdef STACKALIAS
10477 	__XSTRING(STACKALIAS),
10478 #endif
10479 };
10480 
10481 static int
10482 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
10483 {
10484 	memset(mem, 0, size);
10485 	return (0);
10486 }
10487 
10488 static void
10489 rack_dtor(void *mem, int32_t size, void *arg)
10490 {
10491 
10492 }
10493 
10494 static bool rack_mod_inited = false;
10495 
10496 static int
10497 tcp_addrack(module_t mod, int32_t type, void *data)
10498 {
10499 	int32_t err = 0;
10500 	int num_stacks;
10501 
10502 	switch (type) {
10503 	case MOD_LOAD:
10504 		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
10505 		    sizeof(struct rack_sendmap),
10506 		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
10507 
10508 		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
10509 		    sizeof(struct tcp_rack),
10510 		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
10511 
10512 		sysctl_ctx_init(&rack_sysctl_ctx);
10513 		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
10514 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
10515 		    OID_AUTO,
10516 #ifdef STACKALIAS
10517 		    __XSTRING(STACKALIAS),
10518 #else
10519 		    __XSTRING(STACKNAME),
10520 #endif
10521 		    CTLFLAG_RW, 0,
10522 		    "");
10523 		if (rack_sysctl_root == NULL) {
10524 			printf("Failed to add sysctl node\n");
10525 			err = EFAULT;
10526 			goto free_uma;
10527 		}
10528 		rack_init_sysctls();
10529 		num_stacks = nitems(rack_stack_names);
10530 		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
10531 		    rack_stack_names, &num_stacks);
10532 		if (err) {
10533 			printf("Failed to register %s stack name for "
10534 			    "%s module\n", rack_stack_names[num_stacks],
10535 			    __XSTRING(MODNAME));
10536 			sysctl_ctx_free(&rack_sysctl_ctx);
10537 free_uma:
10538 			uma_zdestroy(rack_zone);
10539 			uma_zdestroy(rack_pcb_zone);
10540 			rack_counter_destroy();
10541 			printf("Failed to register rack module -- err:%d\n", err);
10542 			return (err);
10543 		}
10544 		tcp_lro_reg_mbufq();
10545 		rack_mod_inited = true;
10546 		break;
10547 	case MOD_QUIESCE:
10548 		err = deregister_tcp_functions(&__tcp_rack, true, false);
10549 		break;
10550 	case MOD_UNLOAD:
10551 		err = deregister_tcp_functions(&__tcp_rack, false, true);
10552 		if (err == EBUSY)
10553 			break;
10554 		if (rack_mod_inited) {
10555 			uma_zdestroy(rack_zone);
10556 			uma_zdestroy(rack_pcb_zone);
10557 			sysctl_ctx_free(&rack_sysctl_ctx);
10558 			rack_counter_destroy();
10559 			rack_mod_inited = false;
10560 		}
10561 		tcp_lro_dereg_mbufq();
10562 		err = 0;
10563 		break;
10564 	default:
10565 		return (EOPNOTSUPP);
10566 	}
10567 	return (err);
10568 }
10569 
10570 static moduledata_t tcp_rack = {
10571 	.name = __XSTRING(MODNAME),
10572 	.evhand = tcp_addrack,
10573 	.priv = 0
10574 };
10575 
10576 MODULE_VERSION(MODNAME, 1);
10577 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
10578 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
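
/*
 * Typical module workflow (a sketch, assuming the module builds as
 * tcp_rack.ko):
 *
 *	# kldload tcp_rack
 *	# sysctl net.inet.tcp.functions_available
 *
 * MOD_QUIESCE above deregisters with quiesce set and fails while
 * connections still use the stack; MOD_UNLOAD forces deregistration
 * and tears down the UMA zones and sysctl tree only if initialization
 * had completed (rack_mod_inited).
 */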
10579