xref: /freebsd/sys/netinet/tcp_sack.c (revision 9dba3024c3f1a2df6f42689aac5a2ab4acc7561d)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)tcp_sack.c	8.12 (Berkeley) 5/24/95
30  * $FreeBSD$
31  */
32 
33 /*-
34  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
62  *
63  * NRL grants permission for redistribution and use in source and binary
64  * forms, with or without modification, of the software and documentation
65  * created at NRL provided that the following conditions are met:
66  *
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  * 3. All advertising materials mentioning features or use of this software
73  *    must display the following acknowledgements:
74  *	This product includes software developed by the University of
75  *	California, Berkeley and its contributors.
76  *	This product includes software developed at the Information
77  *	Technology Division, US Naval Research Laboratory.
78  * 4. Neither the name of the NRL nor the names of its contributors
79  *    may be used to endorse or promote products derived from this software
80  *    without specific prior written permission.
81  *
82  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
83  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
84  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
85  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
86  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
87  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
88  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
89  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
90  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
91  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
92  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93  *
94  * The views and conclusions contained in the software and documentation
95  * are those of the authors and should not be interpreted as representing
96  * official policies, either expressed or implied, of the US Naval
97  * Research Laboratory (NRL).
98  */
99 #include "opt_inet.h"
100 #include "opt_inet6.h"
101 #include "opt_ipsec.h"
102 #include "opt_tcpdebug.h"
103 #include "opt_tcp_input.h"
104 #include "opt_tcp_sack.h"
105 
106 #include <sys/param.h>
107 #include <sys/systm.h>
108 #include <sys/kernel.h>
109 #include <sys/sysctl.h>
110 #include <sys/malloc.h>
111 #include <sys/mbuf.h>
112 #include <sys/proc.h>		/* for proc0 declaration */
113 #include <sys/protosw.h>
114 #include <sys/socket.h>
115 #include <sys/socketvar.h>
116 #include <sys/syslog.h>
118 
119 #include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
120 
121 #include <vm/uma.h>
122 
123 #include <net/if.h>
124 #include <net/route.h>
125 
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #include <netinet/ip.h>
129 #include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM		*/
130 #include <netinet/in_var.h>
131 #include <netinet/icmp_var.h>	/* for ICMP_BANDLIM		*/
132 #include <netinet/in_pcb.h>
133 #include <netinet/ip_var.h>
134 #include <netinet/ip6.h>
135 #include <netinet/icmp6.h>
136 #include <netinet6/nd6.h>
137 #include <netinet6/ip6_var.h>
138 #include <netinet6/in6_pcb.h>
139 #include <netinet/tcp.h>
140 #include <netinet/tcp_fsm.h>
141 #include <netinet/tcp_seq.h>
142 #include <netinet/tcp_timer.h>
143 #include <netinet/tcp_var.h>
144 #include <netinet6/tcp6_var.h>
145 #include <netinet/tcpip.h>
146 #ifdef TCPDEBUG
147 #include <netinet/tcp_debug.h>
148 #endif /* TCPDEBUG */
149 
150 #ifdef FAST_IPSEC
151 #include <netipsec/ipsec.h>
152 #include <netipsec/ipsec6.h>
153 #endif
154 
155 #ifdef IPSEC
156 #include <netinet6/ipsec.h>
157 #include <netinet6/ipsec6.h>
158 #include <netkey/key.h>
159 #endif /*IPSEC*/
160 #include <machine/in_cksum.h>
161 
162 extern struct uma_zone *sack_hole_zone;
163 
164 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
165 int tcp_do_sack = 1;
166 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
167 	&tcp_do_sack, 0, "Enable/Disable TCP SACK support");
168 TUNABLE_INT("net.inet.tcp.sack.enable", &tcp_do_sack);
169 
170 static int tcp_sack_maxholes = 128;
171 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_RW,
172 	&tcp_sack_maxholes, 0,
173     "Maximum number of TCP SACK holes allowed per connection");
174 
175 static int tcp_sack_globalmaxholes = 65536;
176 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_RW,
177 	&tcp_sack_globalmaxholes, 0,
178     "Global maximum number of TCP SACK holes");
179 
180 static int tcp_sack_globalholes = 0;
181 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_RD,
182     &tcp_sack_globalholes, 0,
183     "Global number of TCP SACK holes currently allocated");
184 
185 /*
186  * This function is called upon receipt of new valid data (while not in header
187  * prediction mode), and it updates the ordered list of sacks.
188  */
189 void
190 tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
191 {
192 	/*
193 	 * First reported block MUST be the most recent one.  Subsequent
194 	 * blocks SHOULD be in the order in which they arrived at the
195 	 * receiver.  These two conditions make the implementation fully
196 	 * compliant with RFC 2018.
197 	 */
198 	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
199 	int num_head, num_saved, i;
200 
201 	INP_LOCK_ASSERT(tp->t_inpcb);
202 
203 	/* Check arguments */
204 	KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
205 
206 	/* SACK block for the received segment. */
207 	head_blk.start = rcv_start;
208 	head_blk.end = rcv_end;
209 
210 	/*
211 	 * Merge updated SACK blocks into head_blk, and
212 	 * save unchanged SACK blocks into saved_blks[].
213 	 * num_saved will have the number of the saved SACK blocks.
214 	 */
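	/*
	 * For example (illustrative sequence numbers): with rcv_nxt = 100,
	 * an existing block [150,200) and a new out-of-order segment covering
	 * [200,250), head_blk starts as [200,250); the loop below merges the
	 * adjacent [150,200) into it, leaving head_blk = [150,250) and no
	 * saved blocks.
	 */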
215 	num_saved = 0;
216 	for (i = 0; i < tp->rcv_numsacks; i++) {
217 		tcp_seq start = tp->sackblks[i].start;
218 		tcp_seq end = tp->sackblks[i].end;
219 		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
220 			/*
221 			 * Discard this SACK block.
222 			 */
223 		} else if (SEQ_LEQ(head_blk.start, end) &&
224 			   SEQ_GEQ(head_blk.end, start)) {
225 			/*
226 			 * Merge this SACK block into head_blk.
227 			 * This SACK block itself will be discarded.
228 			 */
229 			if (SEQ_GT(head_blk.start, start))
230 				head_blk.start = start;
231 			if (SEQ_LT(head_blk.end, end))
232 				head_blk.end = end;
233 		} else {
234 			/*
235 			 * Save this SACK block.
236 			 */
237 			saved_blks[num_saved].start = start;
238 			saved_blks[num_saved].end = end;
239 			num_saved++;
240 		}
241 	}
242 
243 	/*
244 	 * Update SACK list in tp->sackblks[].
245 	 */
246 	num_head = 0;
247 	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
248 		/*
249 		 * The received data segment is an out-of-order segment.
250 		 * Put head_blk at the top of SACK list.
251 		 */
252 		tp->sackblks[0] = head_blk;
253 		num_head = 1;
254 		/*
255 		 * If the number of saved SACK blocks exceeds its limit,
256 		 * discard the last SACK block.
257 		 */
258 		if (num_saved >= MAX_SACK_BLKS)
259 			num_saved--;
260 	}
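	/*
	 * If head_blk was not placed at the front above, the new data is not
	 * out of order (its start does not lie beyond rcv_nxt) and need not
	 * be reported as a SACK block; only the saved blocks are copied back
	 * below.
	 */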
261 	if (num_saved > 0) {
262 		/*
263 		 * Copy the saved SACK blocks back.
264 		 */
265 		bcopy(saved_blks, &tp->sackblks[num_head],
266 		      sizeof(struct sackblk) * num_saved);
267 	}
268 
269 	/* Save the number of SACK blocks. */
270 	tp->rcv_numsacks = num_head + num_saved;
271 }
272 
273 /*
274  * Delete all receiver-side SACK information.
275  */
276 void
277 tcp_clean_sackreport(struct tcpcb *tp)
279 {
280 	int i;
281 
282 	INP_LOCK_ASSERT(tp->t_inpcb);
283 	tp->rcv_numsacks = 0;
284 	for (i = 0; i < MAX_SACK_BLKS; i++)
285 		tp->sackblks[i].start = tp->sackblks[i].end = 0;
286 }
287 
288 /*
289  * Process the TCP SACK option.  Returns 1 if tcp_dooptions() should continue,
290  * and 0 if the option was processed successfully.  tp->snd_holes is an ordered list
291  * of holes (oldest to newest, in terms of the sequence space).
292  */
293 int
294 tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen)
295 {
296 	int tmp_olen;
297 	u_char *tmp_cp;
298 	struct sackhole *cur, *temp;
299 
300 	INP_LOCK_ASSERT(tp->t_inpcb);
301 	if (!tp->sack_enable)
302 		return (1);
303 	if ((th->th_flags & TH_ACK) == 0)
304 		return (1);
305 	/* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */
306 	if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
307 		return (1);
308 	/* If ack is outside [snd_una, snd_max], ignore the SACK options */
309 	if (SEQ_LT(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max))
310 		return (1);
311 	tmp_cp = cp + 2;
312 	tmp_olen = optlen - 2;
313 	tcpstat.tcps_sack_rcv_blocks++;
314 	if (tp->snd_numholes < 0) /* XXX panic? */
315 		tp->snd_numholes = 0;
316 	if (tp->t_maxseg == 0)
317 		panic("tcp_sack_option"); /* Should never happen */
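	/*
	 * Each iteration below parses one SACK block (a pair of 32-bit
	 * sequence numbers in network byte order), validates it, and folds it
	 * into the scoreboard.  For example (illustrative numbers): with
	 * snd_una = th_ack = 100 and an empty scoreboard, a SACK block
	 * [150,200) creates the first hole [100,150) and advances
	 * rcv_lastsack to 200.
	 */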
318 	while (tmp_olen > 0) {
319 		struct sackblk sack;
320 
321 		bcopy(tmp_cp, (char *) &(sack.start), sizeof(tcp_seq));
322 		sack.start = ntohl(sack.start);
323 		bcopy(tmp_cp + sizeof(tcp_seq),
324 		    (char *) &(sack.end), sizeof(tcp_seq));
325 		sack.end = ntohl(sack.end);
326 		tmp_olen -= TCPOLEN_SACK;
327 		tmp_cp += TCPOLEN_SACK;
328 		if (SEQ_LEQ(sack.end, sack.start))
329 			continue; /* bad SACK fields */
330 		if (SEQ_LEQ(sack.end, tp->snd_una))
331 			continue; /* old block */
332 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
333 			if (SEQ_LT(sack.start, th->th_ack))
334 				continue;
335 		}
336 		if (SEQ_GT(sack.end, tp->snd_max))
337 			continue;
338 		if (TAILQ_EMPTY(&tp->snd_holes)) { /* first hole */
339 			if (tcp_sack_globalholes >= tcp_sack_globalmaxholes ||
340 			    tcp_sack_maxholes == 0) {
341 				tcpstat.tcps_sack_sboverflow++;
342 				continue;
343 			}
344 			cur = (struct sackhole *)
345 				uma_zalloc(sack_hole_zone, M_NOWAIT);
346 			if (cur == NULL) {
347 				/* ENOBUFS, so ignore SACKed block for now */
348 				continue;
349 			}
350 			cur->start = th->th_ack;
351 			cur->end = sack.start;
352 			cur->rxmit = cur->start;
353 			tp->snd_numholes = 1;
354 			tcp_sack_globalholes++;
355 			/* Update the sack scoreboard "cache" */
356 			tp->sackhint.nexthole = cur;
357 			tp->rcv_lastsack = sack.end;
358 			TAILQ_INSERT_HEAD(&tp->snd_holes, cur, scblink);
359 			continue; /* with next sack block */
360 		}
361 		/* Go thru list of holes. */
362 		cur = TAILQ_FIRST(&tp->snd_holes);
363 		while (cur) {
364 			if (SEQ_LEQ(sack.end, cur->start))
365 				/* SACKs data before the current hole */
366 				break; /* no use going through more holes */
367 			if (SEQ_GEQ(sack.start, cur->end)) {
368 				/* SACKs data beyond the current hole */
369 				cur = TAILQ_NEXT(cur, scblink);
370 				continue;
371 			}
372 			tp->sackhint.sack_bytes_rexmit -=
373 				(cur->rxmit - cur->start);
374 			KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
375 				("sackhint bytes rtx >= 0"));
376 			if (SEQ_LEQ(sack.start, cur->start)) {
377 				/* Data acks at least the beginning of hole */
378 				if (SEQ_GEQ(sack.end, cur->end)) {
379 					/* Acks entire hole, so delete hole */
380 					if (tp->sackhint.nexthole == cur)
381 						tp->sackhint.nexthole =
382 						    TAILQ_NEXT(cur, scblink);
383 					temp = cur;
384 					cur = TAILQ_NEXT(cur, scblink);
385 					TAILQ_REMOVE(&tp->snd_holes,
386 						temp, scblink);
387 					uma_zfree(sack_hole_zone, temp);
388 					tp->snd_numholes--;
389 					tcp_sack_globalholes--;
390 					continue;
391 				} else {
392 					/* Move start of hole forward */
393 					cur->start = sack.end;
394 					cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
395 				}
396 			} else if (SEQ_GEQ(sack.end, cur->end)) {
397 				/* Move end of hole backward */
398 				cur->end = sack.start;
399 				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
400 			} else {
401 				/*
402 				 * ACKs some data in middle of a hole; need to
403 				 * split current hole
404 				 */
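				/*
				 * cur is split into [cur->start, sack.start)
				 * and [sack.end, cur->end), provided a new
				 * hole can be allocated below.
				 */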
405 				if (tp->snd_numholes >= tcp_sack_maxholes ||
406 					tcp_sack_globalholes >=
407 					 tcp_sack_globalmaxholes) {
408 					tcpstat.tcps_sack_sboverflow++;
409 					temp = NULL;
410 				} else {
411 					temp = (struct sackhole *)
412 						uma_zalloc(sack_hole_zone,
413 							M_NOWAIT);
414 				}
415 				if (temp != NULL) {
416 					temp->start = sack.end;
417 					temp->end = cur->end;
418 					temp->rxmit = SEQ_MAX(cur->rxmit,
419 						temp->start);
420 					cur->end = sack.start;
421 					cur->rxmit = SEQ_MIN(cur->rxmit,
422 						cur->end);
423 					tp->sackhint.sack_bytes_rexmit +=
424 						(cur->rxmit - cur->start);
425 					TAILQ_INSERT_AFTER(&tp->snd_holes,
426 						cur, temp, scblink);
427 					cur = temp;
428 					tp->snd_numholes++;
429 					tcp_sack_globalholes++;
430 				}
431 			}
432 			tp->sackhint.sack_bytes_rexmit +=
433 			    (cur->rxmit - cur->start);
434 			cur = TAILQ_NEXT(cur, scblink);
435 		}
436 		/* At this point, we have iterated the whole scoreboard. */
437 		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
438 			/* Need to append new hole at end. */
439 			if (tp->snd_numholes >= tcp_sack_maxholes ||
440 			    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
441 				tcpstat.tcps_sack_sboverflow++;
442 				continue;
443 			}
444 			temp = (struct sackhole *)
445 				uma_zalloc(sack_hole_zone, M_NOWAIT);
446 			if (temp == NULL)
447 				continue; /* ENOBUFS */
448 			temp->start = tp->rcv_lastsack;
449 			temp->end = sack.start;
450 			temp->rxmit = temp->start;
451 			tp->rcv_lastsack = sack.end;
452 			tp->snd_numholes++;
453 			tcp_sack_globalholes++;
454 			TAILQ_INSERT_TAIL(&tp->snd_holes, temp, scblink);
455 			if (tp->sackhint.nexthole == NULL)
456 				tp->sackhint.nexthole = temp;
457 		}
458 		if (SEQ_LT(tp->rcv_lastsack, sack.end))
459 			tp->rcv_lastsack = sack.end;
460 	}
461 	return (0);
462 }
463 
464 /*
465  * Delete stale (i.e., cumulatively ack'd) holes.  A hole is deleted only if
466  * it is completely acked; otherwise, tcp_sack_option(), called from
467  * tcp_dooptions(), will fix up the hole.
468  */
469 void
470 tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
473 {
474 	INP_LOCK_ASSERT(tp->t_inpcb);
475 	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
476 		/* max, because this could be an older ack that just arrived */
477 		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
478 			th->th_ack : tp->snd_una;
479 		struct sackhole *cur = TAILQ_FIRST(&tp->snd_holes);
480 		struct sackhole *prev;
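		/*
		 * Walk the scoreboard from the front: holes ending at or
		 * before lastack are cumulatively acked and freed; a hole
		 * straddling lastack has its start (and, if needed, rxmit)
		 * pulled up to lastack, with the retransmit accounting in
		 * sackhint adjusted to match.
		 */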
481 		while (cur)
482 			if (SEQ_LEQ(cur->end, lastack)) {
483 				prev = cur;
484 				tp->sackhint.sack_bytes_rexmit -=
485 					(cur->rxmit - cur->start);
486 				if (tp->sackhint.nexthole == cur)
487 					tp->sackhint.nexthole =
488 					    TAILQ_NEXT(cur, scblink);
489 				cur = TAILQ_NEXT(cur, scblink);
490 				TAILQ_REMOVE(&tp->snd_holes, prev, scblink);
491 				uma_zfree(sack_hole_zone, prev);
492 				tp->snd_numholes--;
493 				tcp_sack_globalholes--;
494 			} else if (SEQ_LT(cur->start, lastack)) {
495 				if (SEQ_LT(cur->rxmit, lastack)) {
496 					tp->sackhint.sack_bytes_rexmit -=
497 					    (cur->rxmit - cur->start);
498 					cur->rxmit = lastack;
499 				} else
500 					tp->sackhint.sack_bytes_rexmit -=
501 					    (lastack - cur->start);
502 				cur->start = lastack;
503 				break;
504 			} else
505 				break;
506 	}
507 }
508 
509 void
510 tcp_free_sackholes(struct tcpcb *tp)
511 {
512 	struct sackhole *q;
513 
514 	INP_LOCK_ASSERT(tp->t_inpcb);
515 	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) {
516 		TAILQ_REMOVE(&tp->snd_holes, q, scblink);
517 		uma_zfree(sack_hole_zone, q);
518 		tcp_sack_globalholes--;
519 	}
520 	tp->snd_numholes = 0;
521 	tp->sackhint.nexthole = NULL;
522 	tp->sackhint.sack_bytes_rexmit = 0;
523 }
524 
525 /*
526  * Partial ack handling within a sack recovery episode.
527  * Keeping this very simple for now. When a partial ack
528  * is received, force snd_cwnd to a value that will allow
529  * the sender to transmit no more than 2 segments.
530  * If necessary, a better scheme can be adopted at a
531  * later point, but for now, the goal is to prevent the
532  * sender from bursting a large amount of data in the midst
533  * of sack recovery.
534  */
535 void
536 tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
539 {
540 	int num_segs = 1;
541 
542 	INP_LOCK_ASSERT(tp->t_inpcb);
543 	callout_stop(tp->tt_rexmt);
544 	tp->t_rtttime = 0;
545 	/* send one or two segments based on how much new data was acked */
546 	if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
547 		num_segs = 2;
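	/*
	 * Clamp cwnd to roughly the amount of data sent during this recovery
	 * episode (retransmitted bytes plus new data sent past sack_newdata,
	 * i.e. snd_nxt at the start of recovery), plus room for num_segs more
	 * segments; it is further capped at ssthresh below.
	 */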
548 	tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
549 		(tp->snd_nxt - tp->sack_newdata) +
550 		num_segs * tp->t_maxseg);
551 	if (tp->snd_cwnd > tp->snd_ssthresh)
552 		tp->snd_cwnd = tp->snd_ssthresh;
553 	tp->t_flags |= TF_ACKNOW;
554 	(void) tcp_output(tp);
555 }
556 
557 /*
558  * Debug version of tcp_sack_output() that walks the scoreboard. Used for
559  * now to sanity check the hint.
560  */
561 static struct sackhole *
562 tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
563 {
564 	struct sackhole *p;
565 
566 	INP_LOCK_ASSERT(tp->t_inpcb);
567 	*sack_bytes_rexmt = 0;
568 	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
569 		if (SEQ_LT(p->rxmit, p->end)) {
570 			if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
571 				continue;
572 			}
573 			*sack_bytes_rexmt += (p->rxmit - p->start);
574 			break;
575 		}
576 		*sack_bytes_rexmt += (p->rxmit - p->start);
577 	}
578 	return (p);
579 }
580 
581 /*
582  * Returns the next hole to retransmit and the number of retransmitted bytes
583  * from the scoreboard. We store both the next hole and the number of
584  * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
585  * reception). This avoids scoreboard traversals completely.
586  *
587  * The loop here will traverse *at most* one link. Here's the argument.
588  * For the loop to traverse more than 1 link before finding the next hole to
589  * retransmit, we would need to have at least 1 node following the current hint
590  * with (rxmit == end). But, for all holes following the current hint,
591  * (start == rxmit), since we have not yet retransmitted from them. Therefore,
592  * in order to traverse more than 1 link in the loop below, we need to have at least
593  * one node following the current hint with (start == rxmit == end).
594  * But that can't happen: (start == end) means that all the data in that hole
595  * has been sacked, in which case the hole would have been removed from the
596  * scoreboard.
597  */
598 struct sackhole *
599 tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
600 {
601 	struct sackhole *hole = NULL, *dbg_hole = NULL;
602 	int dbg_bytes_rexmt;
603 
604 	INP_LOCK_ASSERT(tp->t_inpcb);
605 	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
606 	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
607 	hole = tp->sackhint.nexthole;
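	/*
	 * If the cached hole still has unretransmitted data (rxmit < end), or
	 * there is no cached hole at all, use the hint as is; otherwise
	 * advance it, which by the argument above takes at most one step.
	 */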
608 	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
609 		goto out;
610 	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
611 		if (SEQ_LT(hole->rxmit, hole->end)) {
612 			tp->sackhint.nexthole = hole;
613 			break;
614 		}
615 	}
616 out:
617 	if (dbg_hole != hole) {
618 		printf("%s: Computed sack hole not the same as cached value\n", __func__);
619 		hole = dbg_hole;
620 	}
621 	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
622 		printf("%s: Computed sack_bytes_retransmitted (%d) not "
623 		       "the same as cached value (%d)\n",
624 		       __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
625 		*sack_bytes_rexmt = dbg_bytes_rexmt;
626 	}
627 	return (hole);
628 }
629 
630 /*
631  * After a timeout, the SACK list may be rebuilt.  This SACK information
632  * should be used to avoid retransmitting SACKed data.  This function
633  * traverses the SACK list to see if snd_nxt should be moved forward.
634  */
635 void
636 tcp_sack_adjust(struct tcpcb *tp)
637 {
638 	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
639 
640 	INP_LOCK_ASSERT(tp->t_inpcb);
641 	if (cur == NULL)
642 		return; /* No holes */
643 	if (SEQ_GEQ(tp->snd_nxt, tp->rcv_lastsack))
644 		return; /* We're already beyond any SACKed blocks */
645 	/*
646 	 * Two cases for which we want to advance snd_nxt:
647 	 * i) snd_nxt lies between end of one hole and beginning of another
648 	 * ii) snd_nxt lies between end of last hole and rcv_lastsack
649 	 */
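	/*
	 * For example (illustrative numbers): with holes [100,200) and
	 * [300,400) and snd_nxt = 250, snd_nxt is advanced to 300, the start
	 * of the next hole, so the already-SACKed range [200,300) is not
	 * retransmitted.
	 */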
650 	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
651 		if (SEQ_LT(tp->snd_nxt, cur->end))
652 			return;
653 		if (SEQ_GEQ(tp->snd_nxt, p->start))
654 			cur = p;
655 		else {
656 			tp->snd_nxt = p->start;
657 			return;
658 		}
659 	}
660 	if (SEQ_LT(tp->snd_nxt, cur->end))
661 		return;
662 	tp->snd_nxt = tp->rcv_lastsack;
663 	return;
664 }
665