xref: /freebsd/sys/netinet/tcp_sack.c (revision 92fd12db2459ef2c8419cfcc6a4f8dc68f5c88bc)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)tcp_sack.c	8.12 (Berkeley) 5/24/95
30  * $FreeBSD$
31  */
32 
33 /*-
34  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
62  *
63  * NRL grants permission for redistribution and use in source and binary
64  * forms, with or without modification, of the software and documentation
65  * created at NRL provided that the following conditions are met:
66  *
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  * 3. All advertising materials mentioning features or use of this software
73  *    must display the following acknowledgements:
74  *	This product includes software developed by the University of
75  *	California, Berkeley and its contributors.
76  *	This product includes software developed at the Information
77  *	Technology Division, US Naval Research Laboratory.
78  * 4. Neither the name of the NRL nor the names of its contributors
79  *    may be used to endorse or promote products derived from this software
80  *    without specific prior written permission.
81  *
82  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
83  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
84  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
85  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
86  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
87  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
88  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
89  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
90  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
91  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
92  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93  *
94  * The views and conclusions contained in the software and documentation
95  * are those of the authors and should not be interpreted as representing
96  * official policies, either expressed or implied, of the US Naval
97  * Research Laboratory (NRL).
98  */
99 #include "opt_inet.h"
100 #include "opt_inet6.h"
101 #include "opt_ipsec.h"
102 #include "opt_tcpdebug.h"
103 #include "opt_tcp_input.h"
104 #include "opt_tcp_sack.h"
105 
106 #include <sys/param.h>
107 #include <sys/systm.h>
108 #include <sys/kernel.h>
109 #include <sys/sysctl.h>
110 #include <sys/malloc.h>
111 #include <sys/mbuf.h>
112 #include <sys/proc.h>		/* for proc0 declaration */
113 #include <sys/protosw.h>
114 #include <sys/socket.h>
115 #include <sys/socketvar.h>
116 #include <sys/syslog.h>
117 #include <sys/systm.h>
118 
119 #include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
120 
121 #include <vm/uma.h>
122 
123 #include <net/if.h>
124 #include <net/route.h>
125 
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #include <netinet/ip.h>
129 #include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM		*/
130 #include <netinet/in_var.h>
131 #include <netinet/icmp_var.h>	/* for ICMP_BANDLIM		*/
132 #include <netinet/in_pcb.h>
133 #include <netinet/ip_var.h>
134 #include <netinet/ip6.h>
135 #include <netinet/icmp6.h>
136 #include <netinet6/nd6.h>
137 #include <netinet6/ip6_var.h>
138 #include <netinet6/in6_pcb.h>
139 #include <netinet/tcp.h>
140 #include <netinet/tcp_fsm.h>
141 #include <netinet/tcp_seq.h>
142 #include <netinet/tcp_timer.h>
143 #include <netinet/tcp_var.h>
144 #include <netinet6/tcp6_var.h>
145 #include <netinet/tcpip.h>
146 #ifdef TCPDEBUG
147 #include <netinet/tcp_debug.h>
148 #endif /* TCPDEBUG */
149 
150 #ifdef FAST_IPSEC
151 #include <netipsec/ipsec.h>
152 #include <netipsec/ipsec6.h>
153 #endif
154 
155 #ifdef IPSEC
156 #include <netinet6/ipsec.h>
157 #include <netinet6/ipsec6.h>
158 #include <netkey/key.h>
159 #endif /*IPSEC*/
160 #include <machine/in_cksum.h>
161 
162 extern struct uma_zone *sack_hole_zone;
163 
164 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
165 int tcp_do_sack = 1;
166 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
167 	&tcp_do_sack, 0, "Enable/Disable TCP SACK support");
168 TUNABLE_INT("net.inet.tcp.sack.enable", &tcp_do_sack);
169 
170 static int tcp_sack_maxholes = 128;
171 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_RW,
172 	&tcp_sack_maxholes, 0,
173     "Maximum number of TCP SACK holes allowed per connection");
174 
175 static int tcp_sack_globalmaxholes = 65536;
176 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_RW,
177 	&tcp_sack_globalmaxholes, 0,
178     "Global maximum number of TCP SACK holes");
179 
180 static int tcp_sack_globalholes = 0;
181 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_RD,
182     &tcp_sack_globalholes, 0,
183     "Global number of TCP SACK holes currently allocated");
184 
185 /*
186  * This function is called upon receipt of new valid data (while not in header
187  * prediction mode), and it updates the ordered list of sacks.
188  */
189 void
190 tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
191 {
192 	/*
193 	 * First reported block MUST be the most recent one.  Subsequent
194 	 * blocks SHOULD be in the order in which they arrived at the
195 	 * receiver.  These two conditions make the implementation fully
196 	 * compliant with RFC 2018.
197 	 */
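	/*
	 * For example, with rcv_nxt = 100 and an existing report of
	 * [200,300) and [500,600), a newly arrived segment [300,400)
	 * overlaps the first block, so head_blk grows to [200,400); the
	 * disjoint block [500,600) is saved and re-emitted after head_blk,
	 * leaving the report as [200,400), [500,600).
	 */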
198 	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
199 	int num_head, num_saved, i;
200 
201 	INP_LOCK_ASSERT(tp->t_inpcb);
202 
203 	/* Check arguments */
204 	KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
205 
206 	/* SACK block for the received segment. */
207 	head_blk.start = rcv_start;
208 	head_blk.end = rcv_end;
209 
210 	/*
211 	 * Merge updated SACK blocks into head_blk, and
212 	 * save unchanged SACK blocks into saved_blks[].
213 	 * num_saved will hold the number of saved SACK blocks.
214 	 */
215 	num_saved = 0;
216 	for (i = 0; i < tp->rcv_numsacks; i++) {
217 		tcp_seq start = tp->sackblks[i].start;
218 		tcp_seq end = tp->sackblks[i].end;
219 		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
220 			/*
221 			 * Discard this SACK block.
222 			 */
223 		} else if (SEQ_LEQ(head_blk.start, end) &&
224 			   SEQ_GEQ(head_blk.end, start)) {
225 			/*
226 			 * Merge this SACK block into head_blk.
227 			 * This SACK block itself will be discarded.
228 			 */
229 			if (SEQ_GT(head_blk.start, start))
230 				head_blk.start = start;
231 			if (SEQ_LT(head_blk.end, end))
232 				head_blk.end = end;
233 		} else {
234 			/*
235 			 * Save this SACK block.
236 			 */
237 			saved_blks[num_saved].start = start;
238 			saved_blks[num_saved].end = end;
239 			num_saved++;
240 		}
241 	}
242 
243 	/*
244 	 * Update SACK list in tp->sackblks[].
245 	 */
246 	num_head = 0;
247 	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
248 		/*
249 		 * The received data segment is an out-of-order segment.
250 		 * Put head_blk at the top of SACK list.
251 		 */
252 		tp->sackblks[0] = head_blk;
253 		num_head = 1;
254 		/*
255 		 * If the number of saved SACK blocks exceeds its limit,
256 		 * discard the last SACK block.
257 		 */
258 		if (num_saved >= MAX_SACK_BLKS)
259 			num_saved--;
260 	}
261 	if (num_saved > 0) {
262 		/*
263 		 * Copy the saved SACK blocks back.
264 		 */
265 		bcopy(saved_blks, &tp->sackblks[num_head],
266 		      sizeof(struct sackblk) * num_saved);
267 	}
268 
269 	/* Save the number of SACK blocks. */
270 	tp->rcv_numsacks = num_head + num_saved;
271 }
272 
273 /*
274  * Delete all receiver-side SACK information.
275  */
276 void
277 tcp_clean_sackreport(struct tcpcb *tp)
278 {
280 	int i;
281 
282 	INP_LOCK_ASSERT(tp->t_inpcb);
283 	tp->rcv_numsacks = 0;
284 	for (i = 0; i < MAX_SACK_BLKS; i++)
285 		tp->sackblks[i].start = tp->sackblks[i].end = 0;
286 }
287 
288 /*
289  * Allocate struct sackhole.
290  */
291 static struct sackhole *
292 tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
293 {
294 	struct sackhole *hole;
295 
296 	if (tp->snd_numholes >= tcp_sack_maxholes ||
297 	    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
298 		tcpstat.tcps_sack_sboverflow++;
299 		return NULL;
300 	}
301 
302 	hole = (struct sackhole *)uma_zalloc(sack_hole_zone, M_NOWAIT);
303 	if (hole == NULL)
304 		return NULL;
305 
306 	hole->start = start;
307 	hole->end = end;
308 	hole->rxmit = start;
309 
310 	tp->snd_numholes++;
311 	tcp_sack_globalholes++;
312 
313 	return hole;
314 }
315 
316 /*
317  * Free struct sackhole.
318  */
319 static void
320 tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
321 {
322 	uma_zfree(sack_hole_zone, hole);
323 
324 	tp->snd_numholes--;
325 	tcp_sack_globalholes--;
326 
327 	KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
328 	KASSERT(tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
329 }
330 
331 /*
332  * Process the TCP SACK option.  Returns 1 if the option was skipped (and
333  * tcp_dooptions() should keep processing), or 0 if it was handled fine.
334  * tp->snd_holes is an ordered list of holes (oldest to newest in sequence space).
335  */
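/*
 * For example, if the scoreboard holds a single hole [100,200) (sequence
 * numbers 100 through 199 not yet SACKed) and a sack block [150,180)
 * arrives, the block acks the middle of the hole, so the hole is split
 * into [100,150) and [180,200), the second half being allocated with
 * tcp_sackhole_alloc() below.
 */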
336 int
337 tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen)
338 {
339 	int tmp_olen;
340 	u_char *tmp_cp;
341 	struct sackhole *cur, *temp;
342 	struct sackblk sack, sack_blocks[TCP_MAX_SACK];
343 	int i, j, next_sack_blk, num_sack_blks;
344 
345 	INP_LOCK_ASSERT(tp->t_inpcb);
346 	if (!tp->sack_enable)
347 		return (1);
348 	if ((th->th_flags & TH_ACK) == 0)
349 		return (1);
350 	/* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */
351 	if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
352 		return (1);
353 	/* If ack is outside [snd_una, snd_max], ignore the SACK options */
354 	if (SEQ_LT(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max))
355 		return (1);
356 	tmp_cp = cp + 2;
357 	tmp_olen = optlen - 2;
358 	tcpstat.tcps_sack_rcv_blocks++;
359 	/*
360 	 * Sort the SACK blocks so we can update the scoreboard
361 	 * with just one pass. The overhead of sorting up to 4 elements
362 	 * is less than making 3 passes over the scoreboard.
363 	 */
364 	num_sack_blks = 0;
365 	while (tmp_olen > 0) {
366 		bcopy(tmp_cp, &sack, sizeof(sack));
367 		sack.start = ntohl(sack.start);
368 		sack.end = ntohl(sack.end);
369 		if (SEQ_GT(sack.end, sack.start) &&
370 		    SEQ_GT(sack.start, tp->snd_una) &&
371 		    SEQ_GT(sack.start, th->th_ack) &&
372 		    SEQ_LEQ(sack.end, tp->snd_max))
373 			sack_blocks[num_sack_blks++] = sack;
374 		tmp_olen -= TCPOLEN_SACK;
375 		tmp_cp += TCPOLEN_SACK;
376 	}
377 	if (num_sack_blks == 0)
378 		return 0;
379 	/* Simple exchange sort on the block start sequence numbers. */
380 	for (i = 0; i < num_sack_blks; i++) {
381 		for (j = i + 1; j < num_sack_blks; j++) {
382 			if (SEQ_GT(sack_blocks[i].start,
383 				   sack_blocks[j].start)){
384 				sack = sack_blocks[i];
385 				sack_blocks[i] = sack_blocks[j];
386 				sack_blocks[j] = sack;
387 			}
388 		}
389 	}
390 	if (TAILQ_EMPTY(&tp->snd_holes))
391 		/*
392 		 * Empty scoreboard. Need to initialize rcv_lastsack (it may be
393 		 * uninitialized or have a bogus value). Scoreboard holes
394 		 * (from the sack blocks received) are created later below (in
395 		 * the logic that adds holes to the tail of the scoreboard).
396 		 */
397 		tp->rcv_lastsack = tp->snd_una;
398 	next_sack_blk = 0;
399 	cur = TAILQ_FIRST(&tp->snd_holes);
400 	/*
401 	 * Since the incoming sack blocks are sorted, we can process them
402 	 * making one sweep of the scoreboard.
403 	 */
404 	while ((next_sack_blk < num_sack_blks) && (cur != NULL)) {
405 		sack = sack_blocks[next_sack_blk];
406 		if (SEQ_LT(tp->rcv_lastsack, sack.start))
407 			/*
408 			 * The sack block acks data to the right of all the holes
409 			 * in the scoreboard. No need to iterate over the
410 			 * scoreboard anymore.
411 			 */
412 			break;
413 		if (SEQ_LEQ(sack.end, cur->start)) {
414 			/*
415 			 * SACKs data before the current hole.
416 			 * Ignore the sack block. Go to the next sack
417 			 * block.
418 			 */
419 			next_sack_blk++;
420 			continue;
421 		}
422 		if (SEQ_GEQ(sack.start, cur->end)) {
423 			/*
424 			 * SACKs data beyond the current hole.
425 			 * Go to the next hole.
426 			 */
427 			cur = TAILQ_NEXT(cur, scblink);
428 			continue;
429 		}
430 		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
431 		KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
432 			("sackhint bytes rtx >= 0"));
433 		if (SEQ_LEQ(sack.start, cur->start)) {
434 			/* Data acks at least the beginning of hole */
435 			if (SEQ_GEQ(sack.end, cur->end)) {
436 				/* Acks entire hole, so delete hole */
437 				if (tp->sackhint.nexthole == cur)
438 					tp->sackhint.nexthole =
439 						TAILQ_NEXT(cur, scblink);
440 				temp = cur;
441 				cur = TAILQ_NEXT(cur, scblink);
442 				TAILQ_REMOVE(&tp->snd_holes, temp, scblink);
443 				tcp_sackhole_free(tp, temp);
444 				/*
445 				 * hole too, so continue on to the next hole.
446 				 * hole too, so continue onto the next hole.
447 				 */
448 				continue;
449 			} else {
450 				/* Move start of hole forward */
451 				cur->start = sack.end;
452 				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
453 			}
454 		} else {
455 			if (SEQ_GEQ(sack.end, cur->end)) {
456 				/* Move end of hole backward */
457 				cur->end = sack.start;
458 				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
459 			} else {
460 				/*
461 				 * ACKs some data in middle of a hole; need to
462 				 * split current hole
463 				 */
464 				temp = tcp_sackhole_alloc(tp, sack.end,
465 							  cur->end);
466 				if (temp != NULL) {
467 					if (SEQ_GT(cur->rxmit, temp->rxmit))
468 						temp->rxmit = cur->rxmit;
469 					TAILQ_INSERT_AFTER(&tp->snd_holes,
470 							   cur, temp, scblink);
471 					cur->end = sack.start;
472 					cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
473 					tp->sackhint.sack_bytes_rexmit +=
474 						(cur->rxmit - cur->start);
475 					cur = temp;
476 				}
477 			}
478 		}
479 		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
480 		/*
481 		 * Testing sack.end against cur->end tells us whether we're done
482 		 * with the sack block or the sack hole. Accordingly, we advance
483 		 * one or the other.
484 		 */
485 		if (SEQ_GEQ(sack.end, cur->end))
486 			cur = TAILQ_NEXT(cur, scblink);
487 		else
488 			next_sack_blk++;
489 	}
490 	/* Iterated all the holes in the scoreboard. Add new holes. */
491 	for ( ; next_sack_blk < num_sack_blks ; next_sack_blk++) {
492 		sack = sack_blocks[next_sack_blk];
493 		/*
494 	 * The two SEQ_LT() checks here, comparing rcv_lastsack against
495 	 * sack.start and sack.end, may seem redundant, but they are necessary
496 	 * to deal with overlapping sack blocks.
497 		 */
498 		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
499 			/* Need to append new hole at end. */
500 			temp = tcp_sackhole_alloc(tp, tp->rcv_lastsack,
501 						  sack.start);
502 			if (temp == NULL)
503 				continue; /* ENOBUFS */
504 			TAILQ_INSERT_TAIL(&tp->snd_holes, temp, scblink);
505 			tp->rcv_lastsack = sack.end;
506 			if (tp->sackhint.nexthole == NULL)
507 				tp->sackhint.nexthole = temp;
508 		}
509 		if (SEQ_LT(tp->rcv_lastsack, sack.end))
510 			tp->rcv_lastsack = sack.end;
511 	}
512 	return (0);
513 }
514 
515 /*
516  * Delete stale (i.e., cumulatively ack'd) holes.  A hole is deleted only if
517  * it is completely acked; otherwise, tcp_sack_option(), called from
518  * tcp_dooptions(), will fix up the hole.
519  */
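/*
 * For example, with holes [100,200) and [300,400), a cumulative ack of 250
 * frees the first hole and leaves the second untouched, while an ack of 350
 * would instead trim the second hole to [350,400).
 */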
520 void
521 tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
522 {
525 	INP_LOCK_ASSERT(tp->t_inpcb);
526 	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
527 		/* Use the max, since this could be an older ack that just arrived. */
528 		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
529 			th->th_ack : tp->snd_una;
530 		struct sackhole *cur = TAILQ_FIRST(&tp->snd_holes);
531 		struct sackhole *prev;
532 		while (cur)
533 			if (SEQ_LEQ(cur->end, lastack)) {
534 				prev = cur;
535 				tp->sackhint.sack_bytes_rexmit -=
536 					(cur->rxmit - cur->start);
537 				if (tp->sackhint.nexthole == cur)
538 					tp->sackhint.nexthole =
539 					    TAILQ_NEXT(cur, scblink);
540 				cur = TAILQ_NEXT(cur, scblink);
541 				TAILQ_REMOVE(&tp->snd_holes, prev, scblink);
542 				tcp_sackhole_free(tp, prev);
543 			} else if (SEQ_LT(cur->start, lastack)) {
544 				if (SEQ_LT(cur->rxmit, lastack)) {
545 					tp->sackhint.sack_bytes_rexmit -=
546 					    (cur->rxmit - cur->start);
547 					cur->rxmit = lastack;
548 				} else
549 					tp->sackhint.sack_bytes_rexmit -=
550 					    (lastack - cur->start);
551 				cur->start = lastack;
552 				break;
553 			} else
554 				break;
555 	}
556 }
557 
558 void
559 tcp_free_sackholes(struct tcpcb *tp)
560 {
561 	struct sackhole *q;
562 
563 	INP_LOCK_ASSERT(tp->t_inpcb);
564 	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) {
565 		TAILQ_REMOVE(&tp->snd_holes, q, scblink);
566 		tcp_sackhole_free(tp, q);
567 	}
568 	tp->sackhint.nexthole = NULL;
569 	tp->sackhint.sack_bytes_rexmit = 0;
570 
571 	KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0"));
572 }
573 
574 /*
575  * Partial ack handling within a sack recovery episode.
576  * Keeping this very simple for now. When a partial ack
577  * is received, force snd_cwnd to a value that will allow
578  * the sender to transmit no more than 2 segments.
579  * If necessary, a better scheme can be adopted at a
580  * later point, but for now, the goal is to prevent the
581  * sender from bursting a large amount of data in the midst
582  * of sack recovery.
583  */
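/*
 * For example, with t_maxseg = 1460, sack_bytes_rexmit = 2920 and
 * (snd_nxt - sack_newdata) = 1460, a partial ack advancing snd_una by
 * fewer than three full segments gives num_segs = 1 and
 * snd_cwnd = 2920 + 1460 + 1460 = 5840, subject to the snd_ssthresh cap.
 */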
584 void
585 tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
586 {
589 	int num_segs = 1;
590 
591 	INP_LOCK_ASSERT(tp->t_inpcb);
592 	callout_stop(tp->tt_rexmt);
593 	tp->t_rtttime = 0;
594 	/* Send one or two segments, based on how much new data was acked. */
595 	if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
596 		num_segs = 2;
597 	tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
598 		(tp->snd_nxt - tp->sack_newdata) +
599 		num_segs * tp->t_maxseg);
600 	if (tp->snd_cwnd > tp->snd_ssthresh)
601 		tp->snd_cwnd = tp->snd_ssthresh;
602 	tp->t_flags |= TF_ACKNOW;
603 	(void) tcp_output(tp);
604 }
605 
606 /*
607  * Debug version of tcp_sack_output() that walks the scoreboard. Used for
608  * now to sanity check the hint.
609  */
610 static struct sackhole *
611 tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
612 {
613 	struct sackhole *p;
614 
615 	INP_LOCK_ASSERT(tp->t_inpcb);
616 	*sack_bytes_rexmt = 0;
617 	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
618 		if (SEQ_LT(p->rxmit, p->end)) {
619 			if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
620 				continue;
621 			}
622 			*sack_bytes_rexmt += (p->rxmit - p->start);
623 			break;
624 		}
625 		*sack_bytes_rexmt += (p->rxmit - p->start);
626 	}
627 	return (p);
628 }
629 
630 /*
631  * Returns the next hole to retransmit and the number of retransmitted bytes
632  * from the scoreboard. We store both the next hole and the number of
633  * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
634  * reception). This avoids scoreboard traversals completely.
635  *
636  * The loop here will traverse *at most* one link. Here's the argument.
637  * For the loop to traverse more than 1 link before finding the next hole to
638  * retransmit, we would need to have at least 1 node following the current hint
639  * with (rxmit == end). But, for all holes following the current hint,
640  * (start == rxmit), since we have not yet retransmitted from them. Therefore,
641  * in order to traverse more than one link in the loop below, we need to have
642  * at least one node following the current hint with (start == rxmit == end).
643  * But that can't happen: (start == end) means that all the data in that hole
644  * has been sacked, in which case the hole would have been removed from the
645  * scoreboard.
646  */
647 struct sackhole *
648 tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
649 {
650 	struct sackhole *hole = NULL, *dbg_hole = NULL;
651 	int dbg_bytes_rexmt;
652 
653 	INP_LOCK_ASSERT(tp->t_inpcb);
654 	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
655 	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
656 	hole = tp->sackhint.nexthole;
657 	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
658 		goto out;
659 	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
660 		if (SEQ_LT(hole->rxmit, hole->end)) {
661 			tp->sackhint.nexthole = hole;
662 			break;
663 		}
664 	}
665 out:
666 	if (dbg_hole != hole) {
667 		printf("%s: Computed sack hole not the same as cached value\n", __func__);
668 		hole = dbg_hole;
669 	}
670 	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
671 		printf("%s: Computed sack_bytes_retransmitted (%d) not "
672 		       "the same as cached value (%d)\n",
673 		       __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
674 		*sack_bytes_rexmt = dbg_bytes_rexmt;
675 	}
676 	return (hole);
677 }
678 
679 /*
680  * After a timeout, the SACK list may be rebuilt.  This SACK information
681  * should be used to avoid retransmitting SACKed data.  This function
682  * traverses the SACK list to see if snd_nxt should be moved forward.
683  */
684 void
685 tcp_sack_adjust(struct tcpcb *tp)
686 {
687 	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
688 
689 	INP_LOCK_ASSERT(tp->t_inpcb);
690 	if (cur == NULL)
691 		return; /* No holes */
692 	if (SEQ_GEQ(tp->snd_nxt, tp->rcv_lastsack))
693 		return; /* We're already beyond any SACKed blocks */
694 	/*
695 	 * Two cases for which we want to advance snd_nxt:
696 	 * i) snd_nxt lies between end of one hole and beginning of another
697 	 * ii) snd_nxt lies between end of last hole and rcv_lastsack
698 	 */
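	/*
	 * For example:
	 *
	 *	snd_una ... [hole1) ..A.. [hole2) ..B.. rcv_lastsack
	 *
	 * With snd_nxt at A, it is advanced to hole2->start (case i);
	 * with snd_nxt at B, past the last hole, it is advanced to
	 * rcv_lastsack (case ii).
	 */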
699 	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
700 		if (SEQ_LT(tp->snd_nxt, cur->end))
701 			return;
702 		if (SEQ_GEQ(tp->snd_nxt, p->start))
703 			cur = p;
704 		else {
705 			tp->snd_nxt = p->start;
706 			return;
707 		}
708 	}
709 	if (SEQ_LT(tp->snd_nxt, cur->end))
710 		return;
711 	tp->snd_nxt = tp->rcv_lastsack;
712 	return;
713 }
714