xref: /freebsd/sys/netinet/tcp_sack.c (revision fbe4316a67fa30b786e2cac77d6f6c2b6b5b691c)
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_sack.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD$
 */

/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM		*/
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM		*/
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /*IPSEC*/
#include <machine/in_cksum.h>

extern struct uma_zone *sack_hole_zone;

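/*
 * SACK tuning and monitoring knobs, exported under the net.inet.tcp.sack
 * sysctl node created below.
 */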
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
	&tcp_do_sack, 0, "Enable/Disable TCP SACK support");
TUNABLE_INT("net.inet.tcp.sack.enable", &tcp_do_sack);

static int tcp_sack_maxholes = 128;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_RW,
	&tcp_sack_maxholes, 0,
	"Maximum number of TCP SACK holes allowed per connection");

static int tcp_sack_globalmaxholes = 65536;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_RW,
	&tcp_sack_globalmaxholes, 0,
	"Global maximum number of TCP SACK holes");

static int tcp_sack_globalholes = 0;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_RD,
	&tcp_sack_globalholes, 0,
	"Global number of TCP SACK holes currently allocated");

/*
 * This function is called upon receipt of new valid data (while not in header
 * prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one.  Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver.  These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/* Check arguments */
	KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));

	/* SACK block for the received segment. */
	head_blk.start = rcv_start;
	head_blk.end = rcv_end;

	/*
	 * Merge updated SACK blocks into head_blk, and
	 * save unchanged SACK blocks into saved_blks[].
	 * num_saved will have the number of the saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
			   SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk.
			 * This SACK block itself will be discarded.
			 */
			if (SEQ_GT(head_blk.start, start))
				head_blk.start = start;
			if (SEQ_LT(head_blk.end, end))
				head_blk.end = end;
		} else {
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}

	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
		/*
		 * The received data segment is an out-of-order segment.
		 * Put head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS)
			num_saved--;
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head],
		      sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;
}

/*
 * Delete all receiver-side SACK information.
 */
void
tcp_clean_sackreport(struct tcpcb *tp)
{
	int i;

	INP_LOCK_ASSERT(tp->t_inpcb);
	tp->rcv_numsacks = 0;
	for (i = 0; i < MAX_SACK_BLKS; i++)
		tp->sackblks[i].start = tp->sackblks[i].end = 0;
}

/*
 * Allocate struct sackhole.
 */
static struct sackhole *
tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct sackhole *hole;

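	/*
	 * Enforce both the per-connection and the global limits on the
	 * number of SACK holes before allocating a new one.
	 */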
	if (tp->snd_numholes >= tcp_sack_maxholes ||
	    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
		tcpstat.tcps_sack_sboverflow++;
		return (NULL);
	}

	hole = (struct sackhole *)uma_zalloc(sack_hole_zone, M_NOWAIT);
	if (hole == NULL)
		return (NULL);

	hole->start = start;
	hole->end = end;
	hole->rxmit = start;

	tp->snd_numholes++;
	tcp_sack_globalholes++;

	return (hole);
}

/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{
	uma_zfree(sack_hole_zone, hole);

	tp->snd_numholes--;
	tcp_sack_globalholes--;

	KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
	KASSERT(tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
}

/*
 * Process the TCP SACK option.  Returns 1 if tcp_dooptions() should keep
 * processing the segment's options, and 0 if the SACK option was valid and
 * has been consumed here.  tp->snd_holes is an ordered list of holes (oldest
 * to newest, in terms of the sequence space).
 */
int
tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen)
{
	int tmp_olen;
	u_char *tmp_cp;
	struct sackhole *cur, *temp;

	INP_LOCK_ASSERT(tp->t_inpcb);
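	/*
	 * Ignore the option unless SACK is enabled on this connection, the
	 * segment carries an ACK, the option length works out to a whole
	 * number of SACK blocks, and the ACK falls within the send window.
	 */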
	if (!tp->sack_enable)
		return (1);
	if ((th->th_flags & TH_ACK) == 0)
		return (1);
	/* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */
	if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
		return (1);
	/* If ack is outside [snd_una, snd_max], ignore the SACK options */
	if (SEQ_LT(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max))
		return (1);
	tmp_cp = cp + 2;
	tmp_olen = optlen - 2;
	tcpstat.tcps_sack_rcv_blocks++;
	if (tp->t_maxseg == 0)
		panic("tcp_sack_option"); /* Should never happen */
	while (tmp_olen > 0) {
		struct sackblk sack;

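		/*
		 * Each SACK block in the option is a pair of 32-bit sequence
		 * numbers in network byte order: the left edge (first octet
		 * covered) followed by the right edge (the octet just past
		 * the last one covered).
		 */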
		bcopy(tmp_cp, (char *) &(sack.start), sizeof(tcp_seq));
		sack.start = ntohl(sack.start);
		bcopy(tmp_cp + sizeof(tcp_seq),
		    (char *) &(sack.end), sizeof(tcp_seq));
		sack.end = ntohl(sack.end);
		tmp_olen -= TCPOLEN_SACK;
		tmp_cp += TCPOLEN_SACK;
		if (SEQ_LEQ(sack.end, sack.start))
			continue; /* bad SACK fields */
		if (SEQ_LEQ(sack.end, tp->snd_una))
			continue; /* old block */
		if (SEQ_GT(th->th_ack, tp->snd_una)) {
			if (SEQ_LT(sack.start, th->th_ack))
				continue;
		}
		if (SEQ_GT(sack.end, tp->snd_max))
			continue;
		if (TAILQ_EMPTY(&tp->snd_holes)) { /* first hole */
			cur = tcp_sackhole_alloc(tp, th->th_ack, sack.start);
			if (cur == NULL) {
				/* ENOBUFS, so ignore SACKed block for now. */
				continue;
			}
			TAILQ_INSERT_HEAD(&tp->snd_holes, cur, scblink);
			tp->rcv_lastsack = sack.end;
			/* Update the sack scoreboard "cache" */
			tp->sackhint.nexthole = cur;
			continue; /* with next sack block */
		}
		/* Go thru list of holes. */
		cur = TAILQ_FIRST(&tp->snd_holes);
		while (cur) {
			if (SEQ_LEQ(sack.end, cur->start))
				/* SACKs data before the current hole */
				break; /* no use going through more holes */
			if (SEQ_GEQ(sack.start, cur->end)) {
				/* SACKs data beyond the current hole */
				cur = TAILQ_NEXT(cur, scblink);
				continue;
			}
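			/*
			 * This hole overlaps the SACK block.  Back its
			 * retransmitted byte count out of the hint now;
			 * whatever remains of the hole after it is trimmed,
			 * split, or deleted is counted back in below.
			 */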
			tp->sackhint.sack_bytes_rexmit -=
				(cur->rxmit - cur->start);
			KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				("sackhint bytes rtx >= 0"));
			if (SEQ_LEQ(sack.start, cur->start)) {
				/* Data acks at least the beginning of hole */
				if (SEQ_GEQ(sack.end, cur->end)) {
					/* Acks entire hole, so delete hole */
					if (tp->sackhint.nexthole == cur)
						tp->sackhint.nexthole =
						    TAILQ_NEXT(cur, scblink);
					temp = cur;
					cur = TAILQ_NEXT(cur, scblink);
					TAILQ_REMOVE(&tp->snd_holes,
						temp, scblink);
					tcp_sackhole_free(tp, temp);
					continue;
				} else {
					/* Move start of hole forward */
					cur->start = sack.end;
					cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
				}
			} else if (SEQ_GEQ(sack.end, cur->end)) {
				/* Move end of hole backward */
				cur->end = sack.start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
			} else {
				/*
				 * ACKs some data in middle of a hole; need to
				 * split current hole
				 */
				temp = tcp_sackhole_alloc(tp, sack.end,
							  cur->end);
				if (temp != NULL) {
					if (SEQ_GT(cur->rxmit, temp->rxmit))
						temp->rxmit = cur->rxmit;
					TAILQ_INSERT_AFTER(&tp->snd_holes,
							   cur, temp, scblink);
					cur->end = sack.start;
					cur->rxmit = SEQ_MIN(cur->rxmit,
						cur->end);
					tp->sackhint.sack_bytes_rexmit +=
						(cur->rxmit - cur->start);
					cur = temp;
				}
			}
			tp->sackhint.sack_bytes_rexmit +=
			    (cur->rxmit - cur->start);
			cur = TAILQ_NEXT(cur, scblink);
		}
		/* At this point, we have iterated the whole scoreboard. */
		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
			/* Need to append new hole at end. */
			temp = tcp_sackhole_alloc(tp, tp->rcv_lastsack,
						  sack.start);
			if (temp == NULL)
				continue; /* ENOBUFS */
			TAILQ_INSERT_TAIL(&tp->snd_holes, temp, scblink);
			tp->rcv_lastsack = sack.end;
			if (tp->sackhint.nexthole == NULL)
				tp->sackhint.nexthole = temp;
		}
		if (SEQ_LT(tp->rcv_lastsack, sack.end))
			tp->rcv_lastsack = sack.end;
	}
	return (0);
}

/*
 * Delete stale (i.e., cumulatively ack'd) holes.  A hole is deleted only if
 * it is completely acked; otherwise, tcp_sack_option(), called from
 * tcp_dooptions(), will fix up the hole.
 */
void
tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
{
	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
		/* max because this could be an older ack just arrived */
		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
			th->th_ack : tp->snd_una;
		struct sackhole *cur = TAILQ_FIRST(&tp->snd_holes);
		struct sackhole *prev;
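		/*
		 * Walk the scoreboard from the oldest hole: free holes that
		 * the cumulative ACK covers entirely, trim the first
		 * partially covered hole, and stop at the first hole that
		 * lies entirely above lastack.
		 */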
		while (cur)
			if (SEQ_LEQ(cur->end, lastack)) {
				prev = cur;
				tp->sackhint.sack_bytes_rexmit -=
					(cur->rxmit - cur->start);
				if (tp->sackhint.nexthole == cur)
					tp->sackhint.nexthole =
					    TAILQ_NEXT(cur, scblink);
				cur = TAILQ_NEXT(cur, scblink);
				TAILQ_REMOVE(&tp->snd_holes, prev, scblink);
				tcp_sackhole_free(tp, prev);
			} else if (SEQ_LT(cur->start, lastack)) {
				if (SEQ_LT(cur->rxmit, lastack)) {
					tp->sackhint.sack_bytes_rexmit -=
					    (cur->rxmit - cur->start);
					cur->rxmit = lastack;
				} else
					tp->sackhint.sack_bytes_rexmit -=
					    (lastack - cur->start);
				cur->start = lastack;
				break;
			} else
				break;
	}
}

void
tcp_free_sackholes(struct tcpcb *tp)
{
	struct sackhole *q;

	INP_LOCK_ASSERT(tp->t_inpcb);
	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) {
		TAILQ_REMOVE(&tp->snd_holes, q, scblink);
		tcp_sackhole_free(tp, q);
	}
	tp->sackhint.nexthole = NULL;
	tp->sackhint.sack_bytes_rexmit = 0;

	KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0"));
}

/*
 * Partial ack handling within a SACK recovery episode.  This is kept very
 * simple for now: when a partial ack is received, force snd_cwnd to a value
 * that allows the sender to transmit no more than two segments.  A better
 * scheme can be adopted later if necessary; for now, the goal is to keep
 * the sender from bursting a large amount of data in the midst of SACK
 * recovery.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	int num_segs = 1;

	INP_LOCK_ASSERT(tp->t_inpcb);
	callout_stop(tp->tt_rexmt);
	tp->t_rtttime = 0;
	/* Send one or two segments based on how much new data was acked. */
	if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
		num_segs = 2;
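	/*
	 * Set cwnd to the data already retransmitted from the scoreboard
	 * plus the new data sent past the recovery point, with headroom
	 * for num_segs additional segments; it is clamped to ssthresh below.
	 */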
	tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
		(tp->snd_nxt - tp->sack_newdata) +
		num_segs * tp->t_maxseg);
	if (tp->snd_cwnd > tp->snd_ssthresh)
		tp->snd_cwnd = tp->snd_ssthresh;
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
}

/*
 * Debug version of tcp_sack_output() that walks the scoreboard.  Used for
 * now to sanity-check the hint.
 */
static struct sackhole *
tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *p;

	INP_LOCK_ASSERT(tp->t_inpcb);
	*sack_bytes_rexmt = 0;
	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
		if (SEQ_LT(p->rxmit, p->end)) {
			if (SEQ_LT(p->rxmit, tp->snd_una)) { /* old SACK hole */
				continue;
			}
			*sack_bytes_rexmt += (p->rxmit - p->start);
			break;
		}
		*sack_bytes_rexmt += (p->rxmit - p->start);
	}
	return (p);
}

/*
 * Returns the next hole to retransmit and the number of retransmitted bytes
 * from the scoreboard.  We store both the next hole and the number of
 * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
 * reception).  This avoids scoreboard traversals completely.
 *
 * The loop here will traverse *at most* one link.  Here's the argument.  For
 * the loop to traverse more than one link before finding the next hole to
 * retransmit, we would need at least one node following the current hint
 * with (rxmit == end).  But, for all holes following the current hint,
 * (start == rxmit), since we have not yet retransmitted from them.
 * Therefore, in order to traverse more than one link in the loop below, we
 * need at least one node following the current hint with
 * (start == rxmit == end).  But that can't happen: (start == end) means that
 * all the data in that hole has been sacked, in which case the hole would
 * have been removed from the scoreboard.
 */
struct sackhole *
tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *hole = NULL, *dbg_hole = NULL;
	int dbg_bytes_rexmt;

	INP_LOCK_ASSERT(tp->t_inpcb);
	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
	hole = tp->sackhint.nexthole;
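	/*
	 * If there is no scoreboard or the cached hole still has data left
	 * to retransmit, return the hint as is; otherwise advance the hint
	 * to the next hole with unretransmitted data.
	 */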
	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
		goto out;
	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
		if (SEQ_LT(hole->rxmit, hole->end)) {
			tp->sackhint.nexthole = hole;
			break;
		}
	}
out:
	if (dbg_hole != hole) {
		printf("%s: Computed sack hole not the same as cached value\n",
		    __func__);
		hole = dbg_hole;
	}
	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
		printf("%s: Computed sack_bytes_retransmitted (%d) not "
		       "the same as cached value (%d)\n",
		       __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
		*sack_bytes_rexmt = dbg_bytes_rexmt;
	}
	return (hole);
}

/*
 * After a timeout, the SACK list may be rebuilt.  This SACK information
 * should be used to avoid retransmitting SACKed data.  This function
 * traverses the SACK list to see if snd_nxt should be moved forward.
 */
void
tcp_sack_adjust(struct tcpcb *tp)
{
	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (cur == NULL)
		return; /* No holes */
	if (SEQ_GEQ(tp->snd_nxt, tp->rcv_lastsack))
		return; /* We're already beyond any SACKed blocks */
	/*
	 * Two cases for which we want to advance snd_nxt:
	 * i) snd_nxt lies between end of one hole and beginning of another
	 * ii) snd_nxt lies between end of last hole and rcv_lastsack
	 */
	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
		if (SEQ_LT(tp->snd_nxt, cur->end))
			return;
		if (SEQ_GEQ(tp->snd_nxt, p->start))
			cur = p;
		else {
			tp->snd_nxt = p->start;
			return;
		}
	}
	if (SEQ_LT(tp->snd_nxt, cur->end))
		return;
	tp->snd_nxt = tp->rcv_lastsack;
	return;
}
669