/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_sack.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD$
 */

/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /* IPSEC */
#include <machine/in_cksum.h>

extern struct uma_zone *sack_hole_zone;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable/Disable TCP SACK support");
TUNABLE_INT("net.inet.tcp.sack.enable", &tcp_do_sack);

static int tcp_sack_maxholes = 128;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_RW,
    &tcp_sack_maxholes, 0,
    "Maximum number of TCP SACK holes allowed per connection");

static int tcp_sack_globalmaxholes = 65536;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_RW,
    &tcp_sack_globalmaxholes, 0,
    "Global maximum number of TCP SACK holes");

static int tcp_sack_globalholes = 0;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_RD,
    &tcp_sack_globalholes, 0,
    "Global number of TCP SACK holes currently allocated");

/*
 * This function is called upon receipt of new valid data (while not in
 * header prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one.  Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver.  These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/* Check arguments. */
	KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));

	/* SACK block for the received segment. */
	head_blk.start = rcv_start;
	head_blk.end = rcv_end;

	/*
	 * Merge updated SACK blocks into head_blk, and save unchanged
	 * SACK blocks into saved_blks[].  num_saved will have the number
	 * of the saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
		    SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk.  This SACK
			 * block itself will be discarded.
			 */
			if (SEQ_GT(head_blk.start, start))
				head_blk.start = start;
			if (SEQ_LT(head_blk.end, end))
				head_blk.end = end;
		} else {
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}

	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
		/*
		 * The received data segment is an out-of-order segment.
		 * Put head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS)
			num_saved--;
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head],
		    sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;
}

/*
 * Delete all receiver-side SACK information.
 */
void
tcp_clean_sackreport(struct tcpcb *tp)
{
	int i;

	INP_LOCK_ASSERT(tp->t_inpcb);
	tp->rcv_numsacks = 0;
	for (i = 0; i < MAX_SACK_BLKS; i++)
		tp->sackblks[i].start = tp->sackblks[i].end = 0;
}

/*
 * Process the TCP SACK option.  Returns 1 if tcp_dooptions() should continue
 * processing options, and 0 if the SACK option was processed successfully.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 */
int
tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen)
{
	int tmp_olen;
	u_char *tmp_cp;
	struct sackhole *cur, *p, *temp;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (!tp->sack_enable)
		return (1);
	if ((th->th_flags & TH_ACK) == 0)
		return (1);
	/* Note: TCPOLEN_SACK must be 2 * sizeof(tcp_seq). */
	if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
		return (1);
	/* If ack is outside [snd_una, snd_max], ignore the SACK options. */
	if (SEQ_LT(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max))
		return (1);
	tmp_cp = cp + 2;
	tmp_olen = optlen - 2;
	tcpstat.tcps_sack_rcv_blocks++;
	if (tp->snd_numholes < 0)	/* XXX panic? */
		tp->snd_numholes = 0;
	if (tp->t_maxseg == 0)
		panic("tcp_sack_option");	/* Should never happen. */
	while (tmp_olen > 0) {
		struct sackblk sack;

		bcopy(tmp_cp, (char *)&(sack.start), sizeof(tcp_seq));
		sack.start = ntohl(sack.start);
		bcopy(tmp_cp + sizeof(tcp_seq),
		    (char *)&(sack.end), sizeof(tcp_seq));
		sack.end = ntohl(sack.end);
		tmp_olen -= TCPOLEN_SACK;
		tmp_cp += TCPOLEN_SACK;
		if (SEQ_LEQ(sack.end, sack.start))
			continue;		/* bad SACK fields */
		if (SEQ_LEQ(sack.end, tp->snd_una))
			continue;		/* old block */
		if (SEQ_GT(th->th_ack, tp->snd_una)) {
			if (SEQ_LT(sack.start, th->th_ack))
				continue;
		}
		if (SEQ_GT(sack.end, tp->snd_max))
			continue;
		if (tp->snd_holes == NULL) {	/* first hole */
			if (tcp_sack_globalholes >= tcp_sack_globalmaxholes ||
			    tcp_sack_maxholes == 0) {
				tcpstat.tcps_sack_sboverflow++;
				continue;
			}
			tp->snd_holes = (struct sackhole *)
			    uma_zalloc(sack_hole_zone, M_NOWAIT);
			if (tp->snd_holes == NULL) {
				/* ENOBUFS, so ignore SACKed block for now. */
				continue;
			}
			cur = tp->snd_holes;
			cur->start = th->th_ack;
			cur->end = sack.start;
			cur->rxmit = cur->start;
			cur->next = NULL;
			tp->snd_numholes = 1;
			tcp_sack_globalholes++;
			tp->rcv_lastsack = sack.end;
			continue;		/* with next sack block */
		}
		/* Go thru list of holes:  p = previous, cur = current. */
		p = cur = tp->snd_holes;
		while (cur) {
			if (SEQ_LEQ(sack.end, cur->start))
				/* SACKs data before the current hole. */
				break;	/* no use going through more holes */
			if (SEQ_GEQ(sack.start, cur->end)) {
				/* SACKs data beyond the current hole. */
				p = cur;
				cur = cur->next;
				continue;
			}
			if (SEQ_LEQ(sack.start, cur->start)) {
				/* Data acks at least the beginning of hole. */
				if (SEQ_GEQ(sack.end, cur->end)) {
					/* Acks entire hole, so delete hole. */
					if (p != cur) {
						p->next = cur->next;
						uma_zfree(sack_hole_zone, cur);
						cur = p->next;
					} else {
						cur = cur->next;
						uma_zfree(sack_hole_zone, p);
						p = cur;
						tp->snd_holes = p;
					}
					tp->snd_numholes--;
					tcp_sack_globalholes--;
					continue;
				}
				/* Otherwise, move start of hole forward. */
				cur->start = sack.end;
				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
				p = cur;
				cur = cur->next;
				continue;
			}
			/* Move end of hole backward. */
			if (SEQ_GEQ(sack.end, cur->end)) {
				cur->end = sack.start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
				p = cur;
				cur = cur->next;
				continue;
			}
			if (SEQ_LT(cur->start, sack.start) &&
			    SEQ_GT(cur->end, sack.end)) {
				/*
				 * ACKs some data in middle of a hole; need
				 * to split current hole.
				 */
				if (tp->snd_numholes >= tcp_sack_maxholes ||
				    tcp_sack_globalholes >=
				    tcp_sack_globalmaxholes) {
					tcpstat.tcps_sack_sboverflow++;
					continue;
				}
				temp = (struct sackhole *)
				    uma_zalloc(sack_hole_zone, M_NOWAIT);
				if (temp == NULL)
					continue;	/* ENOBUFS */
				temp->next = cur->next;
				temp->start = sack.end;
				temp->end = cur->end;
				temp->rxmit = SEQ_MAX(cur->rxmit, temp->start);
				cur->end = sack.start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
				cur->next = temp;
				p = temp;
				cur = p->next;
				tp->snd_numholes++;
				tcp_sack_globalholes++;
			}
		}
		/* At this point, p points to the last hole on the list. */
		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
			/*
			 * Need to append new hole at end.
			 * Last hole is p (and it's not NULL).
			 */
			if (tp->snd_numholes >= tcp_sack_maxholes ||
			    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
				tcpstat.tcps_sack_sboverflow++;
				continue;
			}
			temp = (struct sackhole *)
			    uma_zalloc(sack_hole_zone, M_NOWAIT);
			if (temp == NULL)
				continue;	/* ENOBUFS */
			temp->start = tp->rcv_lastsack;
			temp->end = sack.start;
			temp->rxmit = temp->start;
			temp->next = NULL;
			p->next = temp;
			tp->rcv_lastsack = sack.end;
			tp->snd_numholes++;
			tcp_sack_globalholes++;
		}
		if (SEQ_LT(tp->rcv_lastsack, sack.end))
			tp->rcv_lastsack = sack.end;
	}
	return (0);
}

/*
 * Delete stale (i.e., cumulatively ack'd) holes.  A hole is deleted only if
 * it is completely acked; otherwise, tcp_sack_option(), called from
 * tcp_dooptions(), will fix up the hole.
 */
void
tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
{

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
		/* max because this could be an older ack just arrived */
		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
		    th->th_ack : tp->snd_una;
		struct sackhole *cur = tp->snd_holes;
		struct sackhole *prev;

		while (cur)
			if (SEQ_LEQ(cur->end, lastack)) {
				prev = cur;
				cur = cur->next;
				uma_zfree(sack_hole_zone, prev);
				tp->snd_numholes--;
				tcp_sack_globalholes--;
			} else if (SEQ_LT(cur->start, lastack)) {
				cur->start = lastack;
				if (SEQ_LT(cur->rxmit, cur->start))
					cur->rxmit = cur->start;
				break;
			} else
				break;
		tp->snd_holes = cur;
	}
}

void
tcp_free_sackholes(struct tcpcb *tp)
{
	struct sackhole *p, *q;

	INP_LOCK_ASSERT(tp->t_inpcb);
	q = tp->snd_holes;
	while (q != NULL) {
		p = q;
		q = q->next;
		uma_zfree(sack_hole_zone, p);
		tcp_sack_globalholes--;
	}
	tp->snd_holes = NULL;
	tp->snd_numholes = 0;
}

/*
 * Partial ack handling within a sack recovery episode.
 * Keeping this very simple for now.  When a partial ack
 * is received, force snd_cwnd to a value that will allow
 * the sender to transmit no more than 2 segments.
 * If necessary, a better scheme can be adopted at a
 * later point, but for now, the goal is to prevent the
 * sender from bursting a large amount of data in the midst
 * of sack recovery.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	int num_segs = 1;
	int sack_bytes_rxmt = 0;

	INP_LOCK_ASSERT(tp->t_inpcb);
	callout_stop(tp->tt_rexmt);
	tp->t_rtttime = 0;
	/* Send one or 2 segments based on how much new data was acked. */
	if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
		num_segs = 2;
	(void)tcp_sack_output(tp, &sack_bytes_rxmt);
	tp->snd_cwnd = sack_bytes_rxmt + (tp->snd_nxt - tp->sack_newdata) +
	    num_segs * tp->t_maxseg;
	if (tp->snd_cwnd > tp->snd_ssthresh)
		tp->snd_cwnd = tp->snd_ssthresh;
	tp->t_flags |= TF_ACKNOW;
	(void)tcp_output(tp);
}

#ifdef TCP_SACK_DEBUG
void
tcp_print_holes(struct tcpcb *tp)
{
	struct sackhole *p = tp->snd_holes;

	if (p == NULL)
		return;
	printf("Hole report: start--end dups rxmit\n");
	while (p) {
		printf("%x--%x r %x\n", p->start, p->end, p->rxmit);
		p = p->next;
	}
	printf("\n");
}
#endif /* TCP_SACK_DEBUG */

/*
 * Returns a pointer to a sackhole if there are any pending retransmissions;
 * NULL otherwise.
 */
struct sackhole *
tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *p = NULL;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (!tp->sack_enable)
		return (NULL);
	*sack_bytes_rexmt = 0;
	for (p = tp->snd_holes; p; p = p->next) {
		if (SEQ_LT(p->rxmit, p->end)) {
			if (SEQ_LT(p->rxmit, tp->snd_una)) {
				/* old SACK hole */
				continue;
			}
#ifdef TCP_SACK_DEBUG
			if (p)
				tcp_print_holes(tp);
#endif
			*sack_bytes_rexmt += (p->rxmit - p->start);
			break;
		}
		*sack_bytes_rexmt += (p->rxmit - p->start);
	}
	return (p);
}

/*
 * After a timeout, the SACK list may be rebuilt.  This SACK information
 * should be used to avoid retransmitting SACKed data.  This function
 * traverses the SACK list to see if snd_nxt should be moved forward.
 */
void
tcp_sack_adjust(struct tcpcb *tp)
{
	struct sackhole *cur = tp->snd_holes;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (cur == NULL)
		return;	/* No holes */
	if (SEQ_GEQ(tp->snd_nxt, tp->rcv_lastsack))
		return;	/* We're already beyond any SACKed blocks */
	/*
	 * Two cases for which we want to advance snd_nxt:
	 * i) snd_nxt lies between end of one hole and beginning of another
	 * ii) snd_nxt lies between end of last hole and rcv_lastsack
	 */
	while (cur->next) {
		if (SEQ_LT(tp->snd_nxt, cur->end))
			return;
		if (SEQ_GEQ(tp->snd_nxt, cur->next->start))
			cur = cur->next;
		else {
			tp->snd_nxt = cur->next->start;
			return;
		}
	}
	if (SEQ_LT(tp->snd_nxt, cur->end))
		return;
	tp->snd_nxt = tp->rcv_lastsack;
	return;
}

/*
 * Calculate the number of SACKed bytes in the scoreboard by subtracting the
 * amount of data accounted for in sackholes from the total span of the
 * scoreboard.  Also returns the amount of data that is "lost" and has not
 * yet been retransmitted.
 */
int
tcp_sacked_bytes(struct tcpcb *tp, int *lost_not_rexmitted)
{
	struct sackhole *cur = tp->snd_holes;
	int sacked = 0;
	u_long lost = 0;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (cur == NULL)	/* Scoreboard empty. */
		goto out;
	if (SEQ_GEQ(tp->snd_una, tp->rcv_lastsack))	/* Scoreboard is stale. */
		goto out;
	sacked = tp->rcv_lastsack - cur->start;
	while (cur) {
		lost += (cur->end - cur->rxmit);
		sacked -= (cur->end - cur->start);
		cur = cur->next;
	}
out:
	if (lost_not_rexmitted)
		*lost_not_rexmitted = lost;
	return (sacked);
}