/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/socket.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/mib2.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ipsec_impl.h>
#include <inet/sctp_ip.h>

#include "sctp_impl.h"
#include "sctp_asconf.h"
#include "sctp_addr.h"

static struct kmem_cache *sctp_kmem_set_cache;

/*
 * PR-SCTP comments.
 *
 * When we get a valid Forward TSN chunk, we check the fragment list for
 * this SSN and preceding SSNs and free them all. Further, if this Forward
 * TSN causes the next expected SSN to be present in the stream queue, we
 * deliver any such stranded messages upstream. We also update the SACK
 * info appropriately.
 * When checking for advancing the cumulative ack (in sctp_cumack()) we must
 * check for abandoned chunks and messages. While traversing the transmit
 * list, if we come across an abandoned chunk, we can skip the message (i.e.
 * take it out of the (re)transmit list) since this message, and hence this
 * chunk, has been marked abandoned by sctp_rexmit(). If we come across an
 * unsent chunk for a message that is now abandoned, we need to check if a
 * Forward TSN needs to be sent; this could be a case where we deferred
 * sending a Forward TSN in sctp_get_msg_to_send(). Further, after
 * processing a SACK, we check if the Advanced peer ack point can be moved
 * ahead, i.e. if we can send a Forward TSN via sctp_check_abandoned_data().
 */
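
/*
 * Illustrative note (added commentary, not from the original source):
 * the sctp_set_t nodes manipulated below track contiguous blocks of TSNs
 * received beyond the cumulative ack point. For example, with cumulative
 * TSN 10 and received TSNs 13, 14 and 17, the list holds the two sets
 * [13, 14] and [17, 17]; sctp_ack_add() coalesces sets as gaps fill in,
 * and sctp_ack_rem() discards sets once the cumulative TSN passes them.
 */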
void
sctp_free_set(sctp_set_t *s)
{
	sctp_set_t *p;

	while (s) {
		p = s->next;
		kmem_cache_free(sctp_kmem_set_cache, s);
		s = p;
	}
}

static void
sctp_ack_add(sctp_set_t **head, uint32_t tsn, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || num == NULL)
		return;

	ASSERT(*num >= 0);
	ASSERT((*num == 0 && *head == NULL) || (*num > 0 && *head != NULL));

	if (*head == NULL) {
		*head = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (*head == NULL)
			return;
		(*head)->prev = (*head)->next = NULL;
		(*head)->begin = tsn;
		(*head)->end = tsn;
		*num = 1;
		return;
	}

	ASSERT((*head)->prev == NULL);

	/*
	 * Handle this special case here so we don't have to check
	 * for it each time in the loop.
	 */
	if (SEQ_LT(tsn + 1, (*head)->begin)) {
		/* add a new set, and move the head pointer */
		t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (t == NULL)
			return;
		t->next = *head;
		t->prev = NULL;
		(*head)->prev = t;
		t->begin = tsn;
		t->end = tsn;
		(*num)++;
		*head = t;
		return;
	}

	/*
	 * We need to handle the following cases, where p points to
	 * the current set (as we walk through the loop):
	 *
	 * 1. tsn is entirely less than p; create a new set before p.
	 * 2. tsn borders p from less; coalesce p with tsn.
	 * 3. tsn is within p; do nothing.
	 * 4. tsn borders p from greater; coalesce p with tsn.
	 * 4a. p may now border p->next from less; if so, coalesce those
	 *    two sets.
	 * 5. tsn is entirely greater than all sets; add a new set at
	 *    the end.
	 */
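	/*
	 * Worked example (added commentary, not from the original source):
	 * given the sets [3, 5] and [8, 9], adding TSN 6 extends the first
	 * set to [3, 6] (case 4); adding TSN 7 afterwards extends it to
	 * [3, 7] and then coalesces the two sets into [3, 9] (case 4a),
	 * decrementing *num.
	 */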
	for (p = *head; ; p = p->next) {
		if (SEQ_LT(tsn + 1, p->begin)) {
			/* 1: add a new set before p. */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = p;
			t->prev = NULL;
			t->begin = tsn;
			t->end = tsn;
			if (p->prev) {
				t->prev = p->prev;
				p->prev->next = t;
			}
			p->prev = t;
			(*num)++;
			return;
		}

		if ((tsn + 1) == p->begin) {
			/* 2: adjust p->begin */
			p->begin = tsn;
			return;
		}

		if (SEQ_GEQ(tsn, p->begin) && SEQ_LEQ(tsn, p->end)) {
			/* 3: do nothing */
			return;
		}

		if ((p->end + 1) == tsn) {
			/* 4: adjust p->end */
			p->end = tsn;

			if (p->next != NULL && (tsn + 1) == p->next->begin) {
				/* 4a: coalesce p and p->next */
				t = p->next;
				p->end = t->end;
				p->next = t->next;
				if (t->next != NULL)
					t->next->prev = p;
				kmem_cache_free(sctp_kmem_set_cache, t);
				(*num)--;
			}
			return;
		}

		if (p->next == NULL) {
			/* 5: add new set at the end */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = NULL;
			t->prev = p;
			t->begin = tsn;
			t->end = tsn;
			p->next = t;
			(*num)++;
			return;
		}

		if (SEQ_GT(tsn, p->end + 1))
			continue;
	}
}

static void
sctp_ack_rem(sctp_set_t **head, uint32_t end, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || *head == NULL || num == NULL)
		return;

	/* Nothing to remove */
	if (SEQ_LT(end, (*head)->begin))
		return;

	/* Find out where to start removing sets */
	for (p = *head; p->next; p = p->next) {
		if (SEQ_LEQ(end, p->end))
			break;
	}

	if (SEQ_LT(end, p->end) && SEQ_GEQ(end, p->begin)) {
		/* adjust p */
		p->begin = end + 1;
		/* all done */
		if (p == *head)
			return;
	} else if (SEQ_GEQ(end, p->end)) {
		/* remove this set too */
		p = p->next;
	}

	/* unlink everything before this set */
	t = *head;
	*head = p;
	if (p != NULL && p->prev != NULL) {
		p->prev->next = NULL;
		p->prev = NULL;
	}

	sctp_free_set(t);

	/* recount the number of sets */
	*num = 0;

	for (p = *head; p != NULL; p = p->next)
		(*num)++;
}

void
sctp_sets_init()
{
	sctp_kmem_set_cache = kmem_cache_create("sctp_set_cache",
	    sizeof (sctp_set_t), 0, NULL, NULL, NULL, NULL,
	    NULL, 0);
}

void
sctp_sets_fini()
{
	kmem_cache_destroy(sctp_kmem_set_cache);
}

sctp_chunk_hdr_t *
sctp_first_chunk(uchar_t *rptr, ssize_t remaining)
{
	sctp_chunk_hdr_t *ch;
	uint16_t ch_len;

	if (remaining < sizeof (*ch)) {
		return (NULL);
	}

	ch = (sctp_chunk_hdr_t *)rptr;
	ch_len = ntohs(ch->sch_len);

	if (ch_len < sizeof (*ch) || remaining < ch_len) {
		return (NULL);
	}

	return (ch);
}

sctp_chunk_hdr_t *
sctp_next_chunk(sctp_chunk_hdr_t *ch, ssize_t *remaining)
{
	int pad;
	uint16_t ch_len;

	if (!ch) {
		return (NULL);
	}

	ch_len = ntohs(ch->sch_len);

	if ((pad = ch_len & (SCTP_ALIGN - 1)) != 0) {
		pad = SCTP_ALIGN - pad;
	}

	*remaining -= (ch_len + pad);
	ch = (sctp_chunk_hdr_t *)((char *)ch + ch_len + pad);

	return (sctp_first_chunk((uchar_t *)ch, *remaining));
}
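
/*
 * Usage sketch (added commentary, not from the original source): callers
 * typically walk every chunk in a received packet with the two helpers
 * above, where rptr/remaining describe the payload following the SCTP
 * common header:
 *
 *	for (ch = sctp_first_chunk(rptr, remaining); ch != NULL;
 *	    ch = sctp_next_chunk(ch, &remaining))
 *		... dispatch on ch->sch_id ...
 *
 * sctp_next_chunk() accounts for the mandatory 4-byte chunk padding
 * (SCTP_ALIGN), so callers need not round sch_len themselves.
 */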

/*
 * Attach ancillary data to a received SCTP segment.
 * If the source address (fp) is not the primary, send up a
 * unitdata_ind so recvfrom() can populate the msg_name field.
 * If ancillary data is also requested, we append it to the
 * unitdata_ind. Otherwise, we just send up an optdata_ind.
 */
static int
sctp_input_add_ancillary(sctp_t *sctp, mblk_t **mp, sctp_data_hdr_t *dcp,
    sctp_faddr_t *fp, ip6_pkt_t *ipp)
{
	struct T_unitdata_ind *tudi;
	int optlen;
	int hdrlen;
	uchar_t *optptr;
	struct cmsghdr *cmsg;
	mblk_t *mp1;
	struct sockaddr_in6 sin_buf[1];
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin4;
	uint_t addflag = 0;

	sin4 = NULL;
	sin6 = NULL;

	optlen = hdrlen = 0;

	/* Figure out address size */
	if (sctp->sctp_ipversion == IPV4_VERSION) {
		sin4 = (struct sockaddr_in *)sin_buf;
		sin4->sin_family = AF_INET;
		sin4->sin_port = sctp->sctp_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		hdrlen = sizeof (*tudi) + sizeof (*sin4);
	} else {
		sin6 = sin_buf;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = sctp->sctp_fport;
		sin6->sin6_addr = fp->faddr;
		hdrlen = sizeof (*tudi) + sizeof (*sin6);
	}

	/* If app asked to receive send / recv info */
	if (sctp->sctp_recvsndrcvinfo) {
		optlen += sizeof (*cmsg) + sizeof (struct sctp_sndrcvinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_optdata_ind);
	}

	if (sctp->sctp_ipv6_recvancillary == 0)
		goto noancillary;

	if ((ipp->ipp_fields & IPPF_IFINDEX) &&
	    ipp->ipp_ifindex != sctp->sctp_recvifindex &&
	    (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVPKTINFO)) {
		optlen += sizeof (*cmsg) + sizeof (struct in6_pktinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVPKTINFO;
	}
	/* If app asked for hoplimit and it has changed ... */
	if ((ipp->ipp_fields & IPPF_HOPLIMIT) &&
	    ipp->ipp_hoplimit != sctp->sctp_recvhops &&
	    (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVHOPLIMIT)) {
		optlen += sizeof (*cmsg) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVHOPLIMIT;
	}
	/* If app asked for hopbyhop headers and it has changed ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVHOPOPTS) &&
	    ip_cmpbuf(sctp->sctp_hopopts, sctp->sctp_hopoptslen,
	    (ipp->ipp_fields & IPPF_HOPOPTS),
	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_hopoptslen -
		    sctp->sctp_v6label_len;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVHOPOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
			return (-1);
	}
	/* If app asked for dst headers before routing headers ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVRTDSTOPTS) &&
	    ip_cmpbuf(sctp->sctp_rtdstopts, sctp->sctp_rtdstoptslen,
	    (ipp->ipp_fields & IPPF_RTDSTOPTS),
	    ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rtdstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVRTDSTOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_rtdstopts,
		    &sctp->sctp_rtdstoptslen,
		    (ipp->ipp_fields & IPPF_RTDSTOPTS),
		    ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen))
			return (-1);
	}
	/* If app asked for routing headers and it has changed ... */
	if (sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVRTHDR) {
		if (ip_cmpbuf(sctp->sctp_rthdr, sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
			optlen += sizeof (*cmsg) + ipp->ipp_rthdrlen;
			if (hdrlen == 0)
				hdrlen = sizeof (struct T_unitdata_ind);
			addflag |= SCTP_IPV6_RECVRTHDR;
			if (!ip_allocbuf((void **)&sctp->sctp_rthdr,
			    &sctp->sctp_rthdrlen,
			    (ipp->ipp_fields & IPPF_RTHDR),
			    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
				return (-1);
		}
	}
	/* If app asked for dest headers and it has changed ... */
	if ((sctp->sctp_ipv6_recvancillary & SCTP_IPV6_RECVDSTOPTS) &&
	    ip_cmpbuf(sctp->sctp_dstopts, sctp->sctp_dstoptslen,
	    (ipp->ipp_fields & IPPF_DSTOPTS),
	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_dstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag |= SCTP_IPV6_RECVDSTOPTS;
		if (!ip_allocbuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
			return (-1);
	}
noancillary:
	/* Nothing to add */
	if (hdrlen == 0)
		return (-1);

	mp1 = allocb(hdrlen + optlen + sizeof (void *), BPRI_MED);
	if (mp1 == NULL)
		return (-1);
	mp1->b_cont = *mp;
	*mp = mp1;
	mp1->b_rptr += sizeof (void *);	/* pointer worth of padding */
	mp1->b_wptr = mp1->b_rptr + hdrlen + optlen;
	DB_TYPE(mp1) = M_PROTO;
	tudi = (struct T_unitdata_ind *)mp1->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_length = sin4 ? sizeof (*sin4) : sizeof (*sin6);
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_offset = sizeof (*tudi) + tudi->SRC_length;
	tudi->OPT_length = optlen;
	if (sin4) {
		bcopy(sin4, tudi + 1, sizeof (*sin4));
	} else {
		bcopy(sin6, tudi + 1, sizeof (*sin6));
	}
	optptr = (uchar_t *)tudi + tudi->OPT_offset;

	if (sctp->sctp_recvsndrcvinfo) {
		/* XXX need backout method if memory allocation fails. */
		struct sctp_sndrcvinfo *sri;

		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_SCTP;
		cmsg->cmsg_type = SCTP_SNDRCV;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*sri);
		optptr += sizeof (*cmsg);

		sri = (struct sctp_sndrcvinfo *)(cmsg + 1);
		ASSERT(OK_32PTR(sri));
		sri->sinfo_stream = ntohs(dcp->sdh_sid);
		sri->sinfo_ssn = ntohs(dcp->sdh_ssn);
		if (SCTP_DATA_GET_UBIT(dcp)) {
			sri->sinfo_flags = MSG_UNORDERED;
		} else {
			sri->sinfo_flags = 0;
		}
		sri->sinfo_ppid = dcp->sdh_payload_id;
		sri->sinfo_context = 0;
		sri->sinfo_timetolive = 0;
		sri->sinfo_tsn = ntohl(dcp->sdh_tsn);
		sri->sinfo_cumtsn = sctp->sctp_ftsn;
		sri->sinfo_assoc_id = 0;

		optptr += sizeof (*sri);
	}

	/*
	 * If app asked for pktinfo and the index has changed ...
	 * Note that the local address never changes for the connection.
	 */
503 */ 504 if (addflag & SCTP_IPV6_RECVPKTINFO) { 505 struct in6_pktinfo *pkti; 506 507 cmsg = (struct cmsghdr *)optptr; 508 cmsg->cmsg_level = IPPROTO_IPV6; 509 cmsg->cmsg_type = IPV6_PKTINFO; 510 cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*pkti); 511 optptr += sizeof (*cmsg); 512 513 pkti = (struct in6_pktinfo *)optptr; 514 if (sctp->sctp_ipversion == IPV6_VERSION) 515 pkti->ipi6_addr = sctp->sctp_ip6h->ip6_src; 516 else 517 IN6_IPADDR_TO_V4MAPPED(sctp->sctp_ipha->ipha_src, 518 &pkti->ipi6_addr); 519 pkti->ipi6_ifindex = ipp->ipp_ifindex; 520 optptr += sizeof (*pkti); 521 ASSERT(OK_32PTR(optptr)); 522 /* Save as "last" value */ 523 sctp->sctp_recvifindex = ipp->ipp_ifindex; 524 } 525 /* If app asked for hoplimit and it has changed ... */ 526 if (addflag & SCTP_IPV6_RECVHOPLIMIT) { 527 cmsg = (struct cmsghdr *)optptr; 528 cmsg->cmsg_level = IPPROTO_IPV6; 529 cmsg->cmsg_type = IPV6_HOPLIMIT; 530 cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t); 531 optptr += sizeof (*cmsg); 532 533 *(uint_t *)optptr = ipp->ipp_hoplimit; 534 optptr += sizeof (uint_t); 535 ASSERT(OK_32PTR(optptr)); 536 /* Save as "last" value */ 537 sctp->sctp_recvhops = ipp->ipp_hoplimit; 538 } 539 if (addflag & SCTP_IPV6_RECVHOPOPTS) { 540 cmsg = (struct cmsghdr *)optptr; 541 cmsg->cmsg_level = IPPROTO_IPV6; 542 cmsg->cmsg_type = IPV6_HOPOPTS; 543 cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_hopoptslen; 544 optptr += sizeof (*cmsg); 545 546 bcopy(ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 547 optptr += ipp->ipp_hopoptslen; 548 ASSERT(OK_32PTR(optptr)); 549 /* Save as last value */ 550 ip_savebuf((void **)&sctp->sctp_hopopts, 551 &sctp->sctp_hopoptslen, 552 (ipp->ipp_fields & IPPF_HOPOPTS), 553 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 554 } 555 if (addflag & SCTP_IPV6_RECVRTDSTOPTS) { 556 cmsg = (struct cmsghdr *)optptr; 557 cmsg->cmsg_level = IPPROTO_IPV6; 558 cmsg->cmsg_type = IPV6_RTHDRDSTOPTS; 559 cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rtdstoptslen; 560 optptr += sizeof (*cmsg); 561 562 bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen); 563 optptr += ipp->ipp_rtdstoptslen; 564 ASSERT(OK_32PTR(optptr)); 565 /* Save as last value */ 566 ip_savebuf((void **)&sctp->sctp_rtdstopts, 567 &sctp->sctp_rtdstoptslen, 568 (ipp->ipp_fields & IPPF_RTDSTOPTS), 569 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen); 570 } 571 if (addflag & SCTP_IPV6_RECVRTHDR) { 572 cmsg = (struct cmsghdr *)optptr; 573 cmsg->cmsg_level = IPPROTO_IPV6; 574 cmsg->cmsg_type = IPV6_RTHDR; 575 cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrlen; 576 optptr += sizeof (*cmsg); 577 578 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 579 optptr += ipp->ipp_rthdrlen; 580 ASSERT(OK_32PTR(optptr)); 581 /* Save as last value */ 582 ip_savebuf((void **)&sctp->sctp_rthdr, 583 &sctp->sctp_rthdrlen, 584 (ipp->ipp_fields & IPPF_RTHDR), 585 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 586 } 587 if (addflag & SCTP_IPV6_RECVDSTOPTS) { 588 cmsg = (struct cmsghdr *)optptr; 589 cmsg->cmsg_level = IPPROTO_IPV6; 590 cmsg->cmsg_type = IPV6_DSTOPTS; 591 cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_dstoptslen; 592 optptr += sizeof (*cmsg); 593 594 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 595 optptr += ipp->ipp_dstoptslen; 596 ASSERT(OK_32PTR(optptr)); 597 /* Save as last value */ 598 ip_savebuf((void **)&sctp->sctp_dstopts, 599 &sctp->sctp_dstoptslen, 600 (ipp->ipp_fields & IPPF_DSTOPTS), 601 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 602 } 603 604 ASSERT(optptr == mp1->b_wptr); 605 606 return (0); 607 } 608 609 void 610 sctp_free_reass(sctp_instr_t *sip) 611 { 612 mblk_t 

void
sctp_free_reass(sctp_instr_t *sip)
{
	mblk_t *mp, *mpnext, *mctl;

	for (mp = sip->istr_reass; mp != NULL; mp = mpnext) {
		mpnext = mp->b_next;
		mp->b_next = NULL;
		mp->b_prev = NULL;
		if (DB_TYPE(mp) == M_CTL) {
			mctl = mp;
			ASSERT(mp->b_cont != NULL);
			mp = mp->b_cont;
			mctl->b_cont = NULL;
			freeb(mctl);
		}
		freemsg(mp);
	}
}

/*
 * If the series of data fragments of which dmp is a part is successfully
 * reassembled, the first mblk in the series is returned. dc is adjusted
 * to point at the data chunk in the lead mblk, and b_rptr also points to
 * the data chunk; the following mblks' b_rptrs point at the actual payload.
 *
 * If the series is not yet reassembled, NULL is returned. dc is not changed.
 * XXX should probably move this up into the state machine.
 */

/* Fragment list for un-ordered messages. Partial delivery is not supported */
static mblk_t *
sctp_uodata_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc)
{
	mblk_t *hmp;
	mblk_t *begin = NULL;
	mblk_t *end = NULL;
	sctp_data_hdr_t *qdc;
	uint32_t ntsn;
	uint32_t tsn = ntohl((*dc)->sdh_tsn);
#ifdef DEBUG
	mblk_t *mp1;
#endif

	/* First frag. */
	if (sctp->sctp_uo_frags == NULL) {
		sctp->sctp_uo_frags = dmp;
		return (NULL);
	}
	hmp = sctp->sctp_uo_frags;
	/*
	 * Insert the segment according to the TSN; fragmented unordered
	 * chunks are sequenced by TSN.
	 */
	while (hmp != NULL) {
		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
		ntsn = ntohl(qdc->sdh_tsn);
		if (SEQ_GT(ntsn, tsn)) {
			if (hmp->b_prev == NULL) {
				dmp->b_next = hmp;
				hmp->b_prev = dmp;
				sctp->sctp_uo_frags = dmp;
			} else {
				dmp->b_next = hmp;
				dmp->b_prev = hmp->b_prev;
				hmp->b_prev->b_next = dmp;
				hmp->b_prev = dmp;
			}
			break;
		}
		if (hmp->b_next == NULL) {
			hmp->b_next = dmp;
			dmp->b_prev = hmp;
			break;
		}
		hmp = hmp->b_next;
	}
	/* check if we completed a msg */
	if (SCTP_DATA_GET_BBIT(*dc)) {
		begin = dmp;
	} else if (SCTP_DATA_GET_EBIT(*dc)) {
		end = dmp;
	}
	/*
	 * We walk consecutive TSNs backwards till we get a seg. with
	 * the B bit
	 */
	if (begin == NULL) {
		for (hmp = dmp->b_prev; hmp != NULL; hmp = hmp->b_prev) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(tsn - ntsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_BBIT(qdc)) {
				begin = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	tsn = ntohl((*dc)->sdh_tsn);
	/*
	 * We walk consecutive TSNs till we get a seg. with the E bit
	 */
	if (end == NULL) {
		for (hmp = dmp->b_next; hmp != NULL; hmp = hmp->b_next) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(ntsn - tsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_EBIT(qdc)) {
				end = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	if (begin == NULL || end == NULL) {
		return (NULL);
	}
	/* Got one! Remove the msg from the list */
	if (sctp->sctp_uo_frags == begin) {
		ASSERT(begin->b_prev == NULL);
		sctp->sctp_uo_frags = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = NULL;
	} else {
		begin->b_prev->b_next = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = begin->b_prev;
	}
	begin->b_prev = NULL;
	end->b_next = NULL;
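
	/*
	 * Worked example (added commentary, not from the original source):
	 * for a message fragmented as TSN 5 (B bit), TSN 6, TSN 7 (E bit),
	 * reassembly completes once all three are queued; begin points at
	 * the TSN 5 mblk and end at the TSN 7 mblk. The loop below rebuilds
	 * them into a single b_cont chain, advancing b_rptr past the chunk
	 * header in every mblk except the lead.
	 */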
747 */ 748 dmp = end = begin; 749 hmp = begin->b_next; 750 *dc = (sctp_data_hdr_t *)begin->b_rptr; 751 begin->b_next = NULL; 752 while (hmp != NULL) { 753 qdc = (sctp_data_hdr_t *)hmp->b_rptr; 754 hmp->b_rptr = (uchar_t *)(qdc + 1); 755 end = hmp->b_next; 756 dmp->b_cont = hmp; 757 dmp = hmp; 758 759 if (end != NULL) 760 hmp->b_next = NULL; 761 hmp->b_prev = NULL; 762 hmp = end; 763 } 764 BUMP_LOCAL(sctp->sctp_reassmsgs); 765 #ifdef DEBUG 766 mp1 = begin; 767 while (mp1 != NULL) { 768 ASSERT(mp1->b_next == NULL); 769 ASSERT(mp1->b_prev == NULL); 770 mp1 = mp1->b_cont; 771 } 772 #endif 773 return (begin); 774 } 775 776 /* 777 * Try partial delivery. 778 */ 779 static mblk_t * 780 sctp_try_partial_delivery(sctp_t *sctp, mblk_t *hmp, sctp_reass_t *srp, 781 sctp_data_hdr_t **dc) 782 { 783 mblk_t *first_mp; 784 mblk_t *mp; 785 mblk_t *dmp; 786 mblk_t *qmp; 787 mblk_t *prev; 788 sctp_data_hdr_t *qdc; 789 uint32_t tsn; 790 791 ASSERT(DB_TYPE(hmp) == M_CTL); 792 793 dprint(4, ("trypartial: got=%d, needed=%d\n", 794 (int)(srp->got), (int)(srp->needed))); 795 796 first_mp = hmp->b_cont; 797 mp = first_mp; 798 qdc = (sctp_data_hdr_t *)mp->b_rptr; 799 800 ASSERT(SCTP_DATA_GET_BBIT(qdc) && srp->hasBchunk); 801 802 tsn = ntohl(qdc->sdh_tsn) + 1; 803 804 /* 805 * This loop has two exit conditions: the 806 * end of received chunks has been reached, or 807 * there is a break in the sequence. We want 808 * to chop the reassembly list as follows (the 809 * numbers are TSNs): 810 * 10 -> 11 -> (end of chunks) 811 * 10 -> 11 -> | 13 (break in sequence) 812 */ 813 prev = mp; 814 mp = mp->b_cont; 815 while (mp != NULL) { 816 qdc = (sctp_data_hdr_t *)mp->b_rptr; 817 if (ntohl(qdc->sdh_tsn) != tsn) 818 break; 819 prev = mp; 820 mp = mp->b_cont; 821 tsn++; 822 } 823 /* 824 * We are sending all the fragments upstream, we have to retain 825 * the srp info for further fragments. 826 */ 827 if (mp == NULL) { 828 dmp = hmp->b_cont; 829 hmp->b_cont = NULL; 830 srp->nexttsn = tsn; 831 srp->msglen = 0; 832 srp->needed = 0; 833 srp->got = 0; 834 srp->partial_delivered = B_TRUE; 835 srp->tail = NULL; 836 } else { 837 dmp = hmp->b_cont; 838 hmp->b_cont = mp; 839 } 840 srp->hasBchunk = B_FALSE; 841 /* 842 * mp now points at the last chunk in the sequence, 843 * and prev points to mp's previous in the list. 844 * We chop the list at prev, and convert mp into the 845 * new list head by setting the B bit. Subsequence 846 * fragment deliveries will follow the normal reassembly 847 * path. 848 */ 849 prev->b_cont = NULL; 850 srp->partial_delivered = B_TRUE; 851 852 dprint(4, ("trypartial: got some, got=%d, needed=%d\n", 853 (int)(srp->got), (int)(srp->needed))); 854 855 /* 856 * Adjust all mblk's except the lead so their rptr's point to the 857 * payload. sctp_data_chunk() will need to process the lead's 858 * data chunk section, so leave it's rptr pointing at the data chunk. 859 */ 860 *dc = (sctp_data_hdr_t *)dmp->b_rptr; 861 if (srp->tail != NULL) { 862 srp->got--; 863 ASSERT(srp->got != 0); 864 if (srp->needed != 0) { 865 srp->needed--; 866 ASSERT(srp->needed != 0); 867 } 868 srp->msglen -= ntohs((*dc)->sdh_len); 869 } 870 for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) { 871 qdc = (sctp_data_hdr_t *)qmp->b_rptr; 872 qmp->b_rptr = (uchar_t *)(qdc + 1); 873 874 /* 875 * Deduct the balance from got and needed here, now that 876 * we know we are actually delivering these data. 
877 */ 878 if (srp->tail != NULL) { 879 srp->got--; 880 ASSERT(srp->got != 0); 881 if (srp->needed != 0) { 882 srp->needed--; 883 ASSERT(srp->needed != 0); 884 } 885 srp->msglen -= ntohs(qdc->sdh_len); 886 } 887 } 888 ASSERT(srp->msglen == 0); 889 BUMP_LOCAL(sctp->sctp_reassmsgs); 890 891 return (dmp); 892 } 893 894 /* 895 * Fragment list for ordered messages. 896 * If no error occures, error is set to 0. If we run out of memory, error 897 * is set to 1. If the peer commits a fatal error (like using different 898 * sequence numbers for the same data fragment series), the association is 899 * aborted and error is set to 2. tpfinished indicates whether we have 900 * assembled a complete message, this is used in sctp_data_chunk() to 901 * see if we can try to send any queued message for this stream. 902 */ 903 static mblk_t * 904 sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error, 905 sctp_instr_t *sip, boolean_t *tpfinished) 906 { 907 mblk_t *hmp; 908 mblk_t *pmp; 909 mblk_t *qmp; 910 mblk_t *first_mp; 911 sctp_reass_t *srp; 912 sctp_data_hdr_t *qdc; 913 sctp_data_hdr_t *bdc; 914 sctp_data_hdr_t *edc; 915 uint32_t tsn; 916 uint16_t fraglen = 0; 917 918 *error = 0; 919 920 /* find the reassembly queue for this data chunk */ 921 hmp = qmp = sip->istr_reass; 922 for (; hmp != NULL; hmp = hmp->b_next) { 923 srp = (sctp_reass_t *)DB_BASE(hmp); 924 if (ntohs((*dc)->sdh_ssn) == srp->ssn) 925 goto foundit; 926 else if (SSN_GT(srp->ssn, ntohs((*dc)->sdh_ssn))) 927 break; 928 qmp = hmp; 929 } 930 931 /* 932 * Allocate a M_CTL that will contain information about this 933 * fragmented message. 934 */ 935 if ((pmp = allocb(sizeof (*srp), BPRI_MED)) == NULL) { 936 *error = 1; 937 return (NULL); 938 } 939 DB_TYPE(pmp) = M_CTL; 940 srp = (sctp_reass_t *)DB_BASE(pmp); 941 pmp->b_cont = dmp; 942 943 if (hmp != NULL) { 944 if (sip->istr_reass == hmp) { 945 sip->istr_reass = pmp; 946 pmp->b_next = hmp; 947 pmp->b_prev = NULL; 948 hmp->b_prev = pmp; 949 } else { 950 qmp->b_next = pmp; 951 pmp->b_prev = qmp; 952 pmp->b_next = hmp; 953 hmp->b_prev = pmp; 954 } 955 } else { 956 /* make a new reass head and stick it on the end */ 957 if (sip->istr_reass == NULL) { 958 sip->istr_reass = pmp; 959 pmp->b_prev = NULL; 960 } else { 961 qmp->b_next = pmp; 962 pmp->b_prev = qmp; 963 } 964 pmp->b_next = NULL; 965 } 966 srp->partial_delivered = B_FALSE; 967 srp->ssn = ntohs((*dc)->sdh_ssn); 968 empty_srp: 969 srp->needed = 0; 970 srp->got = 1; 971 srp->tail = dmp; 972 if (SCTP_DATA_GET_BBIT(*dc)) { 973 srp->msglen = ntohs((*dc)->sdh_len); 974 srp->nexttsn = ntohl((*dc)->sdh_tsn) + 1; 975 srp->hasBchunk = B_TRUE; 976 } else if (srp->partial_delivered && 977 srp->nexttsn == ntohl((*dc)->sdh_tsn)) { 978 SCTP_DATA_SET_BBIT(*dc); 979 /* Last fragment */ 980 if (SCTP_DATA_GET_EBIT(*dc)) { 981 srp->needed = 1; 982 goto frag_done; 983 } 984 srp->hasBchunk = B_TRUE; 985 srp->msglen = ntohs((*dc)->sdh_len); 986 srp->nexttsn++; 987 } 988 return (NULL); 989 foundit: 990 /* 991 * else already have a reassembly queue. Insert the new data chunk 992 * in the reassemble queue. Try the tail first, on the assumption 993 * that the fragments are coming in in order. 994 */ 995 qmp = srp->tail; 996 997 /* 998 * This means the message was partially delivered. 
999 */ 1000 if (qmp == NULL) { 1001 ASSERT(srp->got == 0 && srp->needed == 0 && 1002 srp->partial_delivered); 1003 ASSERT(hmp->b_cont == NULL); 1004 hmp->b_cont = dmp; 1005 goto empty_srp; 1006 } 1007 qdc = (sctp_data_hdr_t *)qmp->b_rptr; 1008 ASSERT(qmp->b_cont == NULL); 1009 1010 /* XXXIs it fine to do this just here? */ 1011 if ((*dc)->sdh_sid != qdc->sdh_sid) { 1012 /* our peer is fatally confused; XXX abort the assc */ 1013 *error = 2; 1014 return (NULL); 1015 } 1016 if (SEQ_GT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) { 1017 qmp->b_cont = dmp; 1018 srp->tail = dmp; 1019 dmp->b_cont = NULL; 1020 if (srp->hasBchunk && srp->nexttsn == ntohl((*dc)->sdh_tsn)) { 1021 srp->msglen += ntohs((*dc)->sdh_len); 1022 srp->nexttsn++; 1023 } 1024 goto inserted; 1025 } 1026 1027 /* Next check for insertion at the beginning */ 1028 qmp = hmp->b_cont; 1029 qdc = (sctp_data_hdr_t *)qmp->b_rptr; 1030 if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) { 1031 dmp->b_cont = qmp; 1032 hmp->b_cont = dmp; 1033 if (SCTP_DATA_GET_BBIT(*dc)) { 1034 srp->hasBchunk = B_TRUE; 1035 srp->nexttsn = ntohl((*dc)->sdh_tsn); 1036 } 1037 goto preinserted; 1038 } 1039 1040 /* Insert somewhere in the middle */ 1041 for (;;) { 1042 /* Tail check above should have caught this */ 1043 ASSERT(qmp->b_cont != NULL); 1044 1045 qdc = (sctp_data_hdr_t *)qmp->b_cont->b_rptr; 1046 if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) { 1047 /* insert here */ 1048 dmp->b_cont = qmp->b_cont; 1049 qmp->b_cont = dmp; 1050 break; 1051 } 1052 qmp = qmp->b_cont; 1053 } 1054 preinserted: 1055 if (!srp->hasBchunk || ntohl((*dc)->sdh_tsn) != srp->nexttsn) 1056 goto inserted; 1057 /* 1058 * fraglen contains the length of consecutive chunks of fragments. 1059 * starting from the chunk inserted recently. 1060 */ 1061 tsn = srp->nexttsn; 1062 for (qmp = dmp; qmp != NULL; qmp = qmp->b_cont) { 1063 qdc = (sctp_data_hdr_t *)qmp->b_rptr; 1064 if (tsn != ntohl(qdc->sdh_tsn)) 1065 break; 1066 fraglen += ntohs(qdc->sdh_len); 1067 tsn++; 1068 } 1069 srp->nexttsn = tsn; 1070 srp->msglen += fraglen; 1071 inserted: 1072 srp->got++; 1073 first_mp = hmp->b_cont; 1074 if (srp->needed == 0) { 1075 /* check if we have the first and last fragments */ 1076 bdc = (sctp_data_hdr_t *)first_mp->b_rptr; 1077 edc = (sctp_data_hdr_t *)srp->tail->b_rptr; 1078 1079 /* calculate how many fragments are needed, if possible */ 1080 if (SCTP_DATA_GET_BBIT(bdc) && SCTP_DATA_GET_EBIT(edc)) { 1081 srp->needed = ntohl(edc->sdh_tsn) - 1082 ntohl(bdc->sdh_tsn) + 1; 1083 } 1084 } 1085 1086 /* 1087 * Try partial delivery if the message length has exceeded the 1088 * partial delivery point. Only do this if we can immediately 1089 * deliver the partially assembled message, and only partially 1090 * deliver one message at a time (i.e. messages cannot be 1091 * intermixed arriving at the upper layer). A simple way to 1092 * enforce this is to only try partial delivery if this TSN is 1093 * the next expected TSN. Partial Delivery not supported 1094 * for un-ordered message. 1095 */ 1096 if (srp->needed != srp->got) { 1097 dmp = NULL; 1098 if (ntohl((*dc)->sdh_tsn) == sctp->sctp_ftsn && 1099 srp->msglen >= sctp->sctp_pd_point) { 1100 dmp = sctp_try_partial_delivery(sctp, hmp, srp, dc); 1101 *tpfinished = B_FALSE; 1102 } 1103 return (dmp); 1104 } 1105 frag_done: 1106 /* 1107 * else reassembly done; prepare the data for delivery. 1108 * First unlink hmp from the ssn list. 
1109 */ 1110 if (sip->istr_reass == hmp) { 1111 sip->istr_reass = hmp->b_next; 1112 if (hmp->b_next) 1113 hmp->b_next->b_prev = NULL; 1114 } else { 1115 ASSERT(hmp->b_prev != NULL); 1116 hmp->b_prev->b_next = hmp->b_next; 1117 if (hmp->b_next) 1118 hmp->b_next->b_prev = hmp->b_prev; 1119 } 1120 1121 /* 1122 * Using b_prev and b_next was a little sinful, but OK since 1123 * this mblk is never put*'d. However, freeb() will still 1124 * ASSERT that they are unused, so we need to NULL them out now. 1125 */ 1126 hmp->b_next = NULL; 1127 hmp->b_prev = NULL; 1128 dmp = hmp; 1129 dmp = dmp->b_cont; 1130 hmp->b_cont = NULL; 1131 freeb(hmp); 1132 *tpfinished = B_TRUE; 1133 1134 /* 1135 * Adjust all mblk's except the lead so their rptr's point to the 1136 * payload. sctp_data_chunk() will need to process the lead's 1137 * data chunk section, so leave it's rptr pointing at the data chunk. 1138 */ 1139 *dc = (sctp_data_hdr_t *)dmp->b_rptr; 1140 for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) { 1141 qdc = (sctp_data_hdr_t *)qmp->b_rptr; 1142 qmp->b_rptr = (uchar_t *)(qdc + 1); 1143 } 1144 BUMP_LOCAL(sctp->sctp_reassmsgs); 1145 1146 return (dmp); 1147 } 1148 static void 1149 sctp_add_dup(uint32_t tsn, mblk_t **dups) 1150 { 1151 mblk_t *mp; 1152 size_t bsize = SCTP_DUP_MBLK_SZ * sizeof (tsn); 1153 1154 if (dups == NULL) { 1155 return; 1156 } 1157 1158 /* first time? */ 1159 if (*dups == NULL) { 1160 *dups = allocb(bsize, BPRI_MED); 1161 if (*dups == NULL) { 1162 return; 1163 } 1164 } 1165 1166 mp = *dups; 1167 if ((mp->b_wptr - mp->b_rptr) >= bsize) { 1168 /* maximum reached */ 1169 return; 1170 } 1171 1172 /* add the duplicate tsn */ 1173 bcopy(&tsn, mp->b_wptr, sizeof (tsn)); 1174 mp->b_wptr += sizeof (tsn); 1175 ASSERT((mp->b_wptr - mp->b_rptr) <= bsize); 1176 } 1177 1178 static void 1179 sctp_data_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *mp, mblk_t **dups, 1180 sctp_faddr_t *fp, ip6_pkt_t *ipp) 1181 { 1182 sctp_data_hdr_t *dc; 1183 mblk_t *dmp, *pmp; 1184 sctp_instr_t *instr; 1185 int ubit; 1186 int isfrag; 1187 uint16_t ssn; 1188 uint32_t oftsn; 1189 boolean_t can_deliver = B_TRUE; 1190 uint32_t tsn; 1191 int dlen; 1192 boolean_t tpfinished = B_TRUE; 1193 int32_t new_rwnd; 1194 sctp_stack_t *sctps = sctp->sctp_sctps; 1195 1196 /* The following are used multiple times, so we inline them */ 1197 #define SCTP_ACK_IT(sctp, tsn) \ 1198 if (tsn == sctp->sctp_ftsn) { \ 1199 dprint(2, ("data_chunk: acking next %x\n", tsn)); \ 1200 (sctp)->sctp_ftsn++; \ 1201 if ((sctp)->sctp_sack_gaps > 0) \ 1202 (sctp)->sctp_force_sack = 1; \ 1203 } else if (SEQ_GT(tsn, sctp->sctp_ftsn)) { \ 1204 /* Got a gap; record it */ \ 1205 dprint(2, ("data_chunk: acking gap %x\n", tsn)); \ 1206 sctp_ack_add(&sctp->sctp_sack_info, tsn, \ 1207 &sctp->sctp_sack_gaps); \ 1208 sctp->sctp_force_sack = 1; \ 1209 } 1210 1211 dmp = NULL; 1212 1213 dc = (sctp_data_hdr_t *)ch; 1214 tsn = ntohl(dc->sdh_tsn); 1215 1216 dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn)); 1217 1218 /* Check for duplicates */ 1219 if (SEQ_LT(tsn, sctp->sctp_ftsn)) { 1220 dprint(4, ("sctp_data_chunk: dropping duplicate\n")); 1221 sctp->sctp_force_sack = 1; 1222 sctp_add_dup(dc->sdh_tsn, dups); 1223 return; 1224 } 1225 1226 if (sctp->sctp_sack_info != NULL) { 1227 sctp_set_t *sp; 1228 1229 for (sp = sctp->sctp_sack_info; sp; sp = sp->next) { 1230 if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) { 1231 dprint(4, 1232 ("sctp_data_chunk: dropping dup > " 1233 "cumtsn\n")); 1234 sctp->sctp_force_sack = 1; 1235 sctp_add_dup(dc->sdh_tsn, 
	dmp = NULL;

	dc = (sctp_data_hdr_t *)ch;
	tsn = ntohl(dc->sdh_tsn);

	dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn));

	/* Check for duplicates */
	if (SEQ_LT(tsn, sctp->sctp_ftsn)) {
		dprint(4, ("sctp_data_chunk: dropping duplicate\n"));
		sctp->sctp_force_sack = 1;
		sctp_add_dup(dc->sdh_tsn, dups);
		return;
	}

	if (sctp->sctp_sack_info != NULL) {
		sctp_set_t *sp;

		for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
			if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) {
				dprint(4,
				    ("sctp_data_chunk: dropping dup > "
				    "cumtsn\n"));
				sctp->sctp_force_sack = 1;
				sctp_add_dup(dc->sdh_tsn, dups);
				return;
			}
		}
	}

	/* We cannot deliver anything up now but we still need to handle it. */
	if (SCTP_IS_DETACHED(sctp)) {
		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
		can_deliver = B_FALSE;
	}

	dlen = ntohs(dc->sdh_len) - sizeof (*dc);

	/* Check for buffer space */
	if (sctp->sctp_rwnd - sctp->sctp_rxqueued < dlen) {
		/* Drop and SACK, but don't advance the cumulative TSN. */
		sctp->sctp_force_sack = 1;
		dprint(0, ("sctp_data_chunk: exceed rwnd %d rxqueued %d "
		    "dlen %d ssn %d tsn %x\n", sctp->sctp_rwnd,
		    sctp->sctp_rxqueued, dlen, ntohs(dc->sdh_ssn),
		    ntohl(dc->sdh_tsn)));
		return;
	}

	if (ntohs(dc->sdh_sid) >= sctp->sctp_num_istr) {
		uint16_t inval_parm[2];

		inval_parm[0] = dc->sdh_sid;
		/* RESERVED to be ignored at the receiving end */
		inval_parm[1] = 0;
		/* ack and drop it */
		sctp_add_err(sctp, SCTP_ERR_BAD_SID, inval_parm,
		    sizeof (inval_parm), fp);
		SCTP_ACK_IT(sctp, tsn);
		return;
	}

	ubit = SCTP_DATA_GET_UBIT(dc);
	ASSERT(sctp->sctp_instr != NULL);
	instr = &sctp->sctp_instr[ntohs(dc->sdh_sid)];
	/* Initialize the stream, if not yet used */
	if (instr->sctp == NULL)
		instr->sctp = sctp;

	isfrag = !(SCTP_DATA_GET_BBIT(dc) && SCTP_DATA_GET_EBIT(dc));
	ssn = ntohs(dc->sdh_ssn);

	dmp = dupb(mp);
	if (dmp == NULL) {
		/* drop it and don't ack it, causing the peer to retransmit */
		return;
	}
	dmp->b_wptr = (uchar_t *)ch + ntohs(ch->sch_len);

	sctp->sctp_rxqueued += dlen;

	oftsn = sctp->sctp_ftsn;

	if (isfrag) {
		int error = 0;

		/* fragmented data chunk */
		dmp->b_rptr = (uchar_t *)dc;
		if (ubit) {
			dmp = sctp_uodata_frag(sctp, dmp, &dc);
#if DEBUG
			if (dmp != NULL) {
				ASSERT(instr ==
				    &sctp->sctp_instr[ntohs(dc->sdh_sid)]);
			}
#endif
		} else {
			dmp = sctp_data_frag(sctp, dmp, &dc, &error, instr,
			    &tpfinished);
		}
		if (error != 0) {
			sctp->sctp_rxqueued -= dlen;
			if (error == 1) {
				/*
				 * out of memory; don't ack it so
				 * the peer retransmits
				 */
				return;
			} else if (error == 2) {
				/*
				 * fatal error (i.e. peer used different
				 * ssn's for same fragmented data) --
				 * the association has been aborted.
				 * XXX need to return errval so state
				 * machine can also abort processing.
				 */
				dprint(0, ("error 2: must not happen!\n"));
				return;
			}
		}

		if (dmp == NULL) {
			/*
			 * Can't process this data now, but the cumulative
			 * TSN may be advanced, so do the checks at done.
			 */
			SCTP_ACK_IT(sctp, tsn);
			goto done;
		}
	}

	/*
	 * Insert complete messages in correct order for ordered delivery.
	 * tpfinished is true when the incoming chunk contains a complete
	 * message or is the final missing fragment which completed a message.
	 */
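	/*
	 * Example (added commentary, not from the original source): with
	 * instr->nextseq == 4, a completed message with ssn 6 is parked on
	 * instr->istr_msgs here; once ssn 4 and then 5 are delivered, the
	 * drain loop further below pulls ssn 6 off the queue in order.
	 */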
1346 */ 1347 if (!ubit && tpfinished && ssn != instr->nextseq) { 1348 /* Adjust rptr to point at the data chunk for compares */ 1349 dmp->b_rptr = (uchar_t *)dc; 1350 1351 dprint(2, 1352 ("data_chunk: inserted %x in pq (ssn %d expected %d)\n", 1353 ntohl(dc->sdh_tsn), (int)(ssn), (int)(instr->nextseq))); 1354 1355 if (instr->istr_msgs == NULL) { 1356 instr->istr_msgs = dmp; 1357 ASSERT(dmp->b_prev == NULL && dmp->b_next == NULL); 1358 } else { 1359 mblk_t *imblk = instr->istr_msgs; 1360 sctp_data_hdr_t *idc; 1361 1362 /* 1363 * XXXNeed to take sequence wraps into account, 1364 * ... and a more efficient insertion algo. 1365 */ 1366 for (;;) { 1367 idc = (sctp_data_hdr_t *)imblk->b_rptr; 1368 if (SSN_GT(ntohs(idc->sdh_ssn), 1369 ntohs(dc->sdh_ssn))) { 1370 if (instr->istr_msgs == imblk) { 1371 instr->istr_msgs = dmp; 1372 dmp->b_next = imblk; 1373 imblk->b_prev = dmp; 1374 } else { 1375 ASSERT(imblk->b_prev != NULL); 1376 imblk->b_prev->b_next = dmp; 1377 dmp->b_prev = imblk->b_prev; 1378 imblk->b_prev = dmp; 1379 dmp->b_next = imblk; 1380 } 1381 break; 1382 } 1383 if (imblk->b_next == NULL) { 1384 imblk->b_next = dmp; 1385 dmp->b_prev = imblk; 1386 break; 1387 } 1388 imblk = imblk->b_next; 1389 } 1390 } 1391 (instr->istr_nmsgs)++; 1392 (sctp->sctp_istr_nmsgs)++; 1393 SCTP_ACK_IT(sctp, tsn); 1394 return; 1395 } 1396 1397 /* 1398 * Else we can deliver the data directly. Recalculate 1399 * dlen now since we may have reassembled data. 1400 */ 1401 dlen = dmp->b_wptr - (uchar_t *)dc - sizeof (*dc); 1402 for (pmp = dmp->b_cont; pmp != NULL; pmp = pmp->b_cont) 1403 dlen += pmp->b_wptr - pmp->b_rptr; 1404 ASSERT(sctp->sctp_rxqueued >= dlen); 1405 ASSERT(sctp->sctp_rwnd >= dlen); 1406 1407 /* Deliver the message. */ 1408 sctp->sctp_rxqueued -= dlen; 1409 1410 if (can_deliver) { 1411 dmp->b_rptr = (uchar_t *)(dc + 1); 1412 if (sctp_input_add_ancillary(sctp, &dmp, dc, fp, ipp) == 0) { 1413 dprint(1, ("sctp_data_chunk: delivering %lu bytes\n", 1414 msgdsize(dmp))); 1415 sctp->sctp_rwnd -= dlen; 1416 new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, dmp, 1417 tpfinished ? 0 : SCTP_PARTIAL_DATA); 1418 if (new_rwnd > sctp->sctp_rwnd) { 1419 sctp->sctp_rwnd = new_rwnd; 1420 } 1421 SCTP_ACK_IT(sctp, tsn); 1422 } else { 1423 /* Just free the message if we don't have memory. */ 1424 freemsg(dmp); 1425 return; 1426 } 1427 } else { 1428 /* About to free the data */ 1429 freemsg(dmp); 1430 SCTP_ACK_IT(sctp, tsn); 1431 } 1432 1433 /* 1434 * data, now enqueued, may already have been processed and free'd 1435 * by the ULP (or we may have just freed it above, if we could not 1436 * deliver it), so we must not reference it (this is why we kept 1437 * the ssn and ubit above). 1438 */ 1439 if (ubit != 0) { 1440 BUMP_LOCAL(sctp->sctp_iudchunks); 1441 goto done; 1442 } 1443 BUMP_LOCAL(sctp->sctp_idchunks); 1444 1445 /* 1446 * If there was a partial delivery and it has not finished, 1447 * don't pull anything from the pqueues. 
1448 */ 1449 if (!tpfinished) { 1450 goto done; 1451 } 1452 1453 instr->nextseq = ssn + 1; 1454 /* Deliver any successive data chunks in the instr queue */ 1455 while (instr->istr_nmsgs > 0) { 1456 dmp = (mblk_t *)instr->istr_msgs; 1457 dc = (sctp_data_hdr_t *)dmp->b_rptr; 1458 ssn = ntohs(dc->sdh_ssn); 1459 /* Gap in the sequence */ 1460 if (ssn != instr->nextseq) 1461 break; 1462 1463 /* Else deliver the data */ 1464 (instr->istr_nmsgs)--; 1465 (instr->nextseq)++; 1466 (sctp->sctp_istr_nmsgs)--; 1467 1468 instr->istr_msgs = instr->istr_msgs->b_next; 1469 if (instr->istr_msgs != NULL) 1470 instr->istr_msgs->b_prev = NULL; 1471 dmp->b_next = dmp->b_prev = NULL; 1472 1473 dprint(2, ("data_chunk: pulling %x from pq (ssn %d)\n", 1474 ntohl(dc->sdh_tsn), (int)ssn)); 1475 1476 /* 1477 * If this chunk was reassembled, each b_cont represents 1478 * another TSN; advance ftsn now. 1479 */ 1480 dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc); 1481 for (pmp = dmp->b_cont; pmp; pmp = pmp->b_cont) 1482 dlen += pmp->b_wptr - pmp->b_rptr; 1483 1484 ASSERT(sctp->sctp_rxqueued >= dlen); 1485 ASSERT(sctp->sctp_rwnd >= dlen); 1486 1487 sctp->sctp_rxqueued -= dlen; 1488 if (can_deliver) { 1489 dmp->b_rptr = (uchar_t *)(dc + 1); 1490 if (sctp_input_add_ancillary(sctp, &dmp, dc, fp, 1491 ipp) == 0) { 1492 dprint(1, ("sctp_data_chunk: delivering %lu " 1493 "bytes\n", msgdsize(dmp))); 1494 sctp->sctp_rwnd -= dlen; 1495 new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, 1496 dmp, tpfinished ? 0 : SCTP_PARTIAL_DATA); 1497 if (new_rwnd > sctp->sctp_rwnd) { 1498 sctp->sctp_rwnd = new_rwnd; 1499 } 1500 SCTP_ACK_IT(sctp, tsn); 1501 } else { 1502 freemsg(dmp); 1503 return; 1504 } 1505 } else { 1506 /* About to free the data */ 1507 freemsg(dmp); 1508 SCTP_ACK_IT(sctp, tsn); 1509 } 1510 } 1511 1512 done: 1513 1514 /* 1515 * If there are gap reports pending, check if advancing 1516 * the ftsn here closes a gap. If so, we can advance 1517 * ftsn to the end of the set. 1518 */ 1519 if (sctp->sctp_sack_info != NULL && 1520 sctp->sctp_ftsn == sctp->sctp_sack_info->begin) { 1521 sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1; 1522 } 1523 /* 1524 * If ftsn has moved forward, maybe we can remove gap reports. 1525 * NB: dmp may now be NULL, so don't dereference it here. 
1526 */ 1527 if (oftsn != sctp->sctp_ftsn && sctp->sctp_sack_info != NULL) { 1528 sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1, 1529 &sctp->sctp_sack_gaps); 1530 dprint(2, ("data_chunk: removed acks before %x (num=%d)\n", 1531 sctp->sctp_ftsn - 1, sctp->sctp_sack_gaps)); 1532 } 1533 1534 #ifdef DEBUG 1535 if (sctp->sctp_sack_info != NULL) { 1536 ASSERT(sctp->sctp_ftsn != sctp->sctp_sack_info->begin); 1537 } 1538 #endif 1539 1540 #undef SCTP_ACK_IT 1541 } 1542 1543 void 1544 sctp_fill_sack(sctp_t *sctp, unsigned char *dst, int sacklen) 1545 { 1546 sctp_chunk_hdr_t *sch; 1547 sctp_sack_chunk_t *sc; 1548 sctp_sack_frag_t *sf; 1549 uint16_t num_gaps = sctp->sctp_sack_gaps; 1550 sctp_set_t *sp; 1551 1552 /* Chunk hdr */ 1553 sch = (sctp_chunk_hdr_t *)dst; 1554 sch->sch_id = CHUNK_SACK; 1555 sch->sch_flags = 0; 1556 sch->sch_len = htons(sacklen); 1557 1558 /* SACK chunk */ 1559 sctp->sctp_lastacked = sctp->sctp_ftsn - 1; 1560 1561 sc = (sctp_sack_chunk_t *)(sch + 1); 1562 sc->ssc_cumtsn = htonl(sctp->sctp_lastacked); 1563 if (sctp->sctp_rxqueued < sctp->sctp_rwnd) { 1564 sc->ssc_a_rwnd = htonl(sctp->sctp_rwnd - sctp->sctp_rxqueued); 1565 } else { 1566 sc->ssc_a_rwnd = 0; 1567 } 1568 sc->ssc_numfrags = htons(num_gaps); 1569 sc->ssc_numdups = 0; 1570 1571 /* lay in gap reports */ 1572 sf = (sctp_sack_frag_t *)(sc + 1); 1573 for (sp = sctp->sctp_sack_info; sp; sp = sp->next) { 1574 uint16_t offset; 1575 1576 /* start */ 1577 if (sp->begin > sctp->sctp_lastacked) { 1578 offset = (uint16_t)(sp->begin - sctp->sctp_lastacked); 1579 } else { 1580 /* sequence number wrap */ 1581 offset = (uint16_t)(UINT32_MAX - sctp->sctp_lastacked + 1582 sp->begin); 1583 } 1584 sf->ssf_start = htons(offset); 1585 1586 /* end */ 1587 if (sp->end >= sp->begin) { 1588 offset += (uint16_t)(sp->end - sp->begin); 1589 } else { 1590 /* sequence number wrap */ 1591 offset += (uint16_t)(UINT32_MAX - sp->begin + sp->end); 1592 } 1593 sf->ssf_end = htons(offset); 1594 1595 sf++; 1596 /* This is just for debugging (a la the following assertion) */ 1597 num_gaps--; 1598 } 1599 1600 ASSERT(num_gaps == 0); 1601 1602 /* If the SACK timer is running, stop it */ 1603 if (sctp->sctp_ack_timer_running) { 1604 sctp_timer_stop(sctp->sctp_ack_mp); 1605 sctp->sctp_ack_timer_running = B_FALSE; 1606 } 1607 1608 BUMP_LOCAL(sctp->sctp_obchunks); 1609 } 1610 1611 mblk_t * 1612 sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups) 1613 { 1614 mblk_t *smp; 1615 size_t slen; 1616 sctp_chunk_hdr_t *sch; 1617 sctp_sack_chunk_t *sc; 1618 int32_t acks_max; 1619 sctp_stack_t *sctps = sctp->sctp_sctps; 1620 uint32_t dups_len; 1621 sctp_faddr_t *fp; 1622 1623 if (sctp->sctp_force_sack) { 1624 sctp->sctp_force_sack = 0; 1625 goto checks_done; 1626 } 1627 1628 acks_max = sctps->sctps_deferred_acks_max; 1629 if (sctp->sctp_state == SCTPS_ESTABLISHED) { 1630 if (sctp->sctp_sack_toggle < acks_max) { 1631 /* no need to SACK right now */ 1632 dprint(2, ("sctp_make_sack: %p no sack (toggle)\n", 1633 (void *)sctp)); 1634 return (NULL); 1635 } else if (sctp->sctp_sack_toggle >= acks_max) { 1636 sctp->sctp_sack_toggle = 0; 1637 } 1638 } 1639 1640 if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) { 1641 dprint(2, ("sctp_make_sack: %p no sack (already)\n", 1642 (void *)sctp)); 1643 return (NULL); 1644 } 1645 1646 checks_done: 1647 dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1)); 1648 1649 if (dups != NULL) 1650 dups_len = MBLKL(dups); 1651 else 1652 dups_len = 0; 1653 slen = sizeof (*sch) + sizeof (*sc) + 1654 (sizeof 

mblk_t *
sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups)
{
	mblk_t *smp;
	size_t slen;
	sctp_chunk_hdr_t *sch;
	sctp_sack_chunk_t *sc;
	int32_t acks_max;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	uint32_t dups_len;
	sctp_faddr_t *fp;

	if (sctp->sctp_force_sack) {
		sctp->sctp_force_sack = 0;
		goto checks_done;
	}

	acks_max = sctps->sctps_deferred_acks_max;
	if (sctp->sctp_state == SCTPS_ESTABLISHED) {
		if (sctp->sctp_sack_toggle < acks_max) {
			/* no need to SACK right now */
			dprint(2, ("sctp_make_sack: %p no sack (toggle)\n",
			    (void *)sctp));
			return (NULL);
		} else if (sctp->sctp_sack_toggle >= acks_max) {
			sctp->sctp_sack_toggle = 0;
		}
	}

	if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
		dprint(2, ("sctp_make_sack: %p no sack (already)\n",
		    (void *)sctp));
		return (NULL);
	}

checks_done:
	dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1));

	if (dups != NULL)
		dups_len = MBLKL(dups);
	else
		dups_len = 0;
	slen = sizeof (*sch) + sizeof (*sc) +
	    (sizeof (sctp_sack_frag_t) * sctp->sctp_sack_gaps);

	/*
	 * If there are error chunks, check and see if we can send the
	 * SACK chunk and error chunks together in one packet. If not,
	 * send the error chunks out now.
	 */
	if (sctp->sctp_err_chunks != NULL) {
		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
		if (sctp->sctp_err_len + slen + dups_len > fp->sfa_pmss) {
			if ((smp = sctp_make_mp(sctp, fp, 0)) == NULL) {
				SCTP_KSTAT(sctps, sctp_send_err_failed);
				SCTP_KSTAT(sctps, sctp_send_sack_failed);
				freemsg(sctp->sctp_err_chunks);
				sctp->sctp_err_chunks = NULL;
				sctp->sctp_err_len = 0;
				return (NULL);
			}
			smp->b_cont = sctp->sctp_err_chunks;
			sctp_set_iplen(sctp, smp);
			sctp_add_sendq(sctp, smp);
			sctp->sctp_err_chunks = NULL;
			sctp->sctp_err_len = 0;
		}
	}
	smp = sctp_make_mp(sctp, sendto, slen);
	if (smp == NULL) {
		SCTP_KSTAT(sctps, sctp_send_sack_failed);
		return (NULL);
	}
	sch = (sctp_chunk_hdr_t *)smp->b_wptr;

	sctp_fill_sack(sctp, smp->b_wptr, slen);
	smp->b_wptr += slen;
	if (dups != NULL) {
		sc = (sctp_sack_chunk_t *)(sch + 1);
		sc->ssc_numdups = htons(MBLKL(dups) / sizeof (uint32_t));
		sch->sch_len = htons(slen + dups_len);
		smp->b_cont = dups;
	}

	if (sctp->sctp_err_chunks != NULL) {
		linkb(smp, sctp->sctp_err_chunks);
		sctp->sctp_err_chunks = NULL;
		sctp->sctp_err_len = 0;
	}
	return (smp);
}

/*
 * Check and see if we need to send a SACK chunk. If it is needed,
 * send it out. Return true if a SACK chunk is sent, false otherwise.
 */
boolean_t
sctp_sack(sctp_t *sctp, mblk_t *dups)
{
	mblk_t *smp;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	/* If we are shutting down, let send_shutdown() bundle the SACK */
	if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
		sctp_send_shutdown(sctp, 0);
	}

	ASSERT(sctp->sctp_lastdata != NULL);

	if ((smp = sctp_make_sack(sctp, sctp->sctp_lastdata, dups)) == NULL) {
		/* The caller of sctp_sack() will not free the dups mblk. */
		if (dups != NULL)
			freeb(dups);
		return (B_FALSE);
	}
	sctp_set_iplen(sctp, smp);

	dprint(2, ("sctp_sack: sending to %p %x:%x:%x:%x\n",
	    (void *)sctp->sctp_lastdata,
	    SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));

	sctp->sctp_active = lbolt64;

	BUMP_MIB(&sctps->sctps_mib, sctpOutAck);
	sctp_add_sendq(sctp, smp);
	return (B_TRUE);
}

/*
 * This is called if we have a message that was partially sent and is
 * abandoned. The cum TSN will be the last chunk sent for this message;
 * subsequent chunks will be marked ABANDONED. We send a Forward TSN
 * chunk in this case with the TSN of the last sent chunk so that the
 * peer can clean up its fragment list for this message. This message
 * will be removed from the transmit list when the peer sends a SACK
 * back.
 */
1747 */ 1748 int 1749 sctp_check_abandoned_msg(sctp_t *sctp, mblk_t *meta) 1750 { 1751 sctp_data_hdr_t *dh; 1752 mblk_t *nmp; 1753 mblk_t *head; 1754 int32_t unsent = 0; 1755 mblk_t *mp1 = meta->b_cont; 1756 uint32_t adv_pap = sctp->sctp_adv_pap; 1757 sctp_faddr_t *fp = sctp->sctp_current; 1758 sctp_stack_t *sctps = sctp->sctp_sctps; 1759 1760 dh = (sctp_data_hdr_t *)mp1->b_rptr; 1761 if (SEQ_GEQ(sctp->sctp_lastack_rxd, ntohl(dh->sdh_tsn))) { 1762 sctp_ftsn_set_t *sets = NULL; 1763 uint_t nsets = 0; 1764 uint32_t seglen = sizeof (uint32_t); 1765 boolean_t ubit = SCTP_DATA_GET_UBIT(dh); 1766 1767 while (mp1->b_next != NULL && SCTP_CHUNK_ISSENT(mp1->b_next)) 1768 mp1 = mp1->b_next; 1769 dh = (sctp_data_hdr_t *)mp1->b_rptr; 1770 sctp->sctp_adv_pap = ntohl(dh->sdh_tsn); 1771 if (!ubit && 1772 !sctp_add_ftsn_set(&sets, fp, meta, &nsets, &seglen)) { 1773 sctp->sctp_adv_pap = adv_pap; 1774 return (ENOMEM); 1775 } 1776 nmp = sctp_make_ftsn_chunk(sctp, fp, sets, nsets, seglen); 1777 sctp_free_ftsn_set(sets); 1778 if (nmp == NULL) { 1779 sctp->sctp_adv_pap = adv_pap; 1780 return (ENOMEM); 1781 } 1782 head = sctp_add_proto_hdr(sctp, fp, nmp, 0, NULL); 1783 if (head == NULL) { 1784 sctp->sctp_adv_pap = adv_pap; 1785 freemsg(nmp); 1786 SCTP_KSTAT(sctps, sctp_send_ftsn_failed); 1787 return (ENOMEM); 1788 } 1789 SCTP_MSG_SET_ABANDONED(meta); 1790 sctp_set_iplen(sctp, head); 1791 sctp_add_sendq(sctp, head); 1792 if (!fp->timer_running) 1793 SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto); 1794 mp1 = mp1->b_next; 1795 while (mp1 != NULL) { 1796 ASSERT(!SCTP_CHUNK_ISSENT(mp1)); 1797 ASSERT(!SCTP_CHUNK_ABANDONED(mp1)); 1798 SCTP_ABANDON_CHUNK(mp1); 1799 dh = (sctp_data_hdr_t *)mp1->b_rptr; 1800 unsent += ntohs(dh->sdh_len) - sizeof (*dh); 1801 mp1 = mp1->b_next; 1802 } 1803 ASSERT(sctp->sctp_unsent >= unsent); 1804 sctp->sctp_unsent -= unsent; 1805 /* 1806 * Update ULP the amount of queued data, which is 1807 * sent-unack'ed + unsent. 1808 */ 1809 if (!SCTP_IS_DETACHED(sctp)) { 1810 sctp->sctp_ulp_xmitted(sctp->sctp_ulpd, 1811 sctp->sctp_unacked + sctp->sctp_unsent); 1812 } 1813 return (0); 1814 } 1815 return (-1); 1816 } 1817 1818 uint32_t 1819 sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked) 1820 { 1821 mblk_t *ump, *nump, *mp = NULL; 1822 uint16_t chunklen; 1823 uint32_t xtsn; 1824 sctp_faddr_t *fp; 1825 sctp_data_hdr_t *sdc; 1826 uint32_t cumack_forward = 0; 1827 sctp_msg_hdr_t *mhdr; 1828 sctp_stack_t *sctps = sctp->sctp_sctps; 1829 1830 ump = sctp->sctp_xmit_head; 1831 1832 /* 1833 * Free messages only when they're completely acked. 1834 */ 1835 while (ump != NULL) { 1836 mhdr = (sctp_msg_hdr_t *)ump->b_rptr; 1837 for (mp = ump->b_cont; mp != NULL; mp = mp->b_next) { 1838 if (SCTP_CHUNK_ABANDONED(mp)) { 1839 ASSERT(SCTP_IS_MSG_ABANDONED(ump)); 1840 mp = NULL; 1841 break; 1842 } 1843 /* 1844 * We check for abandoned message if we are PR-SCTP 1845 * aware, if this is not the first chunk in the 1846 * message (b_cont) and if the message is marked 1847 * abandoned. 
1848 */ 1849 if (!SCTP_CHUNK_ISSENT(mp)) { 1850 if (sctp->sctp_prsctp_aware && 1851 mp != ump->b_cont && 1852 (SCTP_IS_MSG_ABANDONED(ump) || 1853 SCTP_MSG_TO_BE_ABANDONED(ump, mhdr, 1854 sctp))) { 1855 (void) sctp_check_abandoned_msg(sctp, 1856 ump); 1857 } 1858 goto cum_ack_done; 1859 } 1860 sdc = (sctp_data_hdr_t *)mp->b_rptr; 1861 xtsn = ntohl(sdc->sdh_tsn); 1862 if (SEQ_GEQ(sctp->sctp_lastack_rxd, xtsn)) 1863 continue; 1864 if (SEQ_GEQ(tsn, xtsn)) { 1865 fp = SCTP_CHUNK_DEST(mp); 1866 chunklen = ntohs(sdc->sdh_len); 1867 1868 if (sctp->sctp_out_time != 0 && 1869 xtsn == sctp->sctp_rtt_tsn) { 1870 /* Got a new RTT measurement */ 1871 sctp_update_rtt(sctp, fp, 1872 lbolt64 - sctp->sctp_out_time); 1873 sctp->sctp_out_time = 0; 1874 } 1875 if (SCTP_CHUNK_ISACKED(mp)) 1876 continue; 1877 SCTP_CHUNK_SET_SACKCNT(mp, 0); 1878 SCTP_CHUNK_ACKED(mp); 1879 ASSERT(fp->suna >= chunklen); 1880 fp->suna -= chunklen; 1881 fp->acked += chunklen; 1882 cumack_forward += chunklen; 1883 ASSERT(sctp->sctp_unacked >= 1884 (chunklen - sizeof (*sdc))); 1885 sctp->sctp_unacked -= 1886 (chunklen - sizeof (*sdc)); 1887 if (fp->suna == 0) { 1888 /* all outstanding data acked */ 1889 fp->pba = 0; 1890 SCTP_FADDR_TIMER_STOP(fp); 1891 } else { 1892 SCTP_FADDR_TIMER_RESTART(sctp, fp, 1893 fp->rto); 1894 } 1895 } else { 1896 goto cum_ack_done; 1897 } 1898 } 1899 nump = ump->b_next; 1900 if (nump != NULL) 1901 nump->b_prev = NULL; 1902 if (ump == sctp->sctp_xmit_tail) 1903 sctp->sctp_xmit_tail = nump; 1904 if (SCTP_IS_MSG_ABANDONED(ump)) { 1905 BUMP_LOCAL(sctp->sctp_prsctpdrop); 1906 ump->b_next = NULL; 1907 sctp_sendfail_event(sctp, ump, 0, B_TRUE); 1908 } else { 1909 sctp_free_msg(ump); 1910 } 1911 sctp->sctp_xmit_head = ump = nump; 1912 } 1913 cum_ack_done: 1914 *first_unacked = mp; 1915 if (cumack_forward > 0) { 1916 BUMP_MIB(&sctps->sctps_mib, sctpInAck); 1917 if (SEQ_GT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) { 1918 sctp->sctp_recovery_tsn = sctp->sctp_lastack_rxd; 1919 } 1920 1921 /* 1922 * Update ULP the amount of queued data, which is 1923 * sent-unack'ed + unsent. 1924 */ 1925 if (!SCTP_IS_DETACHED(sctp)) { 1926 sctp->sctp_ulp_xmitted(sctp->sctp_ulpd, 1927 sctp->sctp_unacked + sctp->sctp_unsent); 1928 } 1929 1930 /* Time to send a shutdown? */ 1931 if (sctp->sctp_state == SCTPS_SHUTDOWN_PENDING) { 1932 sctp_send_shutdown(sctp, 0); 1933 } 1934 sctp->sctp_xmit_unacked = mp; 1935 } else { 1936 /* dup ack */ 1937 BUMP_MIB(&sctps->sctps_mib, sctpInDupAck); 1938 } 1939 sctp->sctp_lastack_rxd = tsn; 1940 if (SEQ_LT(sctp->sctp_adv_pap, sctp->sctp_lastack_rxd)) 1941 sctp->sctp_adv_pap = sctp->sctp_lastack_rxd; 1942 ASSERT(sctp->sctp_xmit_head || sctp->sctp_unacked == 0); 1943 1944 return (cumack_forward); 1945 } 1946 1947 static int 1948 sctp_set_frwnd(sctp_t *sctp, uint32_t frwnd) 1949 { 1950 uint32_t orwnd; 1951 1952 if (sctp->sctp_unacked > frwnd) { 1953 sctp->sctp_frwnd = 0; 1954 return (0); 1955 } 1956 orwnd = sctp->sctp_frwnd; 1957 sctp->sctp_frwnd = frwnd - sctp->sctp_unacked; 1958 if (orwnd < sctp->sctp_frwnd) { 1959 return (1); 1960 } else { 1961 return (0); 1962 } 1963 } 1964 1965 /* 1966 * For un-ordered messages. 1967 * Walk the sctp->sctp_uo_frag list and remove any fragments with TSN 1968 * less than/equal to ftsn. Fragments for un-ordered messages are 1969 * strictly in sequence (w.r.t TSN). 
1970 */ 1971 static int 1972 sctp_ftsn_check_uo_frag(sctp_t *sctp, uint32_t ftsn) 1973 { 1974 mblk_t *hmp; 1975 mblk_t *hmp_next; 1976 sctp_data_hdr_t *dc; 1977 int dlen = 0; 1978 1979 hmp = sctp->sctp_uo_frags; 1980 while (hmp != NULL) { 1981 hmp_next = hmp->b_next; 1982 dc = (sctp_data_hdr_t *)hmp->b_rptr; 1983 if (SEQ_GT(ntohl(dc->sdh_tsn), ftsn)) 1984 return (dlen); 1985 sctp->sctp_uo_frags = hmp_next; 1986 if (hmp_next != NULL) 1987 hmp_next->b_prev = NULL; 1988 hmp->b_next = NULL; 1989 dlen += ntohs(dc->sdh_len) - sizeof (*dc); 1990 freeb(hmp); 1991 hmp = hmp_next; 1992 } 1993 return (dlen); 1994 } 1995 1996 /* 1997 * For ordered messages. 1998 * Check for existing fragments for an sid-ssn pair reported as abandoned, 1999 * hence will not receive, in the Forward TSN. If there are fragments, then 2000 * we just nuke them. If and when Partial Delivery API is supported, we 2001 * would need to send a notification to the upper layer about this. 2002 */ 2003 static int 2004 sctp_ftsn_check_frag(sctp_t *sctp, uint16_t ssn, sctp_instr_t *sip) 2005 { 2006 sctp_reass_t *srp; 2007 mblk_t *hmp; 2008 mblk_t *dmp; 2009 mblk_t *hmp_next; 2010 sctp_data_hdr_t *dc; 2011 int dlen = 0; 2012 2013 hmp = sip->istr_reass; 2014 while (hmp != NULL) { 2015 hmp_next = hmp->b_next; 2016 srp = (sctp_reass_t *)DB_BASE(hmp); 2017 if (SSN_GT(srp->ssn, ssn)) 2018 return (dlen); 2019 /* 2020 * If we had sent part of this message up, send a partial 2021 * delivery event. Since this is ordered delivery, we should 2022 * have sent partial message only for the next in sequence, 2023 * hence the ASSERT. See comments in sctp_data_chunk() for 2024 * trypartial. 2025 */ 2026 if (srp->partial_delivered) { 2027 ASSERT(sip->nextseq == srp->ssn); 2028 sctp_partial_delivery_event(sctp); 2029 } 2030 /* Take it out of the reass queue */ 2031 sip->istr_reass = hmp_next; 2032 if (hmp_next != NULL) 2033 hmp_next->b_prev = NULL; 2034 hmp->b_next = NULL; 2035 ASSERT(hmp->b_prev == NULL); 2036 dmp = hmp; 2037 ASSERT(DB_TYPE(hmp) == M_CTL); 2038 dmp = hmp->b_cont; 2039 hmp->b_cont = NULL; 2040 freeb(hmp); 2041 hmp = dmp; 2042 while (dmp != NULL) { 2043 dc = (sctp_data_hdr_t *)dmp->b_rptr; 2044 dlen += ntohs(dc->sdh_len) - sizeof (*dc); 2045 dmp = dmp->b_cont; 2046 } 2047 freemsg(hmp); 2048 hmp = hmp_next; 2049 } 2050 return (dlen); 2051 } 2052 2053 /* 2054 * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove 2055 * any SACK gaps less than the newly updated sctp_ftsn. Walk through the 2056 * sid-ssn pair in the Forward TSN and for each, clean the fragment list 2057 * for this pair, if needed, and check if we can deliver subsequent 2058 * messages, if any, from the instream queue (that were waiting for this 2059 * sid-ssn message to show up). Once we are done try to update the SACK 2060 * info. We could get a duplicate Forward TSN, in which case just send 2061 * a SACK. If any of the sid values in the the Forward TSN is invalid, 2062 * send back an "Invalid Stream Identifier" error and continue processing 2063 * the rest. 
/*
 * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove
 * any SACK gaps less than the newly updated sctp_ftsn. Walk through the
 * sid-ssn pairs in the Forward TSN and for each pair clean the fragment
 * list, if needed, and check if we can deliver subsequent messages, if
 * any, from the instream queue (that were waiting for this sid-ssn
 * message to show up). Once we are done, try to update the SACK info.
 * We could get a duplicate Forward TSN, in which case we just send a
 * SACK. If any of the sid values in the Forward TSN is invalid, we send
 * back an "Invalid Stream Identifier" error and continue processing
 * the rest.
 */
static void
sctp_process_forward_tsn(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp,
    ip6_pkt_t *ipp)
{
	uint32_t *ftsn = (uint32_t *)(ch + 1);
	ftsn_entry_t *ftsn_entry;
	sctp_instr_t *instr;
	boolean_t can_deliver = B_TRUE;
	size_t dlen;
	int flen;
	mblk_t *dmp;
	mblk_t *pmp;
	sctp_data_hdr_t *dc;
	ssize_t remaining;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	*ftsn = ntohl(*ftsn);
	remaining = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*ftsn);

	if (SCTP_IS_DETACHED(sctp)) {
		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
		can_deliver = B_FALSE;
	}
	/*
	 * Un-ordered messages don't have SID-SSN pair entries, so we check
	 * for any fragments (of un-ordered messages) to be discarded using
	 * the cumulative FTSN.
	 */
	flen = sctp_ftsn_check_uo_frag(sctp, *ftsn);
	if (flen > 0) {
		ASSERT(sctp->sctp_rxqueued >= flen);
		sctp->sctp_rxqueued -= flen;
	}
	ftsn_entry = (ftsn_entry_t *)(ftsn + 1);
	while (remaining >= sizeof (*ftsn_entry)) {
		ftsn_entry->ftsn_sid = ntohs(ftsn_entry->ftsn_sid);
		ftsn_entry->ftsn_ssn = ntohs(ftsn_entry->ftsn_ssn);
		if (ftsn_entry->ftsn_sid >= sctp->sctp_num_istr) {
			uint16_t inval_parm[2];

			inval_parm[0] = htons(ftsn_entry->ftsn_sid);
			/* RESERVED, to be ignored at the receiving end */
			inval_parm[1] = 0;
			sctp_add_err(sctp, SCTP_ERR_BAD_SID, inval_parm,
			    sizeof (inval_parm), fp);
			ftsn_entry++;
			remaining -= sizeof (*ftsn_entry);
			continue;
		}
		instr = &sctp->sctp_instr[ftsn_entry->ftsn_sid];
		flen = sctp_ftsn_check_frag(sctp, ftsn_entry->ftsn_ssn, instr);
		/* Indicates frags were nuked, update rxqueued */
		if (flen > 0) {
			ASSERT(sctp->sctp_rxqueued >= flen);
			sctp->sctp_rxqueued -= flen;
		}
		/*
		 * It is possible to receive an FTSN chunk with SSN smaller
		 * than the nextseq if this chunk is a retransmission because
		 * of incomplete processing when it was first processed.
		 */
		if (SSN_GE(ftsn_entry->ftsn_ssn, instr->nextseq))
			instr->nextseq = ftsn_entry->ftsn_ssn + 1;
		while (instr->istr_nmsgs > 0) {
			mblk_t *next;

			dmp = (mblk_t *)instr->istr_msgs;
			dc = (sctp_data_hdr_t *)dmp->b_rptr;
			if (ntohs(dc->sdh_ssn) != instr->nextseq)
				break;

			next = dmp->b_next;
			dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
			for (pmp = dmp->b_cont; pmp != NULL;
			    pmp = pmp->b_cont) {
				dlen += pmp->b_wptr - pmp->b_rptr;
			}
			if (can_deliver) {
				int32_t nrwnd;

				dmp->b_rptr = (uchar_t *)(dc + 1);
				dmp->b_next = NULL;
				ASSERT(dmp->b_prev == NULL);
				if (sctp_input_add_ancillary(sctp,
				    &dmp, dc, fp, ipp) == 0) {
					sctp->sctp_rxqueued -= dlen;
					sctp->sctp_rwnd -= dlen;
					nrwnd = sctp->sctp_ulp_recv(
					    sctp->sctp_ulpd, dmp, 0);
					if (nrwnd > sctp->sctp_rwnd)
						sctp->sctp_rwnd = nrwnd;
				} else {
					/*
					 * We will resume processing when
					 * the FTSN chunk is re-xmitted.
					 */
					dmp->b_rptr = (uchar_t *)dc;
					dmp->b_next = next;
					dprint(0,
					    ("FTSN dequeuing %u failed\n",
					    ntohs(dc->sdh_ssn)));
					return;
				}
			} else {
				sctp->sctp_rxqueued -= dlen;
				ASSERT(dmp->b_prev == NULL);
				dmp->b_next = NULL;
				freemsg(dmp);
			}
			instr->istr_nmsgs--;
			instr->nextseq++;
			sctp->sctp_istr_nmsgs--;
			if (next != NULL)
				next->b_prev = NULL;
			instr->istr_msgs = next;
		}
		ftsn_entry++;
		remaining -= sizeof (*ftsn_entry);
	}
	/* Duplicate FTSN */
	if (*ftsn <= (sctp->sctp_ftsn - 1)) {
		sctp->sctp_force_sack = 1;
		return;
	}
	/* Advance cum TSN to that reported in the Forward TSN chunk */
	sctp->sctp_ftsn = *ftsn + 1;

	/* Remove all the SACK gaps before the new cum TSN */
	if (sctp->sctp_sack_info != NULL) {
		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
		    &sctp->sctp_sack_gaps);
	}
	/*
	 * If there are gap reports pending, check if advancing
	 * the ftsn here closes a gap. If so, we can advance
	 * ftsn to the end of the set.
	 * If ftsn has moved forward, maybe we can remove gap reports.
	 */
	if (sctp->sctp_sack_info != NULL &&
	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
		    &sctp->sctp_sack_gaps);
	}
}

/*
 * When we have processed a SACK we check to see if we can advance the
 * cumulative TSN if there are abandoned chunks immediately following
 * the updated cumulative TSN. If there are, we attempt to send a
 * Forward TSN chunk.
 */
static void
sctp_check_abandoned_data(sctp_t *sctp, sctp_faddr_t *fp)
{
	mblk_t *meta = sctp->sctp_xmit_head;
	mblk_t *mp;
	mblk_t *nmp;
	uint32_t seglen;
	uint32_t adv_pap = sctp->sctp_adv_pap;

	/*
	 * We only check in the first meta since otherwise we can't
	 * advance the cumulative ack point. We just look for chunks
	 * marked for retransmission, else we might prematurely
	 * send an FTSN for a sent, but unacked, chunk.
	 */
	for (mp = meta->b_cont; mp != NULL; mp = mp->b_next) {
		if (!SCTP_CHUNK_ISSENT(mp))
			return;
		if (SCTP_CHUNK_WANT_REXMIT(mp))
			break;
	}
	if (mp == NULL)
		return;
	sctp_check_adv_ack_pt(sctp, meta, mp);
	if (SEQ_GT(sctp->sctp_adv_pap, adv_pap)) {
		sctp_make_ftsns(sctp, meta, mp, &nmp, fp, &seglen);
		if (nmp == NULL) {
			sctp->sctp_adv_pap = adv_pap;
			if (!fp->timer_running)
				SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
			return;
		}
		sctp_set_iplen(sctp, nmp);
		sctp_add_sendq(sctp, nmp);
		if (!fp->timer_running)
			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
	}
}

/*
 * The processing here follows the same logic as in sctp_got_sack(). We
 * do it separately because gap blocks are usually ordered and can then
 * be processed within sctp_got_sack() itself; when they are not, we
 * need some additional, less efficient, handling for the unordered
 * blocks. To that effect, sctp_got_sack() covers the simple case and
 * this function the more involved one.
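 * For example, a SACK whose gap blocks are (5..8) followed by (2..3) is
 * unordered: the second block starts below where the first one ended,
 * forcing the walk to restart from the cumulative TSN.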
2263 */ 2264 static uint32_t 2265 sctp_process_uo_gaps(sctp_t *sctp, uint32_t ctsn, sctp_sack_frag_t *ssf, 2266 int num_gaps, mblk_t *umphead, mblk_t *mphead, int *trysend, 2267 boolean_t *fast_recovery, uint32_t fr_xtsn) 2268 { 2269 uint32_t xtsn; 2270 uint32_t gapstart = 0; 2271 uint32_t gapend = 0; 2272 int gapcnt; 2273 uint16_t chunklen; 2274 sctp_data_hdr_t *sdc; 2275 int gstart; 2276 mblk_t *ump = umphead; 2277 mblk_t *mp = mphead; 2278 sctp_faddr_t *fp; 2279 uint32_t acked = 0; 2280 sctp_stack_t *sctps = sctp->sctp_sctps; 2281 2282 /* 2283 * gstart tracks the last (in the order of TSN) gapstart that 2284 * we process in this SACK gaps walk. 2285 */ 2286 gstart = ctsn; 2287 2288 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2289 xtsn = ntohl(sdc->sdh_tsn); 2290 for (gapcnt = 0; gapcnt < num_gaps; gapcnt++, ssf++) { 2291 if (gapstart != 0) { 2292 /* 2293 * If we have reached the end of the transmit list or 2294 * hit an unsent chunk or encountered an unordered gap 2295 * block start from the ctsn again. 2296 */ 2297 if (ump == NULL || !SCTP_CHUNK_ISSENT(mp) || 2298 SEQ_LT(ctsn + ntohs(ssf->ssf_start), xtsn)) { 2299 ump = umphead; 2300 mp = mphead; 2301 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2302 xtsn = ntohl(sdc->sdh_tsn); 2303 } 2304 } 2305 2306 gapstart = ctsn + ntohs(ssf->ssf_start); 2307 gapend = ctsn + ntohs(ssf->ssf_end); 2308 2309 /* SACK for TSN we have not sent - ABORT */ 2310 if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) || 2311 SEQ_GT(gapend, sctp->sctp_ltsn - 1)) { 2312 BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent); 2313 *trysend = -1; 2314 return (acked); 2315 } else if (SEQ_LT(gapend, gapstart)) { 2316 break; 2317 } 2318 /* 2319 * The xtsn can be the TSN processed for the last gap 2320 * (gapend) or it could be the cumulative TSN. We continue 2321 * with the last xtsn as long as the gaps are ordered, when 2322 * we hit an unordered gap, we re-start from the cumulative 2323 * TSN. For the first gap it is always the cumulative TSN. 2324 */ 2325 while (xtsn != gapstart) { 2326 /* 2327 * We can't reliably check for reneged chunks 2328 * when walking the unordered list, so we don't. 2329 * In case the peer reneges then we will end up 2330 * sending the reneged chunk via timeout. 2331 */ 2332 mp = mp->b_next; 2333 if (mp == NULL) { 2334 ump = ump->b_next; 2335 /* 2336 * ump can't be NULL because of the sanity 2337 * check above. 2338 */ 2339 ASSERT(ump != NULL); 2340 mp = ump->b_cont; 2341 } 2342 /* 2343 * mp can't be unsent because of the sanity check 2344 * above. 2345 */ 2346 ASSERT(SCTP_CHUNK_ISSENT(mp)); 2347 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2348 xtsn = ntohl(sdc->sdh_tsn); 2349 } 2350 /* 2351 * Now that we have found the chunk with TSN == 'gapstart', 2352 * let's walk till we hit the chunk with TSN == 'gapend'. 2353 * All intermediate chunks will be marked ACKED, if they 2354 * haven't already been. 2355 */ 2356 while (SEQ_LEQ(xtsn, gapend)) { 2357 /* 2358 * SACKed 2359 */ 2360 SCTP_CHUNK_SET_SACKCNT(mp, 0); 2361 if (!SCTP_CHUNK_ISACKED(mp)) { 2362 SCTP_CHUNK_ACKED(mp); 2363 2364 fp = SCTP_CHUNK_DEST(mp); 2365 chunklen = ntohs(sdc->sdh_len); 2366 ASSERT(fp->suna >= chunklen); 2367 fp->suna -= chunklen; 2368 if (fp->suna == 0) { 2369 /* All outstanding data acked. 
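					 * Clear the partial-bytes-acked
					 * count and stop the retransmission
					 * timer for this peer.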
					 */
					fp->pba = 0;
					SCTP_FADDR_TIMER_STOP(fp);
				}
				fp->acked += chunklen;
				acked += chunklen;
				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
				ASSERT(sctp->sctp_unacked >= 0);
			}
			/*
			 * Move to the next message in the transmit list
			 * if we are done with all the chunks from the current
			 * message. Note, it is possible to hit the end of the
			 * transmit list here, i.e. if we have already
			 * completed processing the gap block.
			 */
			mp = mp->b_next;
			if (mp == NULL) {
				ump = ump->b_next;
				if (ump == NULL) {
					ASSERT(xtsn == gapend);
					break;
				}
				mp = ump->b_cont;
			}
			/*
			 * Likewise, we can hit an unsent chunk once we have
			 * completed processing the gap block.
			 */
			if (!SCTP_CHUNK_ISSENT(mp)) {
				ASSERT(xtsn == gapend);
				break;
			}
			sdc = (sctp_data_hdr_t *)mp->b_rptr;
			xtsn = ntohl(sdc->sdh_tsn);
		}
		/*
		 * We keep track of the last gap we successfully processed
		 * so that we can terminate the walk below for incrementing
		 * the SACK count.
		 */
		if (SEQ_LT(gstart, gapstart))
			gstart = gapstart;
	}
	/*
	 * Check if we have incremented the SACK count for all unacked TSNs
	 * in sctp_got_sack(); if so, we are done.
	 */
	if (SEQ_LEQ(gstart, fr_xtsn))
		return (acked);

	ump = umphead;
	mp = mphead;
	sdc = (sctp_data_hdr_t *)mp->b_rptr;
	xtsn = ntohl(sdc->sdh_tsn);
	while (SEQ_LT(xtsn, gstart)) {
		/*
		 * We have incremented the SACK count for TSNs less than
		 * fr_xtsn in sctp_got_sack(), so don't increment them again
		 * here.
		 */
		if (SEQ_GT(xtsn, fr_xtsn) && !SCTP_CHUNK_ISACKED(mp)) {
			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
			if (SCTP_CHUNK_SACKCNT(mp) ==
			    sctps->sctps_fast_rxt_thresh) {
				SCTP_CHUNK_REXMIT(mp);
				sctp->sctp_chk_fast_rexmit = B_TRUE;
				*trysend = 1;
				if (!*fast_recovery) {
					/*
					 * Entering fast recovery.
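					 * For example, a destination whose
					 * cwnd is 16 * PMSS has ssthresh
					 * and cwnd cut back to 8 * PMSS.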
					 */
					fp = SCTP_CHUNK_DEST(mp);
					fp->ssthresh = fp->cwnd / 2;
					if (fp->ssthresh < 2 * fp->sfa_pmss) {
						fp->ssthresh =
						    2 * fp->sfa_pmss;
					}
					fp->cwnd = fp->ssthresh;
					fp->pba = 0;
					sctp->sctp_recovery_tsn =
					    sctp->sctp_ltsn - 1;
					*fast_recovery = B_TRUE;
				}
			}
		}
		mp = mp->b_next;
		if (mp == NULL) {
			ump = ump->b_next;
			/* We can't get to the end of the transmit list here */
			ASSERT(ump != NULL);
			mp = ump->b_cont;
		}
		/* We can't hit an unsent chunk here */
		ASSERT(SCTP_CHUNK_ISSENT(mp));
		sdc = (sctp_data_hdr_t *)mp->b_rptr;
		xtsn = ntohl(sdc->sdh_tsn);
	}
	return (acked);
}

static int
sctp_got_sack(sctp_t *sctp, sctp_chunk_hdr_t *sch)
{
	sctp_sack_chunk_t *sc;
	sctp_data_hdr_t *sdc;
	sctp_sack_frag_t *ssf;
	mblk_t *ump;
	mblk_t *mp;
	mblk_t *mp1;
	uint32_t cumtsn;
	uint32_t xtsn;
	uint32_t gapstart = 0;
	uint32_t gapend = 0;
	uint32_t acked = 0;
	uint16_t chunklen;
	sctp_faddr_t *fp;
	int num_gaps;
	int trysend = 0;
	int i;
	boolean_t fast_recovery = B_FALSE;
	boolean_t cumack_forward = B_FALSE;
	boolean_t fwd_tsn = B_FALSE;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	BUMP_LOCAL(sctp->sctp_ibchunks);
	chunklen = ntohs(sch->sch_len);
	if (chunklen < (sizeof (*sch) + sizeof (*sc)))
		return (0);

	sc = (sctp_sack_chunk_t *)(sch + 1);
	cumtsn = ntohl(sc->ssc_cumtsn);

	dprint(2, ("got sack cumtsn %x -> %x\n", sctp->sctp_lastack_rxd,
	    cumtsn));

	/* out of order */
	if (SEQ_LT(cumtsn, sctp->sctp_lastack_rxd))
		return (0);

	if (SEQ_GT(cumtsn, sctp->sctp_ltsn - 1)) {
		BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
		/* Send an ABORT */
		return (-1);
	}

	/*
	 * Cwnd updates are only done when not in fast recovery mode...
	 */
	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn))
		fast_recovery = B_TRUE;

	/*
	 * ... and when the cum TSN is not moving ahead on account of a
	 * Forward TSN.
	 */
	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_adv_pap))
		fwd_tsn = B_TRUE;

	if (cumtsn == sctp->sctp_lastack_rxd &&
	    (sctp->sctp_xmit_unacked == NULL ||
	    !SCTP_CHUNK_ABANDONED(sctp->sctp_xmit_unacked))) {
		if (sctp->sctp_xmit_unacked != NULL)
			mp = sctp->sctp_xmit_unacked;
		else if (sctp->sctp_xmit_head != NULL)
			mp = sctp->sctp_xmit_head->b_cont;
		else
			mp = NULL;
		BUMP_MIB(&sctps->sctps_mib, sctpInDupAck);
		/*
		 * If we were doing a zero win probe and the win
		 * has now opened to at least MSS, re-transmit the
		 * zero win probe via sctp_rexmit_packet().
		 */
		if (mp != NULL && sctp->sctp_zero_win_probe &&
		    ntohl(sc->ssc_a_rwnd) >= sctp->sctp_current->sfa_pmss) {
			mblk_t *pkt;
			uint_t pkt_len;
			mblk_t *mp1 = mp;
			mblk_t *meta = sctp->sctp_xmit_head;

			/*
			 * Reset the RTO since we have been backing-off
			 * to send the ZWP.
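			 * The RTO reverts to srtt + 4 * rttvar, dropping
			 * any exponential back-off accumulated while the
			 * peer's window was closed.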
2551 */ 2552 fp = sctp->sctp_current; 2553 fp->rto = fp->srtt + 4 * fp->rttvar; 2554 /* Resend the ZWP */ 2555 pkt = sctp_rexmit_packet(sctp, &meta, &mp1, fp, 2556 &pkt_len); 2557 if (pkt == NULL) { 2558 SCTP_KSTAT(sctps, sctp_ss_rexmit_failed); 2559 return (0); 2560 } 2561 ASSERT(pkt_len <= fp->sfa_pmss); 2562 sctp->sctp_zero_win_probe = B_FALSE; 2563 sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn; 2564 sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn; 2565 sctp_set_iplen(sctp, pkt); 2566 sctp_add_sendq(sctp, pkt); 2567 } 2568 } else { 2569 if (sctp->sctp_zero_win_probe) { 2570 /* 2571 * Reset the RTO since we have been backing-off 2572 * to send the ZWP. 2573 */ 2574 fp = sctp->sctp_current; 2575 fp->rto = fp->srtt + 4 * fp->rttvar; 2576 sctp->sctp_zero_win_probe = B_FALSE; 2577 /* This is probably not required */ 2578 if (!sctp->sctp_rexmitting) { 2579 sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn; 2580 sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn; 2581 } 2582 } 2583 acked = sctp_cumack(sctp, cumtsn, &mp); 2584 sctp->sctp_xmit_unacked = mp; 2585 if (acked > 0) { 2586 trysend = 1; 2587 cumack_forward = B_TRUE; 2588 if (fwd_tsn && SEQ_GEQ(sctp->sctp_lastack_rxd, 2589 sctp->sctp_adv_pap)) { 2590 cumack_forward = B_FALSE; 2591 } 2592 } 2593 } 2594 num_gaps = ntohs(sc->ssc_numfrags); 2595 if (num_gaps == 0 || mp == NULL || !SCTP_CHUNK_ISSENT(mp) || 2596 chunklen < (sizeof (*sch) + sizeof (*sc) + 2597 num_gaps * sizeof (*ssf))) { 2598 goto ret; 2599 } 2600 #ifdef DEBUG 2601 /* 2602 * Since we delete any message that has been acked completely, 2603 * the unacked chunk must belong to sctp_xmit_head (as 2604 * we don't have a back pointer from the mp to the meta data 2605 * we do this). 2606 */ 2607 { 2608 mblk_t *mp2 = sctp->sctp_xmit_head->b_cont; 2609 2610 while (mp2 != NULL) { 2611 if (mp2 == mp) 2612 break; 2613 mp2 = mp2->b_next; 2614 } 2615 ASSERT(mp2 != NULL); 2616 } 2617 #endif 2618 ump = sctp->sctp_xmit_head; 2619 2620 /* 2621 * Just remember where we started from, in case we need to call 2622 * sctp_process_uo_gaps() if the gap blocks are unordered. 2623 */ 2624 mp1 = mp; 2625 2626 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2627 xtsn = ntohl(sdc->sdh_tsn); 2628 ASSERT(xtsn == cumtsn + 1); 2629 2630 /* 2631 * Go through SACK gaps. They are ordered based on start TSN. 2632 */ 2633 ssf = (sctp_sack_frag_t *)(sc + 1); 2634 for (i = 0; i < num_gaps; i++, ssf++) { 2635 if (gapstart != 0) { 2636 /* check for unordered gap */ 2637 if (SEQ_LEQ(cumtsn + ntohs(ssf->ssf_start), gapstart)) { 2638 acked += sctp_process_uo_gaps(sctp, 2639 cumtsn, ssf, num_gaps - i, 2640 sctp->sctp_xmit_head, mp1, 2641 &trysend, &fast_recovery, gapstart); 2642 if (trysend < 0) { 2643 BUMP_MIB(&sctps->sctps_mib, 2644 sctpInAckUnsent); 2645 return (-1); 2646 } 2647 break; 2648 } 2649 } 2650 gapstart = cumtsn + ntohs(ssf->ssf_start); 2651 gapend = cumtsn + ntohs(ssf->ssf_end); 2652 2653 /* SACK for TSN we have not sent - ABORT */ 2654 if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) || 2655 SEQ_GT(gapend, sctp->sctp_ltsn - 1)) { 2656 BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent); 2657 return (-1); 2658 } else if (SEQ_LT(gapend, gapstart)) { 2659 break; 2660 } 2661 /* 2662 * Let's start at the current TSN (for the 1st gap we start 2663 * from the cumulative TSN, for subsequent ones we start from 2664 * where the previous gapend was found - second while loop 2665 * below) and walk the transmit list till we find the TSN 2666 * corresponding to gapstart. 
All the unacked chunks till we 2667 * get to the chunk with TSN == gapstart will have their 2668 * SACKCNT incremented by 1. Note since the gap blocks are 2669 * ordered, we won't be incrementing the SACKCNT for an 2670 * unacked chunk by more than one while processing the gap 2671 * blocks. If the SACKCNT for any unacked chunk exceeds 2672 * the fast retransmit threshold, we will fast retransmit 2673 * after processing all the gap blocks. 2674 */ 2675 ASSERT(SEQ_LT(xtsn, gapstart)); 2676 while (xtsn != gapstart) { 2677 SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1); 2678 if (SCTP_CHUNK_SACKCNT(mp) == 2679 sctps->sctps_fast_rxt_thresh) { 2680 SCTP_CHUNK_REXMIT(mp); 2681 sctp->sctp_chk_fast_rexmit = B_TRUE; 2682 trysend = 1; 2683 if (!fast_recovery) { 2684 /* 2685 * Entering fast recovery. 2686 */ 2687 fp = SCTP_CHUNK_DEST(mp); 2688 fp->ssthresh = fp->cwnd / 2; 2689 if (fp->ssthresh < 2 * fp->sfa_pmss) { 2690 fp->ssthresh = 2691 2 * fp->sfa_pmss; 2692 } 2693 fp->cwnd = fp->ssthresh; 2694 fp->pba = 0; 2695 sctp->sctp_recovery_tsn = 2696 sctp->sctp_ltsn - 1; 2697 fast_recovery = B_TRUE; 2698 } 2699 } 2700 2701 /* 2702 * Peer may have reneged on this chunk, so un-sack 2703 * it now. If the peer did renege, we need to 2704 * readjust unacked. 2705 */ 2706 if (SCTP_CHUNK_ISACKED(mp)) { 2707 chunklen = ntohs(sdc->sdh_len); 2708 fp = SCTP_CHUNK_DEST(mp); 2709 fp->suna += chunklen; 2710 sctp->sctp_unacked += chunklen - sizeof (*sdc); 2711 SCTP_CHUNK_CLEAR_ACKED(mp); 2712 if (!fp->timer_running) { 2713 SCTP_FADDR_TIMER_RESTART(sctp, fp, 2714 fp->rto); 2715 } 2716 } 2717 2718 mp = mp->b_next; 2719 if (mp == NULL) { 2720 ump = ump->b_next; 2721 /* 2722 * ump can't be NULL given the sanity check 2723 * above. 2724 */ 2725 ASSERT(ump != NULL); 2726 mp = ump->b_cont; 2727 } 2728 /* 2729 * mp can't be unsent given the sanity check above. 2730 */ 2731 ASSERT(SCTP_CHUNK_ISSENT(mp)); 2732 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2733 xtsn = ntohl(sdc->sdh_tsn); 2734 } 2735 /* 2736 * Now that we have found the chunk with TSN == 'gapstart', 2737 * let's walk till we hit the chunk with TSN == 'gapend'. 2738 * All intermediate chunks will be marked ACKED, if they 2739 * haven't already been. 2740 */ 2741 while (SEQ_LEQ(xtsn, gapend)) { 2742 /* 2743 * SACKed 2744 */ 2745 SCTP_CHUNK_SET_SACKCNT(mp, 0); 2746 if (!SCTP_CHUNK_ISACKED(mp)) { 2747 SCTP_CHUNK_ACKED(mp); 2748 2749 fp = SCTP_CHUNK_DEST(mp); 2750 chunklen = ntohs(sdc->sdh_len); 2751 ASSERT(fp->suna >= chunklen); 2752 fp->suna -= chunklen; 2753 if (fp->suna == 0) { 2754 /* All outstanding data acked. */ 2755 fp->pba = 0; 2756 SCTP_FADDR_TIMER_STOP(fp); 2757 } 2758 fp->acked += chunklen; 2759 acked += chunklen; 2760 sctp->sctp_unacked -= chunklen - sizeof (*sdc); 2761 ASSERT(sctp->sctp_unacked >= 0); 2762 } 2763 /* Go to the next chunk of the current message */ 2764 mp = mp->b_next; 2765 /* 2766 * Move to the next message in the transmit list 2767 * if we are done with all the chunks from the current 2768 * message. Note, it is possible to hit the end of the 2769 * transmit list here, i.e. if we have already completed 2770 * processing the gap block. 2771 * Also, note that we break here, which means we 2772 * continue processing gap blocks, if any. In case of 2773 * ordered gap blocks there can't be any following 2774 * this (if there is it will fail the sanity check 2775 * above). In case of un-ordered gap blocks we will 2776 * switch to sctp_process_uo_gaps(). 
In either case 2777 * it should be fine to continue with NULL ump/mp, 2778 * but we just reset it to xmit_head. 2779 */ 2780 if (mp == NULL) { 2781 ump = ump->b_next; 2782 if (ump == NULL) { 2783 ASSERT(xtsn == gapend); 2784 ump = sctp->sctp_xmit_head; 2785 mp = mp1; 2786 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2787 xtsn = ntohl(sdc->sdh_tsn); 2788 break; 2789 } 2790 mp = ump->b_cont; 2791 } 2792 /* 2793 * Likewise, we could hit an unsent chunk once we have 2794 * completed processing the gap block. Again, it is 2795 * fine to continue processing gap blocks with mp 2796 * pointing to the unsent chunk, because if there 2797 * are more ordered gap blocks, they will fail the 2798 * sanity check, and if there are un-ordered gap blocks, 2799 * we will continue processing in sctp_process_uo_gaps() 2800 * We just reset the mp to the one we started with. 2801 */ 2802 if (!SCTP_CHUNK_ISSENT(mp)) { 2803 ASSERT(xtsn == gapend); 2804 ump = sctp->sctp_xmit_head; 2805 mp = mp1; 2806 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2807 xtsn = ntohl(sdc->sdh_tsn); 2808 break; 2809 } 2810 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2811 xtsn = ntohl(sdc->sdh_tsn); 2812 } 2813 } 2814 if (sctp->sctp_prsctp_aware) 2815 sctp_check_abandoned_data(sctp, sctp->sctp_current); 2816 if (sctp->sctp_chk_fast_rexmit) 2817 sctp_fast_rexmit(sctp); 2818 ret: 2819 trysend += sctp_set_frwnd(sctp, ntohl(sc->ssc_a_rwnd)); 2820 2821 /* 2822 * If receive window is closed while there is unsent data, 2823 * set a timer for doing zero window probes. 2824 */ 2825 if (sctp->sctp_frwnd == 0 && sctp->sctp_unacked == 0 && 2826 sctp->sctp_unsent != 0) { 2827 SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current, 2828 sctp->sctp_current->rto); 2829 } 2830 2831 /* 2832 * Set cwnd for all destinations. 2833 * Congestion window gets increased only when cumulative 2834 * TSN moves forward, we're not in fast recovery, and 2835 * cwnd has been fully utilized (almost fully, need to allow 2836 * some leeway due to non-MSS sized messages). 2837 */ 2838 if (sctp->sctp_current->acked == acked) { 2839 /* 2840 * Fast-path, only data sent to sctp_current got acked. 2841 */ 2842 fp = sctp->sctp_current; 2843 if (cumack_forward && !fast_recovery && 2844 (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) { 2845 if (fp->cwnd < fp->ssthresh) { 2846 /* 2847 * Slow start 2848 */ 2849 if (fp->acked > fp->sfa_pmss) { 2850 fp->cwnd += fp->sfa_pmss; 2851 } else { 2852 fp->cwnd += fp->acked; 2853 } 2854 fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max); 2855 } else { 2856 /* 2857 * Congestion avoidance 2858 */ 2859 fp->pba += fp->acked; 2860 if (fp->pba >= fp->cwnd) { 2861 fp->pba -= fp->cwnd; 2862 fp->cwnd += fp->sfa_pmss; 2863 fp->cwnd = MIN(fp->cwnd, 2864 sctp->sctp_cwnd_max); 2865 } 2866 } 2867 } 2868 /* 2869 * Limit the burst of transmitted data segments. 
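		 * For example, with sctps_maxburst set to 4, cwnd is
		 * clamped so that at most four more PMSS-sized packets
		 * can be put on the wire beyond what is already
		 * outstanding (suna).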
		 */
		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
		    fp->cwnd) {
			fp->cwnd = fp->suna + sctps->sctps_maxburst *
			    fp->sfa_pmss;
		}
		fp->acked = 0;
		goto check_ss_rxmit;
	}
	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
		if (cumack_forward && fp->acked && !fast_recovery &&
		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
			if (fp->cwnd < fp->ssthresh) {
				if (fp->acked > fp->sfa_pmss) {
					fp->cwnd += fp->sfa_pmss;
				} else {
					fp->cwnd += fp->acked;
				}
				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
			} else {
				fp->pba += fp->acked;
				if (fp->pba >= fp->cwnd) {
					fp->pba -= fp->cwnd;
					fp->cwnd += fp->sfa_pmss;
					fp->cwnd = MIN(fp->cwnd,
					    sctp->sctp_cwnd_max);
				}
			}
		}
		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
		    fp->cwnd) {
			fp->cwnd = fp->suna + sctps->sctps_maxburst *
			    fp->sfa_pmss;
		}
		fp->acked = 0;
	}
	fp = sctp->sctp_current;
check_ss_rxmit:
	/*
	 * If this is a SACK following a timeout, check if there are
	 * still unacked chunks (sent before the timeout) that we can
	 * send.
	 */
	if (sctp->sctp_rexmitting) {
		if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_rxt_maxtsn)) {
			/*
			 * As we are in the retransmission phase, we may get
			 * a SACK which indicates that some new chunks were
			 * received but the cum_tsn does not advance. During
			 * this phase the other side advances the cum_tsn
			 * only because it receives our retransmitted chunks.
			 * Only that signals that some chunks are still
			 * missing.
			 */
			if (cumack_forward) {
				fp->rxt_unacked -= acked;
				sctp_ss_rexmit(sctp);
			}
		} else {
			sctp->sctp_rexmitting = B_FALSE;
			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
			fp->rxt_unacked = 0;
		}
	}
	return (trysend);
}

/*
 * Returns 0 if the caller should stop processing any more chunks,
 * 1 if the caller should skip this chunk and continue processing.
 */
static int
sctp_strange_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
{
	size_t len;

	BUMP_LOCAL(sctp->sctp_ibchunks);
	/* check top two bits for action required */
	if (ch->sch_id & 0x40) {	/* also matches 0xc0 */
		len = ntohs(ch->sch_len);
		sctp_add_err(sctp, SCTP_ERR_UNREC_CHUNK, ch, len, fp);

		if ((ch->sch_id & 0xc0) == 0xc0) {
			/* skip and continue */
			return (1);
		} else {
			/* stop processing */
			return (0);
		}
	}
	if (ch->sch_id & 0x80) {
		/* skip and continue, no error */
		return (1);
	}
	/* top two bits are clear; stop processing and no error */
	return (0);
}

/*
 * Basic sanity checks on all input chunks and parameters: they must
 * be of legitimate size for their purported type, and must follow
 * ordering conventions as defined in RFC 2960.
 *
 * Returns 1 if the chunk and all enclosed params are legitimate,
 * 0 otherwise.
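 * For example, a DATA chunk shorter than sizeof (sctp_data_hdr_t), or an
 * INIT chunk bundled with any other chunk, fails the check.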
2976 */ 2977 /*ARGSUSED*/ 2978 static int 2979 sctp_check_input(sctp_t *sctp, sctp_chunk_hdr_t *ch, ssize_t len, int first) 2980 { 2981 sctp_parm_hdr_t *ph; 2982 void *p = NULL; 2983 ssize_t clen; 2984 uint16_t ch_len; 2985 2986 ch_len = ntohs(ch->sch_len); 2987 if (ch_len > len) { 2988 return (0); 2989 } 2990 2991 switch (ch->sch_id) { 2992 case CHUNK_DATA: 2993 if (ch_len < sizeof (sctp_data_hdr_t)) { 2994 return (0); 2995 } 2996 return (1); 2997 case CHUNK_INIT: 2998 case CHUNK_INIT_ACK: 2999 { 3000 ssize_t remlen = len; 3001 3002 /* 3003 * INIT and INIT-ACK chunks must not be bundled with 3004 * any other. 3005 */ 3006 if (!first || sctp_next_chunk(ch, &remlen) != NULL || 3007 (ch_len < (sizeof (*ch) + 3008 sizeof (sctp_init_chunk_t)))) { 3009 return (0); 3010 } 3011 /* may have params that need checking */ 3012 p = (char *)(ch + 1) + sizeof (sctp_init_chunk_t); 3013 clen = ch_len - (sizeof (*ch) + 3014 sizeof (sctp_init_chunk_t)); 3015 } 3016 break; 3017 case CHUNK_SACK: 3018 if (ch_len < (sizeof (*ch) + sizeof (sctp_sack_chunk_t))) { 3019 return (0); 3020 } 3021 /* dup and gap reports checked by got_sack() */ 3022 return (1); 3023 case CHUNK_SHUTDOWN: 3024 if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) { 3025 return (0); 3026 } 3027 return (1); 3028 case CHUNK_ABORT: 3029 case CHUNK_ERROR: 3030 if (ch_len < sizeof (*ch)) { 3031 return (0); 3032 } 3033 /* may have params that need checking */ 3034 p = ch + 1; 3035 clen = ch_len - sizeof (*ch); 3036 break; 3037 case CHUNK_ECNE: 3038 case CHUNK_CWR: 3039 case CHUNK_HEARTBEAT: 3040 case CHUNK_HEARTBEAT_ACK: 3041 /* Full ASCONF chunk and parameter checks are in asconf.c */ 3042 case CHUNK_ASCONF: 3043 case CHUNK_ASCONF_ACK: 3044 if (ch_len < sizeof (*ch)) { 3045 return (0); 3046 } 3047 /* heartbeat data checked by process_heartbeat() */ 3048 return (1); 3049 case CHUNK_SHUTDOWN_COMPLETE: 3050 { 3051 ssize_t remlen = len; 3052 3053 /* 3054 * SHUTDOWN-COMPLETE chunk must not be bundled with any 3055 * other 3056 */ 3057 if (!first || sctp_next_chunk(ch, &remlen) != NULL || 3058 ch_len < sizeof (*ch)) { 3059 return (0); 3060 } 3061 } 3062 return (1); 3063 case CHUNK_COOKIE: 3064 case CHUNK_COOKIE_ACK: 3065 case CHUNK_SHUTDOWN_ACK: 3066 if (ch_len < sizeof (*ch) || !first) { 3067 return (0); 3068 } 3069 return (1); 3070 case CHUNK_FORWARD_TSN: 3071 if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) 3072 return (0); 3073 return (1); 3074 default: 3075 return (1); /* handled by strange_chunk() */ 3076 } 3077 3078 /* check and byteorder parameters */ 3079 if (clen <= 0) { 3080 return (1); 3081 } 3082 ASSERT(p != NULL); 3083 3084 ph = p; 3085 while (ph != NULL && clen > 0) { 3086 ch_len = ntohs(ph->sph_len); 3087 if (ch_len > len || ch_len < sizeof (*ph)) { 3088 return (0); 3089 } 3090 ph = sctp_next_parm(ph, &clen); 3091 } 3092 3093 /* All OK */ 3094 return (1); 3095 } 3096 3097 /* ARGSUSED */ 3098 static sctp_hdr_t * 3099 find_sctp_hdrs(mblk_t *mp, in6_addr_t *src, in6_addr_t *dst, 3100 uint_t *ifindex, uint_t *ip_hdr_len, ip6_pkt_t *ipp, ip_pktinfo_t *pinfo) 3101 { 3102 uchar_t *rptr; 3103 ipha_t *ip4h; 3104 ip6_t *ip6h; 3105 mblk_t *mp1; 3106 3107 rptr = mp->b_rptr; 3108 if (IPH_HDR_VERSION(rptr) == IPV4_VERSION) { 3109 *ip_hdr_len = IPH_HDR_LENGTH(rptr); 3110 ip4h = (ipha_t *)rptr; 3111 IN6_IPADDR_TO_V4MAPPED(ip4h->ipha_src, src); 3112 IN6_IPADDR_TO_V4MAPPED(ip4h->ipha_dst, dst); 3113 3114 ipp->ipp_fields |= IPPF_HOPLIMIT; 3115 ipp->ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl; 3116 if (pinfo != NULL && (pinfo->ip_pkt_flags & IPF_RECVIF)) { 
3117 ipp->ipp_fields |= IPPF_IFINDEX; 3118 ipp->ipp_ifindex = pinfo->ip_pkt_ifindex; 3119 } 3120 } else { 3121 ASSERT(IPH_HDR_VERSION(rptr) == IPV6_VERSION); 3122 ip6h = (ip6_t *)rptr; 3123 ipp->ipp_fields = IPPF_HOPLIMIT; 3124 ipp->ipp_hoplimit = ip6h->ip6_hops; 3125 3126 if (ip6h->ip6_nxt != IPPROTO_SCTP) { 3127 /* Look for ifindex information */ 3128 if (ip6h->ip6_nxt == IPPROTO_RAW) { 3129 ip6i_t *ip6i = (ip6i_t *)ip6h; 3130 3131 if (ip6i->ip6i_flags & IP6I_IFINDEX) { 3132 ASSERT(ip6i->ip6i_ifindex != 0); 3133 ipp->ipp_fields |= IPPF_IFINDEX; 3134 ipp->ipp_ifindex = ip6i->ip6i_ifindex; 3135 } 3136 rptr = (uchar_t *)&ip6i[1]; 3137 mp->b_rptr = rptr; 3138 if (rptr == mp->b_wptr) { 3139 mp1 = mp->b_cont; 3140 freeb(mp); 3141 mp = mp1; 3142 rptr = mp->b_rptr; 3143 } 3144 ASSERT(mp->b_wptr - rptr >= 3145 IPV6_HDR_LEN + sizeof (sctp_hdr_t)); 3146 ip6h = (ip6_t *)rptr; 3147 } 3148 /* 3149 * Find any potentially interesting extension headers 3150 * as well as the length of the IPv6 + extension 3151 * headers. 3152 */ 3153 *ip_hdr_len = ip_find_hdr_v6(mp, ip6h, ipp, NULL); 3154 } else { 3155 *ip_hdr_len = IPV6_HDR_LEN; 3156 } 3157 *src = ip6h->ip6_src; 3158 *dst = ip6h->ip6_dst; 3159 } 3160 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 3161 return ((sctp_hdr_t *)&rptr[*ip_hdr_len]); 3162 #undef IPVER 3163 } 3164 3165 static mblk_t * 3166 sctp_check_in_policy(mblk_t *mp, mblk_t *ipsec_mp) 3167 { 3168 ipsec_in_t *ii; 3169 boolean_t check = B_TRUE; 3170 boolean_t policy_present; 3171 ipha_t *ipha; 3172 ip6_t *ip6h; 3173 netstack_t *ns; 3174 ipsec_stack_t *ipss; 3175 3176 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 3177 ASSERT(ii->ipsec_in_type == IPSEC_IN); 3178 ns = ii->ipsec_in_ns; 3179 ipss = ns->netstack_ipsec; 3180 3181 if (ii->ipsec_in_dont_check) { 3182 check = B_FALSE; 3183 if (!ii->ipsec_in_secure) { 3184 freeb(ipsec_mp); 3185 ipsec_mp = NULL; 3186 } 3187 } 3188 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 3189 policy_present = ipss->ipsec_inbound_v4_policy_present; 3190 ipha = (ipha_t *)mp->b_rptr; 3191 ip6h = NULL; 3192 } else { 3193 policy_present = ipss->ipsec_inbound_v6_policy_present; 3194 ipha = NULL; 3195 ip6h = (ip6_t *)mp->b_rptr; 3196 } 3197 3198 if (check && policy_present) { 3199 /* 3200 * The conn_t parameter is NULL because we already know 3201 * nobody's home. 3202 */ 3203 ipsec_mp = ipsec_check_global_policy(ipsec_mp, (conn_t *)NULL, 3204 ipha, ip6h, B_TRUE, ns); 3205 if (ipsec_mp == NULL) 3206 return (NULL); 3207 } 3208 if (ipsec_mp != NULL) 3209 freeb(ipsec_mp); 3210 return (mp); 3211 } 3212 3213 /* Handle out-of-the-blue packets */ 3214 void 3215 sctp_ootb_input(mblk_t *mp, ill_t *recv_ill, zoneid_t zoneid, 3216 boolean_t mctl_present) 3217 { 3218 sctp_t *sctp; 3219 sctp_chunk_hdr_t *ch; 3220 sctp_hdr_t *sctph; 3221 in6_addr_t src, dst; 3222 uint_t ip_hdr_len; 3223 uint_t ifindex; 3224 ip6_pkt_t ipp; 3225 ssize_t mlen; 3226 ip_pktinfo_t *pinfo = NULL; 3227 mblk_t *first_mp; 3228 sctp_stack_t *sctps; 3229 ip_stack_t *ipst; 3230 3231 ASSERT(recv_ill != NULL); 3232 ipst = recv_ill->ill_ipst; 3233 sctps = ipst->ips_netstack->netstack_sctp; 3234 3235 BUMP_MIB(&sctps->sctps_mib, sctpOutOfBlue); 3236 BUMP_MIB(&sctps->sctps_mib, sctpInSCTPPkts); 3237 3238 if (sctps->sctps_gsctp == NULL) { 3239 /* 3240 * For non-zero stackids the default queue isn't created 3241 * until the first open, thus there can be a need to send 3242 * an error before then. But we can't do that, hence we just 3243 * drop the packet. 
Later during boot, when the default
		 * queue has been set up, a retransmitted packet from the
		 * peer will result in an error.
		 */
		ASSERT(sctps->sctps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID);
		freemsg(mp);
		return;
	}

	first_mp = mp;
	if (mctl_present)
		mp = mp->b_cont;

	/* Initiate IPPF processing, if needed. */
	if (IPP_ENABLED(IPP_LOCAL_IN, ipst)) {
		ip_process(IPP_LOCAL_IN, &mp,
		    recv_ill->ill_phyint->phyint_ifindex);
		if (mp == NULL) {
			if (mctl_present)
				freeb(first_mp);
			return;
		}
	}

	if (mp->b_cont != NULL) {
		/*
		 * All subsequent code is vastly simplified if it can
		 * assume a single contiguous chunk of data.
		 */
		if (pullupmsg(mp, -1) == 0) {
			BUMP_MIB(recv_ill->ill_ip_mib, ipIfStatsInDiscards);
			freemsg(first_mp);
			return;
		}
	}

	/*
	 * We don't really need to call this function... Need to
	 * optimize later.
	 */
	sctph = find_sctp_hdrs(mp, &src, &dst, &ifindex, &ip_hdr_len,
	    &ipp, pinfo);
	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
	if ((ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen)) == NULL) {
		dprint(3, ("sctp_ootb_input: invalid packet\n"));
		BUMP_MIB(recv_ill->ill_ip_mib, ipIfStatsInDiscards);
		freemsg(first_mp);
		return;
	}

	switch (ch->sch_id) {
	case CHUNK_INIT:
		/* no listener; send abort */
		if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL)
			return;
		sctp_send_abort(sctps->sctps_gsctp, sctp_init2vtag(ch), 0,
		    NULL, 0, mp, 0, B_TRUE);
		break;
	case CHUNK_INIT_ACK:
		/* check for changed src addr */
		sctp = sctp_addrlist2sctp(mp, sctph, ch, zoneid, sctps);
		if (sctp != NULL) {
			/* success; proceed to normal path */
			mutex_enter(&sctp->sctp_lock);
			if (sctp->sctp_running) {
				if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
					BUMP_MIB(recv_ill->ill_ip_mib,
					    ipIfStatsInDiscards);
					freemsg(mp);
				}
				mutex_exit(&sctp->sctp_lock);
			} else {
				/*
				 * If the source address has changed, we
				 * don't need to worry too much about
				 * out of order processing. So we don't
				 * check if the recvq is empty or not here.
3321 */ 3322 sctp->sctp_running = B_TRUE; 3323 mutex_exit(&sctp->sctp_lock); 3324 sctp_input_data(sctp, mp, NULL); 3325 WAKE_SCTP(sctp); 3326 sctp_process_sendq(sctp); 3327 } 3328 SCTP_REFRELE(sctp); 3329 return; 3330 } 3331 if (mctl_present) 3332 freeb(first_mp); 3333 /* else bogus init ack; drop it */ 3334 break; 3335 case CHUNK_SHUTDOWN_ACK: 3336 if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL) 3337 return; 3338 sctp_ootb_shutdown_ack(sctps->sctps_gsctp, mp, ip_hdr_len); 3339 sctp_process_sendq(sctps->sctps_gsctp); 3340 return; 3341 case CHUNK_ERROR: 3342 case CHUNK_ABORT: 3343 case CHUNK_COOKIE_ACK: 3344 case CHUNK_SHUTDOWN_COMPLETE: 3345 if (mctl_present) 3346 freeb(first_mp); 3347 break; 3348 default: 3349 if (mctl_present && sctp_check_in_policy(mp, first_mp) == NULL) 3350 return; 3351 sctp_send_abort(sctps->sctps_gsctp, sctph->sh_verf, 0, 3352 NULL, 0, mp, 0, B_TRUE); 3353 break; 3354 } 3355 sctp_process_sendq(sctps->sctps_gsctp); 3356 freemsg(mp); 3357 } 3358 3359 void 3360 sctp_input(conn_t *connp, ipha_t *ipha, mblk_t *mp, mblk_t *first_mp, 3361 ill_t *recv_ill, boolean_t isv4, boolean_t mctl_present) 3362 { 3363 sctp_t *sctp = CONN2SCTP(connp); 3364 ip_stack_t *ipst = recv_ill->ill_ipst; 3365 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 3366 3367 /* 3368 * We check some fields in conn_t without holding a lock. 3369 * This should be fine. 3370 */ 3371 if (CONN_INBOUND_POLICY_PRESENT(connp, ipss) || mctl_present) { 3372 first_mp = ipsec_check_inbound_policy(first_mp, connp, 3373 ipha, NULL, mctl_present); 3374 if (first_mp == NULL) { 3375 BUMP_MIB(recv_ill->ill_ip_mib, ipIfStatsInDiscards); 3376 SCTP_REFRELE(sctp); 3377 return; 3378 } 3379 } 3380 3381 /* Initiate IPPF processing for fastpath */ 3382 if (IPP_ENABLED(IPP_LOCAL_IN, ipst)) { 3383 ip_process(IPP_LOCAL_IN, &mp, 3384 recv_ill->ill_phyint->phyint_ifindex); 3385 if (mp == NULL) { 3386 SCTP_REFRELE(sctp); 3387 if (mctl_present) 3388 freeb(first_mp); 3389 return; 3390 } else if (mctl_present) { 3391 /* 3392 * ip_process might return a new mp. 3393 */ 3394 ASSERT(first_mp != mp); 3395 first_mp->b_cont = mp; 3396 } else { 3397 first_mp = mp; 3398 } 3399 } 3400 3401 if (connp->conn_recvif || connp->conn_recvslla || 3402 connp->conn_ip_recvpktinfo) { 3403 int in_flags = 0; 3404 3405 if (connp->conn_recvif || connp->conn_ip_recvpktinfo) { 3406 in_flags = IPF_RECVIF; 3407 } 3408 if (connp->conn_recvslla) { 3409 in_flags |= IPF_RECVSLLA; 3410 } 3411 if (isv4) { 3412 mp = ip_add_info(mp, recv_ill, in_flags, 3413 IPCL_ZONEID(connp), ipst); 3414 } else { 3415 mp = ip_add_info_v6(mp, recv_ill, 3416 &(((ip6_t *)ipha)->ip6_dst)); 3417 } 3418 if (mp == NULL) { 3419 BUMP_MIB(recv_ill->ill_ip_mib, ipIfStatsInDiscards); 3420 SCTP_REFRELE(sctp); 3421 if (mctl_present) 3422 freeb(first_mp); 3423 return; 3424 } else if (mctl_present) { 3425 /* 3426 * ip_add_info might return a new mp. 
3427 */ 3428 ASSERT(first_mp != mp); 3429 first_mp->b_cont = mp; 3430 } else { 3431 first_mp = mp; 3432 } 3433 } 3434 3435 mutex_enter(&sctp->sctp_lock); 3436 if (sctp->sctp_running) { 3437 if (mctl_present) 3438 mp->b_prev = first_mp; 3439 if (!sctp_add_recvq(sctp, mp, B_FALSE)) { 3440 BUMP_MIB(recv_ill->ill_ip_mib, ipIfStatsInDiscards); 3441 freemsg(first_mp); 3442 } 3443 mutex_exit(&sctp->sctp_lock); 3444 SCTP_REFRELE(sctp); 3445 return; 3446 } else { 3447 sctp->sctp_running = B_TRUE; 3448 mutex_exit(&sctp->sctp_lock); 3449 3450 mutex_enter(&sctp->sctp_recvq_lock); 3451 if (sctp->sctp_recvq != NULL) { 3452 if (mctl_present) 3453 mp->b_prev = first_mp; 3454 if (!sctp_add_recvq(sctp, mp, B_TRUE)) { 3455 BUMP_MIB(recv_ill->ill_ip_mib, 3456 ipIfStatsInDiscards); 3457 freemsg(first_mp); 3458 } 3459 mutex_exit(&sctp->sctp_recvq_lock); 3460 WAKE_SCTP(sctp); 3461 SCTP_REFRELE(sctp); 3462 return; 3463 } 3464 } 3465 mutex_exit(&sctp->sctp_recvq_lock); 3466 sctp_input_data(sctp, mp, (mctl_present ? first_mp : NULL)); 3467 WAKE_SCTP(sctp); 3468 sctp_process_sendq(sctp); 3469 SCTP_REFRELE(sctp); 3470 } 3471 3472 static void 3473 sctp_process_abort(sctp_t *sctp, sctp_chunk_hdr_t *ch, int err) 3474 { 3475 sctp_stack_t *sctps = sctp->sctp_sctps; 3476 3477 BUMP_MIB(&sctps->sctps_mib, sctpAborted); 3478 BUMP_LOCAL(sctp->sctp_ibchunks); 3479 3480 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3481 ntohs(((sctp_parm_hdr_t *)(ch + 1))->sph_type), ch); 3482 sctp_clean_death(sctp, err); 3483 } 3484 3485 void 3486 sctp_input_data(sctp_t *sctp, mblk_t *mp, mblk_t *ipsec_mp) 3487 { 3488 sctp_chunk_hdr_t *ch; 3489 ssize_t mlen; 3490 int gotdata; 3491 int trysend; 3492 sctp_faddr_t *fp; 3493 sctp_init_chunk_t *iack; 3494 uint32_t tsn; 3495 sctp_data_hdr_t *sdc; 3496 ip6_pkt_t ipp; 3497 in6_addr_t src; 3498 in6_addr_t dst; 3499 uint_t ifindex; 3500 sctp_hdr_t *sctph; 3501 uint_t ip_hdr_len; 3502 mblk_t *dups = NULL; 3503 int recv_adaptation; 3504 boolean_t wake_eager = B_FALSE; 3505 mblk_t *pinfo_mp; 3506 ip_pktinfo_t *pinfo = NULL; 3507 in6_addr_t peer_src; 3508 int64_t now; 3509 sctp_stack_t *sctps = sctp->sctp_sctps; 3510 ip_stack_t *ipst = sctps->sctps_netstack->netstack_ip; 3511 boolean_t hb_already = B_FALSE; 3512 3513 if (DB_TYPE(mp) != M_DATA) { 3514 ASSERT(DB_TYPE(mp) == M_CTL); 3515 if (MBLKL(mp) == sizeof (ip_pktinfo_t) && 3516 ((ip_pktinfo_t *)mp->b_rptr)->ip_pkt_ulp_type == 3517 IN_PKTINFO) { 3518 pinfo = (ip_pktinfo_t *)mp->b_rptr; 3519 pinfo_mp = mp; 3520 mp = mp->b_cont; 3521 } else { 3522 if (ipsec_mp != NULL) 3523 freeb(ipsec_mp); 3524 sctp_icmp_error(sctp, mp); 3525 return; 3526 } 3527 } 3528 ASSERT(DB_TYPE(mp) == M_DATA); 3529 3530 if (mp->b_cont != NULL) { 3531 /* 3532 * All subsequent code is vastly simplified if it can 3533 * assume a single contiguous chunk of data. 
		 */
		if (pullupmsg(mp, -1) == 0) {
			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
			if (ipsec_mp != NULL)
				freeb(ipsec_mp);
			if (pinfo != NULL)
				freeb(pinfo_mp);
			freemsg(mp);
			return;
		}
	}

	BUMP_LOCAL(sctp->sctp_ipkts);
	sctph = find_sctp_hdrs(mp, &src, &dst, &ifindex, &ip_hdr_len,
	    &ipp, pinfo);
	if (pinfo != NULL)
		freeb(pinfo_mp);
	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
	ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen);
	if (ch == NULL) {
		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
		if (ipsec_mp != NULL)
			freeb(ipsec_mp);
		freemsg(mp);
		return;
	}

	if (!sctp_check_input(sctp, ch, mlen, 1)) {
		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
		goto done;
	}
	/*
	 * Check the verification tag (special handling for INIT,
	 * COOKIE, SHUTDOWN_COMPLETE and SHUTDOWN_ACK chunks).
	 * ABORTs are handled in the chunk processing loop, since they
	 * may not appear first. All other checked chunks must
	 * appear first, or will have been dropped by check_input().
	 */
	switch (ch->sch_id) {
	case CHUNK_INIT:
		if (sctph->sh_verf != 0) {
			/* drop it */
			goto done;
		}
		break;
	case CHUNK_SHUTDOWN_COMPLETE:
		if (sctph->sh_verf == sctp->sctp_lvtag)
			break;
		if (sctph->sh_verf == sctp->sctp_fvtag &&
		    SCTP_GET_TBIT(ch)) {
			break;
		}
		/* else drop it */
		goto done;
	case CHUNK_ABORT:
	case CHUNK_COOKIE:
		/* handled below */
		break;
	case CHUNK_SHUTDOWN_ACK:
		if (sctp->sctp_state > SCTPS_BOUND &&
		    sctp->sctp_state < SCTPS_ESTABLISHED) {
			/* treat as OOTB */
			sctp_ootb_shutdown_ack(sctp, mp, ip_hdr_len);
			if (ipsec_mp != NULL)
				freeb(ipsec_mp);
			return;
		}
		/* else fallthru */
	default:
		/*
		 * All other packets must have a valid
		 * verification tag; however, if this is a
		 * listener, we use a refined version of
		 * out-of-the-blue logic.
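		 * (For a listener the check is effectively deferred:
		 * the COOKIE path below verifies that the peer echoed
		 * the verification tag from our INIT-ACK.)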
3608 */ 3609 if (sctph->sh_verf != sctp->sctp_lvtag && 3610 sctp->sctp_state != SCTPS_LISTEN) { 3611 /* drop it */ 3612 goto done; 3613 } 3614 break; 3615 } 3616 3617 /* Have a valid sctp for this packet */ 3618 fp = sctp_lookup_faddr(sctp, &src); 3619 dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp, 3620 (void *)fp, (void *)sctp)); 3621 3622 gotdata = 0; 3623 trysend = 0; 3624 3625 now = lbolt64; 3626 /* Process the chunks */ 3627 do { 3628 dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n", 3629 sctp->sctp_state, (int)(ch->sch_id))); 3630 3631 if (ch->sch_id == CHUNK_ABORT) { 3632 if (sctph->sh_verf != sctp->sctp_lvtag && 3633 sctph->sh_verf != sctp->sctp_fvtag) { 3634 /* drop it */ 3635 goto done; 3636 } 3637 } 3638 3639 switch (sctp->sctp_state) { 3640 3641 case SCTPS_ESTABLISHED: 3642 case SCTPS_SHUTDOWN_PENDING: 3643 case SCTPS_SHUTDOWN_SENT: 3644 switch (ch->sch_id) { 3645 case CHUNK_DATA: 3646 /* 0-length data chunks are not allowed */ 3647 if (ntohs(ch->sch_len) == sizeof (*sdc)) { 3648 sdc = (sctp_data_hdr_t *)ch; 3649 tsn = sdc->sdh_tsn; 3650 sctp_send_abort(sctp, sctp->sctp_fvtag, 3651 SCTP_ERR_NO_USR_DATA, (char *)&tsn, 3652 sizeof (tsn), mp, 0, B_FALSE); 3653 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3654 0, NULL); 3655 sctp_clean_death(sctp, ECONNABORTED); 3656 goto done; 3657 } 3658 3659 ASSERT(fp != NULL); 3660 sctp->sctp_lastdata = fp; 3661 sctp_data_chunk(sctp, ch, mp, &dups, fp, &ipp); 3662 gotdata = 1; 3663 /* Restart shutdown timer if shutting down */ 3664 if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) { 3665 /* 3666 * If we have exceeded our max 3667 * wait bound for waiting for a 3668 * shutdown ack from the peer, 3669 * abort the association. 3670 */ 3671 if (sctps->sctps_shutack_wait_bound != 3672 0 && 3673 TICK_TO_MSEC(now - 3674 sctp->sctp_out_time) > 3675 sctps->sctps_shutack_wait_bound) { 3676 sctp_send_abort(sctp, 3677 sctp->sctp_fvtag, 0, NULL, 3678 0, mp, 0, B_FALSE); 3679 sctp_assoc_event(sctp, 3680 SCTP_COMM_LOST, 0, NULL); 3681 sctp_clean_death(sctp, 3682 ECONNABORTED); 3683 goto done; 3684 } 3685 SCTP_FADDR_TIMER_RESTART(sctp, fp, 3686 fp->rto); 3687 } 3688 break; 3689 case CHUNK_SACK: 3690 ASSERT(fp != NULL); 3691 /* 3692 * Peer is real and alive if it can ack our 3693 * data. 3694 */ 3695 sctp_faddr_alive(sctp, fp); 3696 trysend = sctp_got_sack(sctp, ch); 3697 if (trysend < 0) { 3698 sctp_send_abort(sctp, sctph->sh_verf, 3699 0, NULL, 0, mp, 0, B_FALSE); 3700 sctp_assoc_event(sctp, 3701 SCTP_COMM_LOST, 0, NULL); 3702 sctp_clean_death(sctp, 3703 ECONNABORTED); 3704 goto done; 3705 } 3706 break; 3707 case CHUNK_HEARTBEAT: 3708 if (!hb_already) { 3709 /* 3710 * In any one packet, there should 3711 * only be one heartbeat chunk. So 3712 * we should not process more than 3713 * once. 
3714 */ 3715 sctp_return_heartbeat(sctp, ch, mp); 3716 hb_already = B_TRUE; 3717 } 3718 break; 3719 case CHUNK_HEARTBEAT_ACK: 3720 sctp_process_heartbeat(sctp, ch); 3721 break; 3722 case CHUNK_SHUTDOWN: 3723 sctp_shutdown_event(sctp); 3724 trysend = sctp_shutdown_received(sctp, ch, 3725 B_FALSE, B_FALSE, fp); 3726 BUMP_LOCAL(sctp->sctp_ibchunks); 3727 break; 3728 case CHUNK_SHUTDOWN_ACK: 3729 BUMP_LOCAL(sctp->sctp_ibchunks); 3730 if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) { 3731 sctp_shutdown_complete(sctp); 3732 BUMP_MIB(&sctps->sctps_mib, 3733 sctpShutdowns); 3734 sctp_assoc_event(sctp, 3735 SCTP_SHUTDOWN_COMP, 0, NULL); 3736 sctp_clean_death(sctp, 0); 3737 goto done; 3738 } 3739 break; 3740 case CHUNK_ABORT: { 3741 sctp_saddr_ipif_t *sp; 3742 3743 /* Ignore if delete pending */ 3744 sp = sctp_saddr_lookup(sctp, &dst, 0); 3745 ASSERT(sp != NULL); 3746 if (sp->saddr_ipif_delete_pending) { 3747 BUMP_LOCAL(sctp->sctp_ibchunks); 3748 break; 3749 } 3750 3751 sctp_process_abort(sctp, ch, ECONNRESET); 3752 goto done; 3753 } 3754 case CHUNK_INIT: 3755 sctp_send_initack(sctp, sctph, ch, mp); 3756 break; 3757 case CHUNK_COOKIE: 3758 if (sctp_process_cookie(sctp, ch, mp, &iack, 3759 sctph, &recv_adaptation, NULL) != -1) { 3760 sctp_send_cookie_ack(sctp); 3761 sctp_assoc_event(sctp, SCTP_RESTART, 3762 0, NULL); 3763 if (recv_adaptation) { 3764 sctp->sctp_recv_adaptation = 1; 3765 sctp_adaptation_event(sctp); 3766 } 3767 } else { 3768 BUMP_MIB(&sctps->sctps_mib, 3769 sctpInInvalidCookie); 3770 } 3771 break; 3772 case CHUNK_ERROR: { 3773 int error; 3774 3775 BUMP_LOCAL(sctp->sctp_ibchunks); 3776 error = sctp_handle_error(sctp, sctph, ch, mp); 3777 if (error != 0) { 3778 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3779 0, NULL); 3780 sctp_clean_death(sctp, error); 3781 goto done; 3782 } 3783 break; 3784 } 3785 case CHUNK_ASCONF: 3786 ASSERT(fp != NULL); 3787 sctp_input_asconf(sctp, ch, fp); 3788 BUMP_LOCAL(sctp->sctp_ibchunks); 3789 break; 3790 case CHUNK_ASCONF_ACK: 3791 ASSERT(fp != NULL); 3792 sctp_faddr_alive(sctp, fp); 3793 sctp_input_asconf_ack(sctp, ch, fp); 3794 BUMP_LOCAL(sctp->sctp_ibchunks); 3795 break; 3796 case CHUNK_FORWARD_TSN: 3797 ASSERT(fp != NULL); 3798 sctp->sctp_lastdata = fp; 3799 sctp_process_forward_tsn(sctp, ch, fp, &ipp); 3800 gotdata = 1; 3801 BUMP_LOCAL(sctp->sctp_ibchunks); 3802 break; 3803 default: 3804 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 3805 goto nomorechunks; 3806 } /* else skip and continue processing */ 3807 break; 3808 } 3809 break; 3810 3811 case SCTPS_LISTEN: 3812 switch (ch->sch_id) { 3813 case CHUNK_INIT: 3814 sctp_send_initack(sctp, sctph, ch, mp); 3815 break; 3816 case CHUNK_COOKIE: { 3817 sctp_t *eager; 3818 3819 if (sctp_process_cookie(sctp, ch, mp, &iack, 3820 sctph, &recv_adaptation, &peer_src) == -1) { 3821 BUMP_MIB(&sctps->sctps_mib, 3822 sctpInInvalidCookie); 3823 goto done; 3824 } 3825 3826 /* 3827 * The cookie is good; ensure that 3828 * the peer used the verification 3829 * tag from the init ack in the header. 3830 */ 3831 if (iack->sic_inittag != sctph->sh_verf) 3832 goto done; 3833 3834 eager = sctp_conn_request(sctp, mp, ifindex, 3835 ip_hdr_len, iack, ipsec_mp); 3836 if (eager == NULL) { 3837 sctp_send_abort(sctp, sctph->sh_verf, 3838 SCTP_ERR_NO_RESOURCES, NULL, 0, mp, 3839 0, B_FALSE); 3840 goto done; 3841 } 3842 3843 /* 3844 * If there were extra chunks 3845 * bundled with the cookie, 3846 * they must be processed 3847 * on the eager's queue. 
We 3848 * accomplish this by refeeding 3849 * the whole packet into the 3850 * state machine on the right 3851 * q. The packet (mp) gets 3852 * there via the eager's 3853 * cookie_mp field (overloaded 3854 * with the active open role). 3855 * This is picked up when 3856 * processing the null bind 3857 * request put on the eager's 3858 * q by sctp_accept(). We must 3859 * first revert the cookie 3860 * chunk's length field to network 3861 * byteorder so it can be 3862 * properly reprocessed on the 3863 * eager's queue. 3864 */ 3865 BUMP_MIB(&sctps->sctps_mib, sctpPassiveEstab); 3866 if (mlen > ntohs(ch->sch_len)) { 3867 eager->sctp_cookie_mp = dupb(mp); 3868 mblk_setcred(eager->sctp_cookie_mp, 3869 CONN_CRED(eager->sctp_connp)); 3870 /* 3871 * If no mem, just let 3872 * the peer retransmit. 3873 */ 3874 } 3875 sctp_assoc_event(eager, SCTP_COMM_UP, 0, NULL); 3876 if (recv_adaptation) { 3877 eager->sctp_recv_adaptation = 1; 3878 eager->sctp_rx_adaptation_code = 3879 sctp->sctp_rx_adaptation_code; 3880 sctp_adaptation_event(eager); 3881 } 3882 3883 eager->sctp_active = now; 3884 sctp_send_cookie_ack(eager); 3885 3886 wake_eager = B_TRUE; 3887 3888 /* 3889 * Process rest of the chunks with eager. 3890 */ 3891 sctp = eager; 3892 fp = sctp_lookup_faddr(sctp, &peer_src); 3893 /* 3894 * Confirm peer's original source. fp can 3895 * only be NULL if peer does not use the 3896 * original source as one of its addresses... 3897 */ 3898 if (fp == NULL) 3899 fp = sctp_lookup_faddr(sctp, &src); 3900 else 3901 sctp_faddr_alive(sctp, fp); 3902 3903 /* 3904 * Validate the peer addresses. It also starts 3905 * the heartbeat timer. 3906 */ 3907 sctp_validate_peer(sctp); 3908 break; 3909 } 3910 /* Anything else is considered out-of-the-blue */ 3911 case CHUNK_ERROR: 3912 case CHUNK_ABORT: 3913 case CHUNK_COOKIE_ACK: 3914 case CHUNK_SHUTDOWN_COMPLETE: 3915 BUMP_LOCAL(sctp->sctp_ibchunks); 3916 goto done; 3917 default: 3918 BUMP_LOCAL(sctp->sctp_ibchunks); 3919 sctp_send_abort(sctp, sctph->sh_verf, 0, NULL, 3920 0, mp, 0, B_TRUE); 3921 goto done; 3922 } 3923 break; 3924 3925 case SCTPS_COOKIE_WAIT: 3926 switch (ch->sch_id) { 3927 case CHUNK_INIT_ACK: 3928 sctp_stop_faddr_timers(sctp); 3929 sctp_faddr_alive(sctp, sctp->sctp_current); 3930 sctp_send_cookie_echo(sctp, ch, mp); 3931 BUMP_LOCAL(sctp->sctp_ibchunks); 3932 break; 3933 case CHUNK_ABORT: 3934 sctp_process_abort(sctp, ch, ECONNREFUSED); 3935 goto done; 3936 case CHUNK_INIT: 3937 sctp_send_initack(sctp, sctph, ch, mp); 3938 break; 3939 case CHUNK_COOKIE: 3940 if (sctp_process_cookie(sctp, ch, mp, &iack, 3941 sctph, &recv_adaptation, NULL) == -1) { 3942 BUMP_MIB(&sctps->sctps_mib, 3943 sctpInInvalidCookie); 3944 break; 3945 } 3946 sctp_send_cookie_ack(sctp); 3947 sctp_stop_faddr_timers(sctp); 3948 if (!SCTP_IS_DETACHED(sctp)) { 3949 sctp->sctp_ulp_connected( 3950 sctp->sctp_ulpd); 3951 sctp_set_ulp_prop(sctp); 3952 } 3953 sctp->sctp_state = SCTPS_ESTABLISHED; 3954 sctp->sctp_assoc_start_time = (uint32_t)lbolt; 3955 BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab); 3956 if (sctp->sctp_cookie_mp) { 3957 freemsg(sctp->sctp_cookie_mp); 3958 sctp->sctp_cookie_mp = NULL; 3959 } 3960 3961 /* Validate the peer addresses. 
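			 * This also starts the heartbeat timer.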
*/ 3962 sctp->sctp_active = now; 3963 sctp_validate_peer(sctp); 3964 3965 sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL); 3966 if (recv_adaptation) { 3967 sctp->sctp_recv_adaptation = 1; 3968 sctp_adaptation_event(sctp); 3969 } 3970 /* Try sending queued data, or ASCONFs */ 3971 trysend = 1; 3972 break; 3973 default: 3974 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 3975 goto nomorechunks; 3976 } /* else skip and continue processing */ 3977 break; 3978 } 3979 break; 3980 3981 case SCTPS_COOKIE_ECHOED: 3982 switch (ch->sch_id) { 3983 case CHUNK_COOKIE_ACK: 3984 if (!SCTP_IS_DETACHED(sctp)) { 3985 sctp->sctp_ulp_connected( 3986 sctp->sctp_ulpd); 3987 sctp_set_ulp_prop(sctp); 3988 } 3989 if (sctp->sctp_unacked == 0) 3990 sctp_stop_faddr_timers(sctp); 3991 sctp->sctp_state = SCTPS_ESTABLISHED; 3992 sctp->sctp_assoc_start_time = (uint32_t)lbolt; 3993 BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab); 3994 BUMP_LOCAL(sctp->sctp_ibchunks); 3995 if (sctp->sctp_cookie_mp) { 3996 freemsg(sctp->sctp_cookie_mp); 3997 sctp->sctp_cookie_mp = NULL; 3998 } 3999 sctp_faddr_alive(sctp, fp); 4000 /* Validate the peer addresses. */ 4001 sctp->sctp_active = now; 4002 sctp_validate_peer(sctp); 4003 4004 /* Try sending queued data, or ASCONFs */ 4005 trysend = 1; 4006 sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL); 4007 sctp_adaptation_event(sctp); 4008 break; 4009 case CHUNK_ABORT: 4010 sctp_process_abort(sctp, ch, ECONNREFUSED); 4011 goto done; 4012 case CHUNK_COOKIE: 4013 if (sctp_process_cookie(sctp, ch, mp, &iack, 4014 sctph, &recv_adaptation, NULL) == -1) { 4015 BUMP_MIB(&sctps->sctps_mib, 4016 sctpInInvalidCookie); 4017 break; 4018 } 4019 sctp_send_cookie_ack(sctp); 4020 4021 if (!SCTP_IS_DETACHED(sctp)) { 4022 sctp->sctp_ulp_connected( 4023 sctp->sctp_ulpd); 4024 sctp_set_ulp_prop(sctp); 4025 } 4026 if (sctp->sctp_unacked == 0) 4027 sctp_stop_faddr_timers(sctp); 4028 sctp->sctp_state = SCTPS_ESTABLISHED; 4029 sctp->sctp_assoc_start_time = (uint32_t)lbolt; 4030 BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab); 4031 if (sctp->sctp_cookie_mp) { 4032 freemsg(sctp->sctp_cookie_mp); 4033 sctp->sctp_cookie_mp = NULL; 4034 } 4035 /* Validate the peer addresses. 
*/ 4036 sctp->sctp_active = now; 4037 sctp_validate_peer(sctp); 4038 4039 sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL); 4040 if (recv_adaptation) { 4041 sctp->sctp_recv_adaptation = 1; 4042 sctp_adaptation_event(sctp); 4043 } 4044 /* Try sending queued data, or ASCONFs */ 4045 trysend = 1; 4046 break; 4047 case CHUNK_INIT: 4048 sctp_send_initack(sctp, sctph, ch, mp); 4049 break; 4050 case CHUNK_ERROR: { 4051 sctp_parm_hdr_t *p; 4052 4053 BUMP_LOCAL(sctp->sctp_ibchunks); 4054 /* check for a stale cookie */ 4055 if (ntohs(ch->sch_len) >= 4056 (sizeof (*p) + sizeof (*ch)) + 4057 sizeof (uint32_t)) { 4058 4059 p = (sctp_parm_hdr_t *)(ch + 1); 4060 if (p->sph_type == 4061 htons(SCTP_ERR_STALE_COOKIE)) { 4062 BUMP_MIB(&sctps->sctps_mib, 4063 sctpAborted); 4064 sctp_error_event(sctp, ch); 4065 sctp_assoc_event(sctp, 4066 SCTP_COMM_LOST, 0, NULL); 4067 sctp_clean_death(sctp, 4068 ECONNREFUSED); 4069 goto done; 4070 } 4071 } 4072 break; 4073 } 4074 case CHUNK_HEARTBEAT: 4075 if (!hb_already) { 4076 sctp_return_heartbeat(sctp, ch, mp); 4077 hb_already = B_TRUE; 4078 } 4079 break; 4080 default: 4081 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 4082 goto nomorechunks; 4083 } /* else skip and continue processing */ 4084 } /* switch (ch->sch_id) */ 4085 break; 4086 4087 case SCTPS_SHUTDOWN_ACK_SENT: 4088 switch (ch->sch_id) { 4089 case CHUNK_ABORT: 4090 /* Pass gathered wisdom to IP for keeping */ 4091 sctp_update_ire(sctp); 4092 sctp_process_abort(sctp, ch, 0); 4093 goto done; 4094 case CHUNK_SHUTDOWN_COMPLETE: 4095 BUMP_LOCAL(sctp->sctp_ibchunks); 4096 BUMP_MIB(&sctps->sctps_mib, sctpShutdowns); 4097 sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0, 4098 NULL); 4099 4100 /* Pass gathered wisdom to IP for keeping */ 4101 sctp_update_ire(sctp); 4102 sctp_clean_death(sctp, 0); 4103 goto done; 4104 case CHUNK_SHUTDOWN_ACK: 4105 sctp_shutdown_complete(sctp); 4106 BUMP_LOCAL(sctp->sctp_ibchunks); 4107 BUMP_MIB(&sctps->sctps_mib, sctpShutdowns); 4108 sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0, 4109 NULL); 4110 sctp_clean_death(sctp, 0); 4111 goto done; 4112 case CHUNK_COOKIE: 4113 (void) sctp_shutdown_received(sctp, NULL, 4114 B_TRUE, B_FALSE, fp); 4115 BUMP_LOCAL(sctp->sctp_ibchunks); 4116 break; 4117 case CHUNK_HEARTBEAT: 4118 if (!hb_already) { 4119 sctp_return_heartbeat(sctp, ch, mp); 4120 hb_already = B_TRUE; 4121 } 4122 break; 4123 default: 4124 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 4125 goto nomorechunks; 4126 } /* else skip and continue processing */ 4127 break; 4128 } 4129 break; 4130 4131 case SCTPS_SHUTDOWN_RECEIVED: 4132 switch (ch->sch_id) { 4133 case CHUNK_SHUTDOWN: 4134 trysend = sctp_shutdown_received(sctp, ch, 4135 B_FALSE, B_FALSE, fp); 4136 break; 4137 case CHUNK_SACK: 4138 trysend = sctp_got_sack(sctp, ch); 4139 if (trysend < 0) { 4140 sctp_send_abort(sctp, sctph->sh_verf, 4141 0, NULL, 0, mp, 0, B_FALSE); 4142 sctp_assoc_event(sctp, 4143 SCTP_COMM_LOST, 0, NULL); 4144 sctp_clean_death(sctp, 4145 ECONNABORTED); 4146 goto done; 4147 } 4148 break; 4149 case CHUNK_ABORT: 4150 sctp_process_abort(sctp, ch, ECONNRESET); 4151 goto done; 4152 case CHUNK_HEARTBEAT: 4153 if (!hb_already) { 4154 sctp_return_heartbeat(sctp, ch, mp); 4155 hb_already = B_TRUE; 4156 } 4157 break; 4158 default: 4159 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 4160 goto nomorechunks; 4161 } /* else skip and continue processing */ 4162 break; 4163 } 4164 break; 4165 4166 default: 4167 /* 4168 * The only remaining states are SCTPS_IDLE and 4169 * SCTPS_BOUND, and we should not be getting here 4170 * for these. 
			 */
			ASSERT(0);
		} /* switch (sctp->sctp_state) */

		ch = sctp_next_chunk(ch, &mlen);
		if (ch != NULL && !sctp_check_input(sctp, ch, mlen, 0))
			goto done;
	} while (ch != NULL);

	/* Finished processing all chunks in packet */

nomorechunks:
	/* SACK if necessary */
	if (gotdata) {
		boolean_t sack_sent;

		(sctp->sctp_sack_toggle)++;
		sack_sent = sctp_sack(sctp, dups);
		dups = NULL;

		/* If a SACK is sent, no need to restart the timer. */
		if (!sack_sent && !sctp->sctp_ack_timer_running) {
			sctp->sctp_ack_timer_running = B_TRUE;
			sctp_timer(sctp, sctp->sctp_ack_mp,
			    MSEC_TO_TICK(sctps->sctps_deferred_ack_interval));
		}
	}

	if (trysend) {
		sctp_output(sctp, UINT_MAX);
		if (sctp->sctp_cxmit_list != NULL)
			sctp_wput_asconf(sctp, NULL);
	}
	/* If there is unsent data, make sure a timer is running */
	if (sctp->sctp_unsent > 0 && !sctp->sctp_current->timer_running) {
		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
		    sctp->sctp_current->rto);
	}

done:
	if (dups != NULL)
		freeb(dups);
	if (ipsec_mp != NULL)
		freeb(ipsec_mp);
	freemsg(mp);

	if (sctp->sctp_err_chunks != NULL)
		sctp_process_err(sctp);

	if (wake_eager) {
		/*
		 * sctp points to the newly created control block; we need
		 * to release it before exiting. Before releasing it and
		 * processing the sendq, we need to grab a hold on it.
		 * Otherwise, another thread can close it while processing
		 * the sendq.
		 */
		SCTP_REFHOLD(sctp);
		WAKE_SCTP(sctp);
		sctp_process_sendq(sctp);
		SCTP_REFRELE(sctp);
	}
}

/*
 * Some amount of data got removed from the rx q.
 * Check if we should send a window update.
 *
 * Due to the way sctp_rwnd updates are made, the ULP can give reports
 * out-of-order. To keep from dropping incoming data due to this, we only
 * update sctp_rwnd if it's larger than what we've reported to the peer
 * earlier.
 */
void
sctp_recvd(sctp_t *sctp, int len)
{
	int32_t old, new;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	ASSERT(sctp != NULL);
	RUN_SCTP(sctp);

	if (len < sctp->sctp_rwnd) {
		WAKE_SCTP(sctp);
		return;
	}
	ASSERT(sctp->sctp_rwnd >= sctp->sctp_rxqueued);
	old = sctp->sctp_rwnd - sctp->sctp_rxqueued;
	new = len - sctp->sctp_rxqueued;
	sctp->sctp_rwnd = len;

	if (sctp->sctp_state >= SCTPS_ESTABLISHED &&
	    ((old <= new >> 1) || (old < sctp->sctp_mss))) {
		sctp->sctp_force_sack = 1;
		BUMP_MIB(&sctps->sctps_mib, sctpOutWinUpdate);
		(void) sctp_sack(sctp, NULL);
		old = 1;
	} else {
		old = 0;
	}
	WAKE_SCTP(sctp);
	if (old > 0) {
		sctp_process_sendq(sctp);
	}
}
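/*
 * Illustration, not part of the original file: the window-update test in
 * sctp_recvd() fires when the newly opened receive window is at least
 * double what was last advertised, or when the advertised window had
 * dropped below one MSS. A stand-alone sketch of that predicate, with
 * hypothetical plain-integer arguments in place of the sctp_t fields,
 * guarded so it is never compiled here:
 */
#ifdef SCTP_RWND_SKETCH
static int
should_send_window_update(int32_t old_avail, int32_t new_avail, int32_t mss)
{
	/* update if the window doubled or had dipped below one MSS */
	return (old_avail <= (new_avail >> 1) || old_avail < mss);
}
#endif	/* SCTP_RWND_SKETCH */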