/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/socket.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/ip6.h>
#include <inet/mib2.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ipsec_impl.h>
#include <inet/sctp_ip.h>

#include "sctp_impl.h"
#include "sctp_asconf.h"
#include "sctp_addr.h"

static struct kmem_cache *sctp_kmem_set_cache;

/*
 * PR-SCTP comments.
 *
 * When we get a valid Forward TSN chunk, we check the fragment list for this
 * SSN and preceding SSNs and free all of them. Further, if this Forward TSN
 * causes the next expected SSN to be present in the stream queue, we deliver
 * any such stranded messages upstream. We also update the SACK info
 * appropriately.
 * When checking for advancing the cumulative ack (in sctp_cumack()) we must
 * check for abandoned chunks and messages. While traversing the transmit
 * list, if we come across an abandoned chunk, we can skip the message (i.e.
 * take it out of the (re)transmit list) since this message, and hence this
 * chunk, has been marked abandoned by sctp_rexmit(). If we come across an
 * unsent chunk for a message that is now abandoned, we need to check if a
 * Forward TSN needs to be sent; this could be a case where we deferred
 * sending a Forward TSN in sctp_get_msg_to_send(). Further, after processing
 * a SACK we check if the Advanced peer ack point can be moved ahead, i.e.
 * if we can send a Forward TSN via sctp_check_abandoned_data().
 */
void
sctp_free_set(sctp_set_t *s)
{
	sctp_set_t *p;

	while (s) {
		p = s->next;
		kmem_cache_free(sctp_kmem_set_cache, s);
		s = p;
	}
}

static void
sctp_ack_add(sctp_set_t **head, uint32_t tsn, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || num == NULL)
		return;

	ASSERT(*num >= 0);
	ASSERT((*num == 0 && *head == NULL) || (*num > 0 && *head != NULL));

	if (*head == NULL) {
		*head = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (*head == NULL)
			return;
		(*head)->prev = (*head)->next = NULL;
		(*head)->begin = tsn;
		(*head)->end = tsn;
		*num = 1;
		return;
	}

	ASSERT((*head)->prev == NULL);

	/*
	 * Handle this special case here so we don't have to check
	 * for it each time in the loop.
	 */
	if (SEQ_LT(tsn + 1, (*head)->begin)) {
		/* add a new set, and move the head pointer */
		t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (t == NULL)
			return;
		t->next = *head;
		t->prev = NULL;
		(*head)->prev = t;
		t->begin = tsn;
		t->end = tsn;
		(*num)++;
		*head = t;
		return;
	}

	/*
	 * We need to handle the following cases, where p points to
	 * the current set (as we walk through the loop):
	 *
	 * 1. tsn is entirely less than p; create a new set before p.
	 * 2. tsn borders p from less; coalesce p with tsn.
	 * 3. tsn is within p; do nothing.
	 * 4. tsn borders p from greater; coalesce p with tsn.
	 * 4a. p may now border p->next from less; if so, coalesce those
	 *     two sets.
	 * 5. tsn is entirely greater than all sets; add a new set at
	 *    the end.
	 */
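	/*
	 * For illustration, consider each case independently against the
	 * hypothetical starting sets [3-5] and [7-9]:
	 *
	 *	tsn = 1:  case 1, new set [1-1] becomes the new head
	 *	tsn = 2:  case 2, [3-5] becomes [2-5]
	 *	tsn = 4:  case 3, already covered, nothing to do
	 *	tsn = 6:  cases 4 and 4a, [3-5] and [7-9] merge into [3-9]
	 *	tsn = 12: case 5, new set [12-12] is appended at the end
	 */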
	for (p = *head; ; p = p->next) {
		if (SEQ_LT(tsn + 1, p->begin)) {
			/* 1: add a new set before p. */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = p;
			t->prev = NULL;
			t->begin = tsn;
			t->end = tsn;
			if (p->prev) {
				t->prev = p->prev;
				p->prev->next = t;
			}
			p->prev = t;
			(*num)++;
			return;
		}

		if ((tsn + 1) == p->begin) {
			/* 2: adjust p->begin */
			p->begin = tsn;
			return;
		}

		if (SEQ_GEQ(tsn, p->begin) && SEQ_LEQ(tsn, p->end)) {
			/* 3: do nothing */
			return;
		}

		if ((p->end + 1) == tsn) {
			/* 4: adjust p->end */
			p->end = tsn;

			if (p->next != NULL && (tsn + 1) == p->next->begin) {
				/* 4a: coalesce p and p->next */
				t = p->next;
				p->end = t->end;
				p->next = t->next;
				if (t->next != NULL)
					t->next->prev = p;
				kmem_cache_free(sctp_kmem_set_cache, t);
				(*num)--;
			}
			return;
		}

		if (p->next == NULL) {
			/* 5: add new set at the end */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = NULL;
			t->prev = p;
			t->begin = tsn;
			t->end = tsn;
			p->next = t;
			(*num)++;
			return;
		}

		if (SEQ_GT(tsn, p->end + 1))
			continue;
	}
}

static void
sctp_ack_rem(sctp_set_t **head, uint32_t end, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || *head == NULL || num == NULL)
		return;

	/* Nothing to remove */
	if (SEQ_LT(end, (*head)->begin))
		return;

	/* Find out where to start removing sets */
	for (p = *head; p->next; p = p->next) {
		if (SEQ_LEQ(end, p->end))
			break;
	}

	if (SEQ_LT(end, p->end) && SEQ_GEQ(end, p->begin)) {
		/* adjust p */
		p->begin = end + 1;
		/* all done */
		if (p == *head)
			return;
	} else if (SEQ_GEQ(end, p->end)) {
		/* remove this set too */
		p = p->next;
	}

	/* unlink everything before this set */
	t = *head;
	*head = p;
	if (p != NULL && p->prev != NULL) {
		p->prev->next = NULL;
		p->prev = NULL;
	}

	sctp_free_set(t);

	/* recount the number of sets */
	*num = 0;

	for (p = *head; p != NULL; p = p->next)
		(*num)++;
}

void
sctp_sets_init()
{
	sctp_kmem_set_cache = kmem_cache_create("sctp_set_cache",
	    sizeof (sctp_set_t), 0, NULL, NULL, NULL, NULL,
	    NULL, 0);
}

void
sctp_sets_fini()
{
	kmem_cache_destroy(sctp_kmem_set_cache);
}

sctp_chunk_hdr_t *
sctp_first_chunk(uchar_t *rptr, ssize_t remaining)
{
	sctp_chunk_hdr_t *ch;
	uint16_t ch_len;

	if (remaining < sizeof (*ch)) {
		return (NULL);
	}

	ch = (sctp_chunk_hdr_t *)rptr;
	ch_len = ntohs(ch->sch_len);

	if (ch_len < sizeof (*ch) || remaining < ch_len) {
		return (NULL);
	}

	return (ch);
}

sctp_chunk_hdr_t *
sctp_next_chunk(sctp_chunk_hdr_t *ch, ssize_t *remaining)
{
	int pad;
	uint16_t ch_len;

	if (!ch) {
		return (NULL);
	}

	ch_len = ntohs(ch->sch_len);

	if ((pad = ch_len & (SCTP_ALIGN - 1)) != 0) {
		pad = SCTP_ALIGN - pad;
	}

	*remaining -= (ch_len + pad);
	ch = (sctp_chunk_hdr_t *)((char *)ch + ch_len + pad);

	return (sctp_first_chunk((uchar_t *)ch, *remaining));
}
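/*
 * Rough sketch of the M_PROTO mblk assembled below (layout is illustrative,
 * for an association that also requested SCTP_SNDRCV info):
 *
 *	+------------------------+
 *	| struct T_unitdata_ind  |  SRC_offset/OPT_offset index past here
 *	+------------------------+
 *	| sockaddr_in/_in6       |  peer address for recvfrom()
 *	+------------------------+
 *	| cmsghdr + payload, ... |  one entry per requested option
 *	+------------------------+
 *	    b_cont --> the data chunk payload
 */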
/*
 * Attach ancillary data to a received SCTP segment.
 * If the source address (fp) is not the primary, send up a
 * unitdata_ind so recvfrom() can populate the msg_name field.
 * If ancillary data is also requested, we append it to the
 * unitdata_ind. Otherwise, we just send up an optdata_ind.
 */
static int
sctp_input_add_ancillary(sctp_t *sctp, mblk_t **mp, sctp_data_hdr_t *dcp,
    sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
{
	struct T_unitdata_ind	*tudi;
	int			optlen;
	int			hdrlen;
	uchar_t			*optptr;
	struct cmsghdr		*cmsg;
	mblk_t			*mp1;
	struct sockaddr_in6	sin_buf[1];
	struct sockaddr_in6	*sin6;
	struct sockaddr_in	*sin4;
	crb_t			addflag;	/* Which pieces to add */
	conn_t			*connp = sctp->sctp_connp;

	sin4 = NULL;
	sin6 = NULL;

	optlen = hdrlen = 0;
	addflag.crb_all = 0;

	/* Figure out address size */
	if (connp->conn_family == AF_INET) {
		sin4 = (struct sockaddr_in *)sin_buf;
		sin4->sin_family = AF_INET;
		sin4->sin_port = connp->conn_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		hdrlen = sizeof (*tudi) + sizeof (*sin4);
	} else {
		sin6 = sin_buf;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = connp->conn_fport;
		sin6->sin6_addr = fp->faddr;
		hdrlen = sizeof (*tudi) + sizeof (*sin6);
	}
	/* If app asked to receive send / recv info */
	if (sctp->sctp_recvsndrcvinfo)
		optlen += sizeof (*cmsg) + sizeof (struct sctp_sndrcvinfo);

	if (connp->conn_recv_ancillary.crb_all == 0)
		goto noancillary;

	if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
	    ira->ira_ruifindex != sctp->sctp_recvifindex) {
		optlen += sizeof (*cmsg) + sizeof (struct in6_pktinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ip_recvpktinfo = 1;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
	    ipp->ipp_hoplimit != sctp->sctp_recvhops) {
		optlen += sizeof (*cmsg) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvhoplimit = 1;
	}
	/* If app asked for tclass and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
	    ipp->ipp_tclass != sctp->sctp_recvtclass) {
		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvtclass = 1;
	}
	/* If app asked for hopbyhop headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
	    ip_cmpbuf(sctp->sctp_hopopts, sctp->sctp_hopoptslen,
	    (ipp->ipp_fields & IPPF_HOPOPTS),
	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_hopoptslen -
		    sctp->sctp_v6label_len;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvhopopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
			return (-1);
	}
	/* If app asked for dst headers before routing headers ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
	    ip_cmpbuf(sctp->sctp_rthdrdstopts, sctp->sctp_rthdrdstoptslen,
	    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
	    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvrthdrdstopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_rthdrdstopts,
		    &sctp->sctp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
			return (-1);
	}
	/* If app asked for routing headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
	    ip_cmpbuf(sctp->sctp_rthdr, sctp->sctp_rthdrlen,
	    (ipp->ipp_fields & IPPF_RTHDR),
	    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rthdrlen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvrthdr = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_rthdr,
		    &sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
			return (-1);
	}
	/* If app asked for dest headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvdstopts &&
	    ip_cmpbuf(sctp->sctp_dstopts, sctp->sctp_dstoptslen,
	    (ipp->ipp_fields & IPPF_DSTOPTS),
	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_dstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvdstopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
			return (-1);
	}
noancillary:
	/* Nothing to add */
	if (hdrlen == 0)
		return (-1);

	mp1 = allocb(hdrlen + optlen + sizeof (void *), BPRI_MED);
	if (mp1 == NULL)
		return (-1);
	mp1->b_cont = *mp;
	*mp = mp1;
	mp1->b_rptr += sizeof (void *);	/* pointer worth of padding */
	mp1->b_wptr = mp1->b_rptr + hdrlen + optlen;
	DB_TYPE(mp1) = M_PROTO;
	tudi = (struct T_unitdata_ind *)mp1->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_length = sin4 ? sizeof (*sin4) : sizeof (*sin6);
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_offset = sizeof (*tudi) + tudi->SRC_length;
	tudi->OPT_length = optlen;
	if (sin4) {
		bcopy(sin4, tudi + 1, sizeof (*sin4));
	} else {
		bcopy(sin6, tudi + 1, sizeof (*sin6));
	}
	optptr = (uchar_t *)tudi + tudi->OPT_offset;

	if (sctp->sctp_recvsndrcvinfo) {
		/* XXX need backout method if memory allocation fails. */
		struct sctp_sndrcvinfo *sri;

		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_SCTP;
		cmsg->cmsg_type = SCTP_SNDRCV;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*sri);
		optptr += sizeof (*cmsg);

		sri = (struct sctp_sndrcvinfo *)(cmsg + 1);
		ASSERT(OK_32PTR(sri));
		sri->sinfo_stream = ntohs(dcp->sdh_sid);
		sri->sinfo_ssn = ntohs(dcp->sdh_ssn);
		if (SCTP_DATA_GET_UBIT(dcp)) {
			sri->sinfo_flags = MSG_UNORDERED;
		} else {
			sri->sinfo_flags = 0;
		}
		sri->sinfo_ppid = dcp->sdh_payload_id;
		sri->sinfo_context = 0;
		sri->sinfo_timetolive = 0;
		sri->sinfo_tsn = ntohl(dcp->sdh_tsn);
		sri->sinfo_cumtsn = sctp->sctp_ftsn;
		sri->sinfo_assoc_id = 0;

		optptr += sizeof (*sri);
	}

	/*
	 * If app asked for pktinfo and the index has changed ...
	 * Note that the local address never changes for the connection.
	 */
	if (addflag.crb_ip_recvpktinfo) {
		struct in6_pktinfo *pkti;
		uint_t ifindex;

		ifindex = ira->ira_ruifindex;
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_PKTINFO;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*pkti);
		optptr += sizeof (*cmsg);

		pkti = (struct in6_pktinfo *)optptr;
		if (connp->conn_family == AF_INET6)
			pkti->ipi6_addr = sctp->sctp_ip6h->ip6_src;
		else
			IN6_IPADDR_TO_V4MAPPED(sctp->sctp_ipha->ipha_src,
			    &pkti->ipi6_addr);

		pkti->ipi6_ifindex = ifindex;
		optptr += sizeof (*pkti);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvifindex = ifindex;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (addflag.crb_ipv6_recvhoplimit) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPLIMIT;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
		optptr += sizeof (*cmsg);

		*(uint_t *)optptr = ipp->ipp_hoplimit;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvhops = ipp->ipp_hoplimit;
	}
	/* If app asked for tclass and it has changed ... */
	if (addflag.crb_ipv6_recvtclass) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_TCLASS;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
		optptr += sizeof (*cmsg);

		*(uint_t *)optptr = ipp->ipp_tclass;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvtclass = ipp->ipp_tclass;
	}
	if (addflag.crb_ipv6_recvhopopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_hopoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
		optptr += ipp->ipp_hopoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
	}
	if (addflag.crb_ipv6_recvrthdrdstopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDRDSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen);
		optptr += ipp->ipp_rthdrdstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rthdrdstopts,
		    &sctp->sctp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
	}
	if (addflag.crb_ipv6_recvrthdr) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDR;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrlen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
		optptr += ipp->ipp_rthdrlen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rthdr,
		    &sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
	}
	if (addflag.crb_ipv6_recvdstopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_DSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_dstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
		optptr += ipp->ipp_dstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
	}

	ASSERT(optptr == mp1->b_wptr);

	return (0);
}

void
sctp_free_reass(sctp_instr_t *sip)
{
	mblk_t *mp, *mpnext, *mctl;
#ifdef DEBUG
	sctp_reass_t *srp;
#endif

	for (mp = sip->istr_reass; mp != NULL; mp = mpnext) {
		mpnext = mp->b_next;
		mp->b_next = NULL;
		mp->b_prev = NULL;
		if (DB_TYPE(mp) == M_CTL) {
			mctl = mp;
#ifdef DEBUG
			srp = (sctp_reass_t *)DB_BASE(mctl);
			/* Partial delivery can leave empty srp */
			ASSERT(mp->b_cont != NULL || srp->got == 0);
#endif
			mp = mp->b_cont;
			mctl->b_cont = NULL;
			freeb(mctl);
		}
		freemsg(mp);
	}
	sip->istr_reass = NULL;
}

/*
 * If the series of data fragments of which dmp is a part is successfully
 * reassembled, the first mblk in the series is returned. dc is adjusted
 * to point at the data chunk in the lead mblk, and b_rptr also points to
 * the data chunk; the following mblks' b_rptrs point at the actual payload.
 *
 * If the series is not yet reassembled, NULL is returned. dc is not changed.
 * XXX should probably move this up into the state machine.
 */

/* Fragment list for un-ordered messages. Partial delivery is not supported */
static mblk_t *
sctp_uodata_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc)
{
	mblk_t *hmp;
	mblk_t *begin = NULL;
	mblk_t *end = NULL;
	sctp_data_hdr_t *qdc;
	uint32_t ntsn;
	uint32_t tsn = ntohl((*dc)->sdh_tsn);
#ifdef DEBUG
	mblk_t *mp1;
#endif

	/* First frag. */
	if (sctp->sctp_uo_frags == NULL) {
		sctp->sctp_uo_frags = dmp;
		return (NULL);
	}
	hmp = sctp->sctp_uo_frags;
	/*
	 * Insert the segment according to the TSN; fragmented unordered
	 * chunks are sequenced by TSN.
	 */
	while (hmp != NULL) {
		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
		ntsn = ntohl(qdc->sdh_tsn);
		if (SEQ_GT(ntsn, tsn)) {
			if (hmp->b_prev == NULL) {
				dmp->b_next = hmp;
				hmp->b_prev = dmp;
				sctp->sctp_uo_frags = dmp;
			} else {
				dmp->b_next = hmp;
				dmp->b_prev = hmp->b_prev;
				hmp->b_prev->b_next = dmp;
				hmp->b_prev = dmp;
			}
			break;
		}
		if (hmp->b_next == NULL) {
			hmp->b_next = dmp;
			dmp->b_prev = hmp;
			break;
		}
		hmp = hmp->b_next;
	}
	/* check if we completed a msg */
	if (SCTP_DATA_GET_BBIT(*dc)) {
		begin = dmp;
	} else if (SCTP_DATA_GET_EBIT(*dc)) {
		end = dmp;
	}
	/*
	 * We walk consecutive TSNs backwards till we get a seg. with
	 * the B bit
	 */
	if (begin == NULL) {
		for (hmp = dmp->b_prev; hmp != NULL; hmp = hmp->b_prev) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(tsn - ntsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_BBIT(qdc)) {
				begin = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	tsn = ntohl((*dc)->sdh_tsn);
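	/*
	 * Example (hypothetical TSNs): dmp carries TSN 7 with neither the
	 * B nor the E bit set.  The backward walk above finds the B bit
	 * at, say, TSN 5; the forward walk below must then find the E bit
	 * (say at TSN 8) with no break in the TSNs, making TSNs 5..8 a
	 * complete message.
	 */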
	/*
	 * We walk consecutive TSNs till we get a seg. with the E bit
	 */
	if (end == NULL) {
		for (hmp = dmp->b_next; hmp != NULL; hmp = hmp->b_next) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(ntsn - tsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_EBIT(qdc)) {
				end = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	if (begin == NULL || end == NULL) {
		return (NULL);
	}
	/* Got one! Remove the msg from the list */
	if (sctp->sctp_uo_frags == begin) {
		ASSERT(begin->b_prev == NULL);
		sctp->sctp_uo_frags = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = NULL;
	} else {
		begin->b_prev->b_next = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = begin->b_prev;
	}
	begin->b_prev = NULL;
	end->b_next = NULL;

	/*
	 * Null out b_next and b_prev and chain using b_cont.
	 */
	dmp = end = begin;
	hmp = begin->b_next;
	*dc = (sctp_data_hdr_t *)begin->b_rptr;
	begin->b_next = NULL;
	while (hmp != NULL) {
		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
		hmp->b_rptr = (uchar_t *)(qdc + 1);
		end = hmp->b_next;
		dmp->b_cont = hmp;
		dmp = hmp;

		if (end != NULL)
			hmp->b_next = NULL;
		hmp->b_prev = NULL;
		hmp = end;
	}
	BUMP_LOCAL(sctp->sctp_reassmsgs);
#ifdef DEBUG
	mp1 = begin;
	while (mp1 != NULL) {
		ASSERT(mp1->b_next == NULL);
		ASSERT(mp1->b_prev == NULL);
		mp1 = mp1->b_cont;
	}
#endif
	return (begin);
}

/*
 * Try partial delivery.
 */
static mblk_t *
sctp_try_partial_delivery(sctp_t *sctp, mblk_t *hmp, sctp_reass_t *srp,
    sctp_data_hdr_t **dc)
{
	mblk_t *mp;
	mblk_t *dmp;
	mblk_t *qmp;
	mblk_t *prev;
	sctp_data_hdr_t *qdc;
	uint32_t tsn;

	ASSERT(DB_TYPE(hmp) == M_CTL);

	dprint(4, ("trypartial: got=%d, needed=%d\n",
	    (int)(srp->got), (int)(srp->needed)));

	mp = hmp->b_cont;
	qdc = (sctp_data_hdr_t *)mp->b_rptr;

	ASSERT(SCTP_DATA_GET_BBIT(qdc) && srp->hasBchunk);

	tsn = ntohl(qdc->sdh_tsn) + 1;

	/*
	 * This loop has two exit conditions: the
	 * end of received chunks has been reached, or
	 * there is a break in the sequence. We want
	 * to chop the reassembly list as follows (the
	 * numbers are TSNs):
	 *	10 -> 11 ->	(end of chunks)
	 *	10 -> 11 -> |	13 (break in sequence)
	 */
	prev = mp;
	mp = mp->b_cont;
	while (mp != NULL) {
		qdc = (sctp_data_hdr_t *)mp->b_rptr;
		if (ntohl(qdc->sdh_tsn) != tsn)
			break;
		prev = mp;
		mp = mp->b_cont;
		tsn++;
	}
	/*
	 * We are sending all the fragments upstream, so we have to retain
	 * the srp info for further fragments.
	 */
	if (mp == NULL) {
		dmp = hmp->b_cont;
		hmp->b_cont = NULL;
		srp->nexttsn = tsn;
		srp->msglen = 0;
		srp->needed = 0;
		srp->got = 0;
		srp->tail = NULL;
	} else {
		/*
		 * There is a gap, then some ordered frags that are not
		 * the next deliverable tsn. When the next deliverable
		 * frag arrives it will be set as the new list head in
		 * sctp_data_frag() by setting the B bit.
		 */
		dmp = hmp->b_cont;
		hmp->b_cont = mp;
	}
	srp->hasBchunk = B_FALSE;
	/*
	 * mp now points at the last chunk in the sequence,
	 * and prev points to mp's previous in the list.
	 * We chop the list at prev. Subsequent fragment
	 * deliveries will follow the normal reassembly
	 * path unless they too exceed the sctp_pd_point.
	 */
	prev->b_cont = NULL;
	srp->partial_delivered = B_TRUE;

	dprint(4, ("trypartial: got some, got=%d, needed=%d\n",
	    (int)(srp->got), (int)(srp->needed)));

	/*
	 * Adjust all mblk's except the lead so their rptr's point to the
	 * payload. sctp_data_chunk() will need to process the lead's
	 * data chunk section, so leave its rptr pointing at the data chunk.
	 */
	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
	if (srp->tail != NULL) {
		srp->got--;
		ASSERT(srp->got != 0);
		if (srp->needed != 0) {
			srp->needed--;
			ASSERT(srp->needed != 0);
		}
		srp->msglen -= ntohs((*dc)->sdh_len);
	}
	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		qmp->b_rptr = (uchar_t *)(qdc + 1);

		/*
		 * Deduct the balance from got and needed here, now that
		 * we know we are actually delivering these data.
		 */
		if (srp->tail != NULL) {
			srp->got--;
			ASSERT(srp->got != 0);
			if (srp->needed != 0) {
				srp->needed--;
				ASSERT(srp->needed != 0);
			}
			srp->msglen -= ntohs(qdc->sdh_len);
		}
	}
	ASSERT(srp->msglen == 0);
	BUMP_LOCAL(sctp->sctp_reassmsgs);

	return (dmp);
}

/*
 * Handle received fragments for ordered delivery to upper layer protocol.
 * Manage the per message reassembly queue, and if this fragment completes
 * reassembly of the message, or qualifies the already reassembled data
 * for partial delivery, prepare the message for delivery upstream.
 *
 * tpfinished in the caller remains set only when the incoming fragment
 * has completed the reassembly of the message associated with its ssn.
 */
static mblk_t *
sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error,
    sctp_instr_t *sip, boolean_t *tpfinished)
{
	mblk_t *reassq_curr, *reassq_next, *reassq_prev;
	mblk_t *new_reassq;
	mblk_t *qmp;
	mblk_t *first_mp;
	sctp_reass_t *srp;
	sctp_data_hdr_t *qdc;
	sctp_data_hdr_t *bdc;
	sctp_data_hdr_t *edc;
	uint32_t tsn;
	uint16_t fraglen = 0;

	*error = 0;

	/*
	 * Find the reassembly queue for this data chunk; if none
	 * yet exists, a new per message queue will be created and
	 * appended to the end of the list of per message queues.
	 *
	 * sip points to the sctp_instr_t representing instream messages
	 * as yet undelivered for this stream (sid) of the association.
	 */
	reassq_next = reassq_prev = sip->istr_reass;
	for (; reassq_next != NULL; reassq_next = reassq_next->b_next) {
		srp = (sctp_reass_t *)DB_BASE(reassq_next);
		if (ntohs((*dc)->sdh_ssn) == srp->ssn) {
			reassq_curr = reassq_next;
			goto foundit;
		} else if (SSN_GT(srp->ssn, ntohs((*dc)->sdh_ssn)))
			break;
		reassq_prev = reassq_next;
	}
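	/*
	 * Sketch of the per-stream reassembly state managed here (the ssn
	 * and TSN values are hypothetical).  One M_CTL heads each message
	 * still in reassembly; its sctp_reass_t bookkeeping lives in the
	 * dblk base (DB_BASE) and its fragments are chained off b_cont in
	 * TSN order:
	 *
	 *	sip->istr_reass -> M_CTL (ssn 5) <-b_next/b_prev-> M_CTL (ssn 7)
	 *	                      |                               |
	 *	                    b_cont: TSNs 17,18,...          b_cont: TSN 23
	 */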
	/*
	 * First fragment of this message received, allocate a M_CTL that
	 * will head the reassembly queue for this message. The message
	 * and all its fragments are identified by having the same ssn.
	 *
	 * Arriving fragments will be inserted in tsn order on the
	 * reassembly queue for this message (ssn), linked by b_cont.
	 */
	if ((new_reassq = allocb(sizeof (*srp), BPRI_MED)) == NULL) {
		*error = ENOMEM;
		return (NULL);
	}
	DB_TYPE(new_reassq) = M_CTL;
	srp = (sctp_reass_t *)DB_BASE(new_reassq);
	new_reassq->b_cont = dmp;

	/*
	 * All per-ssn reassembly queues (one for each message) on
	 * this stream are doubly linked by b_next/b_prev back to the
	 * instr_reass of the instream structure associated with this
	 * stream id (sip is initialized as sctp->sctp_instr[sid]).
	 * Insert the new reassembly queue in the correct (ssn) order.
	 */
	if (reassq_next != NULL) {
		if (sip->istr_reass == reassq_next) {
			/* head insertion */
			sip->istr_reass = new_reassq;
			new_reassq->b_next = reassq_next;
			new_reassq->b_prev = NULL;
			reassq_next->b_prev = new_reassq;
		} else {
			/* mid queue insertion */
			reassq_prev->b_next = new_reassq;
			new_reassq->b_prev = reassq_prev;
			new_reassq->b_next = reassq_next;
			reassq_next->b_prev = new_reassq;
		}
	} else {
		/* place new reassembly queue at the end */
		if (sip->istr_reass == NULL) {
			sip->istr_reass = new_reassq;
			new_reassq->b_prev = NULL;
		} else {
			reassq_prev->b_next = new_reassq;
			new_reassq->b_prev = reassq_prev;
		}
		new_reassq->b_next = NULL;
	}
	srp->partial_delivered = B_FALSE;
	srp->ssn = ntohs((*dc)->sdh_ssn);
	srp->hasBchunk = B_FALSE;
empty_srp:
	srp->needed = 0;
	srp->got = 1;
	/* tail is always the highest tsn on the reassembly queue for this ssn */
	srp->tail = dmp;
	if (SCTP_DATA_GET_BBIT(*dc)) {
		/* Incoming frag is flagged as the beginning of message */
		srp->msglen = ntohs((*dc)->sdh_len);
		srp->nexttsn = ntohl((*dc)->sdh_tsn) + 1;
		srp->hasBchunk = B_TRUE;
	} else if (srp->partial_delivered &&
	    srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
		/*
		 * The real beginning fragment of the message was already
		 * delivered upward, so this is the earliest frag expected.
		 * Fake the B-bit then see if this frag also completes the
		 * message.
		 */
		SCTP_DATA_SET_BBIT(*dc);
		srp->hasBchunk = B_TRUE;
		srp->msglen = ntohs((*dc)->sdh_len);
		if (SCTP_DATA_GET_EBIT(*dc)) {
			/* This frag is marked as the end of message */
			srp->needed = 1;
			/* Got all fragments of this message now */
			goto frag_done;
		}
		srp->nexttsn++;
	}

	/* The only fragment of this message currently queued */
	*tpfinished = B_FALSE;
	return (NULL);
foundit:
	/*
	 * This message already has a reassembly queue. Insert the new frag
	 * in the reassembly queue. Try the tail first, on the assumption
	 * that the fragments are arriving in order.
	 */
	qmp = srp->tail;

	/*
	 * A NULL tail means all existing fragments of the message have
	 * been entirely consumed during a partial delivery.
	 */
	if (qmp == NULL) {
		ASSERT(srp->got == 0 && srp->needed == 0 &&
		    srp->partial_delivered);
		ASSERT(reassq_curr->b_cont == NULL);
		reassq_curr->b_cont = dmp;
		goto empty_srp;
	} else {
		/*
		 * If partial delivery did take place but the next arriving
		 * fragment was not the next to be delivered, or partial
		 * delivery broke off due to a gap, fragments remain on the
		 * tail. The next fragment due to be delivered still has to
		 * be set as the new head of list upon arrival. Fake B-bit
		 * on that frag then see if it also completes the message.
		 */
		if (srp->partial_delivered &&
		    srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
			SCTP_DATA_SET_BBIT(*dc);
			srp->hasBchunk = B_TRUE;
			if (SCTP_DATA_GET_EBIT(*dc)) {
				/* Got all fragments of this message now */
				goto frag_done;
			}
		}
	}

	/* grab the frag header of already queued tail frag for comparison */
	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
	ASSERT(qmp->b_cont == NULL);

	/* check if the frag goes on the tail in order */
	if (SEQ_GT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
		qmp->b_cont = dmp;
		srp->tail = dmp;
		dmp->b_cont = NULL;
		if (srp->hasBchunk && srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
			srp->msglen += ntohs((*dc)->sdh_len);
			srp->nexttsn++;
		}
		goto inserted;
	}

	/* Next check if we should insert this frag at the beginning */
	qmp = reassq_curr->b_cont;
	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
	if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
		dmp->b_cont = qmp;
		reassq_curr->b_cont = dmp;
		if (SCTP_DATA_GET_BBIT(*dc)) {
			srp->hasBchunk = B_TRUE;
			srp->nexttsn = ntohl((*dc)->sdh_tsn);
		}
		goto preinserted;
	}

	/* Insert this frag in its correct order in the middle */
	for (;;) {
		/* Tail check above should have caught this */
		ASSERT(qmp->b_cont != NULL);

		qdc = (sctp_data_hdr_t *)qmp->b_cont->b_rptr;
		if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
			/* insert here */
			dmp->b_cont = qmp->b_cont;
			qmp->b_cont = dmp;
			break;
		}
		qmp = qmp->b_cont;
	}
preinserted:
	/*
	 * We need the head of the message and it must be due to be
	 * delivered; otherwise skip the recalculation of the message
	 * length below.
	 */
	if (!srp->hasBchunk || ntohl((*dc)->sdh_tsn) != srp->nexttsn)
		goto inserted;
	/*
	 * fraglen contains the length of consecutive chunks of fragments,
	 * starting from the chunk we just inserted.
	 */
	tsn = srp->nexttsn;
	for (qmp = dmp; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		if (tsn != ntohl(qdc->sdh_tsn))
			break;
		fraglen += ntohs(qdc->sdh_len);
		tsn++;
	}
	srp->nexttsn = tsn;
	srp->msglen += fraglen;
inserted:
	srp->got++;
	first_mp = reassq_curr->b_cont;
	/* Prior to this frag either the beginning or end frag was missing */
	if (srp->needed == 0) {
		/* used to check if we have the first and last fragments */
		bdc = (sctp_data_hdr_t *)first_mp->b_rptr;
		edc = (sctp_data_hdr_t *)srp->tail->b_rptr;

		/*
		 * If we now have both the beginning and the end of the
		 * message, calculate how many fragments are in the complete
		 * message.
		 */
		if (SCTP_DATA_GET_BBIT(bdc) && SCTP_DATA_GET_EBIT(edc)) {
			srp->needed = ntohl(edc->sdh_tsn) -
			    ntohl(bdc->sdh_tsn) + 1;
		}
	}
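	/*
	 * E.g. (hypothetical TSNs), if the B-bit fragment carries TSN 100
	 * and the E-bit fragment carries TSN 104, the complete message
	 * consists of needed = 104 - 100 + 1 = 5 fragments.
	 */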
	/*
	 * Try partial delivery if the message length has exceeded the
	 * partial delivery point. Only do this if we can immediately
	 * deliver the partially assembled message, and only partially
	 * deliver one message at a time (i.e. messages cannot be
	 * intermixed arriving at the upper layer).
	 * sctp_try_partial_delivery() will return a message consisting
	 * of only consecutive fragments.
	 */
	if (srp->needed != srp->got) {
		/* we don't have the full message yet */
		dmp = NULL;
		if (ntohl((*dc)->sdh_tsn) <= sctp->sctp_ftsn &&
		    srp->msglen >= sctp->sctp_pd_point &&
		    srp->ssn == sip->nextseq) {
			dmp = sctp_try_partial_delivery(sctp, reassq_curr,
			    srp, dc);
		}
		*tpfinished = B_FALSE;
		/*
		 * NULL unless a segment of the message has now qualified
		 * for partial delivery and has been prepared for delivery
		 * by sctp_try_partial_delivery().
		 */
		return (dmp);
	}
frag_done:
	/*
	 * Reassembly complete for this message; prepare the data for
	 * delivery. First unlink the reassembly queue for this ssn from
	 * the list of messages in reassembly.
	 */
	if (sip->istr_reass == reassq_curr) {
		sip->istr_reass = reassq_curr->b_next;
		if (reassq_curr->b_next)
			reassq_curr->b_next->b_prev = NULL;
	} else {
		ASSERT(reassq_curr->b_prev != NULL);
		reassq_curr->b_prev->b_next = reassq_curr->b_next;
		if (reassq_curr->b_next)
			reassq_curr->b_next->b_prev = reassq_curr->b_prev;
	}

	/*
	 * Need to clean up b_prev and b_next as freeb() will
	 * ASSERT that they are unused.
	 */
	reassq_curr->b_next = NULL;
	reassq_curr->b_prev = NULL;

	dmp = reassq_curr;
	/* point to the head of the reassembled data message */
	dmp = dmp->b_cont;
	reassq_curr->b_cont = NULL;
	freeb(reassq_curr);
	/* Tell our caller that we are returning a complete message. */
	*tpfinished = B_TRUE;

	/*
	 * Adjust all mblk's except the lead so their rptr's point to the
	 * payload. sctp_data_chunk() will need to process the lead's data
	 * chunk section, so leave its rptr pointing at the data chunk
	 * header.
	 */
	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		qmp->b_rptr = (uchar_t *)(qdc + 1);
	}
	BUMP_LOCAL(sctp->sctp_reassmsgs);

	return (dmp);
}

static void
sctp_add_dup(uint32_t tsn, mblk_t **dups)
{
	mblk_t *mp;
	size_t bsize = SCTP_DUP_MBLK_SZ * sizeof (tsn);

	if (dups == NULL) {
		return;
	}

	/* first time? */
	if (*dups == NULL) {
		*dups = allocb(bsize, BPRI_MED);
		if (*dups == NULL) {
			return;
		}
	}

	mp = *dups;
	if ((mp->b_wptr - mp->b_rptr) >= bsize) {
		/* maximum reached */
		return;
	}

	/* add the duplicate tsn */
	bcopy(&tsn, mp->b_wptr, sizeof (tsn));
	mp->b_wptr += sizeof (tsn);
	ASSERT((mp->b_wptr - mp->b_rptr) <= bsize);
}
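/*
 * Example of the TSN accounting done by sctp_data_chunk() below
 * (hypothetical TSNs): with sctp_ftsn == 10, an arriving TSN 10 advances
 * sctp_ftsn to 11; an arriving TSN 13 instead records the gap [13-13]
 * via sctp_ack_add() and forces an immediate SACK; an arriving TSN 9 is
 * a duplicate and is merely noted via sctp_add_dup().
 */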
/*
 * All incoming sctp data, complete messages and fragments are handled by
 * this function. Unless the U-bit is set in the data chunk it will be
 * delivered in order or queued until an in-order delivery can be made.
 */
static void
sctp_data_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *mp, mblk_t **dups,
    sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
{
	sctp_data_hdr_t *dc;
	mblk_t *dmp, *pmp;
	sctp_instr_t *instr;
	int ubit;
	int sid;
	int isfrag;
	uint16_t ssn;
	uint32_t oftsn;
	boolean_t can_deliver = B_TRUE;
	uint32_t tsn;
	int dlen;
	boolean_t tpfinished = B_TRUE;
	int32_t new_rwnd;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	int error;

	/* The following are used multiple times, so we inline them */
#define	SCTP_ACK_IT(sctp, tsn)						\
	if (tsn == sctp->sctp_ftsn) {					\
		dprint(2, ("data_chunk: acking next %x\n", tsn));	\
		(sctp)->sctp_ftsn++;					\
		if ((sctp)->sctp_sack_gaps > 0)				\
			(sctp)->sctp_force_sack = 1;			\
	} else if (SEQ_GT(tsn, sctp->sctp_ftsn)) {			\
		/* Got a gap; record it */				\
		BUMP_LOCAL(sctp->sctp_outseqtsns);			\
		dprint(2, ("data_chunk: acking gap %x\n", tsn));	\
		sctp_ack_add(&sctp->sctp_sack_info, tsn,		\
		    &sctp->sctp_sack_gaps);				\
		sctp->sctp_force_sack = 1;				\
	}

	dmp = NULL;

	dc = (sctp_data_hdr_t *)ch;
	tsn = ntohl(dc->sdh_tsn);

	dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn));

	/* Check for duplicates */
	if (SEQ_LT(tsn, sctp->sctp_ftsn)) {
		dprint(4, ("sctp_data_chunk: dropping duplicate\n"));
		BUMP_LOCAL(sctp->sctp_idupchunks);
		sctp->sctp_force_sack = 1;
		sctp_add_dup(dc->sdh_tsn, dups);
		return;
	}

	/* Check for dups of sack'ed data */
	if (sctp->sctp_sack_info != NULL) {
		sctp_set_t *sp;

		for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
			if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) {
				dprint(4,
				    ("sctp_data_chunk: dropping dup > "
				    "cumtsn\n"));
				BUMP_LOCAL(sctp->sctp_idupchunks);
				sctp->sctp_force_sack = 1;
				sctp_add_dup(dc->sdh_tsn, dups);
				return;
			}
		}
	}

	/* We can no longer deliver anything up, but still need to handle it. */
	if (SCTP_IS_DETACHED(sctp)) {
		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
		can_deliver = B_FALSE;
	}

	dlen = ntohs(dc->sdh_len) - sizeof (*dc);

	/*
	 * Check for buffer space. Note if this is the next expected TSN
	 * we have to take it to avoid deadlock because we cannot deliver
	 * later queued TSNs and thus clear buffer space without it.
	 * We drop anything that is purely zero window probe data here.
	 */
	if ((sctp->sctp_rwnd - sctp->sctp_rxqueued < dlen) &&
	    (tsn != sctp->sctp_ftsn || sctp->sctp_rwnd == 0)) {
		/* Drop and SACK, but don't advance the cumulative TSN. */
		sctp->sctp_force_sack = 1;
		dprint(0, ("sctp_data_chunk: exceed rwnd %d rxqueued %d "
		    "dlen %d ssn %d tsn %x\n", sctp->sctp_rwnd,
		    sctp->sctp_rxqueued, dlen, ntohs(dc->sdh_ssn),
		    ntohl(dc->sdh_tsn)));
		return;
	}

	sid = ntohs(dc->sdh_sid);

	/* Data received for a stream not negotiated for this association */
	if (sid >= sctp->sctp_num_istr) {
		sctp_bsc_t inval_parm;

		/* Will populate the CAUSE block in the ERROR chunk. */
		inval_parm.bsc_sid = dc->sdh_sid;
		/* RESERVED, ignored at the receiving end */
		inval_parm.bsc_pad = 0;

		/* ack and drop it */
		sctp_add_err(sctp, SCTP_ERR_BAD_SID, (void *)&inval_parm,
		    sizeof (sctp_bsc_t), fp);
		SCTP_ACK_IT(sctp, tsn);
		return;
	}

	/* unordered delivery OK for this data if ubit set */
	ubit = SCTP_DATA_GET_UBIT(dc);
	ASSERT(sctp->sctp_instr != NULL);

	/* select per stream structure for this stream from the array */
	instr = &sctp->sctp_instr[sid];
	/* Initialize the stream, if not yet used */
	if (instr->sctp == NULL)
		instr->sctp = sctp;

	/* Begin and End bit set would mean a complete message */
	isfrag = !(SCTP_DATA_GET_BBIT(dc) && SCTP_DATA_GET_EBIT(dc));

	/* The ssn of this sctp message and of any fragments in it */
	ssn = ntohs(dc->sdh_ssn);

	dmp = dupb(mp);
	if (dmp == NULL) {
		/* drop it and don't ack, let the peer retransmit */
		return;
	}
	/*
	 * Past header and payload, note: the underlying buffer may
	 * contain further chunks from the same incoming IP packet,
	 * if so db_ref will be greater than one.
	 */
	dmp->b_wptr = (uchar_t *)ch + ntohs(ch->sch_len);

	sctp->sctp_rxqueued += dlen;

	oftsn = sctp->sctp_ftsn;

	if (isfrag) {

		error = 0;
		/* fragmented data chunk */
		dmp->b_rptr = (uchar_t *)dc;
		if (ubit) {
			/* prepare data for unordered delivery */
			dmp = sctp_uodata_frag(sctp, dmp, &dc);
#if DEBUG
			if (dmp != NULL) {
				ASSERT(instr ==
				    &sctp->sctp_instr[sid]);
			}
#endif
		} else {
			/*
			 * Assemble fragments and queue for ordered delivery;
			 * dmp returned is NULL or the head of a complete or
			 * "partial delivery" message. Any returned message
			 * and all its fragments will have the same ssn as the
			 * input fragment currently being handled.
			 */
			dmp = sctp_data_frag(sctp, dmp, &dc, &error, instr,
			    &tpfinished);
		}
		if (error == ENOMEM) {
			/* back out the adjustment made earlier */
			sctp->sctp_rxqueued -= dlen;
			/*
			 * Don't ack the segment,
			 * the peer will retransmit.
			 */
			return;
		}

		if (dmp == NULL) {
			/*
			 * The frag has been queued for later in-order
			 * delivery, but the cumulative TSN may need to
			 * advance, so we also need to perform the gap ack
			 * checks at the done label.
			 */
			SCTP_ACK_IT(sctp, tsn);
			DTRACE_PROBE4(sctp_data_frag_queued, sctp_t *, sctp,
			    int, sid, int, tsn, uint16_t, ssn);
			goto done;
		}
	}

	/*
	 * Unless the message is the next for delivery to the ULP, queue the
	 * complete message in the correct order for ordered delivery.
	 * Note: tpfinished is true when the incoming chunk contains a complete
	 * message or is the final missing fragment which completed a message.
	 */
	if (!ubit && tpfinished && ssn != instr->nextseq) {
		/* Adjust rptr to point at the data chunk for compares */
		dmp->b_rptr = (uchar_t *)dc;

		dprint(2,
		    ("data_chunk: inserted %x in pq (ssn %d expected %d)\n",
		    ntohl(dc->sdh_tsn), (int)(ssn), (int)(instr->nextseq)));

		if (instr->istr_msgs == NULL) {
			instr->istr_msgs = dmp;
			ASSERT(dmp->b_prev == NULL && dmp->b_next == NULL);
		} else {
			mblk_t *imblk = instr->istr_msgs;
			sctp_data_hdr_t *idc;

			/*
			 * XXX Need to take sequence wraps into account,
			 * ... and a more efficient insertion algo.
			 */
			for (;;) {
				idc = (sctp_data_hdr_t *)imblk->b_rptr;
				if (SSN_GT(ntohs(idc->sdh_ssn),
				    ntohs(dc->sdh_ssn))) {
					if (instr->istr_msgs == imblk) {
						instr->istr_msgs = dmp;
						dmp->b_next = imblk;
						imblk->b_prev = dmp;
					} else {
						ASSERT(imblk->b_prev != NULL);
						imblk->b_prev->b_next = dmp;
						dmp->b_prev = imblk->b_prev;
						imblk->b_prev = dmp;
						dmp->b_next = imblk;
					}
					break;
				}
				if (imblk->b_next == NULL) {
					imblk->b_next = dmp;
					dmp->b_prev = imblk;
					break;
				}
				imblk = imblk->b_next;
			}
		}
		(instr->istr_nmsgs)++;
		(sctp->sctp_istr_nmsgs)++;
		SCTP_ACK_IT(sctp, tsn);
		DTRACE_PROBE4(sctp_pqueue_completemsg, sctp_t *, sctp,
		    int, sid, int, tsn, uint16_t, ssn);
		return;
	}

	/*
	 * Deliver the data directly. Recalculate dlen now since
	 * we may have just reassembled this data.
	 */
	dlen = dmp->b_wptr - (uchar_t *)dc - sizeof (*dc);
	for (pmp = dmp->b_cont; pmp != NULL; pmp = pmp->b_cont)
		dlen += MBLKL(pmp);
	ASSERT(sctp->sctp_rxqueued >= dlen);

	/* Deliver the message. */
	sctp->sctp_rxqueued -= dlen;

	if (can_deliver) {

		/* step past header to the payload */
		dmp->b_rptr = (uchar_t *)(dc + 1);
		if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
		    ipp, ira) == 0) {
			dprint(1, ("sctp_data_chunk: delivering %lu bytes\n",
			    msgdsize(dmp)));
			sctp->sctp_rwnd -= dlen;
			/*
			 * We overload the meaning of b_flag for SCTP sockfs
			 * internal use, to advise sockfs of partial delivery
			 * semantics.
			 */
			dmp->b_flag = tpfinished ? 0 : SCTP_PARTIAL_DATA;
			new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, dmp,
			    msgdsize(dmp), 0, &error, NULL);
			/*
			 * Since we always deliver the next TSN data chunk,
			 * we may buffer a little more than allowed. In
			 * that case, just mark the window as 0.
			 */
			if (new_rwnd < 0)
				sctp->sctp_rwnd = 0;
			else if (new_rwnd > sctp->sctp_rwnd)
				sctp->sctp_rwnd = new_rwnd;
			SCTP_ACK_IT(sctp, tsn);
		} else {
			/* No memory, don't ack; the peer will retransmit. */
			freemsg(dmp);
			return;
		}
	} else {
		/* Closed above, ack to peer and free the data */
		freemsg(dmp);
		SCTP_ACK_IT(sctp, tsn);
	}

	/*
	 * Data now enqueued; it may already have been processed and freed
	 * by the ULP (or we may have just freed it above, if we could not
	 * deliver), so we must not reference it (this is why we saved the
	 * ssn and ubit earlier).
	 */
	if (ubit != 0) {
		BUMP_LOCAL(sctp->sctp_iudchunks);
		goto done;
	}
	BUMP_LOCAL(sctp->sctp_idchunks);

	/*
	 * There was a partial delivery and it has not finished;
	 * don't pull anything from the pqueues or increment the
	 * nextseq. This msg must complete before starting on
	 * the next ssn and the partial message must have the
	 * same ssn as the next expected message.
	 */
	if (!tpfinished) {
		DTRACE_PROBE4(sctp_partial_delivery, sctp_t *, sctp,
		    int, sid, int, tsn, uint16_t, ssn);
		/*
		 * Verify the partial delivery is part of the
		 * message expected for ordered delivery.
		 */
		if (ssn != instr->nextseq) {
			DTRACE_PROBE4(sctp_partial_delivery_error,
			    sctp_t *, sctp, int, sid, int, tsn,
			    uint16_t, ssn);
			cmn_err(CE_WARN, "sctp partial"
			    " delivery error, sctp 0x%p"
			    " sid = 0x%x ssn != nextseq"
			    " tsn 0x%x ftsn 0x%x"
			    " ssn 0x%x nextseq 0x%x",
			    (void *)sctp, sid,
			    tsn, sctp->sctp_ftsn, ssn,
			    instr->nextseq);
		}

		ASSERT(ssn == instr->nextseq);
		goto done;
	}

	if (ssn != instr->nextseq) {
		DTRACE_PROBE4(sctp_inorder_delivery_error,
		    sctp_t *, sctp, int, sid, int, tsn,
		    uint16_t, ssn);
		cmn_err(CE_WARN, "sctp in-order delivery error, sctp 0x%p "
		    "sid = 0x%x ssn != nextseq ssn 0x%x nextseq 0x%x",
		    (void *)sctp, sid, ssn, instr->nextseq);
	}

	ASSERT(ssn == instr->nextseq);

	DTRACE_PROBE4(sctp_deliver_completemsg, sctp_t *, sctp, int, sid,
	    int, tsn, uint16_t, ssn);

	instr->nextseq = ssn + 1;

	/*
	 * Deliver any successive data chunks waiting in the instr pqueue
	 * for the data just sent up.
	 */
	while (instr->istr_nmsgs > 0) {
		dmp = (mblk_t *)instr->istr_msgs;
		dc = (sctp_data_hdr_t *)dmp->b_rptr;
		ssn = ntohs(dc->sdh_ssn);
		tsn = ntohl(dc->sdh_tsn);
		/* Stop at the first gap in the sequence */
		if (ssn != instr->nextseq)
			break;

		DTRACE_PROBE4(sctp_deliver_pqueuedmsg, sctp_t *, sctp,
		    int, sid, int, tsn, uint16_t, ssn);
		/*
		 * Ready to deliver all data before the gap
		 * to the upper layer.
		 */
		(instr->istr_nmsgs)--;
		(instr->nextseq)++;
		(sctp->sctp_istr_nmsgs)--;

		instr->istr_msgs = instr->istr_msgs->b_next;
		if (instr->istr_msgs != NULL)
			instr->istr_msgs->b_prev = NULL;
		dmp->b_next = dmp->b_prev = NULL;

		dprint(2, ("data_chunk: pulling %x from pq (ssn %d)\n",
		    ntohl(dc->sdh_tsn), (int)ssn));

		/*
		 * Composite messages indicate this chunk was reassembled;
		 * each b_cont represents another TSN. Follow the chain to
		 * reach the frag with the last tsn in order to advance ftsn
		 * shortly by calling SCTP_ACK_IT().
		 */
		dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
		for (pmp = dmp->b_cont; pmp; pmp = pmp->b_cont)
			dlen += MBLKL(pmp);

		ASSERT(sctp->sctp_rxqueued >= dlen);

		sctp->sctp_rxqueued -= dlen;
		if (can_deliver) {
			dmp->b_rptr = (uchar_t *)(dc + 1);
			if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
			    ipp, ira) == 0) {
				dprint(1, ("sctp_data_chunk: delivering %lu "
				    "bytes\n", msgdsize(dmp)));
				sctp->sctp_rwnd -= dlen;
				/*
				 * Meaning of b_flag overloaded for SCTP sockfs
				 * internal use, advise sockfs of partial
				 * delivery semantics.
				 */
				dmp->b_flag = tpfinished ?
				    0 : SCTP_PARTIAL_DATA;
				new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd,
				    dmp, msgdsize(dmp), 0, &error, NULL);
				if (new_rwnd < 0)
					sctp->sctp_rwnd = 0;
				else if (new_rwnd > sctp->sctp_rwnd)
					sctp->sctp_rwnd = new_rwnd;
				SCTP_ACK_IT(sctp, tsn);
			} else {
				/* don't ack, the peer will retransmit */
				freemsg(dmp);
				return;
			}
		} else {
			/* Closed above, ack and free the data */
			freemsg(dmp);
			SCTP_ACK_IT(sctp, tsn);
		}
	}

done:

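	/*
	 * E.g. (hypothetical TSNs): if ftsn has just advanced to 12 and the
	 * first recorded gap set is [12-15], the checks below move ftsn to
	 * 16 and sctp_ack_rem() then discards the now-covered set(s).
	 */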
	/*
	 * If there are gap reports pending, check if advancing
	 * the ftsn here closes a gap. If so, we can advance
	 * ftsn to the end of the set.
	 */
	if (sctp->sctp_sack_info != NULL &&
	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
	}
	/*
	 * If ftsn has moved forward, maybe we can remove gap reports.
	 * NB: dmp may now be NULL, so don't dereference it here.
	 */
	if (oftsn != sctp->sctp_ftsn && sctp->sctp_sack_info != NULL) {
		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
		    &sctp->sctp_sack_gaps);
		dprint(2, ("data_chunk: removed acks before %x (num=%d)\n",
		    sctp->sctp_ftsn - 1, sctp->sctp_sack_gaps));
	}

#ifdef DEBUG
	if (sctp->sctp_sack_info != NULL) {
		ASSERT(sctp->sctp_ftsn != sctp->sctp_sack_info->begin);
	}
#endif

#undef SCTP_ACK_IT
}

void
sctp_fill_sack(sctp_t *sctp, unsigned char *dst, int sacklen)
{
	sctp_chunk_hdr_t *sch;
	sctp_sack_chunk_t *sc;
	sctp_sack_frag_t *sf;
	uint16_t num_gaps = sctp->sctp_sack_gaps;
	sctp_set_t *sp;

	/* Chunk hdr */
	sch = (sctp_chunk_hdr_t *)dst;
	sch->sch_id = CHUNK_SACK;
	sch->sch_flags = 0;
	sch->sch_len = htons(sacklen);

	/* SACK chunk */
	sctp->sctp_lastacked = sctp->sctp_ftsn - 1;

	sc = (sctp_sack_chunk_t *)(sch + 1);
	sc->ssc_cumtsn = htonl(sctp->sctp_lastacked);
	if (sctp->sctp_rxqueued < sctp->sctp_rwnd) {
		sc->ssc_a_rwnd = htonl(sctp->sctp_rwnd - sctp->sctp_rxqueued);
	} else {
		sc->ssc_a_rwnd = 0;
	}
	sc->ssc_numfrags = htons(num_gaps);
	sc->ssc_numdups = 0;

	/* lay in gap reports */
	sf = (sctp_sack_frag_t *)(sc + 1);
	for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
		uint16_t offset;

		/* start */
		if (sp->begin > sctp->sctp_lastacked) {
			offset = (uint16_t)(sp->begin - sctp->sctp_lastacked);
		} else {
			/* sequence number wrap */
			offset = (uint16_t)(UINT32_MAX - sctp->sctp_lastacked +
			    sp->begin);
		}
		sf->ssf_start = htons(offset);

		/* end */
		if (sp->end >= sp->begin) {
			offset += (uint16_t)(sp->end - sp->begin);
		} else {
			/* sequence number wrap */
			offset += (uint16_t)(UINT32_MAX - sp->begin + sp->end);
		}
		sf->ssf_end = htons(offset);

		sf++;
		/* This is just for debugging (a la the following assertion) */
		num_gaps--;
	}

	ASSERT(num_gaps == 0);

	/* If the SACK timer is running, stop it */
	if (sctp->sctp_ack_timer_running) {
		sctp_timer_stop(sctp->sctp_ack_mp);
		sctp->sctp_ack_timer_running = B_FALSE;
	}

	BUMP_LOCAL(sctp->sctp_obchunks);
	BUMP_LOCAL(sctp->sctp_osacks);
}
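/*
 * Gap report offsets in the SACK are relative to the cumulative TSN.
 * E.g. (hypothetical TSNs): with cumulative TSN 100 and a recorded set
 * [103-105], sctp_fill_sack() above emits ssf_start = 3 and ssf_end = 5.
 */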
mblk_t *
sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups)
{
	mblk_t *smp;
	size_t slen;
	sctp_chunk_hdr_t *sch;
	sctp_sack_chunk_t *sc;
	int32_t acks_max;
	sctp_stack_t *sctps = sctp->sctp_sctps;
	uint32_t dups_len;
	sctp_faddr_t *fp;

	ASSERT(sendto != NULL);

	if (sctp->sctp_force_sack) {
		sctp->sctp_force_sack = 0;
		goto checks_done;
	}

	acks_max = sctps->sctps_deferred_acks_max;
	if (sctp->sctp_state == SCTPS_ESTABLISHED) {
		if (sctp->sctp_sack_toggle < acks_max) {
			/* no need to SACK right now */
			dprint(2, ("sctp_make_sack: %p no sack (toggle)\n",
			    (void *)sctp));
			return (NULL);
		} else if (sctp->sctp_sack_toggle >= acks_max) {
			sctp->sctp_sack_toggle = 0;
		}
	}

	if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
		dprint(2, ("sctp_make_sack: %p no sack (already)\n",
		    (void *)sctp));
		return (NULL);
	}

checks_done:
	dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1));

	if (dups != NULL)
		dups_len = MBLKL(dups);
	else
		dups_len = 0;
	slen = sizeof (*sch) + sizeof (*sc) +
	    (sizeof (sctp_sack_frag_t) * sctp->sctp_sack_gaps);

	/*
	 * If there are error chunks, check and see if we can send the
	 * SACK chunk and error chunks together in one packet. If not,
	 * send the error chunks out now.
	 */
	if (sctp->sctp_err_chunks != NULL) {
		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
		if (sctp->sctp_err_len + slen + dups_len > fp->sfa_pmss) {
			if ((smp = sctp_make_mp(sctp, fp, 0)) == NULL) {
				SCTP_KSTAT(sctps, sctp_send_err_failed);
				SCTP_KSTAT(sctps, sctp_send_sack_failed);
				freemsg(sctp->sctp_err_chunks);
				sctp->sctp_err_chunks = NULL;
				sctp->sctp_err_len = 0;
				return (NULL);
			}
			smp->b_cont = sctp->sctp_err_chunks;
			sctp_set_iplen(sctp, smp, fp->ixa);
			(void) conn_ip_output(smp, fp->ixa);
			BUMP_LOCAL(sctp->sctp_opkts);
			sctp->sctp_err_chunks = NULL;
			sctp->sctp_err_len = 0;
		}
	}
	smp = sctp_make_mp(sctp, sendto, slen);
	if (smp == NULL) {
		SCTP_KSTAT(sctps, sctp_send_sack_failed);
		return (NULL);
	}
	sch = (sctp_chunk_hdr_t *)smp->b_wptr;

	sctp_fill_sack(sctp, smp->b_wptr, slen);
	smp->b_wptr += slen;
	if (dups != NULL) {
		sc = (sctp_sack_chunk_t *)(sch + 1);
		sc->ssc_numdups = htons(MBLKL(dups) / sizeof (uint32_t));
		sch->sch_len = htons(slen + dups_len);
		smp->b_cont = dups;
	}

	if (sctp->sctp_err_chunks != NULL) {
		linkb(smp, sctp->sctp_err_chunks);
		sctp->sctp_err_chunks = NULL;
		sctp->sctp_err_len = 0;
	}
	return (smp);
}

/*
 * Check and see if we need to send a SACK chunk. If it is needed,
 * send it out. Return true if a SACK chunk is sent, false otherwise.
 */
boolean_t
sctp_sack(sctp_t *sctp, mblk_t *dups)
{
	mblk_t *smp;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	/* If we are shutting down, let send_shutdown() bundle the SACK */
	if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
		sctp_send_shutdown(sctp, 0);
	}

	ASSERT(sctp->sctp_lastdata != NULL);

	if ((smp = sctp_make_sack(sctp, sctp->sctp_lastdata, dups)) == NULL) {
		/* The caller of sctp_sack() will not free the dups mblk. */
		if (dups != NULL)
			freeb(dups);
		return (B_FALSE);
	}
	dprint(2, ("sctp_sack: sending to %p %x:%x:%x:%x\n",
	    (void *)sctp->sctp_lastdata,
	    SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));

	sctp->sctp_active = ddi_get_lbolt64();

	BUMP_MIB(&sctps->sctps_mib, sctpOutAck);

	sctp_set_iplen(sctp, smp, sctp->sctp_lastdata->ixa);
	(void) conn_ip_output(smp, sctp->sctp_lastdata->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	return (B_TRUE);
}
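/*
 * Sketch of the scenario handled below (hypothetical TSNs): a PR-SCTP
 * message spans TSNs 50..54 but only 50..52 were ever sent before the
 * message was abandoned.  The Forward TSN chunk built here advertises
 * TSN 52 (the last chunk sent), letting the peer discard its partial
 * reassembly state; the unsent chunks 53..54 are marked abandoned and
 * their payload is deducted from sctp_unsent.
 */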
This message 1956 * will be removed from the transmit list when the peer sends a SACK 1957 * back. 1958 */ 1959 int 1960 sctp_check_abandoned_msg(sctp_t *sctp, mblk_t *meta) 1961 { 1962 sctp_data_hdr_t *dh; 1963 mblk_t *nmp; 1964 mblk_t *head; 1965 int32_t unsent = 0; 1966 mblk_t *mp1 = meta->b_cont; 1967 uint32_t adv_pap = sctp->sctp_adv_pap; 1968 sctp_faddr_t *fp = sctp->sctp_current; 1969 sctp_stack_t *sctps = sctp->sctp_sctps; 1970 1971 dh = (sctp_data_hdr_t *)mp1->b_rptr; 1972 if (SEQ_GEQ(sctp->sctp_lastack_rxd, ntohl(dh->sdh_tsn))) { 1973 sctp_ftsn_set_t *sets = NULL; 1974 uint_t nsets = 0; 1975 uint32_t seglen = sizeof (uint32_t); 1976 boolean_t ubit = SCTP_DATA_GET_UBIT(dh); 1977 1978 while (mp1->b_next != NULL && SCTP_CHUNK_ISSENT(mp1->b_next)) 1979 mp1 = mp1->b_next; 1980 dh = (sctp_data_hdr_t *)mp1->b_rptr; 1981 sctp->sctp_adv_pap = ntohl(dh->sdh_tsn); 1982 if (!ubit && 1983 !sctp_add_ftsn_set(&sets, fp, meta, &nsets, &seglen)) { 1984 sctp->sctp_adv_pap = adv_pap; 1985 return (ENOMEM); 1986 } 1987 nmp = sctp_make_ftsn_chunk(sctp, fp, sets, nsets, seglen); 1988 sctp_free_ftsn_set(sets); 1989 if (nmp == NULL) { 1990 sctp->sctp_adv_pap = adv_pap; 1991 return (ENOMEM); 1992 } 1993 head = sctp_add_proto_hdr(sctp, fp, nmp, 0, NULL); 1994 if (head == NULL) { 1995 sctp->sctp_adv_pap = adv_pap; 1996 freemsg(nmp); 1997 SCTP_KSTAT(sctps, sctp_send_ftsn_failed); 1998 return (ENOMEM); 1999 } 2000 SCTP_MSG_SET_ABANDONED(meta); 2001 sctp_set_iplen(sctp, head, fp->ixa); 2002 (void) conn_ip_output(head, fp->ixa); 2003 BUMP_LOCAL(sctp->sctp_opkts); 2004 if (!fp->timer_running) 2005 SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto); 2006 mp1 = mp1->b_next; 2007 while (mp1 != NULL) { 2008 ASSERT(!SCTP_CHUNK_ISSENT(mp1)); 2009 ASSERT(!SCTP_CHUNK_ABANDONED(mp1)); 2010 SCTP_ABANDON_CHUNK(mp1); 2011 dh = (sctp_data_hdr_t *)mp1->b_rptr; 2012 unsent += ntohs(dh->sdh_len) - sizeof (*dh); 2013 mp1 = mp1->b_next; 2014 } 2015 ASSERT(sctp->sctp_unsent >= unsent); 2016 sctp->sctp_unsent -= unsent; 2017 /* 2018 * Update ULP the amount of queued data, which is 2019 * sent-unack'ed + unsent. 2020 */ 2021 if (!SCTP_IS_DETACHED(sctp)) 2022 SCTP_TXQ_UPDATE(sctp); 2023 return (0); 2024 } 2025 return (-1); 2026 } 2027 2028 uint32_t 2029 sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked) 2030 { 2031 mblk_t *ump, *nump, *mp = NULL; 2032 uint16_t chunklen; 2033 uint32_t xtsn; 2034 sctp_faddr_t *fp; 2035 sctp_data_hdr_t *sdc; 2036 uint32_t cumack_forward = 0; 2037 sctp_msg_hdr_t *mhdr; 2038 sctp_stack_t *sctps = sctp->sctp_sctps; 2039 2040 ump = sctp->sctp_xmit_head; 2041 2042 /* 2043 * Free messages only when they're completely acked. 2044 */ 2045 while (ump != NULL) { 2046 mhdr = (sctp_msg_hdr_t *)ump->b_rptr; 2047 for (mp = ump->b_cont; mp != NULL; mp = mp->b_next) { 2048 if (SCTP_CHUNK_ABANDONED(mp)) { 2049 ASSERT(SCTP_IS_MSG_ABANDONED(ump)); 2050 mp = NULL; 2051 break; 2052 } 2053 /* 2054 * We check for abandoned message if we are PR-SCTP 2055 * aware, if this is not the first chunk in the 2056 * message (b_cont) and if the message is marked 2057 * abandoned. 
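 *
 * Annotation (not in the original source): the mp != ump->b_cont test
 * below means only a partially-sent message takes this path. If even
 * the first chunk is unsent, nothing of the message is on the wire,
 * so there is no TSN for a Forward TSN to advance past and
 * sctp_check_abandoned_msg() would have nothing to do.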
2058 */ 2059 if (!SCTP_CHUNK_ISSENT(mp)) { 2060 if (sctp->sctp_prsctp_aware && 2061 mp != ump->b_cont && 2062 (SCTP_IS_MSG_ABANDONED(ump) || 2063 SCTP_MSG_TO_BE_ABANDONED(ump, mhdr, 2064 sctp))) { 2065 (void) sctp_check_abandoned_msg(sctp, 2066 ump); 2067 } 2068 goto cum_ack_done; 2069 } 2070 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2071 xtsn = ntohl(sdc->sdh_tsn); 2072 if (SEQ_GEQ(sctp->sctp_lastack_rxd, xtsn)) 2073 continue; 2074 if (SEQ_GEQ(tsn, xtsn)) { 2075 fp = SCTP_CHUNK_DEST(mp); 2076 chunklen = ntohs(sdc->sdh_len); 2077 2078 if (sctp->sctp_out_time != 0 && 2079 xtsn == sctp->sctp_rtt_tsn) { 2080 /* Got a new RTT measurement */ 2081 sctp_update_rtt(sctp, fp, 2082 ddi_get_lbolt64() - 2083 sctp->sctp_out_time); 2084 sctp->sctp_out_time = 0; 2085 } 2086 if (SCTP_CHUNK_ISACKED(mp)) 2087 continue; 2088 SCTP_CHUNK_SET_SACKCNT(mp, 0); 2089 SCTP_CHUNK_ACKED(mp); 2090 ASSERT(fp->suna >= chunklen); 2091 fp->suna -= chunklen; 2092 fp->acked += chunklen; 2093 cumack_forward += chunklen; 2094 ASSERT(sctp->sctp_unacked >= 2095 (chunklen - sizeof (*sdc))); 2096 sctp->sctp_unacked -= 2097 (chunklen - sizeof (*sdc)); 2098 if (fp->suna == 0) { 2099 /* all outstanding data acked */ 2100 fp->pba = 0; 2101 SCTP_FADDR_TIMER_STOP(fp); 2102 } else { 2103 SCTP_FADDR_TIMER_RESTART(sctp, fp, 2104 fp->rto); 2105 } 2106 } else { 2107 goto cum_ack_done; 2108 } 2109 } 2110 nump = ump->b_next; 2111 if (nump != NULL) 2112 nump->b_prev = NULL; 2113 if (ump == sctp->sctp_xmit_tail) 2114 sctp->sctp_xmit_tail = nump; 2115 if (SCTP_IS_MSG_ABANDONED(ump)) { 2116 BUMP_LOCAL(sctp->sctp_prsctpdrop); 2117 ump->b_next = NULL; 2118 sctp_sendfail_event(sctp, ump, 0, B_TRUE); 2119 } else { 2120 sctp_free_msg(ump); 2121 } 2122 sctp->sctp_xmit_head = ump = nump; 2123 } 2124 cum_ack_done: 2125 *first_unacked = mp; 2126 if (cumack_forward > 0) { 2127 BUMP_MIB(&sctps->sctps_mib, sctpInAck); 2128 if (SEQ_GT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) { 2129 sctp->sctp_recovery_tsn = sctp->sctp_lastack_rxd; 2130 } 2131 2132 /* 2133 * Update ULP the amount of queued data, which is 2134 * sent-unack'ed + unsent. 2135 */ 2136 if (!SCTP_IS_DETACHED(sctp)) 2137 SCTP_TXQ_UPDATE(sctp); 2138 2139 /* Time to send a shutdown? */ 2140 if (sctp->sctp_state == SCTPS_SHUTDOWN_PENDING) { 2141 sctp_send_shutdown(sctp, 0); 2142 } 2143 sctp->sctp_xmit_unacked = mp; 2144 } else { 2145 /* dup ack */ 2146 BUMP_MIB(&sctps->sctps_mib, sctpInDupAck); 2147 } 2148 sctp->sctp_lastack_rxd = tsn; 2149 if (SEQ_LT(sctp->sctp_adv_pap, sctp->sctp_lastack_rxd)) 2150 sctp->sctp_adv_pap = sctp->sctp_lastack_rxd; 2151 ASSERT(sctp->sctp_xmit_head || sctp->sctp_unacked == 0); 2152 2153 return (cumack_forward); 2154 } 2155 2156 static int 2157 sctp_set_frwnd(sctp_t *sctp, uint32_t frwnd) 2158 { 2159 uint32_t orwnd; 2160 2161 if (sctp->sctp_unacked > frwnd) { 2162 sctp->sctp_frwnd = 0; 2163 return (0); 2164 } 2165 orwnd = sctp->sctp_frwnd; 2166 sctp->sctp_frwnd = frwnd - sctp->sctp_unacked; 2167 if (orwnd < sctp->sctp_frwnd) { 2168 return (1); 2169 } else { 2170 return (0); 2171 } 2172 } 2173 2174 /* 2175 * For un-ordered messages. 2176 * Walk the sctp->sctp_uo_frag list and remove any fragments with TSN 2177 * less than/equal to ftsn. Fragments for un-ordered messages are 2178 * strictly in sequence (w.r.t TSN). 
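 *
 * Illustrative example (annotation, hypothetical numbers): if the
 * sctp_uo_frags list holds fragments with TSNs 5, 6 and 9 and a
 * Forward TSN advances the cumulative TSN to 7, the walk below frees
 * the fragments with TSNs 5 and 6, accumulating their payload bytes
 * in dlen, and returns as soon as it reaches TSN 9, since
 * SEQ_GT(9, 7) holds.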
2179 */ 2180 static int 2181 sctp_ftsn_check_uo_frag(sctp_t *sctp, uint32_t ftsn) 2182 { 2183 mblk_t *hmp; 2184 mblk_t *hmp_next; 2185 sctp_data_hdr_t *dc; 2186 int dlen = 0; 2187 2188 hmp = sctp->sctp_uo_frags; 2189 while (hmp != NULL) { 2190 hmp_next = hmp->b_next; 2191 dc = (sctp_data_hdr_t *)hmp->b_rptr; 2192 if (SEQ_GT(ntohl(dc->sdh_tsn), ftsn)) 2193 return (dlen); 2194 sctp->sctp_uo_frags = hmp_next; 2195 if (hmp_next != NULL) 2196 hmp_next->b_prev = NULL; 2197 hmp->b_next = NULL; 2198 dlen += ntohs(dc->sdh_len) - sizeof (*dc); 2199 freeb(hmp); 2200 hmp = hmp_next; 2201 } 2202 return (dlen); 2203 } 2204 2205 /* 2206 * For ordered messages. 2207 * Check for existing fragments for an sid-ssn pair reported as abandoned, 2208 * hence will not receive, in the Forward TSN. If there are fragments, then 2209 * we just nuke them. If and when Partial Delivery API is supported, we 2210 * would need to send a notification to the upper layer about this. 2211 */ 2212 static int 2213 sctp_ftsn_check_frag(sctp_t *sctp, uint16_t ssn, sctp_instr_t *sip) 2214 { 2215 sctp_reass_t *srp; 2216 mblk_t *hmp; 2217 mblk_t *dmp; 2218 mblk_t *hmp_next; 2219 sctp_data_hdr_t *dc; 2220 int dlen = 0; 2221 2222 hmp = sip->istr_reass; 2223 while (hmp != NULL) { 2224 hmp_next = hmp->b_next; 2225 srp = (sctp_reass_t *)DB_BASE(hmp); 2226 if (SSN_GT(srp->ssn, ssn)) 2227 return (dlen); 2228 /* 2229 * If we had sent part of this message up, send a partial 2230 * delivery event. Since this is ordered delivery, we should 2231 * have sent partial message only for the next in sequence, 2232 * hence the ASSERT. See comments in sctp_data_chunk() for 2233 * trypartial. 2234 */ 2235 if (srp->partial_delivered) { 2236 if (srp->ssn != sip->nextseq) 2237 cmn_err(CE_WARN, "sctp partial" 2238 " delivery notify, sctp 0x%p" 2239 " sip = 0x%p ssn != nextseq" 2240 " ssn 0x%x nextseq 0x%x", 2241 (void *)sctp, (void *)sip, 2242 srp->ssn, sip->nextseq); 2243 ASSERT(sip->nextseq == srp->ssn); 2244 sctp_partial_delivery_event(sctp); 2245 } 2246 /* Take it out of the reass queue */ 2247 sip->istr_reass = hmp_next; 2248 if (hmp_next != NULL) 2249 hmp_next->b_prev = NULL; 2250 hmp->b_next = NULL; 2251 ASSERT(hmp->b_prev == NULL); 2252 dmp = hmp; 2253 ASSERT(DB_TYPE(hmp) == M_CTL); 2254 dmp = hmp->b_cont; 2255 hmp->b_cont = NULL; 2256 freeb(hmp); 2257 hmp = dmp; 2258 while (dmp != NULL) { 2259 dc = (sctp_data_hdr_t *)dmp->b_rptr; 2260 dlen += ntohs(dc->sdh_len) - sizeof (*dc); 2261 dmp = dmp->b_cont; 2262 } 2263 freemsg(hmp); 2264 hmp = hmp_next; 2265 } 2266 return (dlen); 2267 } 2268 2269 /* 2270 * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove 2271 * any SACK gaps less than the newly updated sctp_ftsn. Walk through the 2272 * sid-ssn pair in the Forward TSN and for each, clean the fragment list 2273 * for this pair, if needed, and check if we can deliver subsequent 2274 * messages, if any, from the instream queue (that were waiting for this 2275 * sid-ssn message to show up). Once we are done try to update the SACK 2276 * info. We could get a duplicate Forward TSN, in which case just send 2277 * a SACK. If any of the sid values in the Forward TSN is invalid, 2278 * send back an "Invalid Stream Identifier" error and continue processing 2279 * the rest. 
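 *
 * Annotation: a sketch of the Forward TSN chunk layout parsed below
 * (RFC 3758, chunk type 192); the field names match the code:
 *
 *	sctp_chunk_hdr_t	type / flags / length
 *	uint32_t		new cumulative TSN
 *	ftsn_entry_t		{ uint16_t ftsn_sid; uint16_t ftsn_ssn; }
 *	...			(one entry per abandoned ordered stream)
 *
 * Hence "remaining", computed from sch_len minus the chunk header and
 * the cumulative TSN, counts whole SID-SSN pairs and bounds the walk.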
2280 */ 2281 static void 2282 sctp_process_forward_tsn(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp, 2283 ip_pkt_t *ipp, ip_recv_attr_t *ira) 2284 { 2285 uint32_t *ftsn = (uint32_t *)(ch + 1); 2286 ftsn_entry_t *ftsn_entry; 2287 sctp_instr_t *instr; 2288 boolean_t can_deliver = B_TRUE; 2289 size_t dlen; 2290 int flen; 2291 mblk_t *dmp; 2292 mblk_t *pmp; 2293 sctp_data_hdr_t *dc; 2294 ssize_t remaining; 2295 sctp_stack_t *sctps = sctp->sctp_sctps; 2296 2297 *ftsn = ntohl(*ftsn); 2298 remaining = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*ftsn); 2299 2300 if (SCTP_IS_DETACHED(sctp)) { 2301 BUMP_MIB(&sctps->sctps_mib, sctpInClosed); 2302 can_deliver = B_FALSE; 2303 } 2304 /* 2305 * Un-ordered messages don't have SID-SSN pair entries, so we check 2306 * for any fragments (for un-ordered messages) to be discarded using 2307 * the cumulative FTSN. 2308 */ 2309 flen = sctp_ftsn_check_uo_frag(sctp, *ftsn); 2310 if (flen > 0) { 2311 ASSERT(sctp->sctp_rxqueued >= flen); 2312 sctp->sctp_rxqueued -= flen; 2313 } 2314 ftsn_entry = (ftsn_entry_t *)(ftsn + 1); 2315 while (remaining >= sizeof (*ftsn_entry)) { 2316 ftsn_entry->ftsn_sid = ntohs(ftsn_entry->ftsn_sid); 2317 ftsn_entry->ftsn_ssn = ntohs(ftsn_entry->ftsn_ssn); 2318 if (ftsn_entry->ftsn_sid >= sctp->sctp_num_istr) { 2319 sctp_bsc_t inval_parm; 2320 2321 /* Will populate the CAUSE block in the ERROR chunk. */ 2322 inval_parm.bsc_sid = htons(ftsn_entry->ftsn_sid); 2323 /* RESERVED, ignored at the receiving end */ 2324 inval_parm.bsc_pad = 0; 2325 2326 sctp_add_err(sctp, SCTP_ERR_BAD_SID, 2327 (void *)&inval_parm, sizeof (sctp_bsc_t), fp); 2328 ftsn_entry++; 2329 remaining -= sizeof (*ftsn_entry); 2330 continue; 2331 } 2332 instr = &sctp->sctp_instr[ftsn_entry->ftsn_sid]; 2333 flen = sctp_ftsn_check_frag(sctp, ftsn_entry->ftsn_ssn, instr); 2334 /* Indicates frags were nuked, update rxqueued */ 2335 if (flen > 0) { 2336 ASSERT(sctp->sctp_rxqueued >= flen); 2337 sctp->sctp_rxqueued -= flen; 2338 } 2339 /* 2340 * It is possible to receive an FTSN chunk with SSN smaller 2341 * than the nextseq if this chunk is a retransmission because 2342 * of incomplete processing when it was first processed. 2343 */ 2344 if (SSN_GE(ftsn_entry->ftsn_ssn, instr->nextseq)) 2345 instr->nextseq = ftsn_entry->ftsn_ssn + 1; 2346 while (instr->istr_nmsgs > 0) { 2347 mblk_t *next; 2348 2349 dmp = (mblk_t *)instr->istr_msgs; 2350 dc = (sctp_data_hdr_t *)dmp->b_rptr; 2351 if (ntohs(dc->sdh_ssn) != instr->nextseq) 2352 break; 2353 2354 next = dmp->b_next; 2355 dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc); 2356 for (pmp = dmp->b_cont; pmp != NULL; 2357 pmp = pmp->b_cont) { 2358 dlen += MBLKL(pmp); 2359 } 2360 if (can_deliver) { 2361 int32_t nrwnd; 2362 int error; 2363 2364 dmp->b_rptr = (uchar_t *)(dc + 1); 2365 dmp->b_next = NULL; 2366 ASSERT(dmp->b_prev == NULL); 2367 if (sctp_input_add_ancillary(sctp, 2368 &dmp, dc, fp, ipp, ira) == 0) { 2369 sctp->sctp_rxqueued -= dlen; 2370 sctp->sctp_rwnd -= dlen; 2371 /* 2372 * Override b_flag for SCTP sockfs 2373 * internal use 2374 */ 2375 2376 dmp->b_flag = 0; 2377 nrwnd = sctp->sctp_ulp_recv( 2378 sctp->sctp_ulpd, dmp, msgdsize(dmp), 2379 0, &error, NULL); 2380 if (nrwnd < 0) 2381 sctp->sctp_rwnd = 0; 2382 else if (nrwnd > sctp->sctp_rwnd) 2383 sctp->sctp_rwnd = nrwnd; 2384 } else { 2385 /* 2386 * We will resume processing when 2387 * the FTSN chunk is re-xmitted.
2388 */ 2389 dmp->b_rptr = (uchar_t *)dc; 2390 dmp->b_next = next; 2391 dprint(0, 2392 ("FTSN dequeuing %u failed\n", 2393 ntohs(dc->sdh_ssn))); 2394 return; 2395 } 2396 } else { 2397 sctp->sctp_rxqueued -= dlen; 2398 ASSERT(dmp->b_prev == NULL); 2399 dmp->b_next = NULL; 2400 freemsg(dmp); 2401 } 2402 instr->istr_nmsgs--; 2403 instr->nextseq++; 2404 sctp->sctp_istr_nmsgs--; 2405 if (next != NULL) 2406 next->b_prev = NULL; 2407 instr->istr_msgs = next; 2408 } 2409 ftsn_entry++; 2410 remaining -= sizeof (*ftsn_entry); 2411 } 2412 /* Duplicate FTSN */ 2413 if (*ftsn <= (sctp->sctp_ftsn - 1)) { 2414 sctp->sctp_force_sack = 1; 2415 return; 2416 } 2417 /* Advance cum TSN to that reported in the Forward TSN chunk */ 2418 sctp->sctp_ftsn = *ftsn + 1; 2419 2420 /* Remove all the SACK gaps before the new cum TSN */ 2421 if (sctp->sctp_sack_info != NULL) { 2422 sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1, 2423 &sctp->sctp_sack_gaps); 2424 } 2425 /* 2426 * If there are gap reports pending, check if advancing 2427 * the ftsn here closes a gap. If so, we can advance 2428 * ftsn to the end of the set. 2429 * If ftsn has moved forward, maybe we can remove gap reports. 2430 */ 2431 if (sctp->sctp_sack_info != NULL && 2432 sctp->sctp_ftsn == sctp->sctp_sack_info->begin) { 2433 sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1; 2434 sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1, 2435 &sctp->sctp_sack_gaps); 2436 } 2437 } 2438 2439 /* 2440 * When we have processed a SACK we check to see if we can advance the 2441 * cumulative TSN if there are abandoned chunks immediately following 2442 * the updated cumulative TSN. If there are, we attempt to send a 2443 * Forward TSN chunk. 2444 */ 2445 static void 2446 sctp_check_abandoned_data(sctp_t *sctp, sctp_faddr_t *fp) 2447 { 2448 mblk_t *meta = sctp->sctp_xmit_head; 2449 mblk_t *mp; 2450 mblk_t *nmp; 2451 uint32_t seglen; 2452 uint32_t adv_pap = sctp->sctp_adv_pap; 2453 2454 /* 2455 * We only check in the first meta since otherwise we can't 2456 * advance the cumulative ack point. We just look for chunks 2457 * marked for retransmission, else we might prematurely 2458 * send an FTSN for a sent, but unacked, chunk. 2459 */ 2460 for (mp = meta->b_cont; mp != NULL; mp = mp->b_next) { 2461 if (!SCTP_CHUNK_ISSENT(mp)) 2462 return; 2463 if (SCTP_CHUNK_WANT_REXMIT(mp)) 2464 break; 2465 } 2466 if (mp == NULL) 2467 return; 2468 sctp_check_adv_ack_pt(sctp, meta, mp); 2469 if (SEQ_GT(sctp->sctp_adv_pap, adv_pap)) { 2470 sctp_make_ftsns(sctp, meta, mp, &nmp, fp, &seglen); 2471 if (nmp == NULL) { 2472 sctp->sctp_adv_pap = adv_pap; 2473 if (!fp->timer_running) 2474 SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto); 2475 return; 2476 } 2477 sctp_set_iplen(sctp, nmp, fp->ixa); 2478 (void) conn_ip_output(nmp, fp->ixa); 2479 BUMP_LOCAL(sctp->sctp_opkts); 2480 if (!fp->timer_running) 2481 SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto); 2482 } 2483 } 2484 2485 /* 2486 * The processing here follows the same logic in sctp_got_sack(), the reason 2487 * we do this separately is because, usually, gap blocks are ordered and 2488 * we can process it in sctp_got_sack(). However if they aren't we would 2489 * need to do some additional non-optimal stuff when we start processing the 2490 * unordered gaps. To that effect sctp_got_sack() does the processing in the 2491 * simple case and this does the same in the more involved case. 
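 *
 * Illustrative example (annotation, hypothetical numbers): with a
 * cumulative TSN of 100, gap blocks with start/end offsets [2-3],
 * [7-9] are ordered (each start lies past the previous end) and are
 * handled entirely in sctp_got_sack(); blocks arriving as [7-9],
 * [2-3] are unordered, and the walk below restarts from the
 * cumulative TSN whenever the next block starts below the TSN the
 * walk has already reached.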
2492 */ 2493 static uint32_t 2494 sctp_process_uo_gaps(sctp_t *sctp, uint32_t ctsn, sctp_sack_frag_t *ssf, 2495 int num_gaps, mblk_t *umphead, mblk_t *mphead, int *trysend, 2496 boolean_t *fast_recovery, uint32_t fr_xtsn) 2497 { 2498 uint32_t xtsn; 2499 uint32_t gapstart = 0; 2500 uint32_t gapend = 0; 2501 int gapcnt; 2502 uint16_t chunklen; 2503 sctp_data_hdr_t *sdc; 2504 int gstart; 2505 mblk_t *ump = umphead; 2506 mblk_t *mp = mphead; 2507 sctp_faddr_t *fp; 2508 uint32_t acked = 0; 2509 sctp_stack_t *sctps = sctp->sctp_sctps; 2510 2511 /* 2512 * gstart tracks the last (in the order of TSN) gapstart that 2513 * we process in this SACK gaps walk. 2514 */ 2515 gstart = ctsn; 2516 2517 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2518 xtsn = ntohl(sdc->sdh_tsn); 2519 for (gapcnt = 0; gapcnt < num_gaps; gapcnt++, ssf++) { 2520 if (gapstart != 0) { 2521 /* 2522 * If we have reached the end of the transmit list or 2523 * hit an unsent chunk or encountered an unordered gap 2524 * block start from the ctsn again. 2525 */ 2526 if (ump == NULL || !SCTP_CHUNK_ISSENT(mp) || 2527 SEQ_LT(ctsn + ntohs(ssf->ssf_start), xtsn)) { 2528 ump = umphead; 2529 mp = mphead; 2530 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2531 xtsn = ntohl(sdc->sdh_tsn); 2532 } 2533 } 2534 2535 gapstart = ctsn + ntohs(ssf->ssf_start); 2536 gapend = ctsn + ntohs(ssf->ssf_end); 2537 2538 /* 2539 * Sanity checks: 2540 * 2541 * 1. SACK for TSN we have not sent - ABORT 2542 * 2. Invalid or spurious gaps, ignore all gaps 2543 */ 2544 if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) || 2545 SEQ_GT(gapend, sctp->sctp_ltsn - 1)) { 2546 BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent); 2547 *trysend = -1; 2548 return (acked); 2549 } else if (SEQ_LT(gapend, gapstart) || 2550 SEQ_LEQ(gapstart, ctsn)) { 2551 break; 2552 } 2553 /* 2554 * The xtsn can be the TSN processed for the last gap 2555 * (gapend) or it could be the cumulative TSN. We continue 2556 * with the last xtsn as long as the gaps are ordered, when 2557 * we hit an unordered gap, we re-start from the cumulative 2558 * TSN. For the first gap it is always the cumulative TSN. 2559 */ 2560 while (xtsn != gapstart) { 2561 /* 2562 * We can't reliably check for reneged chunks 2563 * when walking the unordered list, so we don't. 2564 * In case the peer reneges then we will end up 2565 * sending the reneged chunk via timeout. 2566 */ 2567 mp = mp->b_next; 2568 if (mp == NULL) { 2569 ump = ump->b_next; 2570 /* 2571 * ump can't be NULL because of the sanity 2572 * check above. 2573 */ 2574 ASSERT(ump != NULL); 2575 mp = ump->b_cont; 2576 } 2577 /* 2578 * mp can't be unsent because of the sanity check 2579 * above. 2580 */ 2581 ASSERT(SCTP_CHUNK_ISSENT(mp)); 2582 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2583 xtsn = ntohl(sdc->sdh_tsn); 2584 } 2585 /* 2586 * Now that we have found the chunk with TSN == 'gapstart', 2587 * let's walk till we hit the chunk with TSN == 'gapend'. 2588 * All intermediate chunks will be marked ACKED, if they 2589 * haven't already been. 2590 */ 2591 while (SEQ_LEQ(xtsn, gapend)) { 2592 /* 2593 * SACKed 2594 */ 2595 SCTP_CHUNK_SET_SACKCNT(mp, 0); 2596 if (!SCTP_CHUNK_ISACKED(mp)) { 2597 SCTP_CHUNK_ACKED(mp); 2598 2599 fp = SCTP_CHUNK_DEST(mp); 2600 chunklen = ntohs(sdc->sdh_len); 2601 ASSERT(fp->suna >= chunklen); 2602 fp->suna -= chunklen; 2603 if (fp->suna == 0) { 2604 /* All outstanding data acked. 
*/ 2605 fp->pba = 0; 2606 SCTP_FADDR_TIMER_STOP(fp); 2607 } 2608 fp->acked += chunklen; 2609 acked += chunklen; 2610 sctp->sctp_unacked -= chunklen - sizeof (*sdc); 2611 ASSERT(sctp->sctp_unacked >= 0); 2612 } 2613 /* 2614 * Move to the next message in the transmit list 2615 * if we are done with all the chunks from the current 2616 * message. Note, it is possible to hit the end of the 2617 * transmit list here, i.e. if we have already completed 2618 * processing the gap block. 2619 */ 2620 mp = mp->b_next; 2621 if (mp == NULL) { 2622 ump = ump->b_next; 2623 if (ump == NULL) { 2624 ASSERT(xtsn == gapend); 2625 break; 2626 } 2627 mp = ump->b_cont; 2628 } 2629 /* 2630 * Likewise, we can hit an unsent chunk once we have 2631 * completed processing the gap block. 2632 */ 2633 if (!SCTP_CHUNK_ISSENT(mp)) { 2634 ASSERT(xtsn == gapend); 2635 break; 2636 } 2637 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2638 xtsn = ntohl(sdc->sdh_tsn); 2639 } 2640 /* 2641 * We keep track of the last gap we successfully processed 2642 * so that we can terminate the walk below for incrementing 2643 * the SACK count. 2644 */ 2645 if (SEQ_LT(gstart, gapstart)) 2646 gstart = gapstart; 2647 } 2648 /* 2649 * Check if we have incremented the SACK count for all unacked TSNs in 2650 * sctp_got_sack(); if so, we are done. 2651 */ 2652 if (SEQ_LEQ(gstart, fr_xtsn)) 2653 return (acked); 2654 2655 ump = umphead; 2656 mp = mphead; 2657 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2658 xtsn = ntohl(sdc->sdh_tsn); 2659 while (SEQ_LT(xtsn, gstart)) { 2660 /* 2661 * We have incremented the SACK count for TSNs less than fr_xtsn 2662 * in sctp_got_sack(), so don't increment them again here. 2663 */ 2664 if (SEQ_GT(xtsn, fr_xtsn) && !SCTP_CHUNK_ISACKED(mp)) { 2665 SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1); 2666 if (SCTP_CHUNK_SACKCNT(mp) == 2667 sctps->sctps_fast_rxt_thresh) { 2668 SCTP_CHUNK_REXMIT(sctp, mp); 2669 sctp->sctp_chk_fast_rexmit = B_TRUE; 2670 *trysend = 1; 2671 if (!*fast_recovery) { 2672 /* 2673 * Entering fast recovery.
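 *
 * Annotation: the lines below apply the standard multiplicative
 * decrease (RFC 2960 section 7.2.3 style): ssthresh = max(cwnd / 2,
 * 2 * pmss) and cwnd = ssthresh. E.g., with cwnd = 20 * sfa_pmss
 * both become 10 * sfa_pmss; with cwnd = 3 * sfa_pmss the
 * 2 * sfa_pmss floor applies.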
2674 */ 2675 fp = SCTP_CHUNK_DEST(mp); 2676 fp->ssthresh = fp->cwnd / 2; 2677 if (fp->ssthresh < 2 * fp->sfa_pmss) { 2678 fp->ssthresh = 2679 2 * fp->sfa_pmss; 2680 } 2681 fp->cwnd = fp->ssthresh; 2682 fp->pba = 0; 2683 sctp->sctp_recovery_tsn = 2684 sctp->sctp_ltsn - 1; 2685 *fast_recovery = B_TRUE; 2686 } 2687 } 2688 } 2689 mp = mp->b_next; 2690 if (mp == NULL) { 2691 ump = ump->b_next; 2692 /* We can't get to the end of the transmit list here */ 2693 ASSERT(ump != NULL); 2694 mp = ump->b_cont; 2695 } 2696 /* We can't hit an unsent chunk here */ 2697 ASSERT(SCTP_CHUNK_ISSENT(mp)); 2698 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2699 xtsn = ntohl(sdc->sdh_tsn); 2700 } 2701 return (acked); 2702 } 2703 2704 static int 2705 sctp_got_sack(sctp_t *sctp, sctp_chunk_hdr_t *sch) 2706 { 2707 sctp_sack_chunk_t *sc; 2708 sctp_data_hdr_t *sdc; 2709 sctp_sack_frag_t *ssf; 2710 mblk_t *ump; 2711 mblk_t *mp; 2712 mblk_t *mp1; 2713 uint32_t cumtsn; 2714 uint32_t xtsn; 2715 uint32_t gapstart = 0; 2716 uint32_t gapend = 0; 2717 uint32_t acked = 0; 2718 uint16_t chunklen; 2719 sctp_faddr_t *fp; 2720 int num_gaps; 2721 int trysend = 0; 2722 int i; 2723 boolean_t fast_recovery = B_FALSE; 2724 boolean_t cumack_forward = B_FALSE; 2725 boolean_t fwd_tsn = B_FALSE; 2726 sctp_stack_t *sctps = sctp->sctp_sctps; 2727 2728 BUMP_LOCAL(sctp->sctp_ibchunks); 2729 BUMP_LOCAL(sctp->sctp_isacks); 2730 chunklen = ntohs(sch->sch_len); 2731 if (chunklen < (sizeof (*sch) + sizeof (*sc))) 2732 return (0); 2733 2734 sc = (sctp_sack_chunk_t *)(sch + 1); 2735 cumtsn = ntohl(sc->ssc_cumtsn); 2736 2737 dprint(2, ("got sack cumtsn %x -> %x\n", sctp->sctp_lastack_rxd, 2738 cumtsn)); 2739 2740 /* out of order */ 2741 if (SEQ_LT(cumtsn, sctp->sctp_lastack_rxd)) 2742 return (0); 2743 2744 if (SEQ_GT(cumtsn, sctp->sctp_ltsn - 1)) { 2745 BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent); 2746 /* Send an ABORT */ 2747 return (-1); 2748 } 2749 2750 /* 2751 * Cwnd update is only done when not in fast recovery mode. 2752 */ 2753 if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) 2754 fast_recovery = B_TRUE; 2755 2756 /* 2757 * .. and if the cum TSN is not moving ahead on account of a Forward TSN 2758 */ 2759 if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_adv_pap)) 2760 fwd_tsn = B_TRUE; 2761 2762 if (cumtsn == sctp->sctp_lastack_rxd && 2763 (sctp->sctp_xmit_unacked == NULL || 2764 !SCTP_CHUNK_ABANDONED(sctp->sctp_xmit_unacked))) { 2765 if (sctp->sctp_xmit_unacked != NULL) 2766 mp = sctp->sctp_xmit_unacked; 2767 else if (sctp->sctp_xmit_head != NULL) 2768 mp = sctp->sctp_xmit_head->b_cont; 2769 else 2770 mp = NULL; 2771 BUMP_MIB(&sctps->sctps_mib, sctpInDupAck); 2772 /* 2773 * If we were doing a zero win probe and the win 2774 * has now opened to at least MSS, re-transmit the 2775 * zero win probe via sctp_rexmit_packet(). 2776 */ 2777 if (mp != NULL && sctp->sctp_zero_win_probe && 2778 ntohl(sc->ssc_a_rwnd) >= sctp->sctp_current->sfa_pmss) { 2779 mblk_t *pkt; 2780 uint_t pkt_len; 2781 mblk_t *mp1 = mp; 2782 mblk_t *meta = sctp->sctp_xmit_head; 2783 2784 /* 2785 * Reset the RTO since we have been backing-off 2786 * to send the ZWP.
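 *
 * Annotation: the reset below recomputes the base value RTO = SRTT +
 * 4 * RTTVAR (RFC 2960 section 6.3.1), discarding the exponential
 * back-off accumulated while probing; e.g., srtt = 200ms and
 * rttvar = 50ms yield rto = 400ms, and the SCTP_MAX_RTO() call that
 * follows bounds the result.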
2787 */ 2788 fp = sctp->sctp_current; 2789 fp->rto = fp->srtt + 4 * fp->rttvar; 2790 SCTP_MAX_RTO(sctp, fp); 2791 /* Resend the ZWP */ 2792 pkt = sctp_rexmit_packet(sctp, &meta, &mp1, fp, 2793 &pkt_len); 2794 if (pkt == NULL) { 2795 SCTP_KSTAT(sctps, sctp_ss_rexmit_failed); 2796 return (0); 2797 } 2798 ASSERT(pkt_len <= fp->sfa_pmss); 2799 sctp->sctp_zero_win_probe = B_FALSE; 2800 sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn; 2801 sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn; 2802 sctp_set_iplen(sctp, pkt, fp->ixa); 2803 (void) conn_ip_output(pkt, fp->ixa); 2804 BUMP_LOCAL(sctp->sctp_opkts); 2805 } 2806 } else { 2807 if (sctp->sctp_zero_win_probe) { 2808 /* 2809 * Reset the RTO since we have been backing-off 2810 * to send the ZWP. 2811 */ 2812 fp = sctp->sctp_current; 2813 fp->rto = fp->srtt + 4 * fp->rttvar; 2814 SCTP_MAX_RTO(sctp, fp); 2815 sctp->sctp_zero_win_probe = B_FALSE; 2816 /* This is probably not required */ 2817 if (!sctp->sctp_rexmitting) { 2818 sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn; 2819 sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn; 2820 } 2821 } 2822 acked = sctp_cumack(sctp, cumtsn, &mp); 2823 sctp->sctp_xmit_unacked = mp; 2824 if (acked > 0) { 2825 trysend = 1; 2826 cumack_forward = B_TRUE; 2827 if (fwd_tsn && SEQ_GEQ(sctp->sctp_lastack_rxd, 2828 sctp->sctp_adv_pap)) { 2829 cumack_forward = B_FALSE; 2830 } 2831 } 2832 } 2833 num_gaps = ntohs(sc->ssc_numfrags); 2834 UPDATE_LOCAL(sctp->sctp_gapcnt, num_gaps); 2835 if (num_gaps == 0 || mp == NULL || !SCTP_CHUNK_ISSENT(mp) || 2836 chunklen < (sizeof (*sch) + sizeof (*sc) + 2837 num_gaps * sizeof (*ssf))) { 2838 goto ret; 2839 } 2840 #ifdef DEBUG 2841 /* 2842 * Since we delete any message that has been acked completely, 2843 * the unacked chunk must belong to sctp_xmit_head (as 2844 * we don't have a back pointer from the mp to the meta data 2845 * we do this). 2846 */ 2847 { 2848 mblk_t *mp2 = sctp->sctp_xmit_head->b_cont; 2849 2850 while (mp2 != NULL) { 2851 if (mp2 == mp) 2852 break; 2853 mp2 = mp2->b_next; 2854 } 2855 ASSERT(mp2 != NULL); 2856 } 2857 #endif 2858 ump = sctp->sctp_xmit_head; 2859 2860 /* 2861 * Just remember where we started from, in case we need to call 2862 * sctp_process_uo_gaps() if the gap blocks are unordered. 2863 */ 2864 mp1 = mp; 2865 2866 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2867 xtsn = ntohl(sdc->sdh_tsn); 2868 ASSERT(xtsn == cumtsn + 1); 2869 2870 /* 2871 * Go through SACK gaps. They are ordered based on start TSN. 2872 */ 2873 ssf = (sctp_sack_frag_t *)(sc + 1); 2874 for (i = 0; i < num_gaps; i++, ssf++) { 2875 if (gapstart != 0) { 2876 /* check for unordered gap */ 2877 if (SEQ_LEQ(cumtsn + ntohs(ssf->ssf_start), gapstart)) { 2878 acked += sctp_process_uo_gaps(sctp, 2879 cumtsn, ssf, num_gaps - i, 2880 sctp->sctp_xmit_head, mp1, 2881 &trysend, &fast_recovery, gapstart); 2882 if (trysend < 0) { 2883 BUMP_MIB(&sctps->sctps_mib, 2884 sctpInAckUnsent); 2885 return (-1); 2886 } 2887 break; 2888 } 2889 } 2890 gapstart = cumtsn + ntohs(ssf->ssf_start); 2891 gapend = cumtsn + ntohs(ssf->ssf_end); 2892 2893 /* 2894 * Sanity checks: 2895 * 2896 * 1. SACK for TSN we have not sent - ABORT 2897 * 2. 
Invalid or spurious gaps, ignore all gaps 2898 */ 2899 if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) || 2900 SEQ_GT(gapend, sctp->sctp_ltsn - 1)) { 2901 BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent); 2902 return (-1); 2903 } else if (SEQ_LT(gapend, gapstart) || 2904 SEQ_LEQ(gapstart, cumtsn)) { 2905 break; 2906 } 2907 /* 2908 * Let's start at the current TSN (for the 1st gap we start 2909 * from the cumulative TSN, for subsequent ones we start from 2910 * where the previous gapend was found - second while loop 2911 * below) and walk the transmit list till we find the TSN 2912 * corresponding to gapstart. All the unacked chunks till we 2913 * get to the chunk with TSN == gapstart will have their 2914 * SACKCNT incremented by 1. Note since the gap blocks are 2915 * ordered, we won't be incrementing the SACKCNT for an 2916 * unacked chunk by more than one while processing the gap 2917 * blocks. If the SACKCNT for any unacked chunk exceeds 2918 * the fast retransmit threshold, we will fast retransmit 2919 * after processing all the gap blocks. 2920 */ 2921 ASSERT(SEQ_LEQ(xtsn, gapstart)); 2922 while (xtsn != gapstart) { 2923 SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1); 2924 if (SCTP_CHUNK_SACKCNT(mp) == 2925 sctps->sctps_fast_rxt_thresh) { 2926 SCTP_CHUNK_REXMIT(sctp, mp); 2927 sctp->sctp_chk_fast_rexmit = B_TRUE; 2928 trysend = 1; 2929 if (!fast_recovery) { 2930 /* 2931 * Entering fast recovery. 2932 */ 2933 fp = SCTP_CHUNK_DEST(mp); 2934 fp->ssthresh = fp->cwnd / 2; 2935 if (fp->ssthresh < 2 * fp->sfa_pmss) { 2936 fp->ssthresh = 2937 2 * fp->sfa_pmss; 2938 } 2939 fp->cwnd = fp->ssthresh; 2940 fp->pba = 0; 2941 sctp->sctp_recovery_tsn = 2942 sctp->sctp_ltsn - 1; 2943 fast_recovery = B_TRUE; 2944 } 2945 } 2946 2947 /* 2948 * Peer may have reneged on this chunk, so un-sack 2949 * it now. If the peer did renege, we need to 2950 * readjust unacked. 2951 */ 2952 if (SCTP_CHUNK_ISACKED(mp)) { 2953 chunklen = ntohs(sdc->sdh_len); 2954 fp = SCTP_CHUNK_DEST(mp); 2955 fp->suna += chunklen; 2956 sctp->sctp_unacked += chunklen - sizeof (*sdc); 2957 SCTP_CHUNK_CLEAR_ACKED(sctp, mp); 2958 if (!fp->timer_running) { 2959 SCTP_FADDR_TIMER_RESTART(sctp, fp, 2960 fp->rto); 2961 } 2962 } 2963 2964 mp = mp->b_next; 2965 if (mp == NULL) { 2966 ump = ump->b_next; 2967 /* 2968 * ump can't be NULL given the sanity check 2969 * above. But if it is NULL, it means that 2970 * there is a data corruption. We'd better 2971 * panic. 2972 */ 2973 if (ump == NULL) { 2974 panic("Memory corruption detected: gap " 2975 "start TSN 0x%x missing from the " 2976 "xmit list: %p", gapstart, 2977 (void *)sctp); 2978 } 2979 mp = ump->b_cont; 2980 } 2981 /* 2982 * mp can't be unsent given the sanity check above. 2983 */ 2984 ASSERT(SCTP_CHUNK_ISSENT(mp)); 2985 sdc = (sctp_data_hdr_t *)mp->b_rptr; 2986 xtsn = ntohl(sdc->sdh_tsn); 2987 } 2988 /* 2989 * Now that we have found the chunk with TSN == 'gapstart', 2990 * let's walk till we hit the chunk with TSN == 'gapend'. 2991 * All intermediate chunks will be marked ACKED, if they 2992 * haven't already been. 2993 */ 2994 while (SEQ_LEQ(xtsn, gapend)) { 2995 /* 2996 * SACKed 2997 */ 2998 SCTP_CHUNK_SET_SACKCNT(mp, 0); 2999 if (!SCTP_CHUNK_ISACKED(mp)) { 3000 SCTP_CHUNK_ACKED(mp); 3001 3002 fp = SCTP_CHUNK_DEST(mp); 3003 chunklen = ntohs(sdc->sdh_len); 3004 ASSERT(fp->suna >= chunklen); 3005 fp->suna -= chunklen; 3006 if (fp->suna == 0) { 3007 /* All outstanding data acked. 
*/ 3008 fp->pba = 0; 3009 SCTP_FADDR_TIMER_STOP(fp); 3010 } 3011 fp->acked += chunklen; 3012 acked += chunklen; 3013 sctp->sctp_unacked -= chunklen - sizeof (*sdc); 3014 ASSERT(sctp->sctp_unacked >= 0); 3015 } 3016 /* Go to the next chunk of the current message */ 3017 mp = mp->b_next; 3018 /* 3019 * Move to the next message in the transmit list 3020 * if we are done with all the chunks from the current 3021 * message. Note, it is possible to hit the end of the 3022 * transmit list here, i.e. if we have already completed 3023 * processing the gap block. But the TSN must be equal 3024 * to the gapend because of the above sanity check. 3025 * If it is not equal, it means that some data is 3026 * missing. 3027 * Also, note that we break here, which means we 3028 * continue processing gap blocks, if any. In case of 3029 * ordered gap blocks there can't be any following 3030 * this (if there is it will fail the sanity check 3031 * above). In case of un-ordered gap blocks we will 3032 * switch to sctp_process_uo_gaps(). In either case 3033 * it should be fine to continue with NULL ump/mp, 3034 * but we just reset it to xmit_head. 3035 */ 3036 if (mp == NULL) { 3037 ump = ump->b_next; 3038 if (ump == NULL) { 3039 if (xtsn != gapend) { 3040 panic("Memory corruption " 3041 "detected: gap end TSN " 3042 "0x%x missing from the " 3043 "xmit list: %p", gapend, 3044 (void *)sctp); 3045 } 3046 ump = sctp->sctp_xmit_head; 3047 mp = mp1; 3048 sdc = (sctp_data_hdr_t *)mp->b_rptr; 3049 xtsn = ntohl(sdc->sdh_tsn); 3050 break; 3051 } 3052 mp = ump->b_cont; 3053 } 3054 /* 3055 * Likewise, we could hit an unsent chunk once we have 3056 * completed processing the gap block. Again, it is 3057 * fine to continue processing gap blocks with mp 3058 * pointing to the unsent chunk, because if there 3059 * are more ordered gap blocks, they will fail the 3060 * sanity check, and if there are un-ordered gap blocks, 3061 * we will continue processing in sctp_process_uo_gaps() 3062 * We just reset the mp to the one we started with. 3063 */ 3064 if (!SCTP_CHUNK_ISSENT(mp)) { 3065 ASSERT(xtsn == gapend); 3066 ump = sctp->sctp_xmit_head; 3067 mp = mp1; 3068 sdc = (sctp_data_hdr_t *)mp->b_rptr; 3069 xtsn = ntohl(sdc->sdh_tsn); 3070 break; 3071 } 3072 sdc = (sctp_data_hdr_t *)mp->b_rptr; 3073 xtsn = ntohl(sdc->sdh_tsn); 3074 } 3075 } 3076 if (sctp->sctp_prsctp_aware) 3077 sctp_check_abandoned_data(sctp, sctp->sctp_current); 3078 if (sctp->sctp_chk_fast_rexmit) 3079 sctp_fast_rexmit(sctp); 3080 ret: 3081 trysend += sctp_set_frwnd(sctp, ntohl(sc->ssc_a_rwnd)); 3082 3083 /* 3084 * If receive window is closed while there is unsent data, 3085 * set a timer for doing zero window probes. 3086 */ 3087 if (sctp->sctp_frwnd == 0 && sctp->sctp_unacked == 0 && 3088 sctp->sctp_unsent != 0) { 3089 SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current, 3090 sctp->sctp_current->rto); 3091 } 3092 3093 /* 3094 * Set cwnd for all destinations. 3095 * Congestion window gets increased only when cumulative 3096 * TSN moves forward, we're not in fast recovery, and 3097 * cwnd has been fully utilized (almost fully, need to allow 3098 * some leeway due to non-MSS sized messages). 3099 */ 3100 if (sctp->sctp_current->acked == acked) { 3101 /* 3102 * Fast-path, only data sent to sctp_current got acked. 
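 *
 * Annotation: the updates below restate RFC 2960 sections 7.2.1 and
 * 7.2.2 congestion control; roughly, per SACK that advances the
 * cumulative TSN while cwnd was being utilized:
 *
 *	if (cwnd < ssthresh)			(slow start)
 *		cwnd += MIN(acked, pmss);
 *	else {					(congestion avoidance)
 *		pba += acked;
 *		if (pba >= cwnd) {
 *			pba -= cwnd;
 *			cwnd += pmss;
 *		}
 *	}
 *
 * capped by sctp_cwnd_max, after which the burst limit clamps cwnd
 * to suna + sctps_maxburst * pmss.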
3103 */ 3104 fp = sctp->sctp_current; 3105 if (cumack_forward && !fast_recovery && 3106 (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) { 3107 if (fp->cwnd < fp->ssthresh) { 3108 /* 3109 * Slow start 3110 */ 3111 if (fp->acked > fp->sfa_pmss) { 3112 fp->cwnd += fp->sfa_pmss; 3113 } else { 3114 fp->cwnd += fp->acked; 3115 } 3116 fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max); 3117 } else { 3118 /* 3119 * Congestion avoidance 3120 */ 3121 fp->pba += fp->acked; 3122 if (fp->pba >= fp->cwnd) { 3123 fp->pba -= fp->cwnd; 3124 fp->cwnd += fp->sfa_pmss; 3125 fp->cwnd = MIN(fp->cwnd, 3126 sctp->sctp_cwnd_max); 3127 } 3128 } 3129 } 3130 /* 3131 * Limit the burst of transmitted data segments. 3132 */ 3133 if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss < 3134 fp->cwnd) { 3135 fp->cwnd = fp->suna + sctps->sctps_maxburst * 3136 fp->sfa_pmss; 3137 } 3138 fp->acked = 0; 3139 goto check_ss_rxmit; 3140 } 3141 for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) { 3142 if (cumack_forward && fp->acked && !fast_recovery && 3143 (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) { 3144 if (fp->cwnd < fp->ssthresh) { 3145 if (fp->acked > fp->sfa_pmss) { 3146 fp->cwnd += fp->sfa_pmss; 3147 } else { 3148 fp->cwnd += fp->acked; 3149 } 3150 fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max); 3151 } else { 3152 fp->pba += fp->acked; 3153 if (fp->pba >= fp->cwnd) { 3154 fp->pba -= fp->cwnd; 3155 fp->cwnd += fp->sfa_pmss; 3156 fp->cwnd = MIN(fp->cwnd, 3157 sctp->sctp_cwnd_max); 3158 } 3159 } 3160 } 3161 if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss < 3162 fp->cwnd) { 3163 fp->cwnd = fp->suna + sctps->sctps_maxburst * 3164 fp->sfa_pmss; 3165 } 3166 fp->acked = 0; 3167 } 3168 fp = sctp->sctp_current; 3169 check_ss_rxmit: 3170 /* 3171 * If this is a SACK following a timeout, check if there are 3172 * still unacked chunks (sent before the timeout) that we can 3173 * send. 3174 */ 3175 if (sctp->sctp_rexmitting) { 3176 if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_rxt_maxtsn)) { 3177 /* 3178 * As we are in retransmission phase, we may get a 3179 * SACK which indicates some new chunks are received 3180 * but cum_tsn does not advance. During this 3181 * phase, the other side advances cum_tsn only because 3182 * it receives our retransmitted chunks. Only 3183 * this signals that some chunks are still 3184 * missing. 3185 */ 3186 if (cumack_forward) { 3187 fp->rxt_unacked -= acked; 3188 sctp_ss_rexmit(sctp); 3189 } 3190 } else { 3191 sctp->sctp_rexmitting = B_FALSE; 3192 sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn; 3193 sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn; 3194 fp->rxt_unacked = 0; 3195 } 3196 } 3197 return (trysend); 3198 } 3199 3200 /* 3201 * Returns 0 if the caller should stop processing any more chunks, 3202 * 1 if the caller should skip this chunk and continue processing. 
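 *
 * Annotation: the bit tests below implement the "action" encoding in
 * the upper two bits of the chunk type (RFC 2960 section 3.2):
 *
 *	00	stop processing, drop silently		(returns 0)
 *	01	stop processing, report via ERROR	(returns 0)
 *	10	skip this chunk, continue		(returns 1)
 *	11	skip this chunk, continue, report	(returns 1)
 *
 * e.g., a hypothetical chunk id of 0xc1 has both bits set, so an
 * "unrecognized chunk" cause is queued with sctp_add_err() and
 * processing moves on to the next chunk.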
3203 */ 3204 static int 3205 sctp_strange_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp) 3206 { 3207 size_t len; 3208 3209 BUMP_LOCAL(sctp->sctp_ibchunks); 3210 /* check top two bits for action required */ 3211 if (ch->sch_id & 0x40) { /* also matches 0xc0 */ 3212 len = ntohs(ch->sch_len); 3213 sctp_add_err(sctp, SCTP_ERR_UNREC_CHUNK, ch, len, fp); 3214 3215 if ((ch->sch_id & 0xc0) == 0xc0) { 3216 /* skip and continue */ 3217 return (1); 3218 } else { 3219 /* stop processing */ 3220 return (0); 3221 } 3222 } 3223 if (ch->sch_id & 0x80) { 3224 /* skip and continue, no error */ 3225 return (1); 3226 } 3227 /* top two bits are clear; stop processing and no error */ 3228 return (0); 3229 } 3230 3231 /* 3232 * Basic sanity checks on all input chunks and parameters: they must 3233 * be of legitimate size for their purported type, and must follow 3234 * ordering conventions as defined in rfc2960. 3235 * 3236 * Returns 1 if the chunk and all enclosed params are legitimate, 3237 * 0 otherwise. 3238 */ 3239 /*ARGSUSED*/ 3240 static int 3241 sctp_check_input(sctp_t *sctp, sctp_chunk_hdr_t *ch, ssize_t len, int first) 3242 { 3243 sctp_parm_hdr_t *ph; 3244 void *p = NULL; 3245 ssize_t clen; 3246 uint16_t ch_len; 3247 3248 ch_len = ntohs(ch->sch_len); 3249 if (ch_len > len) { 3250 return (0); 3251 } 3252 3253 switch (ch->sch_id) { 3254 case CHUNK_DATA: 3255 if (ch_len < sizeof (sctp_data_hdr_t)) { 3256 return (0); 3257 } 3258 return (1); 3259 case CHUNK_INIT: 3260 case CHUNK_INIT_ACK: 3261 { 3262 ssize_t remlen = len; 3263 3264 /* 3265 * INIT and INIT-ACK chunks must not be bundled with 3266 * any other. 3267 */ 3268 if (!first || sctp_next_chunk(ch, &remlen) != NULL || 3269 (ch_len < (sizeof (*ch) + 3270 sizeof (sctp_init_chunk_t)))) { 3271 return (0); 3272 } 3273 /* may have params that need checking */ 3274 p = (char *)(ch + 1) + sizeof (sctp_init_chunk_t); 3275 clen = ch_len - (sizeof (*ch) + 3276 sizeof (sctp_init_chunk_t)); 3277 } 3278 break; 3279 case CHUNK_SACK: 3280 if (ch_len < (sizeof (*ch) + sizeof (sctp_sack_chunk_t))) { 3281 return (0); 3282 } 3283 /* dup and gap reports checked by got_sack() */ 3284 return (1); 3285 case CHUNK_SHUTDOWN: 3286 if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) { 3287 return (0); 3288 } 3289 return (1); 3290 case CHUNK_ABORT: 3291 case CHUNK_ERROR: 3292 if (ch_len < sizeof (*ch)) { 3293 return (0); 3294 } 3295 /* may have params that need checking */ 3296 p = ch + 1; 3297 clen = ch_len - sizeof (*ch); 3298 break; 3299 case CHUNK_ECNE: 3300 case CHUNK_CWR: 3301 case CHUNK_HEARTBEAT: 3302 case CHUNK_HEARTBEAT_ACK: 3303 /* Full ASCONF chunk and parameter checks are in asconf.c */ 3304 case CHUNK_ASCONF: 3305 case CHUNK_ASCONF_ACK: 3306 if (ch_len < sizeof (*ch)) { 3307 return (0); 3308 } 3309 /* heartbeat data checked by process_heartbeat() */ 3310 return (1); 3311 case CHUNK_SHUTDOWN_COMPLETE: 3312 { 3313 ssize_t remlen = len; 3314 3315 /* 3316 * SHUTDOWN-COMPLETE chunk must not be bundled with any 3317 * other 3318 */ 3319 if (!first || sctp_next_chunk(ch, &remlen) != NULL || 3320 ch_len < sizeof (*ch)) { 3321 return (0); 3322 } 3323 } 3324 return (1); 3325 case CHUNK_COOKIE: 3326 case CHUNK_COOKIE_ACK: 3327 case CHUNK_SHUTDOWN_ACK: 3328 if (ch_len < sizeof (*ch) || !first) { 3329 return (0); 3330 } 3331 return (1); 3332 case CHUNK_FORWARD_TSN: 3333 if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) 3334 return (0); 3335 return (1); 3336 default: 3337 return (1); /* handled by strange_chunk() */ 3338 } 3339 3340 /* check and byteorder parameters */
3341 if (clen <= 0) { 3342 return (1); 3343 } 3344 ASSERT(p != NULL); 3345 3346 ph = p; 3347 while (ph != NULL && clen > 0) { 3348 ch_len = ntohs(ph->sph_len); 3349 if (ch_len > len || ch_len < sizeof (*ph)) { 3350 return (0); 3351 } 3352 ph = sctp_next_parm(ph, &clen); 3353 } 3354 3355 /* All OK */ 3356 return (1); 3357 } 3358 3359 static mblk_t * 3360 sctp_check_in_policy(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 3361 { 3362 boolean_t policy_present; 3363 ipha_t *ipha; 3364 ip6_t *ip6h; 3365 netstack_t *ns = ipst->ips_netstack; 3366 ipsec_stack_t *ipss = ns->netstack_ipsec; 3367 3368 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 3369 policy_present = ipss->ipsec_inbound_v4_policy_present; 3370 ipha = (ipha_t *)mp->b_rptr; 3371 ip6h = NULL; 3372 } else { 3373 policy_present = ipss->ipsec_inbound_v6_policy_present; 3374 ipha = NULL; 3375 ip6h = (ip6_t *)mp->b_rptr; 3376 } 3377 3378 if (policy_present) { 3379 /* 3380 * The conn_t parameter is NULL because we already know 3381 * nobody's home. 3382 */ 3383 mp = ipsec_check_global_policy(mp, (conn_t *)NULL, 3384 ipha, ip6h, ira, ns); 3385 if (mp == NULL) 3386 return (NULL); 3387 } 3388 return (mp); 3389 } 3390 3391 /* Handle out-of-the-blue packets */ 3392 void 3393 sctp_ootb_input(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 3394 { 3395 sctp_t *sctp; 3396 sctp_chunk_hdr_t *ch; 3397 sctp_hdr_t *sctph; 3398 in6_addr_t src, dst; 3399 uint_t ip_hdr_len = ira->ira_ip_hdr_length; 3400 ssize_t mlen; 3401 sctp_stack_t *sctps; 3402 boolean_t secure; 3403 zoneid_t zoneid = ira->ira_zoneid; 3404 uchar_t *rptr; 3405 3406 ASSERT(ira->ira_ill == NULL); 3407 3408 secure = ira->ira_flags & IRAF_IPSEC_SECURE; 3409 3410 sctps = ipst->ips_netstack->netstack_sctp; 3411 3412 BUMP_MIB(&sctps->sctps_mib, sctpOutOfBlue); 3413 BUMP_MIB(&sctps->sctps_mib, sctpInSCTPPkts); 3414 3415 if (mp->b_cont != NULL) { 3416 /* 3417 * All subsequent code is vastly simplified if it can 3418 * assume a single contiguous chunk of data. 
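 *
 * Annotation: pullupmsg(mp, -1) concatenates the whole b_cont chain
 * into one contiguous data block, letting every header and chunk be
 * located with plain pointer arithmetic; it returns 0 on allocation
 * failure, in which case the packet is counted and dropped below.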
3419 */ 3420 if (pullupmsg(mp, -1) == 0) { 3421 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 3422 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 3423 freemsg(mp); 3424 return; 3425 } 3426 } 3427 3428 rptr = mp->b_rptr; 3429 sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]); 3430 if (ira->ira_flags & IRAF_IS_IPV4) { 3431 ipha_t *ipha; 3432 3433 ipha = (ipha_t *)rptr; 3434 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src); 3435 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst); 3436 } else { 3437 ip6_t *ip6h; 3438 3439 ip6h = (ip6_t *)rptr; 3440 src = ip6h->ip6_src; 3441 dst = ip6h->ip6_dst; 3442 } 3443 3444 mlen = mp->b_wptr - (uchar_t *)(sctph + 1); 3445 if ((ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen)) == NULL) { 3446 dprint(3, ("sctp_ootb_input: invalid packet\n")); 3447 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 3448 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 3449 freemsg(mp); 3450 return; 3451 } 3452 3453 switch (ch->sch_id) { 3454 case CHUNK_INIT: 3455 /* no listener; send abort */ 3456 if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL) 3457 return; 3458 sctp_ootb_send_abort(sctp_init2vtag(ch), 0, 3459 NULL, 0, mp, 0, B_TRUE, ira, ipst); 3460 break; 3461 case CHUNK_INIT_ACK: 3462 /* check for changed src addr */ 3463 sctp = sctp_addrlist2sctp(mp, sctph, ch, zoneid, sctps); 3464 if (sctp != NULL) { 3465 /* success; proceed to normal path */ 3466 mutex_enter(&sctp->sctp_lock); 3467 if (sctp->sctp_running) { 3468 sctp_add_recvq(sctp, mp, B_FALSE, ira); 3469 mutex_exit(&sctp->sctp_lock); 3470 } else { 3471 /* 3472 * If the source address is changed, we 3473 * don't need to worry too much about 3474 * out of order processing. So we don't 3475 * check if the recvq is empty or not here. 3476 */ 3477 sctp->sctp_running = B_TRUE; 3478 mutex_exit(&sctp->sctp_lock); 3479 sctp_input_data(sctp, mp, ira); 3480 WAKE_SCTP(sctp); 3481 } 3482 SCTP_REFRELE(sctp); 3483 return; 3484 } 3485 /* else bogus init ack; drop it */ 3486 break; 3487 case CHUNK_SHUTDOWN_ACK: 3488 if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL) 3489 return; 3490 sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst); 3491 return; 3492 case CHUNK_ERROR: 3493 case CHUNK_ABORT: 3494 case CHUNK_COOKIE_ACK: 3495 case CHUNK_SHUTDOWN_COMPLETE: 3496 break; 3497 default: 3498 if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL) 3499 return; 3500 sctp_ootb_send_abort(sctph->sh_verf, 0, 3501 NULL, 0, mp, 0, B_TRUE, ira, ipst); 3502 break; 3503 } 3504 freemsg(mp); 3505 } 3506 3507 /* 3508 * Handle sctp packets. 3509 * Note that we rele the sctp_t (the caller got a reference on it). 3510 */ 3511 void 3512 sctp_input(conn_t *connp, ipha_t *ipha, ip6_t *ip6h, mblk_t *mp, 3513 ip_recv_attr_t *ira) 3514 { 3515 sctp_t *sctp = CONN2SCTP(connp); 3516 boolean_t secure; 3517 ill_t *ill = ira->ira_ill; 3518 ip_stack_t *ipst = ill->ill_ipst; 3519 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 3520 iaflags_t iraflags = ira->ira_flags; 3521 ill_t *rill = ira->ira_rill; 3522 3523 secure = iraflags & IRAF_IPSEC_SECURE; 3524 3525 /* 3526 * We check some fields in conn_t without holding a lock. 3527 * This should be fine. 3528 */ 3529 if (((iraflags & IRAF_IS_IPV4) ? 
3530 CONN_INBOUND_POLICY_PRESENT(connp, ipss) : 3531 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) || 3532 secure) { 3533 mp = ipsec_check_inbound_policy(mp, connp, ipha, 3534 ip6h, ira); 3535 if (mp == NULL) { 3536 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 3537 /* Note that mp is NULL */ 3538 ip_drop_input("ipIfStatsInDiscards", mp, ill); 3539 SCTP_REFRELE(sctp); 3540 return; 3541 } 3542 } 3543 3544 ira->ira_ill = ira->ira_rill = NULL; 3545 3546 mutex_enter(&sctp->sctp_lock); 3547 if (sctp->sctp_running) { 3548 sctp_add_recvq(sctp, mp, B_FALSE, ira); 3549 mutex_exit(&sctp->sctp_lock); 3550 goto done; 3551 } else { 3552 sctp->sctp_running = B_TRUE; 3553 mutex_exit(&sctp->sctp_lock); 3554 3555 mutex_enter(&sctp->sctp_recvq_lock); 3556 if (sctp->sctp_recvq != NULL) { 3557 sctp_add_recvq(sctp, mp, B_TRUE, ira); 3558 mutex_exit(&sctp->sctp_recvq_lock); 3559 WAKE_SCTP(sctp); 3560 goto done; 3561 } 3562 } 3563 mutex_exit(&sctp->sctp_recvq_lock); 3564 if (ira->ira_flags & IRAF_ICMP_ERROR) 3565 sctp_icmp_error(sctp, mp); 3566 else 3567 sctp_input_data(sctp, mp, ira); 3568 WAKE_SCTP(sctp); 3569 3570 done: 3571 SCTP_REFRELE(sctp); 3572 ira->ira_ill = ill; 3573 ira->ira_rill = rill; 3574 } 3575 3576 static void 3577 sctp_process_abort(sctp_t *sctp, sctp_chunk_hdr_t *ch, int err) 3578 { 3579 sctp_stack_t *sctps = sctp->sctp_sctps; 3580 3581 BUMP_MIB(&sctps->sctps_mib, sctpAborted); 3582 BUMP_LOCAL(sctp->sctp_ibchunks); 3583 3584 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3585 ntohs(((sctp_parm_hdr_t *)(ch + 1))->sph_type), ch); 3586 sctp_clean_death(sctp, err); 3587 } 3588 3589 void 3590 sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira) 3591 { 3592 sctp_chunk_hdr_t *ch; 3593 ssize_t mlen; 3594 int gotdata; 3595 int trysend; 3596 sctp_faddr_t *fp; 3597 sctp_init_chunk_t *iack; 3598 uint32_t tsn; 3599 sctp_data_hdr_t *sdc; 3600 ip_pkt_t ipp; 3601 in6_addr_t src; 3602 in6_addr_t dst; 3603 uint_t ifindex; 3604 sctp_hdr_t *sctph; 3605 uint_t ip_hdr_len = ira->ira_ip_hdr_length; 3606 mblk_t *dups = NULL; 3607 int recv_adaptation; 3608 boolean_t wake_eager = B_FALSE; 3609 in6_addr_t peer_src; 3610 int64_t now; 3611 sctp_stack_t *sctps = sctp->sctp_sctps; 3612 ip_stack_t *ipst = sctps->sctps_netstack->netstack_ip; 3613 boolean_t hb_already = B_FALSE; 3614 cred_t *cr; 3615 pid_t cpid; 3616 uchar_t *rptr; 3617 conn_t *connp = sctp->sctp_connp; 3618 3619 ASSERT(DB_TYPE(mp) == M_DATA); 3620 ASSERT(ira->ira_ill == NULL); 3621 3622 if (mp->b_cont != NULL) { 3623 /* 3624 * All subsequent code is vastly simplified if it can 3625 * assume a single contiguous chunk of data. 3626 */ 3627 if (pullupmsg(mp, -1) == 0) { 3628 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 3629 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 3630 freemsg(mp); 3631 return; 3632 } 3633 } 3634 3635 BUMP_LOCAL(sctp->sctp_ipkts); 3636 ifindex = ira->ira_ruifindex; 3637 3638 rptr = mp->b_rptr; 3639 3640 ipp.ipp_fields = 0; 3641 if (connp->conn_recv_ancillary.crb_all != 0) { 3642 /* 3643 * Record packet information in the ip_pkt_t 3644 */ 3645 if (ira->ira_flags & IRAF_IS_IPV4) { 3646 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 3647 B_FALSE); 3648 } else { 3649 uint8_t nexthdrp; 3650 3651 /* 3652 * IPv6 packets can only be received by applications 3653 * that are prepared to receive IPv6 addresses. 3654 * The IP fanout must ensure this. 
3655 */ 3656 ASSERT(connp->conn_family == AF_INET6); 3657 3658 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 3659 &nexthdrp); 3660 ASSERT(nexthdrp == IPPROTO_SCTP); 3661 3662 /* Could have caused a pullup? */ 3663 rptr = mp->b_rptr; 3664 } 3665 } 3666 3667 sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]); 3668 3669 if (ira->ira_flags & IRAF_IS_IPV4) { 3670 ipha_t *ipha; 3671 3672 ipha = (ipha_t *)rptr; 3673 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src); 3674 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst); 3675 } else { 3676 ip6_t *ip6h; 3677 3678 ip6h = (ip6_t *)rptr; 3679 src = ip6h->ip6_src; 3680 dst = ip6h->ip6_dst; 3681 } 3682 3683 mlen = mp->b_wptr - (uchar_t *)(sctph + 1); 3684 ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen); 3685 if (ch == NULL) { 3686 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 3687 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 3688 freemsg(mp); 3689 return; 3690 } 3691 3692 if (!sctp_check_input(sctp, ch, mlen, 1)) { 3693 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 3694 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 3695 goto done; 3696 } 3697 /* 3698 * Check verification tag (special handling for INIT, 3699 * COOKIE, SHUTDOWN_COMPLETE and SHUTDOWN_ACK chunks). 3700 * ABORTs are handled in the chunk processing loop, since 3701 * they may not appear first. All other checked chunks must 3702 * appear first, or they will have been dropped by check_input(). 3703 */ 3704 switch (ch->sch_id) { 3705 case CHUNK_INIT: 3706 if (sctph->sh_verf != 0) { 3707 /* drop it */ 3708 goto done; 3709 } 3710 break; 3711 case CHUNK_SHUTDOWN_COMPLETE: 3712 if (sctph->sh_verf == sctp->sctp_lvtag) 3713 break; 3714 if (sctph->sh_verf == sctp->sctp_fvtag && 3715 SCTP_GET_TBIT(ch)) { 3716 break; 3717 } 3718 /* else drop it */ 3719 goto done; 3720 case CHUNK_ABORT: 3721 case CHUNK_COOKIE: 3722 /* handled below */ 3723 break; 3724 case CHUNK_SHUTDOWN_ACK: 3725 if (sctp->sctp_state > SCTPS_BOUND && 3726 sctp->sctp_state < SCTPS_ESTABLISHED) { 3727 /* treat as OOTB */ 3728 sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst); 3729 return; 3730 } 3731 /* else fallthru */ 3732 default: 3733 /* 3734 * All other packets must have a valid 3735 * verification tag; however, if this is a 3736 * listener, we use a refined version of 3737 * out-of-the-blue logic. 3738 */ 3739 if (sctph->sh_verf != sctp->sctp_lvtag && 3740 sctp->sctp_state != SCTPS_LISTEN) { 3741 /* drop it */ 3742 goto done; 3743 } 3744 break; 3745 } 3746 3747 /* Have a valid sctp for this packet */ 3748 fp = sctp_lookup_faddr(sctp, &src); 3749 dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp, 3750 (void *)fp, (void *)sctp)); 3751 3752 gotdata = 0; 3753 trysend = 0; 3754 3755 now = ddi_get_lbolt64(); 3756 /* Process the chunks */ 3757 do { 3758 dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n", 3759 sctp->sctp_state, (int)(ch->sch_id))); 3760 3761 if (ch->sch_id == CHUNK_ABORT) { 3762 if (sctph->sh_verf != sctp->sctp_lvtag && 3763 sctph->sh_verf != sctp->sctp_fvtag) { 3764 /* drop it */ 3765 goto done; 3766 } 3767 } 3768 3769 switch (sctp->sctp_state) { 3770 3771 case SCTPS_ESTABLISHED: 3772 case SCTPS_SHUTDOWN_PENDING: 3773 case SCTPS_SHUTDOWN_SENT: 3774 switch (ch->sch_id) { 3775 case CHUNK_DATA: 3776 /* 0-length data chunks are not allowed */ 3777 if (ntohs(ch->sch_len) == sizeof (*sdc)) { 3778 sdc = (sctp_data_hdr_t *)ch; 3779 tsn = sdc->sdh_tsn; 3780 sctp_send_abort(sctp, sctp->sctp_fvtag, 3781 SCTP_ERR_NO_USR_DATA, (char *)&tsn, 3782 sizeof (tsn), mp, 0, B_FALSE, ira); 3783 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3784 0, NULL); 3785 sctp_clean_death(sctp, ECONNABORTED); 3786 goto done; 3787 } 3788 3789 ASSERT(fp != NULL); 3790 sctp->sctp_lastdata = fp; 3791 sctp_data_chunk(sctp, ch, mp, &dups, fp, 3792 &ipp, ira); 3793 gotdata = 1; 3794 /* Restart shutdown timer if shutting down */ 3795 if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) { 3796 /* 3797 * If we have exceeded our max 3798 * wait bound for waiting for a 3799 * shutdown ack from the peer, 3800 * abort the association. 3801 */ 3802 if (sctps->sctps_shutack_wait_bound != 3803 0 && 3804 TICK_TO_MSEC(now - 3805 sctp->sctp_out_time) > 3806 sctps->sctps_shutack_wait_bound) { 3807 sctp_send_abort(sctp, 3808 sctp->sctp_fvtag, 0, NULL, 3809 0, mp, 0, B_FALSE, ira); 3810 sctp_assoc_event(sctp, 3811 SCTP_COMM_LOST, 0, NULL); 3812 sctp_clean_death(sctp, 3813 ECONNABORTED); 3814 goto done; 3815 } 3816 SCTP_FADDR_TIMER_RESTART(sctp, fp, 3817 fp->rto); 3818 } 3819 break; 3820 case CHUNK_SACK: 3821 ASSERT(fp != NULL); 3822 /* 3823 * Peer is real and alive if it can ack our 3824 * data. 3825 */ 3826 sctp_faddr_alive(sctp, fp); 3827 trysend = sctp_got_sack(sctp, ch); 3828 if (trysend < 0) { 3829 sctp_send_abort(sctp, sctph->sh_verf, 3830 0, NULL, 0, mp, 0, B_FALSE, ira); 3831 sctp_assoc_event(sctp, 3832 SCTP_COMM_LOST, 0, NULL); 3833 sctp_clean_death(sctp, 3834 ECONNABORTED); 3835 goto done; 3836 } 3837 break; 3838 case CHUNK_HEARTBEAT: 3839 if (!hb_already) { 3840 /* 3841 * In any one packet, there should 3842 * only be one heartbeat chunk. So 3843 * we should not process it more than 3844 * once.
3738 */ 3739 if (sctph->sh_verf != sctp->sctp_lvtag && 3740 sctp->sctp_state != SCTPS_LISTEN) { 3741 /* drop it */ 3742 goto done; 3743 } 3744 break; 3745 } 3746 3747 /* Have a valid sctp for this packet */ 3748 fp = sctp_lookup_faddr(sctp, &src); 3749 dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp, 3750 (void *)fp, (void *)sctp)); 3751 3752 gotdata = 0; 3753 trysend = 0; 3754 3755 now = ddi_get_lbolt64(); 3756 /* Process the chunks */ 3757 do { 3758 dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n", 3759 sctp->sctp_state, (int)(ch->sch_id))); 3760 3761 if (ch->sch_id == CHUNK_ABORT) { 3762 if (sctph->sh_verf != sctp->sctp_lvtag && 3763 sctph->sh_verf != sctp->sctp_fvtag) { 3764 /* drop it */ 3765 goto done; 3766 } 3767 } 3768 3769 switch (sctp->sctp_state) { 3770 3771 case SCTPS_ESTABLISHED: 3772 case SCTPS_SHUTDOWN_PENDING: 3773 case SCTPS_SHUTDOWN_SENT: 3774 switch (ch->sch_id) { 3775 case CHUNK_DATA: 3776 /* 0-length data chunks are not allowed */ 3777 if (ntohs(ch->sch_len) == sizeof (*sdc)) { 3778 sdc = (sctp_data_hdr_t *)ch; 3779 tsn = sdc->sdh_tsn; 3780 sctp_send_abort(sctp, sctp->sctp_fvtag, 3781 SCTP_ERR_NO_USR_DATA, (char *)&tsn, 3782 sizeof (tsn), mp, 0, B_FALSE, ira); 3783 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3784 0, NULL); 3785 sctp_clean_death(sctp, ECONNABORTED); 3786 goto done; 3787 } 3788 3789 ASSERT(fp != NULL); 3790 sctp->sctp_lastdata = fp; 3791 sctp_data_chunk(sctp, ch, mp, &dups, fp, 3792 &ipp, ira); 3793 gotdata = 1; 3794 /* Restart shutdown timer if shutting down */ 3795 if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) { 3796 /* 3797 * If we have exceeded our max 3798 * wait bound for waiting for a 3799 * shutdown ack from the peer, 3800 * abort the association. 3801 */ 3802 if (sctps->sctps_shutack_wait_bound != 3803 0 && 3804 TICK_TO_MSEC(now - 3805 sctp->sctp_out_time) > 3806 sctps->sctps_shutack_wait_bound) { 3807 sctp_send_abort(sctp, 3808 sctp->sctp_fvtag, 0, NULL, 3809 0, mp, 0, B_FALSE, ira); 3810 sctp_assoc_event(sctp, 3811 SCTP_COMM_LOST, 0, NULL); 3812 sctp_clean_death(sctp, 3813 ECONNABORTED); 3814 goto done; 3815 } 3816 SCTP_FADDR_TIMER_RESTART(sctp, fp, 3817 fp->rto); 3818 } 3819 break; 3820 case CHUNK_SACK: 3821 ASSERT(fp != NULL); 3822 /* 3823 * Peer is real and alive if it can ack our 3824 * data. 3825 */ 3826 sctp_faddr_alive(sctp, fp); 3827 trysend = sctp_got_sack(sctp, ch); 3828 if (trysend < 0) { 3829 sctp_send_abort(sctp, sctph->sh_verf, 3830 0, NULL, 0, mp, 0, B_FALSE, ira); 3831 sctp_assoc_event(sctp, 3832 SCTP_COMM_LOST, 0, NULL); 3833 sctp_clean_death(sctp, 3834 ECONNABORTED); 3835 goto done; 3836 } 3837 break; 3838 case CHUNK_HEARTBEAT: 3839 if (!hb_already) { 3840 /* 3841 * In any one packet, there should 3842 * only be one heartbeat chunk. So 3843 * we should not process more than 3844 * once. 
3845 */ 3846 sctp_return_heartbeat(sctp, ch, mp); 3847 hb_already = B_TRUE; 3848 } 3849 break; 3850 case CHUNK_HEARTBEAT_ACK: 3851 sctp_process_heartbeat(sctp, ch); 3852 break; 3853 case CHUNK_SHUTDOWN: 3854 sctp_shutdown_event(sctp); 3855 trysend = sctp_shutdown_received(sctp, ch, 3856 B_FALSE, B_FALSE, fp); 3857 BUMP_LOCAL(sctp->sctp_ibchunks); 3858 break; 3859 case CHUNK_SHUTDOWN_ACK: 3860 BUMP_LOCAL(sctp->sctp_ibchunks); 3861 if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) { 3862 sctp_shutdown_complete(sctp); 3863 BUMP_MIB(&sctps->sctps_mib, 3864 sctpShutdowns); 3865 sctp_assoc_event(sctp, 3866 SCTP_SHUTDOWN_COMP, 0, NULL); 3867 sctp_clean_death(sctp, 0); 3868 goto done; 3869 } 3870 break; 3871 case CHUNK_ABORT: { 3872 sctp_saddr_ipif_t *sp; 3873 3874 /* Ignore if delete pending */ 3875 sp = sctp_saddr_lookup(sctp, &dst, 0); 3876 ASSERT(sp != NULL); 3877 if (sp->saddr_ipif_delete_pending) { 3878 BUMP_LOCAL(sctp->sctp_ibchunks); 3879 break; 3880 } 3881 3882 sctp_process_abort(sctp, ch, ECONNRESET); 3883 goto done; 3884 } 3885 case CHUNK_INIT: 3886 sctp_send_initack(sctp, sctph, ch, mp, ira); 3887 break; 3888 case CHUNK_COOKIE: 3889 if (sctp_process_cookie(sctp, ch, mp, &iack, 3890 sctph, &recv_adaptation, NULL, ira) != -1) { 3891 sctp_send_cookie_ack(sctp); 3892 sctp_assoc_event(sctp, SCTP_RESTART, 3893 0, NULL); 3894 if (recv_adaptation) { 3895 sctp->sctp_recv_adaptation = 1; 3896 sctp_adaptation_event(sctp); 3897 } 3898 } else { 3899 BUMP_MIB(&sctps->sctps_mib, 3900 sctpInInvalidCookie); 3901 } 3902 break; 3903 case CHUNK_ERROR: { 3904 int error; 3905 3906 BUMP_LOCAL(sctp->sctp_ibchunks); 3907 error = sctp_handle_error(sctp, sctph, ch, mp, 3908 ira); 3909 if (error != 0) { 3910 sctp_assoc_event(sctp, SCTP_COMM_LOST, 3911 0, NULL); 3912 sctp_clean_death(sctp, error); 3913 goto done; 3914 } 3915 break; 3916 } 3917 case CHUNK_ASCONF: 3918 ASSERT(fp != NULL); 3919 sctp_input_asconf(sctp, ch, fp); 3920 BUMP_LOCAL(sctp->sctp_ibchunks); 3921 break; 3922 case CHUNK_ASCONF_ACK: 3923 ASSERT(fp != NULL); 3924 sctp_faddr_alive(sctp, fp); 3925 sctp_input_asconf_ack(sctp, ch, fp); 3926 BUMP_LOCAL(sctp->sctp_ibchunks); 3927 break; 3928 case CHUNK_FORWARD_TSN: 3929 ASSERT(fp != NULL); 3930 sctp->sctp_lastdata = fp; 3931 sctp_process_forward_tsn(sctp, ch, fp, 3932 &ipp, ira); 3933 gotdata = 1; 3934 BUMP_LOCAL(sctp->sctp_ibchunks); 3935 break; 3936 default: 3937 if (sctp_strange_chunk(sctp, ch, fp) == 0) { 3938 goto nomorechunks; 3939 } /* else skip and continue processing */ 3940 break; 3941 } 3942 break; 3943 3944 case SCTPS_LISTEN: 3945 switch (ch->sch_id) { 3946 case CHUNK_INIT: 3947 sctp_send_initack(sctp, sctph, ch, mp, ira); 3948 break; 3949 case CHUNK_COOKIE: { 3950 sctp_t *eager; 3951 3952 if (sctp_process_cookie(sctp, ch, mp, &iack, 3953 sctph, &recv_adaptation, &peer_src, 3954 ira) == -1) { 3955 BUMP_MIB(&sctps->sctps_mib, 3956 sctpInInvalidCookie); 3957 goto done; 3958 } 3959 3960 /* 3961 * The cookie is good; ensure that 3962 * the peer used the verification 3963 * tag from the init ack in the header. 3964 */ 3965 if (iack->sic_inittag != sctph->sh_verf) 3966 goto done; 3967 3968 eager = sctp_conn_request(sctp, mp, ifindex, 3969 ip_hdr_len, iack, ira); 3970 if (eager == NULL) { 3971 sctp_send_abort(sctp, sctph->sh_verf, 3972 SCTP_ERR_NO_RESOURCES, NULL, 0, mp, 3973 0, B_FALSE, ira); 3974 goto done; 3975 } 3976 3977 /* 3978 * If there were extra chunks 3979 * bundled with the cookie, 3980 * they must be processed 3981 * on the eager's queue. 
				eager = sctp_conn_request(sctp, mp, ifindex,
				    ip_hdr_len, iack, ira);
				if (eager == NULL) {
					sctp_send_abort(sctp, sctph->sh_verf,
					    SCTP_ERR_NO_RESOURCES, NULL, 0, mp,
					    0, B_FALSE, ira);
					goto done;
				}

				/*
				 * If there were extra chunks bundled with
				 * the cookie, they must be processed on the
				 * eager's queue.  We accomplish this by
				 * refeeding the whole packet into the state
				 * machine on the right q.  The packet (mp)
				 * gets there via the eager's cookie_mp field
				 * (overloaded with the active open role).
				 * This is picked up when processing the null
				 * bind request put on the eager's q by
				 * sctp_accept().  We must first revert the
				 * cookie chunk's length field to network
				 * byte order so it can be properly
				 * reprocessed on the eager's queue.
				 */
				BUMP_MIB(&sctps->sctps_mib, sctpPassiveEstab);
				if (mlen > ntohs(ch->sch_len)) {
					eager->sctp_cookie_mp = dupb(mp);
					/*
					 * If no mem, just let
					 * the peer retransmit.
					 */
				}
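				/*
				 * For illustration: mlen counts the bytes
				 * remaining in the packet from this chunk
				 * on, so mlen > ntohs(ch->sch_len) means
				 * more chunks follow the COOKIE.  dupb()
				 * duplicates only the mblk descriptor and
				 * shares the data block, so parking a dup on
				 * sctp_cookie_mp is cheap.  A sketch (not
				 * compiled; has_bundled_chunks is a
				 * hypothetical name):
				 */
#if 0
				boolean_t has_bundled_chunks =
				    (mlen > ntohs(ch->sch_len));

				if (has_bundled_chunks) {
					/* reprocess whole packet on eager */
					eager->sctp_cookie_mp = dupb(mp);
				}
#endif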
				sctp_assoc_event(eager, SCTP_COMM_UP, 0, NULL);
				if (recv_adaptation) {
					eager->sctp_recv_adaptation = 1;
					eager->sctp_rx_adaptation_code =
					    sctp->sctp_rx_adaptation_code;
					sctp_adaptation_event(eager);
				}

				eager->sctp_active = now;
				sctp_send_cookie_ack(eager);

				wake_eager = B_TRUE;

				/*
				 * Process the rest of the chunks with the
				 * eager.
				 */
				sctp = eager;
				fp = sctp_lookup_faddr(sctp, &peer_src);
				/*
				 * Confirm peer's original source.  fp can
				 * be NULL only if the peer does not use the
				 * original source as one of its addresses...
				 */
				if (fp == NULL)
					fp = sctp_lookup_faddr(sctp, &src);
				else
					sctp_faddr_alive(sctp, fp);

				/*
				 * Validate the peer addresses.  It also
				 * starts the heartbeat timer.
				 */
				sctp_validate_peer(sctp);
				break;
			}
			/* Anything else is considered out-of-the-blue */
			case CHUNK_ERROR:
			case CHUNK_ABORT:
			case CHUNK_COOKIE_ACK:
			case CHUNK_SHUTDOWN_COMPLETE:
				BUMP_LOCAL(sctp->sctp_ibchunks);
				goto done;
			default:
				BUMP_LOCAL(sctp->sctp_ibchunks);
				sctp_send_abort(sctp, sctph->sh_verf, 0, NULL,
				    0, mp, 0, B_TRUE, ira);
				goto done;
			}
			break;

		case SCTPS_COOKIE_WAIT:
			switch (ch->sch_id) {
			case CHUNK_INIT_ACK:
				sctp_stop_faddr_timers(sctp);
				sctp_faddr_alive(sctp, sctp->sctp_current);
				sctp_send_cookie_echo(sctp, ch, mp, ira);
				BUMP_LOCAL(sctp->sctp_ibchunks);
				break;
			case CHUNK_ABORT:
				sctp_process_abort(sctp, ch, ECONNREFUSED);
				goto done;
			case CHUNK_INIT:
				sctp_send_initack(sctp, sctph, ch, mp, ira);
				break;
			case CHUNK_COOKIE:
				cr = ira->ira_cred;
				cpid = ira->ira_cpid;

				if (sctp_process_cookie(sctp, ch, mp, &iack,
				    sctph, &recv_adaptation, NULL, ira) == -1) {
					BUMP_MIB(&sctps->sctps_mib,
					    sctpInInvalidCookie);
					break;
				}
				sctp_send_cookie_ack(sctp);
				sctp_stop_faddr_timers(sctp);
				if (!SCTP_IS_DETACHED(sctp)) {
					sctp->sctp_ulp_connected(
					    sctp->sctp_ulpd, 0, cr, cpid);
					sctp_set_ulp_prop(sctp);
				}
				sctp->sctp_state = SCTPS_ESTABLISHED;
				sctp->sctp_assoc_start_time =
				    (uint32_t)ddi_get_lbolt();
				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
				if (sctp->sctp_cookie_mp) {
					freemsg(sctp->sctp_cookie_mp);
					sctp->sctp_cookie_mp = NULL;
				}

				/* Validate the peer addresses. */
				sctp->sctp_active = now;
				sctp_validate_peer(sctp);

				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
				if (recv_adaptation) {
					sctp->sctp_recv_adaptation = 1;
					sctp_adaptation_event(sctp);
				}
				/* Try sending queued data, or ASCONFs */
				trysend = 1;
				break;
			default:
				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
					goto nomorechunks;
				} /* else skip and continue processing */
				break;
			}
			break;

		case SCTPS_COOKIE_ECHOED:
			switch (ch->sch_id) {
			case CHUNK_COOKIE_ACK:
				cr = ira->ira_cred;
				cpid = ira->ira_cpid;

				if (!SCTP_IS_DETACHED(sctp)) {
					sctp->sctp_ulp_connected(
					    sctp->sctp_ulpd, 0, cr, cpid);
					sctp_set_ulp_prop(sctp);
				}
				if (sctp->sctp_unacked == 0)
					sctp_stop_faddr_timers(sctp);
				sctp->sctp_state = SCTPS_ESTABLISHED;
				sctp->sctp_assoc_start_time =
				    (uint32_t)ddi_get_lbolt();
				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
				BUMP_LOCAL(sctp->sctp_ibchunks);
				if (sctp->sctp_cookie_mp) {
					freemsg(sctp->sctp_cookie_mp);
					sctp->sctp_cookie_mp = NULL;
				}
				sctp_faddr_alive(sctp, fp);
				/* Validate the peer addresses. */
				sctp->sctp_active = now;
				sctp_validate_peer(sctp);

				/* Try sending queued data, or ASCONFs */
				trysend = 1;
				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
				sctp_adaptation_event(sctp);
				break;
			case CHUNK_ABORT:
				sctp_process_abort(sctp, ch, ECONNREFUSED);
				goto done;
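			/*
			 * For illustration: in the CHUNK_COOKIE_ACK case
			 * above, per-peer timers are stopped only when
			 * sctp_unacked is zero.  RFC 4960 allows DATA to be
			 * bundled with the COOKIE ECHO, so data can already
			 * be in flight when the COOKIE ACK arrives, and its
			 * retransmission timer must stay armed.  A sketch of
			 * the guard (not compiled):
			 */
#if 0
			if (sctp->sctp_unacked == 0) {
				/* nothing in flight, no RTO timer needed */
				sctp_stop_faddr_timers(sctp);
			}
			/* else: bundled DATA still awaits a SACK */
#endif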
			case CHUNK_COOKIE:
				cr = ira->ira_cred;
				cpid = ira->ira_cpid;

				if (sctp_process_cookie(sctp, ch, mp, &iack,
				    sctph, &recv_adaptation, NULL, ira) == -1) {
					BUMP_MIB(&sctps->sctps_mib,
					    sctpInInvalidCookie);
					break;
				}
				sctp_send_cookie_ack(sctp);

				if (!SCTP_IS_DETACHED(sctp)) {
					sctp->sctp_ulp_connected(
					    sctp->sctp_ulpd, 0, cr, cpid);
					sctp_set_ulp_prop(sctp);
				}
				if (sctp->sctp_unacked == 0)
					sctp_stop_faddr_timers(sctp);
				sctp->sctp_state = SCTPS_ESTABLISHED;
				sctp->sctp_assoc_start_time =
				    (uint32_t)ddi_get_lbolt();
				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
				if (sctp->sctp_cookie_mp) {
					freemsg(sctp->sctp_cookie_mp);
					sctp->sctp_cookie_mp = NULL;
				}
				/* Validate the peer addresses. */
				sctp->sctp_active = now;
				sctp_validate_peer(sctp);

				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
				if (recv_adaptation) {
					sctp->sctp_recv_adaptation = 1;
					sctp_adaptation_event(sctp);
				}
				/* Try sending queued data, or ASCONFs */
				trysend = 1;
				break;
			case CHUNK_INIT:
				sctp_send_initack(sctp, sctph, ch, mp, ira);
				break;
			case CHUNK_ERROR: {
				sctp_parm_hdr_t *p;

				BUMP_LOCAL(sctp->sctp_ibchunks);
				/* check for a stale cookie */
				if (ntohs(ch->sch_len) >=
				    (sizeof (*p) + sizeof (*ch)) +
				    sizeof (uint32_t)) {

					p = (sctp_parm_hdr_t *)(ch + 1);
					if (p->sph_type ==
					    htons(SCTP_ERR_STALE_COOKIE)) {
						BUMP_MIB(&sctps->sctps_mib,
						    sctpAborted);
						sctp_error_event(sctp,
						    ch, B_FALSE);
						sctp_assoc_event(sctp,
						    SCTP_COMM_LOST, 0, NULL);
						sctp_clean_death(sctp,
						    ECONNREFUSED);
						goto done;
					}
				}
				break;
			}
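			/*
			 * For illustration: the length test in the
			 * CHUNK_ERROR case above mirrors the wire layout of
			 * a Stale Cookie error cause (RFC 4960 3.3.10.3):
			 * chunk header, then error cause header, then a
			 * 32-bit staleness measure in microseconds.  A
			 * sketch of extracting that measure (not compiled;
			 * staleness is a hypothetical name):
			 */
#if 0
			if (p->sph_type == htons(SCTP_ERR_STALE_COOKIE)) {
				uint32_t staleness;

				/* microseconds by which the cookie expired */
				bcopy(p + 1, &staleness, sizeof (staleness));
				staleness = ntohl(staleness);
			}
#endif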
			case CHUNK_HEARTBEAT:
				if (!hb_already) {
					sctp_return_heartbeat(sctp, ch, mp);
					hb_already = B_TRUE;
				}
				break;
			default:
				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
					goto nomorechunks;
				} /* else skip and continue processing */
			} /* switch (ch->sch_id) */
			break;

		case SCTPS_SHUTDOWN_ACK_SENT:
			switch (ch->sch_id) {
			case CHUNK_ABORT:
				/* Pass gathered wisdom to IP for keeping */
				sctp_update_dce(sctp);
				sctp_process_abort(sctp, ch, 0);
				goto done;
			case CHUNK_SHUTDOWN_COMPLETE:
				BUMP_LOCAL(sctp->sctp_ibchunks);
				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
				    NULL);

				/* Pass gathered wisdom to IP for keeping */
				sctp_update_dce(sctp);
				sctp_clean_death(sctp, 0);
				goto done;
			case CHUNK_SHUTDOWN_ACK:
				sctp_shutdown_complete(sctp);
				BUMP_LOCAL(sctp->sctp_ibchunks);
				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
				    NULL);
				sctp_clean_death(sctp, 0);
				goto done;
			case CHUNK_COOKIE:
				(void) sctp_shutdown_received(sctp, NULL,
				    B_TRUE, B_FALSE, fp);
				BUMP_LOCAL(sctp->sctp_ibchunks);
				break;
			case CHUNK_HEARTBEAT:
				if (!hb_already) {
					sctp_return_heartbeat(sctp, ch, mp);
					hb_already = B_TRUE;
				}
				break;
			default:
				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
					goto nomorechunks;
				} /* else skip and continue processing */
				break;
			}
			break;

		case SCTPS_SHUTDOWN_RECEIVED:
			switch (ch->sch_id) {
			case CHUNK_SHUTDOWN:
				trysend = sctp_shutdown_received(sctp, ch,
				    B_FALSE, B_FALSE, fp);
				break;
			case CHUNK_SACK:
				trysend = sctp_got_sack(sctp, ch);
				if (trysend < 0) {
					sctp_send_abort(sctp, sctph->sh_verf,
					    0, NULL, 0, mp, 0, B_FALSE, ira);
					sctp_assoc_event(sctp,
					    SCTP_COMM_LOST, 0, NULL);
					sctp_clean_death(sctp,
					    ECONNABORTED);
					goto done;
				}
				break;
			case CHUNK_ABORT:
				sctp_process_abort(sctp, ch, ECONNRESET);
				goto done;
			case CHUNK_HEARTBEAT:
				if (!hb_already) {
					sctp_return_heartbeat(sctp, ch, mp);
					hb_already = B_TRUE;
				}
				break;
			default:
				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
					goto nomorechunks;
				} /* else skip and continue processing */
				break;
			}
			break;

		default:
			/*
			 * The only remaining states are SCTPS_IDLE and
			 * SCTPS_BOUND, and we should not be getting here
			 * for these.
			 */
			ASSERT(0);
		} /* switch (sctp->sctp_state) */

		ch = sctp_next_chunk(ch, &mlen);
		if (ch != NULL && !sctp_check_input(sctp, ch, mlen, 0))
			goto done;
	} while (ch != NULL);

	/* Finished processing all chunks in packet */

nomorechunks:
	/* SACK if necessary */
	if (gotdata) {
		boolean_t sack_sent;

		(sctp->sctp_sack_toggle)++;
		sack_sent = sctp_sack(sctp, dups);
		dups = NULL;

		/* If a SACK is sent, no need to restart the timer. */
		if (!sack_sent && !sctp->sctp_ack_timer_running) {
			sctp->sctp_ack_timer_running = B_TRUE;
			sctp_timer(sctp, sctp->sctp_ack_mp,
			    MSEC_TO_TICK(sctps->sctps_deferred_ack_interval));
		}
	}

	if (trysend) {
		sctp_output(sctp, UINT_MAX);
		if (sctp->sctp_cxmit_list != NULL)
			sctp_wput_asconf(sctp, NULL);
	}
	/*
	 * If there is unsent data, make sure a timer is running.  Check
	 * timer_mp: if sctp_closei_local() has run, the timers may have
	 * been freed.
	 */
	if (sctp->sctp_unsent > 0 && !sctp->sctp_current->timer_running &&
	    sctp->sctp_current->timer_mp != NULL) {
		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
		    sctp->sctp_current->rto);
	}

done:
	if (dups != NULL)
		freeb(dups);
	freemsg(mp);

	if (sctp->sctp_err_chunks != NULL)
		sctp_process_err(sctp);

	if (wake_eager) {
		/*
		 * sctp points to the newly created control block; we need
		 * to release it before exiting.
		 */
		WAKE_SCTP(sctp);
	}
}

/*
 * Some amount of data was removed from the receive queue.
 * Check whether we should send a window update.
 *
 * Due to the way sctp_rwnd updates are made, the ULP can give reports
 * out of order.  To keep from dropping incoming data because of this, we
 * only update sctp_rwnd if the new value is larger than what we have
 * already reported to the peer.
 */
void
sctp_recvd(sctp_t *sctp, int len)
{
	int32_t old, new;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	ASSERT(sctp != NULL);
	RUN_SCTP(sctp);

	if (len < sctp->sctp_rwnd) {
		WAKE_SCTP(sctp);
		return;
	}

	old = sctp->sctp_rwnd - sctp->sctp_rxqueued;
	new = len - sctp->sctp_rxqueued;
	sctp->sctp_rwnd = len;

	if (sctp->sctp_state >= SCTPS_ESTABLISHED &&
	    ((old <= new >> 1) || (old < sctp->sctp_mss))) {
		sctp->sctp_force_sack = 1;
		BUMP_MIB(&sctps->sctps_mib, sctpOutWinUpdate);
		(void) sctp_sack(sctp, NULL);
	}
	WAKE_SCTP(sctp);
}
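/*
 * For illustration: the update rule in sctp_recvd() is receive-side silly
 * window syndrome avoidance.  The advertised window only ratchets upward
 * (len < sctp_rwnd returns early), and an immediate window-update SACK is
 * forced only when the window the peer last saw (old) is at most half the
 * newly opened window (new), or smaller than one MSS.  The self-contained,
 * user-level sketch below restates the decision with hypothetical names;
 * it is guarded out of the build.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* hypothetical restatement of the sctp_recvd() window-update test */
static int
should_send_window_update(int32_t old_rwnd, int32_t new_rwnd,
    int32_t queued, int32_t mss)
{
	int32_t old = old_rwnd - queued;	/* window peer last saw */
	int32_t new = new_rwnd - queued;	/* window now available */

	return (old <= (new >> 1) || old < mss);
}

int
main(void)
{
	/*
	 * Example: the last reported rwnd was 32K with 8K still queued,
	 * and the ULP now reports 64K of buffer.  old = 32K - 8K = 24K,
	 * new = 64K - 8K = 56K; 24K <= 28K, so a window update is sent.
	 */
	printf("%d\n", should_send_window_update(32768, 65536, 8192, 1460));
	return (0);
}
#endif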