xref: /illumos-gate/usr/src/uts/common/inet/sctp/sctp_input.c (revision 4e567b4443d7a1680a7319275e5288eef2c92319)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/socket.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/ip6.h>
#include <inet/mib2.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ipsec_impl.h>
#include <inet/sctp_ip.h>

#include "sctp_impl.h"
#include "sctp_asconf.h"
#include "sctp_addr.h"

static struct kmem_cache *sctp_kmem_set_cache;

/*
 * PR-SCTP comments.
 *
 * When we get a valid Forward TSN chunk, we check the fragment list for this
 * SSN and preceding SSNs and free all of them. Further, if this Forward TSN
 * causes the next expected SSN to be present in the stream queue, we deliver
 * any such stranded messages upstream. We also update the SACK info.
 * appropriately. When checking for advancing the cumulative ack (in
 * sctp_cumack()) we must check for abandoned chunks and messages. While
 * traversing the transmit list, if we come across an abandoned chunk, we can
 * skip the message (i.e. take it out of the (re)transmit list) since this
 * message, and hence this chunk, has been marked abandoned by sctp_rexmit().
 * If we come across an unsent chunk for a message that is now abandoned, we
 * need to check if a Forward TSN needs to be sent; this could be a case where
 * we deferred sending a Forward TSN in sctp_get_msg_to_send(). Further, after
 * processing a SACK, we check if the Advanced peer ack point can be moved
 * ahead, i.e. if we can send a Forward TSN via sctp_check_abandoned_data().
 */
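
/*
 * Illustrative timeline (a sketch, not normative): suppose a PR-SCTP
 * message occupies TSNs 10..14 and only 10..12 were transmitted before
 * the message was abandoned.  sctp_rexmit() marks the message
 * abandoned, the Advanced peer ack point moves to TSN 12, and a
 * Forward TSN chunk advertising 12 is sent.  On receiving it, the peer
 * frees the fragments queued for the covered SSNs and advances its
 * cumulative ack as if 10..12 had arrived.
 */
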
void
sctp_free_set(sctp_set_t *s)
{
	sctp_set_t *p;

	while (s) {
		p = s->next;
		kmem_cache_free(sctp_kmem_set_cache, s);
		s = p;
	}
}

static void
sctp_ack_add(sctp_set_t **head, uint32_t tsn, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || num == NULL)
		return;

	ASSERT(*num >= 0);
	ASSERT((*num == 0 && *head == NULL) || (*num > 0 && *head != NULL));

	if (*head == NULL) {
		*head = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (*head == NULL)
			return;
		(*head)->prev = (*head)->next = NULL;
		(*head)->begin = tsn;
		(*head)->end = tsn;
		*num = 1;
		return;
	}

	ASSERT((*head)->prev == NULL);

	/*
	 * Handle this special case here so we don't have to check
	 * for it each time in the loop.
	 */
	if (SEQ_LT(tsn + 1, (*head)->begin)) {
		/* add a new set, and move the head pointer */
		t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
		if (t == NULL)
			return;
		t->next = *head;
		t->prev = NULL;
		(*head)->prev = t;
		t->begin = tsn;
		t->end = tsn;
		(*num)++;
		*head = t;
		return;
	}

	/*
	 * We need to handle the following cases, where p points to
	 * the current set (as we walk through the loop):
	 *
	 * 1. tsn is entirely less than p; create a new set before p.
	 * 2. tsn borders p from less; coalesce p with tsn.
	 * 3. tsn is within p; do nothing.
	 * 4. tsn borders p from greater; coalesce p with tsn.
	 * 4a. p may now border p->next from less; if so, coalesce those
	 *    two sets.
	 * 5. tsn is entirely greater than all sets; add a new set at
	 *    the end.
	 */
	for (p = *head; ; p = p->next) {
		if (SEQ_LT(tsn + 1, p->begin)) {
			/* 1: add a new set before p. */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = p;
			t->prev = NULL;
			t->begin = tsn;
			t->end = tsn;
			if (p->prev) {
				t->prev = p->prev;
				p->prev->next = t;
			}
			p->prev = t;
			(*num)++;
			return;
		}

		if ((tsn + 1) == p->begin) {
			/* 2: adjust p->begin */
			p->begin = tsn;
			return;
		}

		if (SEQ_GEQ(tsn, p->begin) && SEQ_LEQ(tsn, p->end)) {
			/* 3: do nothing */
			return;
		}

		if ((p->end + 1) == tsn) {
			/* 4: adjust p->end */
			p->end = tsn;

			if (p->next != NULL && (tsn + 1) == p->next->begin) {
				/* 4a: coalesce p and p->next */
				t = p->next;
				p->end = t->end;
				p->next = t->next;
				if (t->next != NULL)
					t->next->prev = p;
				kmem_cache_free(sctp_kmem_set_cache, t);
				(*num)--;
			}
			return;
		}

		if (p->next == NULL) {
			/* 5: add new set at the end */
			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
			if (t == NULL)
				return;
			t->next = NULL;
			t->prev = p;
			t->begin = tsn;
			t->end = tsn;
			p->next = t;
			(*num)++;
			return;
		}

		if (SEQ_GT(tsn, p->end + 1))
			continue;
	}
}
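
/*
 * Illustrative example (a sketch): with a cumulative TSN of 9 already
 * acked, receiving TSNs 11, 14 and 12 out of order builds this list:
 *
 *	sctp_ack_add(&head, 11, &num);	[11..11]		num = 1
 *	sctp_ack_add(&head, 14, &num);	[11..11] [14..14]	num = 2
 *	sctp_ack_add(&head, 12, &num);	[11..12] [14..14]	num = 2
 *
 * The third call hits case 4 (12 borders [11..11] from greater); if
 * TSN 13 arrived next, case 4a would coalesce everything into [11..14]
 * and num would drop back to 1.
 */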

static void
sctp_ack_rem(sctp_set_t **head, uint32_t end, int *num)
{
	sctp_set_t *p, *t;

	if (head == NULL || *head == NULL || num == NULL)
		return;

	/* Nothing to remove */
	if (SEQ_LT(end, (*head)->begin))
		return;

	/* Find out where to start removing sets */
	for (p = *head; p->next; p = p->next) {
		if (SEQ_LEQ(end, p->end))
			break;
	}

	if (SEQ_LT(end, p->end) && SEQ_GEQ(end, p->begin)) {
		/* adjust p */
		p->begin = end + 1;
		/* all done */
		if (p == *head)
			return;
	} else if (SEQ_GEQ(end, p->end)) {
		/* remove this set too */
		p = p->next;
	}

	/* unlink everything before this set */
	t = *head;
	*head = p;
	if (p != NULL && p->prev != NULL) {
		p->prev->next = NULL;
		p->prev = NULL;
	}

	sctp_free_set(t);

	/* recount the number of sets */
	*num = 0;

	for (p = *head; p != NULL; p = p->next)
		(*num)++;
}
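
/*
 * Continuing the sketch above: once the cumulative TSN advances through
 * 12, sctp_data_chunk() calls sctp_ack_rem(&head, 12, &num); the
 * now-covered [11..12] set is freed, leaving [14..14] and num = 1.
 */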

void
sctp_sets_init()
{
	sctp_kmem_set_cache = kmem_cache_create("sctp_set_cache",
	    sizeof (sctp_set_t), 0, NULL, NULL, NULL, NULL,
	    NULL, 0);
}

void
sctp_sets_fini()
{
	kmem_cache_destroy(sctp_kmem_set_cache);
}

sctp_chunk_hdr_t *
sctp_first_chunk(uchar_t *rptr, ssize_t remaining)
{
	sctp_chunk_hdr_t *ch;
	uint16_t ch_len;

	if (remaining < sizeof (*ch)) {
		return (NULL);
	}

	ch = (sctp_chunk_hdr_t *)rptr;
	ch_len = ntohs(ch->sch_len);

	if (ch_len < sizeof (*ch) || remaining < ch_len) {
		return (NULL);
	}

	return (ch);
}

sctp_chunk_hdr_t *
sctp_next_chunk(sctp_chunk_hdr_t *ch, ssize_t *remaining)
{
	int pad;
	uint16_t ch_len;

	if (!ch) {
		return (NULL);
	}

	ch_len = ntohs(ch->sch_len);

	if ((pad = ch_len & (SCTP_ALIGN - 1)) != 0) {
		pad = SCTP_ALIGN - pad;
	}

	*remaining -= (ch_len + pad);
	ch = (sctp_chunk_hdr_t *)((char *)ch + ch_len + pad);

	return (sctp_first_chunk((uchar_t *)ch, *remaining));
}
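
/*
 * Usage sketch (hypothetical caller, not part of this file): the pair
 * above acts as an iterator over the chunk region of an inbound
 * packet.  `rptr' and `remaining' stand for the first chunk header
 * and the byte count left in the packet:
 *
 *	sctp_chunk_hdr_t *ch;
 *
 *	for (ch = sctp_first_chunk(rptr, remaining); ch != NULL;
 *	    ch = sctp_next_chunk(ch, &remaining)) {
 *		switch (ch->sch_id) {
 *		case CHUNK_DATA:
 *			... handle one chunk ...
 *		}
 *	}
 *
 * Both functions validate the length field against what is left in the
 * buffer, so the loop terminates cleanly on a truncated or malformed
 * trailing chunk.
 */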

/*
 * Attach ancillary data to a received SCTP segment.
 * If the source address (fp) is not the primary, send up a
 * unitdata_ind so recvfrom() can populate the msg_name field.
 * If ancillary data is also requested, we append it to the
 * unitdata_ind. Otherwise, we just send up an optdata_ind.
 */
static int
sctp_input_add_ancillary(sctp_t *sctp, mblk_t **mp, sctp_data_hdr_t *dcp,
    sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
{
	struct T_unitdata_ind	*tudi;
	int			optlen;
	int			hdrlen;
	uchar_t			*optptr;
	struct cmsghdr		*cmsg;
	mblk_t			*mp1;
	struct sockaddr_in6	sin_buf[1];
	struct sockaddr_in6	*sin6;
	struct sockaddr_in	*sin4;
	crb_t			 addflag;	/* Which pieces to add */
	conn_t			*connp = sctp->sctp_connp;

	sin4 = NULL;
	sin6 = NULL;

	optlen = hdrlen = 0;
	addflag.crb_all = 0;

	/* Figure out address size */
	if (connp->conn_family == AF_INET) {
		sin4 = (struct sockaddr_in *)sin_buf;
		sin4->sin_family = AF_INET;
		sin4->sin_port = connp->conn_fport;
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
		hdrlen = sizeof (*tudi) + sizeof (*sin4);
	} else {
		sin6 = sin_buf;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = connp->conn_fport;
		sin6->sin6_addr = fp->faddr;
		hdrlen = sizeof (*tudi) + sizeof (*sin6);
	}
	/* If app asked to receive send / recv info */
	if (sctp->sctp_recvsndrcvinfo)
		optlen += sizeof (*cmsg) + sizeof (struct sctp_sndrcvinfo);

	if (connp->conn_recv_ancillary.crb_all == 0)
		goto noancillary;

	if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
	    ira->ira_ruifindex != sctp->sctp_recvifindex) {
		optlen += sizeof (*cmsg) + sizeof (struct in6_pktinfo);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ip_recvpktinfo = 1;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
	    ipp->ipp_hoplimit != sctp->sctp_recvhops) {
		optlen += sizeof (*cmsg) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvhoplimit = 1;
	}
	/* If app asked for tclass and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
	    ipp->ipp_tclass != sctp->sctp_recvtclass) {
		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvtclass = 1;
	}
	/* If app asked for hopbyhop headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
	    ip_cmpbuf(sctp->sctp_hopopts, sctp->sctp_hopoptslen,
	    (ipp->ipp_fields & IPPF_HOPOPTS),
	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_hopoptslen -
		    sctp->sctp_v6label_len;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvhopopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
			return (-1);
	}
	/* If app asked for dst headers before routing headers ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
	    ip_cmpbuf(sctp->sctp_rthdrdstopts, sctp->sctp_rthdrdstoptslen,
	    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
	    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvrthdrdstopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_rthdrdstopts,
		    &sctp->sctp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
			return (-1);
	}
	/* If app asked for routing headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
	    ip_cmpbuf(sctp->sctp_rthdr, sctp->sctp_rthdrlen,
	    (ipp->ipp_fields & IPPF_RTHDR),
	    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_rthdrlen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvrthdr = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_rthdr,
		    &sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
			return (-1);
	}
	/* If app asked for dest headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvdstopts &&
	    ip_cmpbuf(sctp->sctp_dstopts, sctp->sctp_dstoptslen,
	    (ipp->ipp_fields & IPPF_DSTOPTS),
	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
		optlen += sizeof (*cmsg) + ipp->ipp_dstoptslen;
		if (hdrlen == 0)
			hdrlen = sizeof (struct T_unitdata_ind);
		addflag.crb_ipv6_recvdstopts = 1;
		if (!ip_allocbuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
			return (-1);
	}
noancillary:
	/* Nothing to add */
	if (hdrlen == 0)
		return (-1);

	mp1 = allocb(hdrlen + optlen + sizeof (void *), BPRI_MED);
	if (mp1 == NULL)
		return (-1);
	mp1->b_cont = *mp;
	*mp = mp1;
	mp1->b_rptr += sizeof (void *);  /* pointer worth of padding */
	mp1->b_wptr = mp1->b_rptr + hdrlen + optlen;
	DB_TYPE(mp1) = M_PROTO;
	tudi = (struct T_unitdata_ind *)mp1->b_rptr;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_length = sin4 ? sizeof (*sin4) : sizeof (*sin6);
	tudi->SRC_offset = sizeof (*tudi);
	tudi->OPT_offset = sizeof (*tudi) + tudi->SRC_length;
	tudi->OPT_length = optlen;
	if (sin4) {
		bcopy(sin4, tudi + 1, sizeof (*sin4));
	} else {
		bcopy(sin6, tudi + 1, sizeof (*sin6));
	}
	optptr = (uchar_t *)tudi + tudi->OPT_offset;

	if (sctp->sctp_recvsndrcvinfo) {
		/* XXX need backout method if memory allocation fails. */
		struct sctp_sndrcvinfo *sri;

		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_SCTP;
		cmsg->cmsg_type = SCTP_SNDRCV;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*sri);
		optptr += sizeof (*cmsg);

		sri = (struct sctp_sndrcvinfo *)(cmsg + 1);
		ASSERT(OK_32PTR(sri));
		sri->sinfo_stream = ntohs(dcp->sdh_sid);
		sri->sinfo_ssn = ntohs(dcp->sdh_ssn);
		if (SCTP_DATA_GET_UBIT(dcp)) {
			sri->sinfo_flags = MSG_UNORDERED;
		} else {
			sri->sinfo_flags = 0;
		}
		sri->sinfo_ppid = dcp->sdh_payload_id;
		sri->sinfo_context = 0;
		sri->sinfo_timetolive = 0;
		sri->sinfo_tsn = ntohl(dcp->sdh_tsn);
		sri->sinfo_cumtsn = sctp->sctp_ftsn;
		sri->sinfo_assoc_id = 0;

		optptr += sizeof (*sri);
	}

	/*
	 * If app asked for pktinfo and the index has changed ...
	 * Note that the local address never changes for the connection.
	 */
	if (addflag.crb_ip_recvpktinfo) {
		struct in6_pktinfo *pkti;
		uint_t ifindex;

		ifindex = ira->ira_ruifindex;
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_PKTINFO;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*pkti);
		optptr += sizeof (*cmsg);

		pkti = (struct in6_pktinfo *)optptr;
		if (connp->conn_family == AF_INET6)
			pkti->ipi6_addr = sctp->sctp_ip6h->ip6_src;
		else
			IN6_IPADDR_TO_V4MAPPED(sctp->sctp_ipha->ipha_src,
			    &pkti->ipi6_addr);

		pkti->ipi6_ifindex = ifindex;
		optptr += sizeof (*pkti);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvifindex = ifindex;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (addflag.crb_ipv6_recvhoplimit) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPLIMIT;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
		optptr += sizeof (*cmsg);

		*(uint_t *)optptr = ipp->ipp_hoplimit;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvhops = ipp->ipp_hoplimit;
	}
	/* If app asked for tclass and it has changed ... */
	if (addflag.crb_ipv6_recvtclass) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_TCLASS;
		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
		optptr += sizeof (*cmsg);

		*(uint_t *)optptr = ipp->ipp_tclass;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		sctp->sctp_recvtclass = ipp->ipp_tclass;
	}
	if (addflag.crb_ipv6_recvhopopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_HOPOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_hopoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
		optptr += ipp->ipp_hopoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_hopopts,
		    &sctp->sctp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
	}
	if (addflag.crb_ipv6_recvrthdrdstopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDRDSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen);
		optptr += ipp->ipp_rthdrdstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rthdrdstopts,
		    &sctp->sctp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
	}
	if (addflag.crb_ipv6_recvrthdr) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_RTHDR;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrlen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
		optptr += ipp->ipp_rthdrlen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_rthdr,
		    &sctp->sctp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
	}
	if (addflag.crb_ipv6_recvdstopts) {
		cmsg = (struct cmsghdr *)optptr;
		cmsg->cmsg_level = IPPROTO_IPV6;
		cmsg->cmsg_type = IPV6_DSTOPTS;
		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_dstoptslen;
		optptr += sizeof (*cmsg);

		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
		optptr += ipp->ipp_dstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&sctp->sctp_dstopts,
		    &sctp->sctp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
	}

	ASSERT(optptr == mp1->b_wptr);

	return (0);
}
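
/*
 * Layout sketch of the M_PROTO mblk built above (each cmsg is present
 * only when its addflag bit was set):
 *
 *	+----------------+------------------+---------------------------+
 *	| T_unitdata_ind | sockaddr_in(6)   | cmsghdr + data, one per   |
 *	|                | (for msg_name)   | requested option ...      |
 *	+----------------+------------------+---------------------------+
 *	^ b_rptr         ^ SRC_offset       ^ OPT_offset
 *
 * The original data mblk is chained off b_cont, so the ULP receives a
 * single message carrying both the control and the data parts.
 */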

void
sctp_free_reass(sctp_instr_t *sip)
{
	mblk_t *mp, *mpnext, *mctl;

	for (mp = sip->istr_reass; mp != NULL; mp = mpnext) {
		mpnext = mp->b_next;
		mp->b_next = NULL;
		mp->b_prev = NULL;
		if (DB_TYPE(mp) == M_CTL) {
			mctl = mp;
			ASSERT(mp->b_cont != NULL);
			mp = mp->b_cont;
			mctl->b_cont = NULL;
			freeb(mctl);
		}
		freemsg(mp);
	}
	sip->istr_reass = NULL;
}

/*
 * If the series of data fragments of which dmp is a part is successfully
 * reassembled, the first mblk in the series is returned. dc is adjusted
 * to point at the data chunk in the lead mblk, and b_rptr also points to
 * the data chunk; the following mblks' b_rptrs point at the actual payload.
 *
 * If the series is not yet reassembled, NULL is returned. dc is not changed.
 * XXX should probably move this up into the state machine.
 */

/* Fragment list for un-ordered messages. Partial delivery is not supported */
static mblk_t *
sctp_uodata_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc)
{
	mblk_t		*hmp;
	mblk_t		*begin = NULL;
	mblk_t		*end = NULL;
	sctp_data_hdr_t	*qdc;
	uint32_t	ntsn;
	uint32_t	tsn = ntohl((*dc)->sdh_tsn);
#ifdef	DEBUG
	mblk_t		*mp1;
#endif

	/* First frag. */
	if (sctp->sctp_uo_frags == NULL) {
		sctp->sctp_uo_frags = dmp;
		return (NULL);
	}
	hmp = sctp->sctp_uo_frags;
	/*
	 * Insert the segment according to the TSN; fragmented unordered
	 * chunks are sequenced by TSN.
	 */
	while (hmp != NULL) {
		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
		ntsn = ntohl(qdc->sdh_tsn);
		if (SEQ_GT(ntsn, tsn)) {
			if (hmp->b_prev == NULL) {
				dmp->b_next = hmp;
				hmp->b_prev = dmp;
				sctp->sctp_uo_frags = dmp;
			} else {
				dmp->b_next = hmp;
				dmp->b_prev = hmp->b_prev;
				hmp->b_prev->b_next = dmp;
				hmp->b_prev = dmp;
			}
			break;
		}
		if (hmp->b_next == NULL) {
			hmp->b_next = dmp;
			dmp->b_prev = hmp;
			break;
		}
		hmp = hmp->b_next;
	}
	/* check if we completed a msg */
	if (SCTP_DATA_GET_BBIT(*dc)) {
		begin = dmp;
	} else if (SCTP_DATA_GET_EBIT(*dc)) {
		end = dmp;
	}
	/*
	 * We walk consecutive TSNs backwards till we get a seg. with
	 * the B bit
	 */
	if (begin == NULL) {
		for (hmp = dmp->b_prev; hmp != NULL; hmp = hmp->b_prev) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(tsn - ntsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_BBIT(qdc)) {
				begin = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	tsn = ntohl((*dc)->sdh_tsn);
	/*
	 * We walk consecutive TSNs till we get a seg. with the E bit
	 */
	if (end == NULL) {
		for (hmp = dmp->b_next; hmp != NULL; hmp = hmp->b_next) {
			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
			ntsn = ntohl(qdc->sdh_tsn);
			if ((int32_t)(ntsn - tsn) > 1) {
				return (NULL);
			}
			if (SCTP_DATA_GET_EBIT(qdc)) {
				end = hmp;
				break;
			}
			tsn = ntsn;
		}
	}
	if (begin == NULL || end == NULL) {
		return (NULL);
	}
	/* Got one! Remove the msg from the list. */
	if (sctp->sctp_uo_frags == begin) {
		ASSERT(begin->b_prev == NULL);
		sctp->sctp_uo_frags = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = NULL;
	} else {
		begin->b_prev->b_next = end->b_next;
		if (end->b_next != NULL)
			end->b_next->b_prev = begin->b_prev;
	}
	begin->b_prev = NULL;
	end->b_next = NULL;

	/*
	 * Null out b_next and b_prev and chain using b_cont.
	 */
	dmp = end = begin;
	hmp = begin->b_next;
	*dc = (sctp_data_hdr_t *)begin->b_rptr;
	begin->b_next = NULL;
	while (hmp != NULL) {
		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
		hmp->b_rptr = (uchar_t *)(qdc + 1);
		end = hmp->b_next;
		dmp->b_cont = hmp;
		dmp = hmp;

		if (end != NULL)
			hmp->b_next = NULL;
		hmp->b_prev = NULL;
		hmp = end;
	}
	BUMP_LOCAL(sctp->sctp_reassmsgs);
#ifdef	DEBUG
	mp1 = begin;
	while (mp1 != NULL) {
		ASSERT(mp1->b_next == NULL);
		ASSERT(mp1->b_prev == NULL);
		mp1 = mp1->b_cont;
	}
#endif
	return (begin);
}
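
/*
 * Illustrative example (a sketch): an unordered message split across
 * TSNs 20..22 arrives as 21, 20, 22.  The first two calls just insert
 * into the TSN-sorted sctp_uo_frags list and return NULL.  When 22
 * (carrying the E bit) arrives, the backward walk finds the B bit on
 * 20 with no gap in between, so the three fragments are unlinked,
 * chained via b_cont and returned as one message.
 */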

/*
 * Try partial delivery.
 */
static mblk_t *
sctp_try_partial_delivery(sctp_t *sctp, mblk_t *hmp, sctp_reass_t *srp,
    sctp_data_hdr_t **dc)
{
	mblk_t		*mp;
	mblk_t		*dmp;
	mblk_t		*qmp;
	mblk_t		*prev;
	sctp_data_hdr_t	*qdc;
	uint32_t	tsn;

	ASSERT(DB_TYPE(hmp) == M_CTL);

	dprint(4, ("trypartial: got=%d, needed=%d\n",
	    (int)(srp->got), (int)(srp->needed)));

	mp = hmp->b_cont;
	qdc = (sctp_data_hdr_t *)mp->b_rptr;

	ASSERT(SCTP_DATA_GET_BBIT(qdc) && srp->hasBchunk);

	tsn = ntohl(qdc->sdh_tsn) + 1;

	/*
	 * This loop has two exit conditions: the
	 * end of received chunks has been reached, or
	 * there is a break in the sequence. We want
	 * to chop the reassembly list as follows (the
	 * numbers are TSNs):
	 *   10 -> 11 ->	(end of chunks)
	 *   10 -> 11 -> | 13   (break in sequence)
	 */
	prev = mp;
	mp = mp->b_cont;
	while (mp != NULL) {
		qdc = (sctp_data_hdr_t *)mp->b_rptr;
		if (ntohl(qdc->sdh_tsn) != tsn)
			break;
		prev = mp;
		mp = mp->b_cont;
		tsn++;
	}
	/*
	 * We are sending all the fragments upstream, so we have to
	 * retain the srp info for further fragments.
	 */
	if (mp == NULL) {
		dmp = hmp->b_cont;
		hmp->b_cont = NULL;
		srp->nexttsn = tsn;
		srp->msglen = 0;
		srp->needed = 0;
		srp->got = 0;
		srp->partial_delivered = B_TRUE;
		srp->tail = NULL;
	} else {
		dmp = hmp->b_cont;
		hmp->b_cont = mp;
	}
	srp->hasBchunk = B_FALSE;
	/*
	 * prev now points at the last chunk in the consecutive
	 * sequence and mp at the first chunk past the break (or is
	 * NULL if there was no break). We chop the list at prev; the
	 * remaining chunks become the new list head, and once the
	 * delivered prefix has been accounted for via
	 * partial_delivered (see sctp_data_frag()), subsequent
	 * fragment deliveries follow the normal reassembly path.
	 */
	prev->b_cont = NULL;
	srp->partial_delivered = B_TRUE;

	dprint(4, ("trypartial: got some, got=%d, needed=%d\n",
	    (int)(srp->got), (int)(srp->needed)));

	/*
	 * Adjust all mblks except the lead so their rptrs point to the
	 * payload. sctp_data_chunk() will need to process the lead's
	 * data chunk section, so leave its rptr pointing at the data chunk.
	 */
	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
	if (srp->tail != NULL) {
		srp->got--;
		ASSERT(srp->got != 0);
		if (srp->needed != 0) {
			srp->needed--;
			ASSERT(srp->needed != 0);
		}
		srp->msglen -= ntohs((*dc)->sdh_len);
	}
	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		qmp->b_rptr = (uchar_t *)(qdc + 1);

		/*
		 * Deduct the balance from got and needed here, now that
		 * we know we are actually delivering these data.
		 */
		if (srp->tail != NULL) {
			srp->got--;
			ASSERT(srp->got != 0);
			if (srp->needed != 0) {
				srp->needed--;
				ASSERT(srp->needed != 0);
			}
			srp->msglen -= ntohs(qdc->sdh_len);
		}
	}
	ASSERT(srp->msglen == 0);
	BUMP_LOCAL(sctp->sctp_reassmsgs);

	return (dmp);
}
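
/*
 * Layout sketch of the per-stream reassembly list used below.  Each
 * entry on sip->istr_reass is an M_CTL mblk whose dblk base holds the
 * sctp_reass_t bookkeeping for one SSN; the fragments themselves hang
 * off b_cont in TSN order:
 *
 *	istr_reass -> M_CTL (ssn 5) -> M_CTL (ssn 6) -> ...	(b_next)
 *	                 |                |
 *	              frag(tsn 10)     frag(tsn 14)		(b_cont)
 *	                 |                |
 *	              frag(tsn 11)     frag(tsn 15)
 */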

/*
 * Fragment list for ordered messages.
 * If no error occurs, error is set to 0. If we run out of memory, error
 * is set to 1. If the peer commits a fatal error (like using different
 * sequence numbers for the same data fragment series), the association is
 * aborted and error is set to 2. tpfinished indicates whether we have
 * assembled a complete message; this is used in sctp_data_chunk() to
 * see if we can try to send any queued message for this stream.
 */
static mblk_t *
sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error,
    sctp_instr_t *sip, boolean_t *tpfinished)
{
	mblk_t		*hmp;
	mblk_t		*pmp;
	mblk_t		*qmp;
	mblk_t		*first_mp;
	sctp_reass_t	*srp;
	sctp_data_hdr_t	*qdc;
	sctp_data_hdr_t	*bdc;
	sctp_data_hdr_t	*edc;
	uint32_t	tsn;
	uint16_t	fraglen = 0;

	*error = 0;

	/* find the reassembly queue for this data chunk */
	hmp = qmp = sip->istr_reass;
	for (; hmp != NULL; hmp = hmp->b_next) {
		srp = (sctp_reass_t *)DB_BASE(hmp);
		if (ntohs((*dc)->sdh_ssn) == srp->ssn)
			goto foundit;
		else if (SSN_GT(srp->ssn, ntohs((*dc)->sdh_ssn)))
			break;
		qmp = hmp;
	}

	/*
	 * Allocate a M_CTL that will contain information about this
	 * fragmented message.
	 */
	if ((pmp = allocb(sizeof (*srp), BPRI_MED)) == NULL) {
		*error = 1;
		return (NULL);
	}
	DB_TYPE(pmp) = M_CTL;
	srp = (sctp_reass_t *)DB_BASE(pmp);
	pmp->b_cont = dmp;

	if (hmp != NULL) {
		if (sip->istr_reass == hmp) {
			sip->istr_reass = pmp;
			pmp->b_next = hmp;
			pmp->b_prev = NULL;
			hmp->b_prev = pmp;
		} else {
			qmp->b_next = pmp;
			pmp->b_prev = qmp;
			pmp->b_next = hmp;
			hmp->b_prev = pmp;
		}
	} else {
		/* make a new reass head and stick it on the end */
		if (sip->istr_reass == NULL) {
			sip->istr_reass = pmp;
			pmp->b_prev = NULL;
		} else {
			qmp->b_next = pmp;
			pmp->b_prev = qmp;
		}
		pmp->b_next = NULL;
	}
	srp->partial_delivered = B_FALSE;
	srp->ssn = ntohs((*dc)->sdh_ssn);
empty_srp:
	srp->needed = 0;
	srp->got = 1;
	srp->tail = dmp;
	if (SCTP_DATA_GET_BBIT(*dc)) {
		srp->msglen = ntohs((*dc)->sdh_len);
		srp->nexttsn = ntohl((*dc)->sdh_tsn) + 1;
		srp->hasBchunk = B_TRUE;
	} else if (srp->partial_delivered &&
	    srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
		SCTP_DATA_SET_BBIT(*dc);
		/* Last fragment */
		if (SCTP_DATA_GET_EBIT(*dc)) {
			srp->needed = 1;
			goto frag_done;
		}
		srp->hasBchunk = B_TRUE;
		srp->msglen = ntohs((*dc)->sdh_len);
		srp->nexttsn++;
	}
	return (NULL);
foundit:
	/*
	 * else already have a reassembly queue. Insert the new data chunk
	 * in the reassembly queue. Try the tail first, on the assumption
	 * that the fragments are arriving in order.
	 */
	qmp = srp->tail;

	/*
	 * This means the message was partially delivered.
	 */
	if (qmp == NULL) {
		ASSERT(srp->got == 0 && srp->needed == 0 &&
		    srp->partial_delivered);
		ASSERT(hmp->b_cont == NULL);
		hmp->b_cont = dmp;
		goto empty_srp;
	}
	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
	ASSERT(qmp->b_cont == NULL);

	/* XXX Is it fine to do this just here? */
	if ((*dc)->sdh_sid != qdc->sdh_sid) {
		/* our peer is fatally confused; XXX abort the assc */
		*error = 2;
		return (NULL);
	}
	if (SEQ_GT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
		qmp->b_cont = dmp;
		srp->tail = dmp;
		dmp->b_cont = NULL;
		if (srp->hasBchunk && srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
			srp->msglen += ntohs((*dc)->sdh_len);
			srp->nexttsn++;
		}
		goto inserted;
	}

	/* Next check for insertion at the beginning */
	qmp = hmp->b_cont;
	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
	if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
		dmp->b_cont = qmp;
		hmp->b_cont = dmp;
		if (SCTP_DATA_GET_BBIT(*dc)) {
			srp->hasBchunk = B_TRUE;
			srp->nexttsn = ntohl((*dc)->sdh_tsn);
		}
		goto preinserted;
	}

	/* Insert somewhere in the middle */
	for (;;) {
		/* Tail check above should have caught this */
		ASSERT(qmp->b_cont != NULL);

		qdc = (sctp_data_hdr_t *)qmp->b_cont->b_rptr;
		if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
			/* insert here */
			dmp->b_cont = qmp->b_cont;
			qmp->b_cont = dmp;
			break;
		}
		qmp = qmp->b_cont;
	}
preinserted:
	if (!srp->hasBchunk || ntohl((*dc)->sdh_tsn) != srp->nexttsn)
		goto inserted;
	/*
	 * fraglen accumulates the length of the run of consecutive
	 * fragments starting from the newly inserted chunk.
	 */
	tsn = srp->nexttsn;
	for (qmp = dmp; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		if (tsn != ntohl(qdc->sdh_tsn))
			break;
		fraglen += ntohs(qdc->sdh_len);
		tsn++;
	}
	srp->nexttsn = tsn;
	srp->msglen += fraglen;
inserted:
	srp->got++;
	first_mp = hmp->b_cont;
	if (srp->needed == 0) {
		/* check if we have the first and last fragments */
		bdc = (sctp_data_hdr_t *)first_mp->b_rptr;
		edc = (sctp_data_hdr_t *)srp->tail->b_rptr;

		/* calculate how many fragments are needed, if possible  */
		if (SCTP_DATA_GET_BBIT(bdc) && SCTP_DATA_GET_EBIT(edc)) {
			srp->needed = ntohl(edc->sdh_tsn) -
			    ntohl(bdc->sdh_tsn) + 1;
		}
	}

	/*
	 * Try partial delivery if the message length has exceeded the
	 * partial delivery point. Only do this if we can immediately
	 * deliver the partially assembled message, and only partially
	 * deliver one message at a time (i.e. messages cannot be
	 * intermixed arriving at the upper layer). A simple way to
	 * enforce this is to only try partial delivery if this TSN is
	 * the next expected TSN. Partial delivery is not supported
	 * for un-ordered messages.
	 */
	if (srp->needed != srp->got) {
		dmp = NULL;
		if (ntohl((*dc)->sdh_tsn) == sctp->sctp_ftsn &&
		    srp->msglen >= sctp->sctp_pd_point) {
			dmp = sctp_try_partial_delivery(sctp, hmp, srp, dc);
			*tpfinished = B_FALSE;
		}
		return (dmp);
	}
frag_done:
	/*
	 * else reassembly done; prepare the data for delivery.
	 * First unlink hmp from the ssn list.
	 */
	if (sip->istr_reass == hmp) {
		sip->istr_reass = hmp->b_next;
		if (hmp->b_next)
			hmp->b_next->b_prev = NULL;
	} else {
		ASSERT(hmp->b_prev != NULL);
		hmp->b_prev->b_next = hmp->b_next;
		if (hmp->b_next)
			hmp->b_next->b_prev = hmp->b_prev;
	}

	/*
	 * Using b_prev and b_next was a little sinful, but OK since
	 * this mblk is never put*'d. However, freeb() will still
	 * ASSERT that they are unused, so we need to NULL them out now.
	 */
	hmp->b_next = NULL;
	hmp->b_prev = NULL;
	dmp = hmp;
	dmp = dmp->b_cont;
	hmp->b_cont = NULL;
	freeb(hmp);
	*tpfinished = B_TRUE;

	/*
	 * Adjust all mblks except the lead so their rptrs point to the
	 * payload. sctp_data_chunk() will need to process the lead's
	 * data chunk section, so leave its rptr pointing at the data chunk.
	 */
	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
		qmp->b_rptr = (uchar_t *)(qdc + 1);
	}
	BUMP_LOCAL(sctp->sctp_reassmsgs);

	return (dmp);
}

static void
sctp_add_dup(uint32_t tsn, mblk_t **dups)
{
	mblk_t *mp;
	size_t bsize = SCTP_DUP_MBLK_SZ * sizeof (tsn);

	if (dups == NULL) {
		return;
	}

	/* first time? */
	if (*dups == NULL) {
		*dups = allocb(bsize, BPRI_MED);
		if (*dups == NULL) {
			return;
		}
	}

	mp = *dups;
	if ((mp->b_wptr - mp->b_rptr) >= bsize) {
		/* maximum reached */
		return;
	}

	/* add the duplicate tsn */
	bcopy(&tsn, mp->b_wptr, sizeof (tsn));
	mp->b_wptr += sizeof (tsn);
	ASSERT((mp->b_wptr - mp->b_rptr) <= bsize);
}
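
/*
 * Note (usage sketch): the TSNs stashed here are in network byte order,
 * straight from the duplicate DATA chunk headers.  sctp_make_sack()
 * later chains this mblk onto the SACK and derives ssc_numdups from
 * MBLKL(dups) / sizeof (uint32_t), so no further conversion is needed.
 */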

static void
sctp_data_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *mp, mblk_t **dups,
    sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
{
	sctp_data_hdr_t *dc;
	mblk_t *dmp, *pmp;
	sctp_instr_t *instr;
	int ubit;
	int isfrag;
	uint16_t ssn;
	uint32_t oftsn;
	boolean_t can_deliver = B_TRUE;
	uint32_t tsn;
	int dlen;
	boolean_t tpfinished = B_TRUE;
	int32_t new_rwnd;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	int	error;

	/* The following are used multiple times, so we inline them */
#define	SCTP_ACK_IT(sctp, tsn)						\
	if (tsn == sctp->sctp_ftsn) {					\
		dprint(2, ("data_chunk: acking next %x\n", tsn));	\
		(sctp)->sctp_ftsn++;					\
		if ((sctp)->sctp_sack_gaps > 0)				\
			(sctp)->sctp_force_sack = 1;			\
	} else if (SEQ_GT(tsn, sctp->sctp_ftsn)) {			\
		/* Got a gap; record it */				\
		BUMP_LOCAL(sctp->sctp_outseqtsns);			\
		dprint(2, ("data_chunk: acking gap %x\n", tsn));	\
		sctp_ack_add(&sctp->sctp_sack_info, tsn,		\
		    &sctp->sctp_sack_gaps);				\
		sctp->sctp_force_sack = 1;				\
	}

	dmp = NULL;

	dc = (sctp_data_hdr_t *)ch;
	tsn = ntohl(dc->sdh_tsn);

	dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn));

	/* Check for duplicates */
	if (SEQ_LT(tsn, sctp->sctp_ftsn)) {
		dprint(4, ("sctp_data_chunk: dropping duplicate\n"));
		BUMP_LOCAL(sctp->sctp_idupchunks);
		sctp->sctp_force_sack = 1;
		sctp_add_dup(dc->sdh_tsn, dups);
		return;
	}

	if (sctp->sctp_sack_info != NULL) {
		sctp_set_t *sp;

		for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
			if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) {
				dprint(4,
				    ("sctp_data_chunk: dropping dup > "
				    "cumtsn\n"));
				BUMP_LOCAL(sctp->sctp_idupchunks);
				sctp->sctp_force_sack = 1;
				sctp_add_dup(dc->sdh_tsn, dups);
				return;
			}
		}
	}

	/* We cannot deliver anything up now but we still need to handle it. */
	if (SCTP_IS_DETACHED(sctp)) {
		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
		can_deliver = B_FALSE;
	}

	dlen = ntohs(dc->sdh_len) - sizeof (*dc);

	/*
	 * Check for buffer space. Note if this is the next expected TSN
	 * we have to take it to avoid deadlock because we cannot deliver
	 * later queued TSNs and thus clear buffer space without it.
	 * We drop anything that is purely zero window probe data here.
	 */
	if ((sctp->sctp_rwnd - sctp->sctp_rxqueued < dlen) &&
	    (tsn != sctp->sctp_ftsn || sctp->sctp_rwnd == 0)) {
		/* Drop and SACK, but don't advance the cumulative TSN. */
		sctp->sctp_force_sack = 1;
		dprint(0, ("sctp_data_chunk: exceed rwnd %d rxqueued %d "
		    "dlen %d ssn %d tsn %x\n", sctp->sctp_rwnd,
		    sctp->sctp_rxqueued, dlen, ntohs(dc->sdh_ssn),
		    ntohl(dc->sdh_tsn)));
		return;
	}

	if (ntohs(dc->sdh_sid) >= sctp->sctp_num_istr) {
		sctp_bsc_t	inval_parm;

		/* Will populate the CAUSE block in the ERROR chunk. */
		inval_parm.bsc_sid = dc->sdh_sid;
		/* RESERVED, ignored at the receiving end */
		inval_parm.bsc_pad = 0;

		/* ack and drop it */
		sctp_add_err(sctp, SCTP_ERR_BAD_SID, (void *)&inval_parm,
		    sizeof (sctp_bsc_t), fp);
		SCTP_ACK_IT(sctp, tsn);
		return;
	}

	ubit = SCTP_DATA_GET_UBIT(dc);
	ASSERT(sctp->sctp_instr != NULL);
	instr = &sctp->sctp_instr[ntohs(dc->sdh_sid)];
	/* Initialize the stream, if not yet used */
	if (instr->sctp == NULL)
		instr->sctp = sctp;

	isfrag = !(SCTP_DATA_GET_BBIT(dc) && SCTP_DATA_GET_EBIT(dc));
	ssn = ntohs(dc->sdh_ssn);

	dmp = dupb(mp);
	if (dmp == NULL) {
		/* drop it and don't ack it, causing the peer to retransmit */
		return;
	}
	dmp->b_wptr = (uchar_t *)ch + ntohs(ch->sch_len);

	sctp->sctp_rxqueued += dlen;

	oftsn = sctp->sctp_ftsn;

	if (isfrag) {

		error = 0;
		/* fragmented data chunk */
		dmp->b_rptr = (uchar_t *)dc;
		if (ubit) {
			dmp = sctp_uodata_frag(sctp, dmp, &dc);
#if	DEBUG
			if (dmp != NULL) {
				ASSERT(instr ==
				    &sctp->sctp_instr[ntohs(dc->sdh_sid)]);
			}
#endif
		} else {
			dmp = sctp_data_frag(sctp, dmp, &dc, &error, instr,
			    &tpfinished);
		}
		if (error != 0) {
			sctp->sctp_rxqueued -= dlen;
			if (error == 1) {
				/*
				 * out of memory; don't ack it so
				 * the peer retransmits
				 */
				return;
			} else if (error == 2) {
				/*
				 * fatal error (i.e. peer used different
				 * ssn's for same fragmented data) --
				 * the association has been aborted.
				 * XXX need to return errval so state
				 * machine can also abort processing.
				 */
				dprint(0, ("error 2: must not happen!\n"));
				return;
			}
		}

		if (dmp == NULL) {
			/*
			 * Can't process this data now, but the cumulative
			 * TSN may be advanced, so do the checks at done.
			 */
			SCTP_ACK_IT(sctp, tsn);
			goto done;
		}
	}

	/*
	 * Insert complete messages in correct order for ordered delivery.
	 * tpfinished is true when the incoming chunk contains a complete
	 * message or is the final missing fragment which completed a message.
	 */
	if (!ubit && tpfinished && ssn != instr->nextseq) {
		/* Adjust rptr to point at the data chunk for compares */
		dmp->b_rptr = (uchar_t *)dc;

		dprint(2,
		    ("data_chunk: inserted %x in pq (ssn %d expected %d)\n",
		    ntohl(dc->sdh_tsn), (int)(ssn), (int)(instr->nextseq)));

		if (instr->istr_msgs == NULL) {
			instr->istr_msgs = dmp;
			ASSERT(dmp->b_prev == NULL && dmp->b_next == NULL);
		} else {
			mblk_t			*imblk = instr->istr_msgs;
			sctp_data_hdr_t		*idc;

			/*
			 * XXX Need to take sequence wraps into account,
			 * ... and a more efficient insertion algo.
			 */
			for (;;) {
				idc = (sctp_data_hdr_t *)imblk->b_rptr;
				if (SSN_GT(ntohs(idc->sdh_ssn),
				    ntohs(dc->sdh_ssn))) {
					if (instr->istr_msgs == imblk) {
						instr->istr_msgs = dmp;
						dmp->b_next = imblk;
						imblk->b_prev = dmp;
					} else {
						ASSERT(imblk->b_prev != NULL);
						imblk->b_prev->b_next = dmp;
						dmp->b_prev = imblk->b_prev;
						imblk->b_prev = dmp;
						dmp->b_next = imblk;
					}
					break;
				}
				if (imblk->b_next == NULL) {
					imblk->b_next = dmp;
					dmp->b_prev = imblk;
					break;
				}
				imblk = imblk->b_next;
			}
		}
		(instr->istr_nmsgs)++;
		(sctp->sctp_istr_nmsgs)++;
		SCTP_ACK_IT(sctp, tsn);
		return;
	}

	/*
	 * Else we can deliver the data directly. Recalculate
	 * dlen now since we may have reassembled data.
	 */
	dlen = dmp->b_wptr - (uchar_t *)dc - sizeof (*dc);
	for (pmp = dmp->b_cont; pmp != NULL; pmp = pmp->b_cont)
		dlen += MBLKL(pmp);
	ASSERT(sctp->sctp_rxqueued >= dlen);

	/* Deliver the message. */
	sctp->sctp_rxqueued -= dlen;

	if (can_deliver) {

		dmp->b_rptr = (uchar_t *)(dc + 1);
		if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
		    ipp, ira) == 0) {
			dprint(1, ("sctp_data_chunk: delivering %lu bytes\n",
			    msgdsize(dmp)));
			sctp->sctp_rwnd -= dlen;
			/*
			 * Override b_flag for SCTP sockfs internal use
			 */
			dmp->b_flag = tpfinished ? 0 : SCTP_PARTIAL_DATA;
			new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, dmp,
			    msgdsize(dmp), 0, &error, NULL);
			/*
			 * Since we always deliver the next TSN data chunk,
			 * we may buffer a little more than allowed. In
			 * that case, just mark the window as 0.
			 */
			if (new_rwnd < 0)
				sctp->sctp_rwnd = 0;
			else if (new_rwnd > sctp->sctp_rwnd)
				sctp->sctp_rwnd = new_rwnd;
			SCTP_ACK_IT(sctp, tsn);
		} else {
			/* Just free the message if we don't have memory. */
			freemsg(dmp);
			return;
		}
	} else {
		/* About to free the data */
		freemsg(dmp);
		SCTP_ACK_IT(sctp, tsn);
	}

	/*
	 * data, now enqueued, may already have been processed and free'd
	 * by the ULP (or we may have just freed it above, if we could not
	 * deliver it), so we must not reference it (this is why we kept
	 * the ssn and ubit above).
	 */
	if (ubit != 0) {
		BUMP_LOCAL(sctp->sctp_iudchunks);
		goto done;
	}
	BUMP_LOCAL(sctp->sctp_idchunks);

	/*
	 * If there was a partial delivery and it has not finished,
	 * don't pull anything from the pqueues.
	 */
	if (!tpfinished) {
		goto done;
	}

	instr->nextseq = ssn + 1;
	/* Deliver any successive data chunks in the instr queue */
	while (instr->istr_nmsgs > 0) {
		dmp = (mblk_t *)instr->istr_msgs;
		dc = (sctp_data_hdr_t *)dmp->b_rptr;
		ssn = ntohs(dc->sdh_ssn);
		/* Gap in the sequence */
		if (ssn != instr->nextseq)
			break;

		/* Else deliver the data */
		(instr->istr_nmsgs)--;
		(instr->nextseq)++;
		(sctp->sctp_istr_nmsgs)--;

		instr->istr_msgs = instr->istr_msgs->b_next;
		if (instr->istr_msgs != NULL)
			instr->istr_msgs->b_prev = NULL;
		dmp->b_next = dmp->b_prev = NULL;

		dprint(2, ("data_chunk: pulling %x from pq (ssn %d)\n",
		    ntohl(dc->sdh_tsn), (int)ssn));

		/*
		 * If this chunk was reassembled, each b_cont represents
		 * another TSN; advance ftsn now.
		 */
		dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
		for (pmp = dmp->b_cont; pmp; pmp = pmp->b_cont)
			dlen += MBLKL(pmp);

		ASSERT(sctp->sctp_rxqueued >= dlen);

		sctp->sctp_rxqueued -= dlen;
		if (can_deliver) {
			dmp->b_rptr = (uchar_t *)(dc + 1);
			if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
			    ipp, ira) == 0) {
				dprint(1, ("sctp_data_chunk: delivering %lu "
				    "bytes\n", msgdsize(dmp)));
				sctp->sctp_rwnd -= dlen;
				/*
				 * Override b_flag for SCTP sockfs internal use
				 */
				dmp->b_flag = tpfinished ?
				    0 : SCTP_PARTIAL_DATA;
				new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd,
				    dmp, msgdsize(dmp), 0, &error, NULL);
				if (new_rwnd < 0)
					sctp->sctp_rwnd = 0;
				else if (new_rwnd > sctp->sctp_rwnd)
					sctp->sctp_rwnd = new_rwnd;
				SCTP_ACK_IT(sctp, tsn);
			} else {
				freemsg(dmp);
				return;
			}
		} else {
			/* About to free the data */
			freemsg(dmp);
			SCTP_ACK_IT(sctp, tsn);
		}
	}

done:

	/*
	 * If there are gap reports pending, check if advancing
	 * the ftsn here closes a gap. If so, we can advance
	 * ftsn to the end of the set.
	 */
	if (sctp->sctp_sack_info != NULL &&
	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
	}
	/*
	 * If ftsn has moved forward, maybe we can remove gap reports.
	 * NB: dmp may now be NULL, so don't dereference it here.
	 */
	if (oftsn != sctp->sctp_ftsn && sctp->sctp_sack_info != NULL) {
		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
		    &sctp->sctp_sack_gaps);
		dprint(2, ("data_chunk: removed acks before %x (num=%d)\n",
		    sctp->sctp_ftsn - 1, sctp->sctp_sack_gaps));
	}

#ifdef	DEBUG
	if (sctp->sctp_sack_info != NULL) {
		ASSERT(sctp->sctp_ftsn != sctp->sctp_sack_info->begin);
	}
#endif

#undef	SCTP_ACK_IT
}

void
sctp_fill_sack(sctp_t *sctp, unsigned char *dst, int sacklen)
{
	sctp_chunk_hdr_t *sch;
	sctp_sack_chunk_t *sc;
	sctp_sack_frag_t *sf;
	uint16_t num_gaps = sctp->sctp_sack_gaps;
	sctp_set_t *sp;

	/* Chunk hdr */
	sch = (sctp_chunk_hdr_t *)dst;
	sch->sch_id = CHUNK_SACK;
	sch->sch_flags = 0;
	sch->sch_len = htons(sacklen);

	/* SACK chunk */
	sctp->sctp_lastacked = sctp->sctp_ftsn - 1;

	sc = (sctp_sack_chunk_t *)(sch + 1);
	sc->ssc_cumtsn = htonl(sctp->sctp_lastacked);
	if (sctp->sctp_rxqueued < sctp->sctp_rwnd) {
		sc->ssc_a_rwnd = htonl(sctp->sctp_rwnd - sctp->sctp_rxqueued);
	} else {
		sc->ssc_a_rwnd = 0;
	}
	sc->ssc_numfrags = htons(num_gaps);
	sc->ssc_numdups = 0;

	/* lay in gap reports */
	sf = (sctp_sack_frag_t *)(sc + 1);
	for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
		uint16_t offset;

		/* start */
		if (sp->begin > sctp->sctp_lastacked) {
			offset = (uint16_t)(sp->begin - sctp->sctp_lastacked);
		} else {
			/* sequence number wrap */
			offset = (uint16_t)(UINT32_MAX - sctp->sctp_lastacked +
			    sp->begin);
		}
		sf->ssf_start = htons(offset);

		/* end */
		if (sp->end >= sp->begin) {
			offset += (uint16_t)(sp->end - sp->begin);
		} else {
			/* sequence number wrap */
			offset += (uint16_t)(UINT32_MAX - sp->begin + sp->end);
		}
		sf->ssf_end = htons(offset);

		sf++;
		/* This is just for debugging (a la the following assertion) */
		num_gaps--;
	}

	ASSERT(num_gaps == 0);

	/* If the SACK timer is running, stop it */
	if (sctp->sctp_ack_timer_running) {
		sctp_timer_stop(sctp->sctp_ack_mp);
		sctp->sctp_ack_timer_running = B_FALSE;
	}

	BUMP_LOCAL(sctp->sctp_obchunks);
	BUMP_LOCAL(sctp->sctp_osacks);
}
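
/*
 * Wire-format sketch of what the function above emits (continuing the
 * earlier gap-list example: cumulative TSN 12 acked, one set [14..14]):
 *
 *	sch_id = CHUNK_SACK
 *	ssc_cumtsn   = 12
 *	ssc_a_rwnd   = rwnd - rxqueued
 *	ssc_numfrags = 1, ssc_numdups = 0
 *	ssf_start    = 2	(14 - 12, offset from the cumulative TSN)
 *	ssf_end      = 2
 *
 * Per RFC 4960 the gap ack blocks are expressed relative to the
 * cumulative TSN ack, which is why the set bounds are converted to
 * 16-bit offsets here.
 */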

mblk_t *
sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups)
{
	mblk_t *smp;
	size_t slen;
	sctp_chunk_hdr_t *sch;
	sctp_sack_chunk_t *sc;
	int32_t acks_max;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	uint32_t	dups_len;
	sctp_faddr_t	*fp;

	ASSERT(sendto != NULL);

	if (sctp->sctp_force_sack) {
		sctp->sctp_force_sack = 0;
		goto checks_done;
	}

	acks_max = sctps->sctps_deferred_acks_max;
	if (sctp->sctp_state == SCTPS_ESTABLISHED) {
		if (sctp->sctp_sack_toggle < acks_max) {
			/* no need to SACK right now */
			dprint(2, ("sctp_make_sack: %p no sack (toggle)\n",
			    (void *)sctp));
			return (NULL);
		} else if (sctp->sctp_sack_toggle >= acks_max) {
			sctp->sctp_sack_toggle = 0;
		}
	}

	if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
		dprint(2, ("sctp_make_sack: %p no sack (already)\n",
		    (void *)sctp));
		return (NULL);
	}

checks_done:
	dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1));

	if (dups != NULL)
		dups_len = MBLKL(dups);
	else
		dups_len = 0;
	slen = sizeof (*sch) + sizeof (*sc) +
	    (sizeof (sctp_sack_frag_t) * sctp->sctp_sack_gaps);

	/*
	 * If there are error chunks, check and see if we can send the
	 * SACK chunk and error chunks together in one packet.  If not,
	 * send the error chunks out now.
	 */
	if (sctp->sctp_err_chunks != NULL) {
		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
		if (sctp->sctp_err_len + slen + dups_len > fp->sfa_pmss) {
			if ((smp = sctp_make_mp(sctp, fp, 0)) == NULL) {
				SCTP_KSTAT(sctps, sctp_send_err_failed);
				SCTP_KSTAT(sctps, sctp_send_sack_failed);
				freemsg(sctp->sctp_err_chunks);
				sctp->sctp_err_chunks = NULL;
				sctp->sctp_err_len = 0;
				return (NULL);
			}
			smp->b_cont = sctp->sctp_err_chunks;
			sctp_set_iplen(sctp, smp, fp->ixa);
			(void) conn_ip_output(smp, fp->ixa);
			BUMP_LOCAL(sctp->sctp_opkts);
			sctp->sctp_err_chunks = NULL;
			sctp->sctp_err_len = 0;
		}
	}
	smp = sctp_make_mp(sctp, sendto, slen);
	if (smp == NULL) {
		SCTP_KSTAT(sctps, sctp_send_sack_failed);
		return (NULL);
	}
	sch = (sctp_chunk_hdr_t *)smp->b_wptr;

	sctp_fill_sack(sctp, smp->b_wptr, slen);
	smp->b_wptr += slen;
	if (dups != NULL) {
		sc = (sctp_sack_chunk_t *)(sch + 1);
		sc->ssc_numdups = htons(MBLKL(dups) / sizeof (uint32_t));
		sch->sch_len = htons(slen + dups_len);
		smp->b_cont = dups;
	}

	if (sctp->sctp_err_chunks != NULL) {
		linkb(smp, sctp->sctp_err_chunks);
		sctp->sctp_err_chunks = NULL;
		sctp->sctp_err_len = 0;
	}
	return (smp);
}

/*
 * Check and see if we need to send a SACK chunk.  If it is needed,
 * send it out.  Return true if a SACK chunk is sent, false otherwise.
 */
boolean_t
sctp_sack(sctp_t *sctp, mblk_t *dups)
{
	mblk_t *smp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;

	/* If we are shutting down, let send_shutdown() bundle the SACK */
	if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
		sctp_send_shutdown(sctp, 0);
	}

	ASSERT(sctp->sctp_lastdata != NULL);

	if ((smp = sctp_make_sack(sctp, sctp->sctp_lastdata, dups)) == NULL) {
		/* The caller of sctp_sack() will not free the dups mblk. */
		if (dups != NULL)
			freeb(dups);
		return (B_FALSE);
	}
	dprint(2, ("sctp_sack: sending to %p %x:%x:%x:%x\n",
	    (void *)sctp->sctp_lastdata,
	    SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));

	sctp->sctp_active = ddi_get_lbolt64();

	BUMP_MIB(&sctps->sctps_mib, sctpOutAck);

	sctp_set_iplen(sctp, smp, sctp->sctp_lastdata->ixa);
	(void) conn_ip_output(smp, sctp->sctp_lastdata->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	return (B_TRUE);
}

/*
 * This is called if we have a message that was partially sent and is
 * abandoned. The cum TSN will be the TSN of the last chunk sent for
 * this message; subsequent chunks will be marked ABANDONED. We send a
 * Forward TSN chunk in this case with the TSN of the last sent chunk so
 * that the peer can clean up its fragment list for this message. This
 * message will be removed from the transmit list when the peer sends a
 * SACK back.
 */
1799 int
1800 sctp_check_abandoned_msg(sctp_t *sctp, mblk_t *meta)
1801 {
1802 	sctp_data_hdr_t	*dh;
1803 	mblk_t		*nmp;
1804 	mblk_t		*head;
1805 	int32_t		unsent = 0;
1806 	mblk_t		*mp1 = meta->b_cont;
1807 	uint32_t	adv_pap = sctp->sctp_adv_pap;
1808 	sctp_faddr_t	*fp = sctp->sctp_current;
1809 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1810 
1811 	dh = (sctp_data_hdr_t *)mp1->b_rptr;
1812 	if (SEQ_GEQ(sctp->sctp_lastack_rxd, ntohl(dh->sdh_tsn))) {
1813 		sctp_ftsn_set_t	*sets = NULL;
1814 		uint_t		nsets = 0;
1815 		uint32_t	seglen = sizeof (uint32_t);
1816 		boolean_t	ubit = SCTP_DATA_GET_UBIT(dh);
1817 
1818 		while (mp1->b_next != NULL && SCTP_CHUNK_ISSENT(mp1->b_next))
1819 			mp1 = mp1->b_next;
1820 		dh = (sctp_data_hdr_t *)mp1->b_rptr;
1821 		sctp->sctp_adv_pap = ntohl(dh->sdh_tsn);
1822 		if (!ubit &&
1823 		    !sctp_add_ftsn_set(&sets, fp, meta, &nsets, &seglen)) {
1824 			sctp->sctp_adv_pap = adv_pap;
1825 			return (ENOMEM);
1826 		}
1827 		nmp = sctp_make_ftsn_chunk(sctp, fp, sets, nsets, seglen);
1828 		sctp_free_ftsn_set(sets);
1829 		if (nmp == NULL) {
1830 			sctp->sctp_adv_pap = adv_pap;
1831 			return (ENOMEM);
1832 		}
1833 		head = sctp_add_proto_hdr(sctp, fp, nmp, 0, NULL);
1834 		if (head == NULL) {
1835 			sctp->sctp_adv_pap = adv_pap;
1836 			freemsg(nmp);
1837 			SCTP_KSTAT(sctps, sctp_send_ftsn_failed);
1838 			return (ENOMEM);
1839 		}
1840 		SCTP_MSG_SET_ABANDONED(meta);
1841 		sctp_set_iplen(sctp, head, fp->ixa);
1842 		(void) conn_ip_output(head, fp->ixa);
1843 		BUMP_LOCAL(sctp->sctp_opkts);
1844 		if (!fp->timer_running)
1845 			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
1846 		mp1 = mp1->b_next;
1847 		while (mp1 != NULL) {
1848 			ASSERT(!SCTP_CHUNK_ISSENT(mp1));
1849 			ASSERT(!SCTP_CHUNK_ABANDONED(mp1));
1850 			SCTP_ABANDON_CHUNK(mp1);
1851 			dh = (sctp_data_hdr_t *)mp1->b_rptr;
1852 			unsent += ntohs(dh->sdh_len) - sizeof (*dh);
1853 			mp1 = mp1->b_next;
1854 		}
1855 		ASSERT(sctp->sctp_unsent >= unsent);
1856 		sctp->sctp_unsent -= unsent;
1857 		/*
1858 		 * Update ULP the amount of queued data, which is
1859 		 * sent-unack'ed + unsent.
1860 		 */
1861 		if (!SCTP_IS_DETACHED(sctp))
1862 			SCTP_TXQ_UPDATE(sctp);
1863 		return (0);
1864 	}
1865 	return (-1);
1866 }
1867 
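/*
 * Process the cumulative TSN from an incoming SACK: walk the transmit
 * list, mark every newly covered chunk acked, free messages that are now
 * completely acked, and set *first_unacked to the first chunk still
 * awaiting acknowledgement.  Returns the number of bytes (chunk lengths)
 * newly covered by the cumulative TSN; 0 for a duplicate ack.
 */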
1868 uint32_t
1869 sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked)
1870 {
1871 	mblk_t *ump, *nump, *mp = NULL;
1872 	uint16_t chunklen;
1873 	uint32_t xtsn;
1874 	sctp_faddr_t *fp;
1875 	sctp_data_hdr_t *sdc;
1876 	uint32_t cumack_forward = 0;
1877 	sctp_msg_hdr_t	*mhdr;
1878 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1879 
1880 	ump = sctp->sctp_xmit_head;
1881 
1882 	/*
1883 	 * Free messages only when they're completely acked.
1884 	 */
1885 	while (ump != NULL) {
1886 		mhdr = (sctp_msg_hdr_t *)ump->b_rptr;
1887 		for (mp = ump->b_cont; mp != NULL; mp = mp->b_next) {
1888 			if (SCTP_CHUNK_ABANDONED(mp)) {
1889 				ASSERT(SCTP_IS_MSG_ABANDONED(ump));
1890 				mp = NULL;
1891 				break;
1892 			}
1893 			/*
1894 			 * We check for an abandoned message if we are
1895 			 * PR-SCTP aware, this is not the first chunk in the
1896 			 * message (b_cont), and the message is marked
1897 			 * abandoned or is to be abandoned.
1898 			 */
1899 			if (!SCTP_CHUNK_ISSENT(mp)) {
1900 				if (sctp->sctp_prsctp_aware &&
1901 				    mp != ump->b_cont &&
1902 				    (SCTP_IS_MSG_ABANDONED(ump) ||
1903 				    SCTP_MSG_TO_BE_ABANDONED(ump, mhdr,
1904 				    sctp))) {
1905 					(void) sctp_check_abandoned_msg(sctp,
1906 					    ump);
1907 				}
1908 				goto cum_ack_done;
1909 			}
1910 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
1911 			xtsn = ntohl(sdc->sdh_tsn);
1912 			if (SEQ_GEQ(sctp->sctp_lastack_rxd, xtsn))
1913 				continue;
1914 			if (SEQ_GEQ(tsn, xtsn)) {
1915 				fp = SCTP_CHUNK_DEST(mp);
1916 				chunklen = ntohs(sdc->sdh_len);
1917 
1918 				if (sctp->sctp_out_time != 0 &&
1919 				    xtsn == sctp->sctp_rtt_tsn) {
1920 					/* Got a new RTT measurement */
1921 					sctp_update_rtt(sctp, fp,
1922 					    ddi_get_lbolt64() -
1923 					    sctp->sctp_out_time);
1924 					sctp->sctp_out_time = 0;
1925 				}
1926 				if (SCTP_CHUNK_ISACKED(mp))
1927 					continue;
1928 				SCTP_CHUNK_SET_SACKCNT(mp, 0);
1929 				SCTP_CHUNK_ACKED(mp);
1930 				ASSERT(fp->suna >= chunklen);
1931 				fp->suna -= chunklen;
1932 				fp->acked += chunklen;
1933 				cumack_forward += chunklen;
1934 				ASSERT(sctp->sctp_unacked >=
1935 				    (chunklen - sizeof (*sdc)));
1936 				sctp->sctp_unacked -=
1937 				    (chunklen - sizeof (*sdc));
1938 				if (fp->suna == 0) {
1939 					/* all outstanding data acked */
1940 					fp->pba = 0;
1941 					SCTP_FADDR_TIMER_STOP(fp);
1942 				} else {
1943 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
1944 					    fp->rto);
1945 				}
1946 			} else {
1947 				goto cum_ack_done;
1948 			}
1949 		}
1950 		nump = ump->b_next;
1951 		if (nump != NULL)
1952 			nump->b_prev = NULL;
1953 		if (ump == sctp->sctp_xmit_tail)
1954 			sctp->sctp_xmit_tail = nump;
1955 		if (SCTP_IS_MSG_ABANDONED(ump)) {
1956 			BUMP_LOCAL(sctp->sctp_prsctpdrop);
1957 			ump->b_next = NULL;
1958 			sctp_sendfail_event(sctp, ump, 0, B_TRUE);
1959 		} else {
1960 			sctp_free_msg(ump);
1961 		}
1962 		sctp->sctp_xmit_head = ump = nump;
1963 	}
1964 cum_ack_done:
1965 	*first_unacked = mp;
1966 	if (cumack_forward > 0) {
1967 		BUMP_MIB(&sctps->sctps_mib, sctpInAck);
1968 		if (SEQ_GT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) {
1969 			sctp->sctp_recovery_tsn = sctp->sctp_lastack_rxd;
1970 		}
1971 
1972 		/*
1973 		 * Update the ULP with the amount of queued data, which is
1974 		 * sent-unack'ed + unsent.
1975 		 */
1976 		if (!SCTP_IS_DETACHED(sctp))
1977 			SCTP_TXQ_UPDATE(sctp);
1978 
1979 		/* Time to send a shutdown? */
1980 		if (sctp->sctp_state == SCTPS_SHUTDOWN_PENDING) {
1981 			sctp_send_shutdown(sctp, 0);
1982 		}
1983 		sctp->sctp_xmit_unacked = mp;
1984 	} else {
1985 		/* dup ack */
1986 		BUMP_MIB(&sctps->sctps_mib, sctpInDupAck);
1987 	}
1988 	sctp->sctp_lastack_rxd = tsn;
1989 	if (SEQ_LT(sctp->sctp_adv_pap, sctp->sctp_lastack_rxd))
1990 		sctp->sctp_adv_pap = sctp->sctp_lastack_rxd;
1991 	ASSERT(sctp->sctp_xmit_head || sctp->sctp_unacked == 0);
1992 
1993 	return (cumack_forward);
1994 }
1995 
1996 static int
1997 sctp_set_frwnd(sctp_t *sctp, uint32_t frwnd)
1998 {
1999 	uint32_t orwnd;
2000 
2001 	if (sctp->sctp_unacked > frwnd) {
2002 		sctp->sctp_frwnd = 0;
2003 		return (0);
2004 	}
2005 	orwnd = sctp->sctp_frwnd;
2006 	sctp->sctp_frwnd = frwnd - sctp->sctp_unacked;
2007 	if (orwnd < sctp->sctp_frwnd) {
2008 		return (1);
2009 	} else {
2010 		return (0);
2011 	}
2012 }
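
/*
 * A worked example with hypothetical numbers: if the peer advertises an
 * a_rwnd of 64000 bytes while 24000 bytes are outstanding, sctp_frwnd
 * becomes 40000; the function returns 1 only if that exceeds the previous
 * sctp_frwnd, i.e. only if the usable peer window actually opened.
 */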
2013 
2014 /*
2015  * For un-ordered messages.
2016  * Walk the sctp->sctp_uo_frag list and remove any fragments with TSN
2017  * less than/equal to ftsn. Fragments for un-ordered messages are
2018  * strictly in sequence (w.r.t TSN).
2019  */
2020 static int
2021 sctp_ftsn_check_uo_frag(sctp_t *sctp, uint32_t ftsn)
2022 {
2023 	mblk_t		*hmp;
2024 	mblk_t		*hmp_next;
2025 	sctp_data_hdr_t	*dc;
2026 	int		dlen = 0;
2027 
2028 	hmp = sctp->sctp_uo_frags;
2029 	while (hmp != NULL) {
2030 		hmp_next = hmp->b_next;
2031 		dc = (sctp_data_hdr_t *)hmp->b_rptr;
2032 		if (SEQ_GT(ntohl(dc->sdh_tsn), ftsn))
2033 			return (dlen);
2034 		sctp->sctp_uo_frags = hmp_next;
2035 		if (hmp_next != NULL)
2036 			hmp_next->b_prev = NULL;
2037 		hmp->b_next = NULL;
2038 		dlen += ntohs(dc->sdh_len) - sizeof (*dc);
2039 		freeb(hmp);
2040 		hmp = hmp_next;
2041 	}
2042 	return (dlen);
2043 }
2044 
2045 /*
2046  * For ordered messages.
2047  * Check for existing fragments for an sid-ssn pair reported as abandoned,
2048  * hence will not receive, in the Forward TSN. If there are fragments, then
2049  * we just nuke them. If and when Partial Delivery API is supported, we
2050  * would need to send a notification to the upper layer about this.
2051  */
2052 static int
2053 sctp_ftsn_check_frag(sctp_t *sctp, uint16_t ssn, sctp_instr_t *sip)
2054 {
2055 	sctp_reass_t	*srp;
2056 	mblk_t		*hmp;
2057 	mblk_t		*dmp;
2058 	mblk_t		*hmp_next;
2059 	sctp_data_hdr_t	*dc;
2060 	int		dlen = 0;
2061 
2062 	hmp = sip->istr_reass;
2063 	while (hmp != NULL) {
2064 		hmp_next = hmp->b_next;
2065 		srp = (sctp_reass_t *)DB_BASE(hmp);
2066 		if (SSN_GT(srp->ssn, ssn))
2067 			return (dlen);
2068 		/*
2069 		 * If we had sent part of this message up, send a partial
2070 		 * delivery event. Since this is ordered delivery, we should
2071 		 * have sent a partial message only for the next in sequence,
2072 		 * hence the ASSERT. See comments in sctp_data_chunk() for
2073 		 * trypartial.
2074 		 */
2075 		if (srp->partial_delivered) {
2076 			ASSERT(sip->nextseq == srp->ssn);
2077 			sctp_partial_delivery_event(sctp);
2078 		}
2079 		/* Take it out of the reass queue */
2080 		sip->istr_reass = hmp_next;
2081 		if (hmp_next != NULL)
2082 			hmp_next->b_prev = NULL;
2083 		hmp->b_next = NULL;
2084 		ASSERT(hmp->b_prev == NULL);
2086 		ASSERT(DB_TYPE(hmp) == M_CTL);
2087 		dmp = hmp->b_cont;
2088 		hmp->b_cont = NULL;
2089 		freeb(hmp);
2090 		hmp = dmp;
2091 		while (dmp != NULL) {
2092 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
2093 			dlen += ntohs(dc->sdh_len) - sizeof (*dc);
2094 			dmp = dmp->b_cont;
2095 		}
2096 		freemsg(hmp);
2097 		hmp = hmp_next;
2098 	}
2099 	return (dlen);
2100 }
2101 
2102 /*
2103  * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove
2104  * any SACK gaps less than the newly updated sctp_ftsn. Walk through the
2105  * sid-ssn pair in the Forward TSN and for each, clean the fragment list
2106  * for this pair, if needed, and check if we can deliver subsequent
2107  * messages, if any, from the instream queue (that were waiting for this
2108  * sid-ssn message to show up). Once we are done try to update the SACK
2109  * info. We could get a duplicate Forward TSN, in which case just send
2110  * a SACK. If any of the sid values in the Forward TSN is invalid,
2111  * send back an "Invalid Stream Identifier" error and continue processing
2112  * the rest.
2113  */
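/*
 * A sketch of the Forward TSN chunk body parsed below (layout per RFC
 * 3758): a 32-bit new cumulative TSN followed by zero or more SID-SSN
 * pairs, one per affected stream, each matched here by ftsn_entry_t:
 *
 *	uint32_t	new cumulative TSN	(*ftsn)
 *	uint16_t	stream identifier	(ftsn_sid)
 *	uint16_t	stream sequence number	(ftsn_ssn)
 *	...		further SID-SSN pairs, while 'remaining' allows
 */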
2114 static void
2115 sctp_process_forward_tsn(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp,
2116     ip_pkt_t *ipp, ip_recv_attr_t *ira)
2117 {
2118 	uint32_t	*ftsn = (uint32_t *)(ch + 1);
2119 	ftsn_entry_t	*ftsn_entry;
2120 	sctp_instr_t	*instr;
2121 	boolean_t	can_deliver = B_TRUE;
2122 	size_t		dlen;
2123 	int		flen;
2124 	mblk_t		*dmp;
2125 	mblk_t		*pmp;
2126 	sctp_data_hdr_t	*dc;
2127 	ssize_t		remaining;
2128 	sctp_stack_t	*sctps = sctp->sctp_sctps;
2129 
2130 	*ftsn = ntohl(*ftsn);
2131 	remaining = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*ftsn);
2132 
2133 	if (SCTP_IS_DETACHED(sctp)) {
2134 		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
2135 		can_deliver = B_FALSE;
2136 	}
2137 	/*
2138 	 * Un-ordered messages don't have SID-SSN pair entries; we check
2139 	 * for any fragments (of un-ordered messages) to be discarded using
2140 	 * the cumulative FTSN.
2141 	 */
2142 	flen = sctp_ftsn_check_uo_frag(sctp, *ftsn);
2143 	if (flen > 0) {
2144 		ASSERT(sctp->sctp_rxqueued >= flen);
2145 		sctp->sctp_rxqueued -= flen;
2146 	}
2147 	ftsn_entry = (ftsn_entry_t *)(ftsn + 1);
2148 	while (remaining >= sizeof (*ftsn_entry)) {
2149 		ftsn_entry->ftsn_sid = ntohs(ftsn_entry->ftsn_sid);
2150 		ftsn_entry->ftsn_ssn = ntohs(ftsn_entry->ftsn_ssn);
2151 		if (ftsn_entry->ftsn_sid >= sctp->sctp_num_istr) {
2152 			sctp_bsc_t	inval_parm;
2153 
2154 			/* Will populate the CAUSE block in the ERROR chunk. */
2155 			inval_parm.bsc_sid = htons(ftsn_entry->ftsn_sid);
2156 			/* RESERVED, ignored at the receiving end */
2157 			inval_parm.bsc_pad = 0;
2158 
2159 			sctp_add_err(sctp, SCTP_ERR_BAD_SID,
2160 			    (void *)&inval_parm, sizeof (sctp_bsc_t), fp);
2161 			ftsn_entry++;
2162 			remaining -= sizeof (*ftsn_entry);
2163 			continue;
2164 		}
2165 		instr = &sctp->sctp_instr[ftsn_entry->ftsn_sid];
2166 		flen = sctp_ftsn_check_frag(sctp, ftsn_entry->ftsn_ssn, instr);
2167 		/* Indicates frags were nuked, update rxqueued */
2168 		if (flen > 0) {
2169 			ASSERT(sctp->sctp_rxqueued >= flen);
2170 			sctp->sctp_rxqueued -= flen;
2171 		}
2172 		/*
2173 		 * It is possible to receive an FTSN chunk with SSN smaller
2174 		 * than the nextseq if this chunk is a retransmission because
2175 		 * of incomplete processing when it was first processed.
2176 		 */
2177 		if (SSN_GE(ftsn_entry->ftsn_ssn, instr->nextseq))
2178 			instr->nextseq = ftsn_entry->ftsn_ssn + 1;
2179 		while (instr->istr_nmsgs > 0) {
2180 			mblk_t	*next;
2181 
2182 			dmp = (mblk_t *)instr->istr_msgs;
2183 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
2184 			if (ntohs(dc->sdh_ssn) != instr->nextseq)
2185 				break;
2186 
2187 			next = dmp->b_next;
2188 			dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
2189 			for (pmp = dmp->b_cont; pmp != NULL;
2190 			    pmp = pmp->b_cont) {
2191 				dlen += MBLKL(pmp);
2192 			}
2193 			if (can_deliver) {
2194 				int32_t	nrwnd;
2195 				int error;
2196 
2197 				dmp->b_rptr = (uchar_t *)(dc + 1);
2198 				dmp->b_next = NULL;
2199 				ASSERT(dmp->b_prev == NULL);
2200 				if (sctp_input_add_ancillary(sctp,
2201 				    &dmp, dc, fp, ipp, ira) == 0) {
2202 					sctp->sctp_rxqueued -= dlen;
2203 					sctp->sctp_rwnd -= dlen;
2204 					/*
2205 					 * Override b_flag for SCTP sockfs
2206 					 * internal use
2207 					 */
2208 
2209 					dmp->b_flag = 0;
2210 					nrwnd = sctp->sctp_ulp_recv(
2211 					    sctp->sctp_ulpd, dmp, msgdsize(dmp),
2212 					    0, &error, NULL);
2213 					if (nrwnd < 0)
2214 						sctp->sctp_rwnd = 0;
2215 					else if (nrwnd > sctp->sctp_rwnd)
2216 						sctp->sctp_rwnd = nrwnd;
2217 				} else {
2218 					/*
2219 					 * We will resume processing when
2220 					 * the FTSN chunk is re-xmitted.
2221 					 */
2222 					dmp->b_rptr = (uchar_t *)dc;
2223 					dmp->b_next = next;
2224 					dprint(0,
2225 					    ("FTSN dequeuing %u failed\n",
2226 					    ntohs(dc->sdh_ssn)));
2227 					return;
2228 				}
2229 			} else {
2230 				sctp->sctp_rxqueued -= dlen;
2231 				ASSERT(dmp->b_prev == NULL);
2232 				dmp->b_next = NULL;
2233 				freemsg(dmp);
2234 			}
2235 			instr->istr_nmsgs--;
2236 			instr->nextseq++;
2237 			sctp->sctp_istr_nmsgs--;
2238 			if (next != NULL)
2239 				next->b_prev = NULL;
2240 			instr->istr_msgs = next;
2241 		}
2242 		ftsn_entry++;
2243 		remaining -= sizeof (*ftsn_entry);
2244 	}
2245 	/* Duplicate FTSN */
2246 	if (*ftsn <= (sctp->sctp_ftsn - 1)) {
2247 		sctp->sctp_force_sack = 1;
2248 		return;
2249 	}
2250 	/* Advance cum TSN to that reported in the Forward TSN chunk */
2251 	sctp->sctp_ftsn = *ftsn + 1;
2252 
2253 	/* Remove all the SACK gaps before the new cum TSN */
2254 	if (sctp->sctp_sack_info != NULL) {
2255 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
2256 		    &sctp->sctp_sack_gaps);
2257 	}
2258 	/*
2259 	 * If there are gap reports pending, check if advancing
2260 	 * the ftsn here closes a gap. If so, we can advance
2261 	 * ftsn to the end of the set.
2262 	 * If ftsn has moved forward, maybe we can remove gap reports.
2263 	 */
2264 	if (sctp->sctp_sack_info != NULL &&
2265 	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
2266 		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
2267 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
2268 		    &sctp->sctp_sack_gaps);
2269 	}
2270 }
2271 
2272 /*
2273  * When we have processed a SACK we check to see if we can advance the
2274  * cumulative TSN if there are abandoned chunks immediately following
2275  * the updated cumulative TSN. If there are, we attempt to send a
2276  * Forward TSN chunk.
2277  */
2278 static void
2279 sctp_check_abandoned_data(sctp_t *sctp, sctp_faddr_t *fp)
2280 {
2281 	mblk_t		*meta = sctp->sctp_xmit_head;
2282 	mblk_t		*mp;
2283 	mblk_t		*nmp;
2284 	uint32_t	seglen;
2285 	uint32_t	adv_pap = sctp->sctp_adv_pap;
2286 
2287 	/*
2288 	 * We only check in the first meta since otherwise we can't
2289 	 * advance the cumulative ack point. We just look for chunks
2290 	 * marked for retransmission, else we might prematurely
2291 	 * send an FTSN for a sent, but unacked, chunk.
2292 	 */
2293 	for (mp = meta->b_cont; mp != NULL; mp = mp->b_next) {
2294 		if (!SCTP_CHUNK_ISSENT(mp))
2295 			return;
2296 		if (SCTP_CHUNK_WANT_REXMIT(mp))
2297 			break;
2298 	}
2299 	if (mp == NULL)
2300 		return;
2301 	sctp_check_adv_ack_pt(sctp, meta, mp);
2302 	if (SEQ_GT(sctp->sctp_adv_pap, adv_pap)) {
2303 		sctp_make_ftsns(sctp, meta, mp, &nmp, fp, &seglen);
2304 		if (nmp == NULL) {
2305 			sctp->sctp_adv_pap = adv_pap;
2306 			if (!fp->timer_running)
2307 				SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
2308 			return;
2309 		}
2310 		sctp_set_iplen(sctp, nmp, fp->ixa);
2311 		(void) conn_ip_output(nmp, fp->ixa);
2312 		BUMP_LOCAL(sctp->sctp_opkts);
2313 		if (!fp->timer_running)
2314 			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
2315 	}
2316 }
2317 
2318 /*
2319  * The processing here follows the same logic as in sctp_got_sack(). The
2320  * reason we do this separately is that, usually, gap blocks are ordered
2321  * and we can process them in sctp_got_sack(). However, if they aren't, we
2322  * would need to do some additional, non-optimal work when we start
2323  * processing the unordered gaps. To that effect, sctp_got_sack() handles
2324  * the simple case and this function handles the more involved one.
2325  */
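/*
 * For instance (hypothetical offsets): with a cumulative TSN of 100, the
 * gap blocks [2,3] [5,6] are ordered and handled entirely within
 * sctp_got_sack(), whereas [5,6] [2,3] are not; on finding a block that
 * starts at or before the previous gapstart, sctp_got_sack() hands the
 * remaining blocks to this function, which restarts its walk from the
 * cumulative TSN whenever the ordering breaks.
 */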
2326 static uint32_t
2327 sctp_process_uo_gaps(sctp_t *sctp, uint32_t ctsn, sctp_sack_frag_t *ssf,
2328     int num_gaps, mblk_t *umphead, mblk_t *mphead, int *trysend,
2329     boolean_t *fast_recovery, uint32_t fr_xtsn)
2330 {
2331 	uint32_t		xtsn;
2332 	uint32_t		gapstart = 0;
2333 	uint32_t		gapend = 0;
2334 	int			gapcnt;
2335 	uint16_t		chunklen;
2336 	sctp_data_hdr_t		*sdc;
2337 	int			gstart;
2338 	mblk_t			*ump = umphead;
2339 	mblk_t			*mp = mphead;
2340 	sctp_faddr_t		*fp;
2341 	uint32_t		acked = 0;
2342 	sctp_stack_t		*sctps = sctp->sctp_sctps;
2343 
2344 	/*
2345 	 * gstart tracks the last (in the order of TSN) gapstart that
2346 	 * we process in this SACK gaps walk.
2347 	 */
2348 	gstart = ctsn;
2349 
2350 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2351 	xtsn = ntohl(sdc->sdh_tsn);
2352 	for (gapcnt = 0; gapcnt < num_gaps; gapcnt++, ssf++) {
2353 		if (gapstart != 0) {
2354 			/*
2355 			 * If we have reached the end of the transmit list or
2356 			 * hit an unsent chunk or encountered an unordered gap
2357 			 * block, start from the ctsn again.
2358 			 */
2359 			if (ump == NULL || !SCTP_CHUNK_ISSENT(mp) ||
2360 			    SEQ_LT(ctsn + ntohs(ssf->ssf_start), xtsn)) {
2361 				ump = umphead;
2362 				mp = mphead;
2363 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2364 				xtsn = ntohl(sdc->sdh_tsn);
2365 			}
2366 		}
2367 
2368 		gapstart = ctsn + ntohs(ssf->ssf_start);
2369 		gapend = ctsn + ntohs(ssf->ssf_end);
2370 
2371 		/*
2372 		 * Sanity checks:
2373 		 *
2374 		 * 1. SACK for TSN we have not sent - ABORT
2375 		 * 2. Invalid or spurious gaps, ignore all gaps
2376 		 */
2377 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2378 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2379 			BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2380 			*trysend = -1;
2381 			return (acked);
2382 		} else if (SEQ_LT(gapend, gapstart) ||
2383 		    SEQ_LEQ(gapstart, ctsn)) {
2384 			break;
2385 		}
2386 		/*
2387 		 * The xtsn can be the TSN processed for the last gap
2388 		 * (gapend) or it could be the cumulative TSN. We continue
2389 		 * with the last xtsn as long as the gaps are ordered, when
2390 		 * we hit an unordered gap, we re-start from the cumulative
2391 		 * TSN. For the first gap it is always the cumulative TSN.
2392 		 */
2393 		while (xtsn != gapstart) {
2394 			/*
2395 			 * We can't reliably check for reneged chunks
2396 			 * when walking the unordered list, so we don't.
2397 			 * In case the peer reneges then we will end up
2398 			 * sending the reneged chunk via timeout.
2399 			 */
2400 			mp = mp->b_next;
2401 			if (mp == NULL) {
2402 				ump = ump->b_next;
2403 				/*
2404 				 * ump can't be NULL because of the sanity
2405 				 * check above.
2406 				 */
2407 				ASSERT(ump != NULL);
2408 				mp = ump->b_cont;
2409 			}
2410 			/*
2411 			 * mp can't be unsent because of the sanity check
2412 			 * above.
2413 			 */
2414 			ASSERT(SCTP_CHUNK_ISSENT(mp));
2415 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2416 			xtsn = ntohl(sdc->sdh_tsn);
2417 		}
2418 		/*
2419 		 * Now that we have found the chunk with TSN == 'gapstart',
2420 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2421 		 * All intermediate chunks will be marked ACKED, if they
2422 		 * haven't already been.
2423 		 */
2424 		while (SEQ_LEQ(xtsn, gapend)) {
2425 			/*
2426 			 * SACKed
2427 			 */
2428 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
2429 			if (!SCTP_CHUNK_ISACKED(mp)) {
2430 				SCTP_CHUNK_ACKED(mp);
2431 
2432 				fp = SCTP_CHUNK_DEST(mp);
2433 				chunklen = ntohs(sdc->sdh_len);
2434 				ASSERT(fp->suna >= chunklen);
2435 				fp->suna -= chunklen;
2436 				if (fp->suna == 0) {
2437 					/* All outstanding data acked. */
2438 					fp->pba = 0;
2439 					SCTP_FADDR_TIMER_STOP(fp);
2440 				}
2441 				fp->acked += chunklen;
2442 				acked += chunklen;
2443 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
2444 				ASSERT(sctp->sctp_unacked >= 0);
2445 			}
2446 			/*
2447 			 * Move to the next message in the transmit list
2448 			 * if we are done with all the chunks from the current
2449 			 * message. Note, it is possible to hit the end of the
2450 			 * transmit list here, i.e. if we have already completed
2451 			 * processing the gap block.
2452 			 */
2453 			mp = mp->b_next;
2454 			if (mp == NULL) {
2455 				ump = ump->b_next;
2456 				if (ump == NULL) {
2457 					ASSERT(xtsn == gapend);
2458 					break;
2459 				}
2460 				mp = ump->b_cont;
2461 			}
2462 			/*
2463 			 * Likewise, we can hit an unsent chunk once we have
2464 			 * completed processing the gap block.
2465 			 */
2466 			if (!SCTP_CHUNK_ISSENT(mp)) {
2467 				ASSERT(xtsn == gapend);
2468 				break;
2469 			}
2470 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2471 			xtsn = ntohl(sdc->sdh_tsn);
2472 		}
2473 		/*
2474 		 * We keep track of the last gap we successfully processed
2475 		 * so that we can terminate the walk below for incrementing
2476 		 * the SACK count.
2477 		 */
2478 		if (SEQ_LT(gstart, gapstart))
2479 			gstart = gapstart;
2480 	}
2481 	/*
2482 	 * Check if we have incremented the SACK count for all unacked TSNs in
2483 	 * sctp_got_sack(), if so we are done.
2484 	 */
2485 	if (SEQ_LEQ(gstart, fr_xtsn))
2486 		return (acked);
2487 
2488 	ump = umphead;
2489 	mp = mphead;
2490 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2491 	xtsn = ntohl(sdc->sdh_tsn);
2492 	while (SEQ_LT(xtsn, gstart)) {
2493 		/*
2494 		 * We have incremented the SACK count for TSNs less than fr_xtsn
2495 		 * in sctp_got_sack(), so don't increment them again here.
2496 		 */
2497 		if (SEQ_GT(xtsn, fr_xtsn) && !SCTP_CHUNK_ISACKED(mp)) {
2498 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
2499 			if (SCTP_CHUNK_SACKCNT(mp) ==
2500 			    sctps->sctps_fast_rxt_thresh) {
2501 				SCTP_CHUNK_REXMIT(mp);
2502 				sctp->sctp_chk_fast_rexmit = B_TRUE;
2503 				*trysend = 1;
2504 				if (!*fast_recovery) {
2505 					/*
2506 					 * Entering fast recovery.
2507 					 */
2508 					fp = SCTP_CHUNK_DEST(mp);
2509 					fp->ssthresh = fp->cwnd / 2;
2510 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
2511 						fp->ssthresh =
2512 						    2 * fp->sfa_pmss;
2513 					}
2514 					fp->cwnd = fp->ssthresh;
2515 					fp->pba = 0;
2516 					sctp->sctp_recovery_tsn =
2517 					    sctp->sctp_ltsn - 1;
2518 					*fast_recovery = B_TRUE;
2519 				}
2520 			}
2521 		}
2522 		mp = mp->b_next;
2523 		if (mp == NULL) {
2524 			ump = ump->b_next;
2525 			/* We can't get to the end of the transmit list here */
2526 			ASSERT(ump != NULL);
2527 			mp = ump->b_cont;
2528 		}
2529 		/* We can't hit an unsent chunk here */
2530 		ASSERT(SCTP_CHUNK_ISSENT(mp));
2531 		sdc = (sctp_data_hdr_t *)mp->b_rptr;
2532 		xtsn = ntohl(sdc->sdh_tsn);
2533 	}
2534 	return (acked);
2535 }
2536 
2537 static int
2538 sctp_got_sack(sctp_t *sctp, sctp_chunk_hdr_t *sch)
2539 {
2540 	sctp_sack_chunk_t	*sc;
2541 	sctp_data_hdr_t		*sdc;
2542 	sctp_sack_frag_t	*ssf;
2543 	mblk_t			*ump;
2544 	mblk_t			*mp;
2545 	mblk_t			*mp1;
2546 	uint32_t		cumtsn;
2547 	uint32_t		xtsn;
2548 	uint32_t		gapstart = 0;
2549 	uint32_t		gapend = 0;
2550 	uint32_t		acked = 0;
2551 	uint16_t		chunklen;
2552 	sctp_faddr_t		*fp;
2553 	int			num_gaps;
2554 	int			trysend = 0;
2555 	int			i;
2556 	boolean_t		fast_recovery = B_FALSE;
2557 	boolean_t		cumack_forward = B_FALSE;
2558 	boolean_t		fwd_tsn = B_FALSE;
2559 	sctp_stack_t		*sctps = sctp->sctp_sctps;
2560 
2561 	BUMP_LOCAL(sctp->sctp_ibchunks);
2562 	BUMP_LOCAL(sctp->sctp_isacks);
2563 	chunklen = ntohs(sch->sch_len);
2564 	if (chunklen < (sizeof (*sch) + sizeof (*sc)))
2565 		return (0);
2566 
2567 	sc = (sctp_sack_chunk_t *)(sch + 1);
2568 	cumtsn = ntohl(sc->ssc_cumtsn);
2569 
2570 	dprint(2, ("got sack cumtsn %x -> %x\n", sctp->sctp_lastack_rxd,
2571 	    cumtsn));
2572 
2573 	/* out of order */
2574 	if (SEQ_LT(cumtsn, sctp->sctp_lastack_rxd))
2575 		return (0);
2576 
2577 	if (SEQ_GT(cumtsn, sctp->sctp_ltsn - 1)) {
2578 		BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2579 		/* Send an ABORT */
2580 		return (-1);
2581 	}
2582 
2583 	/*
2584 	 * Cwnd update is only done when not in fast recovery mode.
2585 	 */
2586 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn))
2587 		fast_recovery = B_TRUE;
2588 
2589 	/*
2590 	 * .. and if the cum TSN is not moving ahead on account of a Forward TSN
2591 	 */
2592 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_adv_pap))
2593 		fwd_tsn = B_TRUE;
2594 
2595 	if (cumtsn == sctp->sctp_lastack_rxd &&
2596 	    (sctp->sctp_xmit_unacked == NULL ||
2597 	    !SCTP_CHUNK_ABANDONED(sctp->sctp_xmit_unacked))) {
2598 		if (sctp->sctp_xmit_unacked != NULL)
2599 			mp = sctp->sctp_xmit_unacked;
2600 		else if (sctp->sctp_xmit_head != NULL)
2601 			mp = sctp->sctp_xmit_head->b_cont;
2602 		else
2603 			mp = NULL;
2604 		BUMP_MIB(&sctps->sctps_mib, sctpInDupAck);
2605 		/*
2606 		 * If we were doing a zero win probe and the win
2607 		 * has now opened to at least MSS, re-transmit the
2608 		 * zero win probe via sctp_rexmit_packet().
2609 		 */
2610 		if (mp != NULL && sctp->sctp_zero_win_probe &&
2611 		    ntohl(sc->ssc_a_rwnd) >= sctp->sctp_current->sfa_pmss) {
2612 			mblk_t	*pkt;
2613 			uint_t	pkt_len;
2614 			mblk_t	*mp1 = mp;
2615 			mblk_t	*meta = sctp->sctp_xmit_head;
2616 
2617 			/*
2618 			 * Reset the RTO since we have been backing off
2619 			 * to send the ZWP.
2620 			 */
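			/*
			 * (This is the standard RTO computation of RFC 2960
			 * section 6.3.1: RTO = SRTT + 4 * RTTVAR.)
			 */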
2621 			fp = sctp->sctp_current;
2622 			fp->rto = fp->srtt + 4 * fp->rttvar;
2623 			SCTP_MAX_RTO(sctp, fp);
2624 			/* Resend the ZWP */
2625 			pkt = sctp_rexmit_packet(sctp, &meta, &mp1, fp,
2626 			    &pkt_len);
2627 			if (pkt == NULL) {
2628 				SCTP_KSTAT(sctps, sctp_ss_rexmit_failed);
2629 				return (0);
2630 			}
2631 			ASSERT(pkt_len <= fp->sfa_pmss);
2632 			sctp->sctp_zero_win_probe = B_FALSE;
2633 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2634 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2635 			sctp_set_iplen(sctp, pkt, fp->ixa);
2636 			(void) conn_ip_output(pkt, fp->ixa);
2637 			BUMP_LOCAL(sctp->sctp_opkts);
2638 		}
2639 	} else {
2640 		if (sctp->sctp_zero_win_probe) {
2641 			/*
2642 			 * Reset the RTO since we have been backing off
2643 			 * to send the ZWP.
2644 			 */
2645 			fp = sctp->sctp_current;
2646 			fp->rto = fp->srtt + 4 * fp->rttvar;
2647 			SCTP_MAX_RTO(sctp, fp);
2648 			sctp->sctp_zero_win_probe = B_FALSE;
2649 			/* This is probably not required */
2650 			if (!sctp->sctp_rexmitting) {
2651 				sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2652 				sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2653 			}
2654 		}
2655 		acked = sctp_cumack(sctp, cumtsn, &mp);
2656 		sctp->sctp_xmit_unacked = mp;
2657 		if (acked > 0) {
2658 			trysend = 1;
2659 			cumack_forward = B_TRUE;
2660 			if (fwd_tsn && SEQ_GEQ(sctp->sctp_lastack_rxd,
2661 			    sctp->sctp_adv_pap)) {
2662 				cumack_forward = B_FALSE;
2663 			}
2664 		}
2665 	}
2666 	num_gaps = ntohs(sc->ssc_numfrags);
2667 	UPDATE_LOCAL(sctp->sctp_gapcnt, num_gaps);
2668 	if (num_gaps == 0 || mp == NULL || !SCTP_CHUNK_ISSENT(mp) ||
2669 	    chunklen < (sizeof (*sch) + sizeof (*sc) +
2670 	    num_gaps * sizeof (*ssf))) {
2671 		goto ret;
2672 	}
2673 #ifdef	DEBUG
2674 	/*
2675 	 * Since we delete any message that has been acked completely,
2676 	 * the unacked chunk must belong to sctp_xmit_head (as
2677 	 * we don't have a back pointer from the mp to the meta data
2678 	 * we do this).
2679 	 */
2680 	{
2681 		mblk_t	*mp2 = sctp->sctp_xmit_head->b_cont;
2682 
2683 		while (mp2 != NULL) {
2684 			if (mp2 == mp)
2685 				break;
2686 			mp2 = mp2->b_next;
2687 		}
2688 		ASSERT(mp2 != NULL);
2689 	}
2690 #endif
2691 	ump = sctp->sctp_xmit_head;
2692 
2693 	/*
2694 	 * Just remember where we started from, in case we need to call
2695 	 * sctp_process_uo_gaps() if the gap blocks are unordered.
2696 	 */
2697 	mp1 = mp;
2698 
2699 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2700 	xtsn = ntohl(sdc->sdh_tsn);
2701 	ASSERT(xtsn == cumtsn + 1);
2702 
2703 	/*
2704 	 * Go through SACK gaps. They are ordered based on start TSN.
2705 	 */
2706 	ssf = (sctp_sack_frag_t *)(sc + 1);
2707 	for (i = 0; i < num_gaps; i++, ssf++) {
2708 		if (gapstart != 0) {
2709 			/* check for unordered gap */
2710 			if (SEQ_LEQ(cumtsn + ntohs(ssf->ssf_start), gapstart)) {
2711 				acked += sctp_process_uo_gaps(sctp,
2712 				    cumtsn, ssf, num_gaps - i,
2713 				    sctp->sctp_xmit_head, mp1,
2714 				    &trysend, &fast_recovery, gapstart);
2715 				if (trysend < 0) {
2716 					BUMP_MIB(&sctps->sctps_mib,
2717 					    sctpInAckUnsent);
2718 					return (-1);
2719 				}
2720 				break;
2721 			}
2722 		}
2723 		gapstart = cumtsn + ntohs(ssf->ssf_start);
2724 		gapend = cumtsn + ntohs(ssf->ssf_end);
2725 
2726 		/*
2727 		 * Sanity checks:
2728 		 *
2729 		 * 1. SACK for TSN we have not sent - ABORT
2730 		 * 2. Invalid or spurious gaps, ignore all gaps
2731 		 */
2732 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2733 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2734 			BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2735 			return (-1);
2736 		} else if (SEQ_LT(gapend, gapstart) ||
2737 		    SEQ_LEQ(gapstart, cumtsn)) {
2738 			break;
2739 		}
2740 		/*
2741 		 * Let's start at the current TSN (for the 1st gap we start
2742 		 * from the cumulative TSN, for subsequent ones we start from
2743 		 * where the previous gapend was found - second while loop
2744 		 * below) and walk the transmit list till we find the TSN
2745 		 * corresponding to gapstart. All the unacked chunks till we
2746 		 * get to the chunk with TSN == gapstart will have their
2747 		 * SACKCNT incremented by 1. Note since the gap blocks are
2748 		 * ordered, we won't be incrementing the SACKCNT for an
2749 		 * unacked chunk by more than one while processing the gap
2750 		 * blocks. If the SACKCNT for any unacked chunk exceeds
2751 		 * the fast retransmit threshold, we will fast retransmit
2752 		 * after processing all the gap blocks.
2753 		 */
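		/*
		 * For example, with a hypothetical sctps_fast_rxt_thresh of
		 * 3: a chunk that has now been skipped over by the gap
		 * reports of three successive SACKs is marked for
		 * retransmission below, and the first such chunk also
		 * triggers entry into fast recovery.
		 */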
2754 		ASSERT(SEQ_LEQ(xtsn, gapstart));
2755 		while (xtsn != gapstart) {
2756 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
2757 			if (SCTP_CHUNK_SACKCNT(mp) ==
2758 			    sctps->sctps_fast_rxt_thresh) {
2759 				SCTP_CHUNK_REXMIT(mp);
2760 				sctp->sctp_chk_fast_rexmit = B_TRUE;
2761 				trysend = 1;
2762 				if (!fast_recovery) {
2763 					/*
2764 					 * Entering fast recovery.
2765 					 */
2766 					fp = SCTP_CHUNK_DEST(mp);
2767 					fp->ssthresh = fp->cwnd / 2;
2768 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
2769 						fp->ssthresh =
2770 						    2 * fp->sfa_pmss;
2771 					}
2772 					fp->cwnd = fp->ssthresh;
2773 					fp->pba = 0;
2774 					sctp->sctp_recovery_tsn =
2775 					    sctp->sctp_ltsn - 1;
2776 					fast_recovery = B_TRUE;
2777 				}
2778 			}
2779 
2780 			/*
2781 			 * Peer may have reneged on this chunk, so un-sack
2782 			 * it now. If the peer did renege, we need to
2783 			 * readjust unacked.
2784 			 */
2785 			if (SCTP_CHUNK_ISACKED(mp)) {
2786 				chunklen = ntohs(sdc->sdh_len);
2787 				fp = SCTP_CHUNK_DEST(mp);
2788 				fp->suna += chunklen;
2789 				sctp->sctp_unacked += chunklen - sizeof (*sdc);
2790 				SCTP_CHUNK_CLEAR_ACKED(mp);
2791 				if (!fp->timer_running) {
2792 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
2793 					    fp->rto);
2794 				}
2795 			}
2796 
2797 			mp = mp->b_next;
2798 			if (mp == NULL) {
2799 				ump = ump->b_next;
2800 				/*
2801 				 * ump can't be NULL given the sanity check
2802 				 * above.  But if it is NULL, it means that
2803 				 * there is data corruption.  We'd better
2804 				 * panic.
2805 				 */
2806 				if (ump == NULL) {
2807 					panic("Memory corruption detected: gap "
2808 					    "start TSN 0x%x missing from the "
2809 					    "xmit list: %p", gapstart,
2810 					    (void *)sctp);
2811 				}
2812 				mp = ump->b_cont;
2813 			}
2814 			/*
2815 			 * mp can't be unsent given the sanity check above.
2816 			 */
2817 			ASSERT(SCTP_CHUNK_ISSENT(mp));
2818 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2819 			xtsn = ntohl(sdc->sdh_tsn);
2820 		}
2821 		/*
2822 		 * Now that we have found the chunk with TSN == 'gapstart',
2823 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2824 		 * All intermediate chunks will be marked ACKED, if they
2825 		 * haven't already been.
2826 		 */
2827 		while (SEQ_LEQ(xtsn, gapend)) {
2828 			/*
2829 			 * SACKed
2830 			 */
2831 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
2832 			if (!SCTP_CHUNK_ISACKED(mp)) {
2833 				SCTP_CHUNK_ACKED(mp);
2834 
2835 				fp = SCTP_CHUNK_DEST(mp);
2836 				chunklen = ntohs(sdc->sdh_len);
2837 				ASSERT(fp->suna >= chunklen);
2838 				fp->suna -= chunklen;
2839 				if (fp->suna == 0) {
2840 					/* All outstanding data acked. */
2841 					fp->pba = 0;
2842 					SCTP_FADDR_TIMER_STOP(fp);
2843 				}
2844 				fp->acked += chunklen;
2845 				acked += chunklen;
2846 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
2847 				ASSERT(sctp->sctp_unacked >= 0);
2848 			}
2849 			/* Go to the next chunk of the current message */
2850 			mp = mp->b_next;
2851 			/*
2852 			 * Move to the next message in the transmit list
2853 			 * if we are done with all the chunks from the current
2854 			 * message. Note, it is possible to hit the end of the
2855 			 * transmit list here, i.e. if we have already completed
2856 			 * processing the gap block.  But the TSN must be equal
2857 			 * to the gapend because of the above sanity check.
2858 			 * If it is not equal, it means that some data is
2859 			 * missing.
2860 			 * Also, note that we break here, which means we
2861 			 * continue processing gap blocks, if any. In case of
2862 			 * ordered gap blocks there can't be any following
2863 			 * this (if there is it will fail the sanity check
2864 			 * above). In case of un-ordered gap blocks we will
2865 			 * switch to sctp_process_uo_gaps().  In either case
2866 			 * it should be fine to continue with NULL ump/mp,
2867 			 * but we just reset it to xmit_head.
2868 			 */
2869 			if (mp == NULL) {
2870 				ump = ump->b_next;
2871 				if (ump == NULL) {
2872 					if (xtsn != gapend) {
2873 						panic("Memory corruption "
2874 						    "detected: gap end TSN "
2875 						    "0x%x missing from the "
2876 						    "xmit list: %p", gapend,
2877 						    (void *)sctp);
2878 					}
2879 					ump = sctp->sctp_xmit_head;
2880 					mp = mp1;
2881 					sdc = (sctp_data_hdr_t *)mp->b_rptr;
2882 					xtsn = ntohl(sdc->sdh_tsn);
2883 					break;
2884 				}
2885 				mp = ump->b_cont;
2886 			}
2887 			/*
2888 			 * Likewise, we could hit an unsent chunk once we have
2889 			 * completed processing the gap block. Again, it is
2890 			 * fine to continue processing gap blocks with mp
2891 			 * pointing to the unsent chunk, because if there
2892 			 * are more ordered gap blocks, they will fail the
2893 			 * sanity check, and if there are un-ordered gap blocks,
2894 			 * we will continue processing in sctp_process_uo_gaps().
2895 			 * We just reset the mp to the one we started with.
2896 			 */
2897 			if (!SCTP_CHUNK_ISSENT(mp)) {
2898 				ASSERT(xtsn == gapend);
2899 				ump = sctp->sctp_xmit_head;
2900 				mp = mp1;
2901 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2902 				xtsn = ntohl(sdc->sdh_tsn);
2903 				break;
2904 			}
2905 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2906 			xtsn = ntohl(sdc->sdh_tsn);
2907 		}
2908 	}
2909 	if (sctp->sctp_prsctp_aware)
2910 		sctp_check_abandoned_data(sctp, sctp->sctp_current);
2911 	if (sctp->sctp_chk_fast_rexmit)
2912 		sctp_fast_rexmit(sctp);
2913 ret:
2914 	trysend += sctp_set_frwnd(sctp, ntohl(sc->ssc_a_rwnd));
2915 
2916 	/*
2917 	 * If receive window is closed while there is unsent data,
2918 	 * set a timer for doing zero window probes.
2919 	 */
2920 	if (sctp->sctp_frwnd == 0 && sctp->sctp_unacked == 0 &&
2921 	    sctp->sctp_unsent != 0) {
2922 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
2923 		    sctp->sctp_current->rto);
2924 	}
2925 
2926 	/*
2927 	 * Set cwnd for all destinations.
2928 	 * Congestion window gets increased only when cumulative
2929 	 * TSN moves forward, we're not in fast recovery, and
2930 	 * cwnd has been fully utilized (almost fully, as we need to allow
2931 	 * some leeway due to non-MSS sized messages).
2932 	 */
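	/*
	 * A sketch of the per-destination adjustment applied below: in slow
	 * start (cwnd < ssthresh) cwnd grows by min(acked, one PMTU-sized
	 * segment) per SACK; in congestion avoidance the acked bytes
	 * accumulate in pba and cwnd grows by one segment each time pba
	 * reaches cwnd.  Either way cwnd is capped at sctp_cwnd_max and then
	 * bounded by the max-burst limit.
	 */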
2933 	if (sctp->sctp_current->acked == acked) {
2934 		/*
2935 		 * Fast-path, only data sent to sctp_current got acked.
2936 		 */
2937 		fp = sctp->sctp_current;
2938 		if (cumack_forward && !fast_recovery &&
2939 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
2940 			if (fp->cwnd < fp->ssthresh) {
2941 				/*
2942 				 * Slow start
2943 				 */
2944 				if (fp->acked > fp->sfa_pmss) {
2945 					fp->cwnd += fp->sfa_pmss;
2946 				} else {
2947 					fp->cwnd += fp->acked;
2948 				}
2949 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
2950 			} else {
2951 				/*
2952 				 * Congestion avoidance
2953 				 */
2954 				fp->pba += fp->acked;
2955 				if (fp->pba >= fp->cwnd) {
2956 					fp->pba -= fp->cwnd;
2957 					fp->cwnd += fp->sfa_pmss;
2958 					fp->cwnd = MIN(fp->cwnd,
2959 					    sctp->sctp_cwnd_max);
2960 				}
2961 			}
2962 		}
2963 		/*
2964 		 * Limit the burst of transmitted data segments.
2965 		 */
2966 		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
2967 		    fp->cwnd) {
2968 			fp->cwnd = fp->suna + sctps->sctps_maxburst *
2969 			    fp->sfa_pmss;
2970 		}
2971 		fp->acked = 0;
2972 		goto check_ss_rxmit;
2973 	}
2974 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
2975 		if (cumack_forward && fp->acked && !fast_recovery &&
2976 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
2977 			if (fp->cwnd < fp->ssthresh) {
2978 				if (fp->acked > fp->sfa_pmss) {
2979 					fp->cwnd += fp->sfa_pmss;
2980 				} else {
2981 					fp->cwnd += fp->acked;
2982 				}
2983 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
2984 			} else {
2985 				fp->pba += fp->acked;
2986 				if (fp->pba >= fp->cwnd) {
2987 					fp->pba -= fp->cwnd;
2988 					fp->cwnd += fp->sfa_pmss;
2989 					fp->cwnd = MIN(fp->cwnd,
2990 					    sctp->sctp_cwnd_max);
2991 				}
2992 			}
2993 		}
2994 		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
2995 		    fp->cwnd) {
2996 			fp->cwnd = fp->suna + sctps->sctps_maxburst *
2997 			    fp->sfa_pmss;
2998 		}
2999 		fp->acked = 0;
3000 	}
3001 	fp = sctp->sctp_current;
3002 check_ss_rxmit:
3003 	/*
3004 	 * If this is a SACK following a timeout, check if there are
3005 	 * still unacked chunks (sent before the timeout) that we can
3006 	 * send.
3007 	 */
3008 	if (sctp->sctp_rexmitting) {
3009 		if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_rxt_maxtsn)) {
3010 			/*
3011 			 * As we are in retransmission phase, we may get a
3012 			 * SACK which indicates some new chunks are received
3013 			 * but cum_tsn does not advance.  During this
3014 			 * phase, the other side advances cum_tsn only because
3015 			 * it receives our retransmitted chunks.  Only
3016 			 * this signals that some chunks are still
3017 			 * missing.
3018 			 */
3019 			if (cumack_forward) {
3020 				fp->rxt_unacked -= acked;
3021 				sctp_ss_rexmit(sctp);
3022 			}
3023 		} else {
3024 			sctp->sctp_rexmitting = B_FALSE;
3025 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
3026 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
3027 			fp->rxt_unacked = 0;
3028 		}
3029 	}
3030 	return (trysend);
3031 }
3032 
3033 /*
3034  * Returns 0 if the caller should stop processing any more chunks,
3035  * 1 if the caller should skip this chunk and continue processing.
3036  */
3037 static int
3038 sctp_strange_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
3039 {
3040 	size_t len;
3041 
3042 	BUMP_LOCAL(sctp->sctp_ibchunks);
3043 	/* check top two bits for action required */
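	/*
	 * Per RFC 2960 section 3.2, the two high bits of an unrecognized
	 * chunk type select the action:
	 *
	 *	00 - stop processing and discard the chunk
	 *	01 - stop processing, discard, and report in an ERROR chunk
	 *	10 - skip the chunk and continue processing
	 *	11 - skip the chunk, continue, and report in an ERROR chunk
	 */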
3044 	if (ch->sch_id & 0x40) {	/* also matches 0xc0 */
3045 		len = ntohs(ch->sch_len);
3046 		sctp_add_err(sctp, SCTP_ERR_UNREC_CHUNK, ch, len, fp);
3047 
3048 		if ((ch->sch_id & 0xc0) == 0xc0) {
3049 			/* skip and continue */
3050 			return (1);
3051 		} else {
3052 			/* stop processing */
3053 			return (0);
3054 		}
3055 	}
3056 	if (ch->sch_id & 0x80) {
3057 		/* skip and continue, no error */
3058 		return (1);
3059 	}
3060 	/* top two bits are clear; stop processing and no error */
3061 	return (0);
3062 }
3063 
3064 /*
3065  * Basic sanity checks on all input chunks and parameters: they must
3066  * be of legitimate size for their purported type, and must follow
3067  * ordering conventions as defined in rfc2960.
3068  *
3069  * Returns 1 if the chunk and all encloded params are legitimate,
3070  * 0 otherwise.
3071  */
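/*
 * Chunks and parameters are both TLV-encoded; a sketch of what the
 * parameter walk at the bottom of this function expects:
 *
 *	sctp_chunk_hdr_t	- sch_id, flags, sch_len (whole chunk)
 *	  sctp_parm_hdr_t	- sph_type, sph_len
 *	  ...parameter value, padded out to a 4-byte boundary
 *	  sctp_parm_hdr_t	- next parameter, found via sctp_next_parm()
 */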
3072 /*ARGSUSED*/
3073 static int
3074 sctp_check_input(sctp_t *sctp, sctp_chunk_hdr_t *ch, ssize_t len, int first)
3075 {
3076 	sctp_parm_hdr_t	*ph;
3077 	void		*p = NULL;
3078 	ssize_t		clen;
3079 	uint16_t	ch_len;
3080 
3081 	ch_len = ntohs(ch->sch_len);
3082 	if (ch_len > len) {
3083 		return (0);
3084 	}
3085 
3086 	switch (ch->sch_id) {
3087 	case CHUNK_DATA:
3088 		if (ch_len < sizeof (sctp_data_hdr_t)) {
3089 			return (0);
3090 		}
3091 		return (1);
3092 	case CHUNK_INIT:
3093 	case CHUNK_INIT_ACK:
3094 		{
3095 			ssize_t	remlen = len;
3096 
3097 			/*
3098 			 * INIT and INIT-ACK chunks must not be bundled with
3099 			 * any other.
3100 			 */
3101 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
3102 			    (ch_len < (sizeof (*ch) +
3103 			    sizeof (sctp_init_chunk_t)))) {
3104 				return (0);
3105 			}
3106 			/* may have params that need checking */
3107 			p = (char *)(ch + 1) + sizeof (sctp_init_chunk_t);
3108 			clen = ch_len - (sizeof (*ch) +
3109 			    sizeof (sctp_init_chunk_t));
3110 		}
3111 		break;
3112 	case CHUNK_SACK:
3113 		if (ch_len < (sizeof (*ch) + sizeof (sctp_sack_chunk_t))) {
3114 			return (0);
3115 		}
3116 		/* dup and gap reports checked by got_sack() */
3117 		return (1);
3118 	case CHUNK_SHUTDOWN:
3119 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) {
3120 			return (0);
3121 		}
3122 		return (1);
3123 	case CHUNK_ABORT:
3124 	case CHUNK_ERROR:
3125 		if (ch_len < sizeof (*ch)) {
3126 			return (0);
3127 		}
3128 		/* may have params that need checking */
3129 		p = ch + 1;
3130 		clen = ch_len - sizeof (*ch);
3131 		break;
3132 	case CHUNK_ECNE:
3133 	case CHUNK_CWR:
3134 	case CHUNK_HEARTBEAT:
3135 	case CHUNK_HEARTBEAT_ACK:
3136 	/* Full ASCONF chunk and parameter checks are in asconf.c */
3137 	case CHUNK_ASCONF:
3138 	case CHUNK_ASCONF_ACK:
3139 		if (ch_len < sizeof (*ch)) {
3140 			return (0);
3141 		}
3142 		/* heartbeat data checked by process_heartbeat() */
3143 		return (1);
3144 	case CHUNK_SHUTDOWN_COMPLETE:
3145 		{
3146 			ssize_t remlen = len;
3147 
3148 			/*
3149 			 * SHUTDOWN-COMPLETE chunk must not be bundled with any
3150 			 * other
3151 			 */
3152 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
3153 			    ch_len < sizeof (*ch)) {
3154 				return (0);
3155 			}
3156 		}
3157 		return (1);
3158 	case CHUNK_COOKIE:
3159 	case CHUNK_COOKIE_ACK:
3160 	case CHUNK_SHUTDOWN_ACK:
3161 		if (ch_len < sizeof (*ch) || !first) {
3162 			return (0);
3163 		}
3164 		return (1);
3165 	case CHUNK_FORWARD_TSN:
3166 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t)))
3167 			return (0);
3168 		return (1);
3169 	default:
3170 		return (1);	/* handled by strange_chunk() */
3171 	}
3172 
3173 	/* check and byteorder parameters */
3174 	if (clen <= 0) {
3175 		return (1);
3176 	}
3177 	ASSERT(p != NULL);
3178 
3179 	ph = p;
3180 	while (ph != NULL && clen > 0) {
3181 		ch_len = ntohs(ph->sph_len);
3182 		if (ch_len > len || ch_len < sizeof (*ph)) {
3183 			return (0);
3184 		}
3185 		ph = sctp_next_parm(ph, &clen);
3186 	}
3187 
3188 	/* All OK */
3189 	return (1);
3190 }
3191 
3192 static mblk_t *
3193 sctp_check_in_policy(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
3194 {
3195 	boolean_t policy_present;
3196 	ipha_t *ipha;
3197 	ip6_t *ip6h;
3198 	netstack_t	*ns = ipst->ips_netstack;
3199 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3200 
3201 	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
3202 		policy_present = ipss->ipsec_inbound_v4_policy_present;
3203 		ipha = (ipha_t *)mp->b_rptr;
3204 		ip6h = NULL;
3205 	} else {
3206 		policy_present = ipss->ipsec_inbound_v6_policy_present;
3207 		ipha = NULL;
3208 		ip6h = (ip6_t *)mp->b_rptr;
3209 	}
3210 
3211 	if (policy_present) {
3212 		/*
3213 		 * The conn_t parameter is NULL because we already know
3214 		 * nobody's home.
3215 		 */
3216 		mp = ipsec_check_global_policy(mp, (conn_t *)NULL,
3217 		    ipha, ip6h, ira, ns);
3218 		if (mp == NULL)
3219 			return (NULL);
3220 	}
3221 	return (mp);
3222 }
3223 
3224 /* Handle out-of-the-blue packets */
3225 void
3226 sctp_ootb_input(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
3227 {
3228 	sctp_t			*sctp;
3229 	sctp_chunk_hdr_t	*ch;
3230 	sctp_hdr_t		*sctph;
3231 	in6_addr_t		src, dst;
3232 	uint_t			ip_hdr_len = ira->ira_ip_hdr_length;
3233 	ssize_t			mlen;
3234 	sctp_stack_t		*sctps;
3235 	boolean_t		secure;
3236 	zoneid_t		zoneid = ira->ira_zoneid;
3237 	uchar_t			*rptr;
3238 
3239 	ASSERT(ira->ira_ill == NULL);
3240 
3241 	secure = ira->ira_flags & IRAF_IPSEC_SECURE;
3242 
3243 	sctps = ipst->ips_netstack->netstack_sctp;
3244 
3245 	BUMP_MIB(&sctps->sctps_mib, sctpOutOfBlue);
3246 	BUMP_MIB(&sctps->sctps_mib, sctpInSCTPPkts);
3247 
3248 	if (mp->b_cont != NULL) {
3249 		/*
3250 		 * All subsequent code is vastly simplified if it can
3251 		 * assume a single contiguous chunk of data.
3252 		 */
3253 		if (pullupmsg(mp, -1) == 0) {
3254 			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3255 			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3256 			freemsg(mp);
3257 			return;
3258 		}
3259 	}
3260 
3261 	rptr = mp->b_rptr;
3262 	sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]);
3263 	if (ira->ira_flags & IRAF_IS_IPV4) {
3264 		ipha_t *ipha;
3265 
3266 		ipha = (ipha_t *)rptr;
3267 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src);
3268 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst);
3269 	} else {
3270 		ip6_t *ip6h;
3271 
3272 		ip6h = (ip6_t *)rptr;
3273 		src = ip6h->ip6_src;
3274 		dst = ip6h->ip6_dst;
3275 	}
3276 
3277 	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
3278 	if ((ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen)) == NULL) {
3279 		dprint(3, ("sctp_ootb_input: invalid packet\n"));
3280 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3281 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3282 		freemsg(mp);
3283 		return;
3284 	}
3285 
3286 	switch (ch->sch_id) {
3287 	case CHUNK_INIT:
3288 		/* no listener; send abort  */
3289 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3290 			return;
3291 		sctp_ootb_send_abort(sctp_init2vtag(ch), 0,
3292 		    NULL, 0, mp, 0, B_TRUE, ira, ipst);
3293 		break;
3294 	case CHUNK_INIT_ACK:
3295 		/* check for changed src addr */
3296 		sctp = sctp_addrlist2sctp(mp, sctph, ch, zoneid, sctps);
3297 		if (sctp != NULL) {
3298 			/* success; proceed to normal path */
3299 			mutex_enter(&sctp->sctp_lock);
3300 			if (sctp->sctp_running) {
3301 				sctp_add_recvq(sctp, mp, B_FALSE, ira);
3302 				mutex_exit(&sctp->sctp_lock);
3303 			} else {
3304 				/*
3305 				 * If the source address is changed, we
3306 				 * don't need to worry too much about
3307 				 * out of order processing.  So we don't
3308 				 * check if the recvq is empty or not here.
3309 				 */
3310 				sctp->sctp_running = B_TRUE;
3311 				mutex_exit(&sctp->sctp_lock);
3312 				sctp_input_data(sctp, mp, ira);
3313 				WAKE_SCTP(sctp);
3314 			}
3315 			SCTP_REFRELE(sctp);
3316 			return;
3317 		}
3318 		/* else bogus init ack; drop it */
3319 		break;
3320 	case CHUNK_SHUTDOWN_ACK:
3321 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3322 			return;
3323 		sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst);
3324 		return;
3325 	case CHUNK_ERROR:
3326 	case CHUNK_ABORT:
3327 	case CHUNK_COOKIE_ACK:
3328 	case CHUNK_SHUTDOWN_COMPLETE:
3329 		break;
3330 	default:
3331 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3332 			return;
3333 		sctp_ootb_send_abort(sctph->sh_verf, 0,
3334 		    NULL, 0, mp, 0, B_TRUE, ira, ipst);
3335 		break;
3336 	}
3337 	freemsg(mp);
3338 }
3339 
3340 /*
3341  * Handle sctp packets.
3342  * Note that we rele the sctp_t (the caller got a reference on it).
3343  */
3344 void
3345 sctp_input(conn_t *connp, ipha_t *ipha, ip6_t *ip6h, mblk_t *mp,
3346     ip_recv_attr_t *ira)
3347 {
3348 	sctp_t		*sctp = CONN2SCTP(connp);
3349 	boolean_t	secure;
3350 	ill_t		*ill = ira->ira_ill;
3351 	ip_stack_t	*ipst = ill->ill_ipst;
3352 	ipsec_stack_t	*ipss = ipst->ips_netstack->netstack_ipsec;
3353 	iaflags_t	iraflags = ira->ira_flags;
3354 	ill_t		*rill = ira->ira_rill;
3355 
3356 	secure = iraflags & IRAF_IPSEC_SECURE;
3357 
3358 	/*
3359 	 * We check some fields in conn_t without holding a lock.
3360 	 * This should be fine.
3361 	 */
3362 	if (((iraflags & IRAF_IS_IPV4) ?
3363 	    CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
3364 	    CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
3365 	    secure) {
3366 		mp = ipsec_check_inbound_policy(mp, connp, ipha,
3367 		    ip6h, ira);
3368 		if (mp == NULL) {
3369 			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
3370 			/* Note that mp is NULL */
3371 			ip_drop_input("ipIfStatsInDiscards", mp, ill);
3372 			SCTP_REFRELE(sctp);
3373 			return;
3374 		}
3375 	}
3376 
3377 	ira->ira_ill = ira->ira_rill = NULL;
3378 
3379 	mutex_enter(&sctp->sctp_lock);
3380 	if (sctp->sctp_running) {
3381 		sctp_add_recvq(sctp, mp, B_FALSE, ira);
3382 		mutex_exit(&sctp->sctp_lock);
3383 		goto done;
3384 	} else {
3385 		sctp->sctp_running = B_TRUE;
3386 		mutex_exit(&sctp->sctp_lock);
3387 
3388 		mutex_enter(&sctp->sctp_recvq_lock);
3389 		if (sctp->sctp_recvq != NULL) {
3390 			sctp_add_recvq(sctp, mp, B_TRUE, ira);
3391 			mutex_exit(&sctp->sctp_recvq_lock);
3392 			WAKE_SCTP(sctp);
3393 			goto done;
3394 		}
3395 	}
3396 	mutex_exit(&sctp->sctp_recvq_lock);
3397 	if (ira->ira_flags & IRAF_ICMP_ERROR)
3398 		sctp_icmp_error(sctp, mp);
3399 	else
3400 		sctp_input_data(sctp, mp, ira);
3401 	WAKE_SCTP(sctp);
3402 
3403 done:
3404 	SCTP_REFRELE(sctp);
3405 	ira->ira_ill = ill;
3406 	ira->ira_rill = rill;
3407 }
3408 
3409 static void
3410 sctp_process_abort(sctp_t *sctp, sctp_chunk_hdr_t *ch, int err)
3411 {
3412 	sctp_stack_t	*sctps = sctp->sctp_sctps;
3413 
3414 	BUMP_MIB(&sctps->sctps_mib, sctpAborted);
3415 	BUMP_LOCAL(sctp->sctp_ibchunks);
3416 
3417 	sctp_assoc_event(sctp, SCTP_COMM_LOST,
3418 	    ntohs(((sctp_parm_hdr_t *)(ch + 1))->sph_type), ch);
3419 	sctp_clean_death(sctp, err);
3420 }
3421 
3422 void
3423 sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
3424 {
3425 	sctp_chunk_hdr_t	*ch;
3426 	ssize_t			mlen;
3427 	int			gotdata;
3428 	int			trysend;
3429 	sctp_faddr_t		*fp;
3430 	sctp_init_chunk_t	*iack;
3431 	uint32_t		tsn;
3432 	sctp_data_hdr_t		*sdc;
3433 	ip_pkt_t		ipp;
3434 	in6_addr_t		src;
3435 	in6_addr_t		dst;
3436 	uint_t			ifindex;
3437 	sctp_hdr_t		*sctph;
3438 	uint_t			ip_hdr_len = ira->ira_ip_hdr_length;
3439 	mblk_t			*dups = NULL;
3440 	int			recv_adaptation;
3441 	boolean_t		wake_eager = B_FALSE;
3442 	in6_addr_t		peer_src;
3443 	int64_t			now;
3444 	sctp_stack_t		*sctps = sctp->sctp_sctps;
3445 	ip_stack_t		*ipst = sctps->sctps_netstack->netstack_ip;
3446 	boolean_t		hb_already = B_FALSE;
3447 	cred_t			*cr;
3448 	pid_t			cpid;
3449 	uchar_t			*rptr;
3450 	conn_t			*connp = sctp->sctp_connp;
3451 
3452 	ASSERT(DB_TYPE(mp) == M_DATA);
3453 	ASSERT(ira->ira_ill == NULL);
3454 
3455 	if (mp->b_cont != NULL) {
3456 		/*
3457 		 * All subsequent code is vastly simplified if it can
3458 		 * assume a single contiguous chunk of data.
3459 		 */
3460 		if (pullupmsg(mp, -1) == 0) {
3461 			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3462 			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3463 			freemsg(mp);
3464 			return;
3465 		}
3466 	}
3467 
3468 	BUMP_LOCAL(sctp->sctp_ipkts);
3469 	ifindex = ira->ira_ruifindex;
3470 
3471 	rptr = mp->b_rptr;
3472 
3473 	ipp.ipp_fields = 0;
3474 	if (connp->conn_recv_ancillary.crb_all != 0) {
3475 		/*
3476 		 * Record packet information in the ip_pkt_t
3477 		 */
3478 		if (ira->ira_flags & IRAF_IS_IPV4) {
3479 			(void) ip_find_hdr_v4((ipha_t *)rptr, &ipp,
3480 			    B_FALSE);
3481 		} else {
3482 			uint8_t nexthdrp;
3483 
3484 			/*
3485 			 * IPv6 packets can only be received by applications
3486 			 * that are prepared to receive IPv6 addresses.
3487 			 * The IP fanout must ensure this.
3488 			 */
3489 			ASSERT(connp->conn_family == AF_INET6);
3490 
3491 			(void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp,
3492 			    &nexthdrp);
3493 			ASSERT(nexthdrp == IPPROTO_SCTP);
3494 
3495 			/* Could have caused a pullup? */
3496 			rptr = mp->b_rptr;
3497 		}
3498 	}
3499 
3500 	sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]);
3501 
3502 	if (ira->ira_flags & IRAF_IS_IPV4) {
3503 		ipha_t *ipha;
3504 
3505 		ipha = (ipha_t *)rptr;
3506 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src);
3507 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst);
3508 	} else {
3509 		ip6_t *ip6h;
3510 
3511 		ip6h = (ip6_t *)rptr;
3512 		src = ip6h->ip6_src;
3513 		dst = ip6h->ip6_dst;
3514 	}
3515 
3516 	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
3517 	ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen);
3518 	if (ch == NULL) {
3519 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3520 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3521 		freemsg(mp);
3522 		return;
3523 	}
3524 
3525 	if (!sctp_check_input(sctp, ch, mlen, 1)) {
3526 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3527 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3528 		goto done;
3529 	}
3530 	/*
3531 	 * Check verification tag (special handling for INIT,
3532 	 * COOKIE, SHUTDOWN_COMPLETE and SHUTDOWN_ACK chunks).
3533 	 * ABORTs are handled in the chunk processing loop, since
3534 	 * they may not appear first. All other checked chunks must
3535 	 * appear first, or they will have been dropped by check_input().
3536 	 */
3537 	switch (ch->sch_id) {
3538 	case CHUNK_INIT:
3539 		if (sctph->sh_verf != 0) {
3540 			/* drop it */
3541 			goto done;
3542 		}
3543 		break;
3544 	case CHUNK_SHUTDOWN_COMPLETE:
3545 		if (sctph->sh_verf == sctp->sctp_lvtag)
3546 			break;
3547 		if (sctph->sh_verf == sctp->sctp_fvtag &&
3548 		    SCTP_GET_TBIT(ch)) {
3549 			break;
3550 		}
3551 		/* else drop it */
3552 		goto done;
3553 	case CHUNK_ABORT:
3554 	case CHUNK_COOKIE:
3555 		/* handled below */
3556 		break;
3557 	case CHUNK_SHUTDOWN_ACK:
3558 		if (sctp->sctp_state > SCTPS_BOUND &&
3559 		    sctp->sctp_state < SCTPS_ESTABLISHED) {
3560 			/* treat as OOTB */
3561 			sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst);
3562 			return;
3563 		}
3564 		/* else fallthru */
3565 	default:
3566 		/*
3567 		 * All other packets must have a valid
3568 		 * verification tag; however, if this is a
3569 		 * listener, we use a refined version of
3570 		 * out-of-the-blue logic.
3571 		 */
3572 		if (sctph->sh_verf != sctp->sctp_lvtag &&
3573 		    sctp->sctp_state != SCTPS_LISTEN) {
3574 			/* drop it */
3575 			goto done;
3576 		}
3577 		break;
3578 	}
3579 
3580 	/* Have a valid sctp for this packet */
3581 	fp = sctp_lookup_faddr(sctp, &src);
3582 	dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp,
3583 	    (void *)fp, (void *)sctp));
3584 
3585 	gotdata = 0;
3586 	trysend = 0;
3587 
3588 	now = ddi_get_lbolt64();
3589 	/* Process the chunks */
3590 	do {
3591 		dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n",
3592 		    sctp->sctp_state, (int)(ch->sch_id)));
3593 
3594 		if (ch->sch_id == CHUNK_ABORT) {
3595 			if (sctph->sh_verf != sctp->sctp_lvtag &&
3596 			    sctph->sh_verf != sctp->sctp_fvtag) {
3597 				/* drop it */
3598 				goto done;
3599 			}
3600 		}
3601 
3602 		switch (sctp->sctp_state) {
3603 
3604 		case SCTPS_ESTABLISHED:
3605 		case SCTPS_SHUTDOWN_PENDING:
3606 		case SCTPS_SHUTDOWN_SENT:
3607 			switch (ch->sch_id) {
3608 			case CHUNK_DATA:
3609 				/* 0-length data chunks are not allowed */
3610 				if (ntohs(ch->sch_len) == sizeof (*sdc)) {
3611 					sdc = (sctp_data_hdr_t *)ch;
3612 					tsn = sdc->sdh_tsn;
3613 					sctp_send_abort(sctp, sctp->sctp_fvtag,
3614 					    SCTP_ERR_NO_USR_DATA, (char *)&tsn,
3615 					    sizeof (tsn), mp, 0, B_FALSE, ira);
3616 					sctp_assoc_event(sctp, SCTP_COMM_LOST,
3617 					    0, NULL);
3618 					sctp_clean_death(sctp, ECONNABORTED);
3619 					goto done;
3620 				}
3621 
3622 				ASSERT(fp != NULL);
3623 				sctp->sctp_lastdata = fp;
3624 				sctp_data_chunk(sctp, ch, mp, &dups, fp,
3625 				    &ipp, ira);
3626 				gotdata = 1;
3627 				/* Restart shutdown timer if shutting down */
3628 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
3629 					/*
3630 					 * If we have exceeded our max
3631 					 * wait bound for waiting for a
3632 					 * shutdown ack from the peer,
3633 					 * abort the association.
3634 					 */
3635 					if (sctps->sctps_shutack_wait_bound !=
3636 					    0 &&
3637 					    TICK_TO_MSEC(now -
3638 					    sctp->sctp_out_time) >
3639 					    sctps->sctps_shutack_wait_bound) {
3640 						sctp_send_abort(sctp,
3641 						    sctp->sctp_fvtag, 0, NULL,
3642 						    0, mp, 0, B_FALSE, ira);
3643 						sctp_assoc_event(sctp,
3644 						    SCTP_COMM_LOST, 0, NULL);
3645 						sctp_clean_death(sctp,
3646 						    ECONNABORTED);
3647 						goto done;
3648 					}
3649 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
3650 					    fp->rto);
3651 				}
3652 				break;
3653 			case CHUNK_SACK:
3654 				ASSERT(fp != NULL);
3655 				/*
3656 				 * Peer is real and alive if it can ack our
3657 				 * data.
3658 				 */
3659 				sctp_faddr_alive(sctp, fp);
3660 				trysend = sctp_got_sack(sctp, ch);
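				/*
				 * A negative return indicates a malformed
				 * SACK; tear down the association.
				 */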
3661 				if (trysend < 0) {
3662 					sctp_send_abort(sctp, sctph->sh_verf,
3663 					    0, NULL, 0, mp, 0, B_FALSE, ira);
3664 					sctp_assoc_event(sctp,
3665 					    SCTP_COMM_LOST, 0, NULL);
3666 					sctp_clean_death(sctp,
3667 					    ECONNABORTED);
3668 					goto done;
3669 				}
3670 				break;
3671 			case CHUNK_HEARTBEAT:
3672 				if (!hb_already) {
3673 					/*
3674 					 * Any one packet should contain
3675 					 * at most one heartbeat chunk, so
3676 					 * we should not process it more
3677 					 * than once.
3678 					 */
3679 					sctp_return_heartbeat(sctp, ch, mp);
3680 					hb_already = B_TRUE;
3681 				}
3682 				break;
3683 			case CHUNK_HEARTBEAT_ACK:
3684 				sctp_process_heartbeat(sctp, ch);
3685 				break;
3686 			case CHUNK_SHUTDOWN:
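				/* The peer has initiated a graceful shutdown. */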
3687 				sctp_shutdown_event(sctp);
3688 				trysend = sctp_shutdown_received(sctp, ch,
3689 				    B_FALSE, B_FALSE, fp);
3690 				BUMP_LOCAL(sctp->sctp_ibchunks);
3691 				break;
3692 			case CHUNK_SHUTDOWN_ACK:
3693 				BUMP_LOCAL(sctp->sctp_ibchunks);
3694 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
3695 					sctp_shutdown_complete(sctp);
3696 					BUMP_MIB(&sctps->sctps_mib,
3697 					    sctpShutdowns);
3698 					sctp_assoc_event(sctp,
3699 					    SCTP_SHUTDOWN_COMP, 0, NULL);
3700 					sctp_clean_death(sctp, 0);
3701 					goto done;
3702 				}
3703 				break;
3704 			case CHUNK_ABORT: {
3705 				sctp_saddr_ipif_t *sp;
3706 
3707 				/* Ignore if delete pending */
3708 				sp = sctp_saddr_lookup(sctp, &dst, 0);
3709 				ASSERT(sp != NULL);
3710 				if (sp->saddr_ipif_delete_pending) {
3711 					BUMP_LOCAL(sctp->sctp_ibchunks);
3712 					break;
3713 				}
3714 
3715 				sctp_process_abort(sctp, ch, ECONNRESET);
3716 				goto done;
3717 			}
3718 			case CHUNK_INIT:
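				/*
				 * An INIT on an established association
				 * suggests a peer restart; answer it with
				 * an INIT ACK (RFC 4960 5.2.2).
				 */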
3719 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3720 				break;
3721 			case CHUNK_COOKIE:
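				/*
				 * A valid COOKIE ECHO here implies a peer
				 * restart (RFC 4960 5.2.4); note the
				 * SCTP_RESTART event below.
				 */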
3722 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3723 				    sctph, &recv_adaptation, NULL, ira) != -1) {
3724 					sctp_send_cookie_ack(sctp);
3725 					sctp_assoc_event(sctp, SCTP_RESTART,
3726 					    0, NULL);
3727 					if (recv_adaptation) {
3728 						sctp->sctp_recv_adaptation = 1;
3729 						sctp_adaptation_event(sctp);
3730 					}
3731 				} else {
3732 					BUMP_MIB(&sctps->sctps_mib,
3733 					    sctpInInvalidCookie);
3734 				}
3735 				break;
3736 			case CHUNK_ERROR: {
3737 				int error;
3738 
3739 				BUMP_LOCAL(sctp->sctp_ibchunks);
3740 				error = sctp_handle_error(sctp, sctph, ch, mp,
3741 				    ira);
3742 				if (error != 0) {
3743 					sctp_assoc_event(sctp, SCTP_COMM_LOST,
3744 					    0, NULL);
3745 					sctp_clean_death(sctp, error);
3746 					goto done;
3747 				}
3748 				break;
3749 			}
3750 			case CHUNK_ASCONF:
3751 				ASSERT(fp != NULL);
3752 				sctp_input_asconf(sctp, ch, fp);
3753 				BUMP_LOCAL(sctp->sctp_ibchunks);
3754 				break;
3755 			case CHUNK_ASCONF_ACK:
3756 				ASSERT(fp != NULL);
3757 				sctp_faddr_alive(sctp, fp);
3758 				sctp_input_asconf_ack(sctp, ch, fp);
3759 				BUMP_LOCAL(sctp->sctp_ibchunks);
3760 				break;
3761 			case CHUNK_FORWARD_TSN:
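				/*
				 * PR-SCTP: the peer asks us to advance our
				 * cumulative TSN past abandoned data.
				 */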
3762 				ASSERT(fp != NULL);
3763 				sctp->sctp_lastdata = fp;
3764 				sctp_process_forward_tsn(sctp, ch, fp,
3765 				    &ipp, ira);
3766 				gotdata = 1;
3767 				BUMP_LOCAL(sctp->sctp_ibchunks);
3768 				break;
3769 			default:
3770 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
3771 					goto nomorechunks;
3772 				} /* else skip and continue processing */
3773 				break;
3774 			}
3775 			break;
3776 
3777 		case SCTPS_LISTEN:
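			/*
			 * For a listener, an INIT gets an INIT ACK and a
			 * valid COOKIE ECHO creates the eager for the new
			 * association; most anything else is out-of-the-blue.
			 */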
3778 			switch (ch->sch_id) {
3779 			case CHUNK_INIT:
3780 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3781 				break;
3782 			case CHUNK_COOKIE: {
3783 				sctp_t *eager;
3784 
3785 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3786 				    sctph, &recv_adaptation, &peer_src,
3787 				    ira) == -1) {
3788 					BUMP_MIB(&sctps->sctps_mib,
3789 					    sctpInInvalidCookie);
3790 					goto done;
3791 				}
3792 
3793 				/*
3794 				 * The cookie is good; ensure that
3795 				 * the peer used the verification
3796 				 * tag from the init ack in the header.
3797 				 */
3798 				if (iack->sic_inittag != sctph->sh_verf)
3799 					goto done;
3800 
3801 				eager = sctp_conn_request(sctp, mp, ifindex,
3802 				    ip_hdr_len, iack, ira);
3803 				if (eager == NULL) {
3804 					sctp_send_abort(sctp, sctph->sh_verf,
3805 					    SCTP_ERR_NO_RESOURCES, NULL, 0, mp,
3806 					    0, B_FALSE, ira);
3807 					goto done;
3808 				}
3809 
3810 				/*
3811 				 * If there were extra chunks
3812 				 * bundled with the cookie,
3813 				 * they must be processed
3814 				 * on the eager's queue. We
3815 				 * accomplish this by refeeding
3816 				 * the whole packet into the
3817 				 * state machine on the right
3818 				 * q. The packet (mp) gets
3819 				 * there via the eager's
3820 				 * cookie_mp field (overloaded
3821 				 * with the active open role).
3822 				 * This is picked up when
3823 				 * processing the null bind
3824 				 * request put on the eager's
3825 				 * q by sctp_accept(). We must
3826 				 * first revert the cookie
3827 				 * chunk's length field to network
3828 				 * byteorder so it can be
3829 				 * properly reprocessed on the
3830 				 * eager's queue.
3831 				 */
3832 				BUMP_MIB(&sctps->sctps_mib, sctpPassiveEstab);
3833 				if (mlen > ntohs(ch->sch_len)) {
3834 					eager->sctp_cookie_mp = dupb(mp);
3835 					/*
3836 					 * If no mem, just let
3837 					 * the peer retransmit.
3838 					 */
3839 				}
3840 				sctp_assoc_event(eager, SCTP_COMM_UP, 0, NULL);
3841 				if (recv_adaptation) {
3842 					eager->sctp_recv_adaptation = 1;
3843 					eager->sctp_rx_adaptation_code =
3844 					    sctp->sctp_rx_adaptation_code;
3845 					sctp_adaptation_event(eager);
3846 				}
3847 
3848 				eager->sctp_active = now;
3849 				sctp_send_cookie_ack(eager);
3850 
3851 				wake_eager = B_TRUE;
3852 
3853 				/*
3854 				 * Process rest of the chunks with eager.
3855 				 */
3856 				sctp = eager;
3857 				fp = sctp_lookup_faddr(sctp, &peer_src);
3858 				/*
3859 				 * Confirm peer's original source.  fp can
3860 				 * only be NULL if peer does not use the
3861 				 * original source as one of its addresses...
3862 				 */
3863 				if (fp == NULL)
3864 					fp = sctp_lookup_faddr(sctp, &src);
3865 				else
3866 					sctp_faddr_alive(sctp, fp);
3867 
3868 				/*
3869 				 * Validate the peer addresses; this also
3870 				 * starts the heartbeat timer.
3871 				 */
3872 				sctp_validate_peer(sctp);
3873 				break;
3874 			}
3875 			/* Anything else is considered out-of-the-blue */
3876 			case CHUNK_ERROR:
3877 			case CHUNK_ABORT:
3878 			case CHUNK_COOKIE_ACK:
3879 			case CHUNK_SHUTDOWN_COMPLETE:
3880 				BUMP_LOCAL(sctp->sctp_ibchunks);
3881 				goto done;
3882 			default:
3883 				BUMP_LOCAL(sctp->sctp_ibchunks);
3884 				sctp_send_abort(sctp, sctph->sh_verf, 0, NULL,
3885 				    0, mp, 0, B_TRUE, ira);
3886 				goto done;
3887 			}
3888 			break;
3889 
3890 		case SCTPS_COOKIE_WAIT:
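			/*
			 * Active open: we have sent an INIT and are waiting
			 * for the INIT ACK.
			 */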
3891 			switch (ch->sch_id) {
3892 			case CHUNK_INIT_ACK:
3893 				sctp_stop_faddr_timers(sctp);
3894 				sctp_faddr_alive(sctp, sctp->sctp_current);
3895 				sctp_send_cookie_echo(sctp, ch, mp, ira);
3896 				BUMP_LOCAL(sctp->sctp_ibchunks);
3897 				break;
3898 			case CHUNK_ABORT:
3899 				sctp_process_abort(sctp, ch, ECONNREFUSED);
3900 				goto done;
3901 			case CHUNK_INIT:
3902 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3903 				break;
3904 			case CHUNK_COOKIE:
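				/*
				 * A COOKIE ECHO can arrive here during an
				 * INIT collision, when both ends open the
				 * association simultaneously.
				 */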
3905 				cr = ira->ira_cred;
3906 				cpid = ira->ira_cpid;
3907 
3908 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3909 				    sctph, &recv_adaptation, NULL, ira) == -1) {
3910 					BUMP_MIB(&sctps->sctps_mib,
3911 					    sctpInInvalidCookie);
3912 					break;
3913 				}
3914 				sctp_send_cookie_ack(sctp);
3915 				sctp_stop_faddr_timers(sctp);
3916 				if (!SCTP_IS_DETACHED(sctp)) {
3917 					sctp->sctp_ulp_connected(
3918 					    sctp->sctp_ulpd, 0, cr, cpid);
3919 					sctp_set_ulp_prop(sctp);
3920 
3921 				}
3922 				sctp->sctp_state = SCTPS_ESTABLISHED;
3923 				sctp->sctp_assoc_start_time =
3924 				    (uint32_t)ddi_get_lbolt();
3925 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
3926 				if (sctp->sctp_cookie_mp) {
3927 					freemsg(sctp->sctp_cookie_mp);
3928 					sctp->sctp_cookie_mp = NULL;
3929 				}
3930 
3931 				/* Validate the peer addresses. */
3932 				sctp->sctp_active = now;
3933 				sctp_validate_peer(sctp);
3934 
3935 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
3936 				if (recv_adaptation) {
3937 					sctp->sctp_recv_adaptation = 1;
3938 					sctp_adaptation_event(sctp);
3939 				}
3940 				/* Try sending queued data, or ASCONFs */
3941 				trysend = 1;
3942 				break;
3943 			default:
3944 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
3945 					goto nomorechunks;
3946 				} /* else skip and continue processing */
3947 				break;
3948 			}
3949 			break;
3950 
3951 		case SCTPS_COOKIE_ECHOED:
3952 			switch (ch->sch_id) {
3953 			case CHUNK_COOKIE_ACK:
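				/*
				 * Our COOKIE ECHO has been accepted; the
				 * association is now established.
				 */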
3954 				cr = ira->ira_cred;
3955 				cpid = ira->ira_cpid;
3956 
3957 				if (!SCTP_IS_DETACHED(sctp)) {
3958 					sctp->sctp_ulp_connected(
3959 					    sctp->sctp_ulpd, 0, cr, cpid);
3960 					sctp_set_ulp_prop(sctp);
3961 				}
3962 				if (sctp->sctp_unacked == 0)
3963 					sctp_stop_faddr_timers(sctp);
3964 				sctp->sctp_state = SCTPS_ESTABLISHED;
3965 				sctp->sctp_assoc_start_time =
3966 				    (uint32_t)ddi_get_lbolt();
3967 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
3968 				BUMP_LOCAL(sctp->sctp_ibchunks);
3969 				if (sctp->sctp_cookie_mp) {
3970 					freemsg(sctp->sctp_cookie_mp);
3971 					sctp->sctp_cookie_mp = NULL;
3972 				}
3973 				sctp_faddr_alive(sctp, fp);
3974 				/* Validate the peer addresses. */
3975 				sctp->sctp_active = now;
3976 				sctp_validate_peer(sctp);
3977 
3978 				/* Try sending queued data, or ASCONFs */
3979 				trysend = 1;
3980 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
3981 				sctp_adaptation_event(sctp);
3982 				break;
3983 			case CHUNK_ABORT:
3984 				sctp_process_abort(sctp, ch, ECONNREFUSED);
3985 				goto done;
3986 			case CHUNK_COOKIE:
3987 				cr = ira->ira_cred;
3988 				cpid = ira->ira_cpid;
3989 
3990 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3991 				    sctph, &recv_adaptation, NULL, ira) == -1) {
3992 					BUMP_MIB(&sctps->sctps_mib,
3993 					    sctpInInvalidCookie);
3994 					break;
3995 				}
3996 				sctp_send_cookie_ack(sctp);
3997 
3998 				if (!SCTP_IS_DETACHED(sctp)) {
3999 					sctp->sctp_ulp_connected(
4000 					    sctp->sctp_ulpd, 0, cr, cpid);
4001 					sctp_set_ulp_prop(sctp);
4002 
4003 				}
4004 				if (sctp->sctp_unacked == 0)
4005 					sctp_stop_faddr_timers(sctp);
4006 				sctp->sctp_state = SCTPS_ESTABLISHED;
4007 				sctp->sctp_assoc_start_time =
4008 				    (uint32_t)ddi_get_lbolt();
4009 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
4010 				if (sctp->sctp_cookie_mp) {
4011 					freemsg(sctp->sctp_cookie_mp);
4012 					sctp->sctp_cookie_mp = NULL;
4013 				}
4014 				/* Validate the peer addresses. */
4015 				sctp->sctp_active = now;
4016 				sctp_validate_peer(sctp);
4017 
4018 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
4019 				if (recv_adaptation) {
4020 					sctp->sctp_recv_adaptation = 1;
4021 					sctp_adaptation_event(sctp);
4022 				}
4023 				/* Try sending queued data, or ASCONFs */
4024 				trysend = 1;
4025 				break;
4026 			case CHUNK_INIT:
4027 				sctp_send_initack(sctp, sctph, ch, mp, ira);
4028 				break;
4029 			case CHUNK_ERROR: {
4030 				sctp_parm_hdr_t *p;
4031 
4032 				BUMP_LOCAL(sctp->sctp_ibchunks);
4033 				/* check for a stale cookie */
4034 				if (ntohs(ch->sch_len) >=
4035 				    (sizeof (*p) + sizeof (*ch)) +
4036 				    sizeof (uint32_t)) {
4037 
4038 					p = (sctp_parm_hdr_t *)(ch + 1);
4039 					if (p->sph_type ==
4040 					    htons(SCTP_ERR_STALE_COOKIE)) {
4041 						BUMP_MIB(&sctps->sctps_mib,
4042 						    sctpAborted);
4043 						sctp_error_event(sctp,
4044 						    ch, B_FALSE);
4045 						sctp_assoc_event(sctp,
4046 						    SCTP_COMM_LOST, 0, NULL);
4047 						sctp_clean_death(sctp,
4048 						    ECONNREFUSED);
4049 						goto done;
4050 					}
4051 				}
4052 				break;
4053 			}
4054 			case CHUNK_HEARTBEAT:
4055 				if (!hb_already) {
4056 					sctp_return_heartbeat(sctp, ch, mp);
4057 					hb_already = B_TRUE;
4058 				}
4059 				break;
4060 			default:
4061 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4062 					goto nomorechunks;
4063 				} /* else skip and continue processing */
4064 			} /* switch (ch->sch_id) */
4065 			break;
4066 
4067 		case SCTPS_SHUTDOWN_ACK_SENT:
4068 			switch (ch->sch_id) {
4069 			case CHUNK_ABORT:
4070 				/* Pass gathered wisdom to IP for keeping */
4071 				sctp_update_dce(sctp);
4072 				sctp_process_abort(sctp, ch, 0);
4073 				goto done;
4074 			case CHUNK_SHUTDOWN_COMPLETE:
4075 				BUMP_LOCAL(sctp->sctp_ibchunks);
4076 				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
4077 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
4078 				    NULL);
4079 
4080 				/* Pass gathered wisdom to IP for keeping */
4081 				sctp_update_dce(sctp);
4082 				sctp_clean_death(sctp, 0);
4083 				goto done;
4084 			case CHUNK_SHUTDOWN_ACK:
4085 				sctp_shutdown_complete(sctp);
4086 				BUMP_LOCAL(sctp->sctp_ibchunks);
4087 				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
4088 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
4089 				    NULL);
4090 				sctp_clean_death(sctp, 0);
4091 				goto done;
4092 			case CHUNK_COOKIE:
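				/*
				 * The peer is still echoing its cookie, so
				 * it apparently missed our shutdown; let
				 * sctp_shutdown_received() redrive it.
				 */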
4093 				(void) sctp_shutdown_received(sctp, NULL,
4094 				    B_TRUE, B_FALSE, fp);
4095 				BUMP_LOCAL(sctp->sctp_ibchunks);
4096 				break;
4097 			case CHUNK_HEARTBEAT:
4098 				if (!hb_already) {
4099 					sctp_return_heartbeat(sctp, ch, mp);
4100 					hb_already = B_TRUE;
4101 				}
4102 				break;
4103 			default:
4104 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4105 					goto nomorechunks;
4106 				} /* else skip and continue processing */
4107 				break;
4108 			}
4109 			break;
4110 
4111 		case SCTPS_SHUTDOWN_RECEIVED:
4112 			switch (ch->sch_id) {
4113 			case CHUNK_SHUTDOWN:
4114 				trysend = sctp_shutdown_received(sctp, ch,
4115 				    B_FALSE, B_FALSE, fp);
4116 				break;
4117 			case CHUNK_SACK:
4118 				trysend = sctp_got_sack(sctp, ch);
4119 				if (trysend < 0) {
4120 					sctp_send_abort(sctp, sctph->sh_verf,
4121 					    0, NULL, 0, mp, 0, B_FALSE, ira);
4122 					sctp_assoc_event(sctp,
4123 					    SCTP_COMM_LOST, 0, NULL);
4124 					sctp_clean_death(sctp,
4125 					    ECONNABORTED);
4126 					goto done;
4127 				}
4128 				break;
4129 			case CHUNK_ABORT:
4130 				sctp_process_abort(sctp, ch, ECONNRESET);
4131 				goto done;
4132 			case CHUNK_HEARTBEAT:
4133 				if (!hb_already) {
4134 					sctp_return_heartbeat(sctp, ch, mp);
4135 					hb_already = B_TRUE;
4136 				}
4137 				break;
4138 			default:
4139 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4140 					goto nomorechunks;
4141 				} /* else skip and continue processing */
4142 				break;
4143 			}
4144 			break;
4145 
4146 		default:
4147 			/*
4148 			 * The only remaining states are SCTPS_IDLE and
4149 			 * SCTPS_BOUND, and we should not be getting here
4150 			 * for these.
4151 			 */
4152 			ASSERT(0);
4153 		} /* switch (sctp->sctp_state) */
4154 
4155 		ch = sctp_next_chunk(ch, &mlen);
4156 		if (ch != NULL && !sctp_check_input(sctp, ch, mlen, 0))
4157 			goto done;
4158 	} while (ch != NULL);
4159 
4160 	/* Finished processing all chunks in packet */
4161 
4162 nomorechunks:
4163 	/* SACK if necessary */
4164 	if (gotdata) {
4165 		boolean_t sack_sent;
4166 
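		/*
		 * Count this packet toward the delayed-SACK threshold;
		 * sctp_sack() decides whether one goes out now.
		 */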
4167 		(sctp->sctp_sack_toggle)++;
4168 		sack_sent = sctp_sack(sctp, dups);
4169 		dups = NULL;
4170 
4171 		/* If a SACK is sent, no need to restart the timer. */
4172 		if (!sack_sent && !sctp->sctp_ack_timer_running) {
4173 			sctp->sctp_ack_timer_running = B_TRUE;
4174 			sctp_timer(sctp, sctp->sctp_ack_mp,
4175 			    MSEC_TO_TICK(sctps->sctps_deferred_ack_interval));
4176 		}
4177 	}
4178 
4179 	if (trysend) {
4180 		sctp_output(sctp, UINT_MAX);
4181 		if (sctp->sctp_cxmit_list != NULL)
4182 			sctp_wput_asconf(sctp, NULL);
4183 	}
4184 	/*
4185 	 * If there is unsent data, make sure a timer is running.  Check
4186 	 * timer_mp; if sctp_closei_local() ran, the timers may be freed.
4187 	 */
4188 	if (sctp->sctp_unsent > 0 && !sctp->sctp_current->timer_running &&
4189 	    sctp->sctp_current->timer_mp != NULL) {
4190 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
4191 		    sctp->sctp_current->rto);
4192 	}
4193 
4194 done:
4195 	if (dups != NULL)
4196 		freeb(dups);
4197 	freemsg(mp);
4198 
4199 	if (sctp->sctp_err_chunks != NULL)
4200 		sctp_process_err(sctp);
4201 
4202 	if (wake_eager) {
4203 		/*
4204 		 * sctp points to the newly created control block; we need
4205 		 * to release it before exiting.
4206 		 */
4207 		WAKE_SCTP(sctp);
4208 	}
4209 }
4210 
4211 /*
4212  * Some amount of data got removed from the receive queue.
4213  * Check if we should send a window update.
4214  *
4215  * Due to the way sctp_rwnd updates are made, the ULP can give reports
4216  * out of order.  To keep from dropping incoming data due to this, we only
4217  * update sctp_rwnd if it's larger than what we've reported to the peer.
4218  */
4219 void
4220 sctp_recvd(sctp_t *sctp, int len)
4221 {
4222 	int32_t old, new;
4223 	sctp_stack_t	*sctps = sctp->sctp_sctps;
4224 
4225 	ASSERT(sctp != NULL);
4226 	RUN_SCTP(sctp);
4227 
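	/*
	 * Only grow the window; out-of-order reports from the ULP could
	 * otherwise shrink it below what we last advertised.
	 */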
4228 	if (len < sctp->sctp_rwnd) {
4229 		WAKE_SCTP(sctp);
4230 		return;
4231 	}
4232 	ASSERT(sctp->sctp_rwnd >= sctp->sctp_rxqueued);
4233 	old = sctp->sctp_rwnd - sctp->sctp_rxqueued;
4234 	new = len - sctp->sctp_rxqueued;
4235 	sctp->sctp_rwnd = len;
4236 
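	/*
	 * Send a window update if the window has at least doubled, or if
	 * what we last offered was under one MSS (silly window syndrome
	 * avoidance).
	 */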
4237 	if (sctp->sctp_state >= SCTPS_ESTABLISHED &&
4238 	    ((old <= new >> 1) || (old < sctp->sctp_mss))) {
4239 		sctp->sctp_force_sack = 1;
4240 		BUMP_MIB(&sctps->sctps_mib, sctpOutWinUpdate);
4241 		(void) sctp_sack(sctp, NULL);
4242 	}
4243 	WAKE_SCTP(sctp);
4244 }
4245