xref: /illumos-gate/usr/src/uts/common/inet/sctp/sctp_input.c (revision aedf2b3bb56b025fcaf87b49ec6c8aeea07f16d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/systm.h>
29 #include <sys/stream.h>
30 #include <sys/cmn_err.h>
31 #include <sys/kmem.h>
32 #define	_SUN_TPI_VERSION 2
33 #include <sys/tihdr.h>
34 #include <sys/socket.h>
35 #include <sys/strsun.h>
36 #include <sys/strsubr.h>
37 
38 #include <netinet/in.h>
39 #include <netinet/ip6.h>
40 #include <netinet/tcp_seq.h>
41 #include <netinet/sctp.h>
42 
43 #include <inet/common.h>
44 #include <inet/ip.h>
45 #include <inet/ip_if.h>
46 #include <inet/ip6.h>
47 #include <inet/mib2.h>
48 #include <inet/ipclassifier.h>
49 #include <inet/ipp_common.h>
50 #include <inet/ipsec_impl.h>
51 #include <inet/sctp_ip.h>
52 
53 #include "sctp_impl.h"
54 #include "sctp_asconf.h"
55 #include "sctp_addr.h"
56 
57 static struct kmem_cache *sctp_kmem_set_cache;
58 
59 /*
60  * PR-SCTP comments.
61  *
62  * When we get a valid Forward TSN chunk, we check the fragment list for this
63  * SSN and preceeding SSNs free all them. Further, if this Forward TSN causes
64  * the next expected SSN to be present in the stream queue, we deliver any
65  * such stranded messages upstream. We also update the SACK info. appropriately.
66  * When checking for advancing the cumulative ack (in sctp_cumack()) we must
67  * check for abandoned chunks and messages. While traversing the tramsmit
68  * list if we come across an abandoned chunk, we can skip the message (i.e.
69  * take it out of the (re)transmit list) since this message, and hence this
70  * chunk, has been marked abandoned by sctp_rexmit(). If we come across an
71  * unsent chunk for a message this now abandoned we need to check if a
72  * Forward TSN needs to be sent, this could be a case where we deferred sending
73  * a Forward TSN in sctp_get_msg_to_send(). Further, after processing a
74  * SACK we check if the Advanced peer ack point can be moved ahead, i.e.
75  * if we can send a Forward TSN via sctp_check_abandoned_data().
76  */
77 void
78 sctp_free_set(sctp_set_t *s)
79 {
80 	sctp_set_t *p;
81 
82 	while (s) {
83 		p = s->next;
84 		kmem_cache_free(sctp_kmem_set_cache, s);
85 		s = p;
86 	}
87 }
88 
89 static void
90 sctp_ack_add(sctp_set_t **head, uint32_t tsn, int *num)
91 {
92 	sctp_set_t *p, *t;
93 
94 	if (head == NULL || num == NULL)
95 		return;
96 
97 	ASSERT(*num >= 0);
98 	ASSERT((*num == 0 && *head == NULL) || (*num > 0 && *head != NULL));
99 
100 	if (*head == NULL) {
101 		*head = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
102 		if (*head == NULL)
103 			return;
104 		(*head)->prev = (*head)->next = NULL;
105 		(*head)->begin = tsn;
106 		(*head)->end = tsn;
107 		*num = 1;
108 		return;
109 	}
110 
111 	ASSERT((*head)->prev == NULL);
112 
113 	/*
114 	 * Handle this special case here so we don't have to check
115 	 * for it each time in the loop.
116 	 */
117 	if (SEQ_LT(tsn + 1, (*head)->begin)) {
118 		/* add a new set, and move the head pointer */
119 		t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
120 		if (t == NULL)
121 			return;
122 		t->next = *head;
123 		t->prev = NULL;
124 		(*head)->prev = t;
125 		t->begin = tsn;
126 		t->end = tsn;
127 		(*num)++;
128 		*head = t;
129 		return;
130 	}
131 
132 	/*
133 	 * We need to handle the following cases, where p points to
134 	 * the current set (as we walk through the loop):
135 	 *
136 	 * 1. tsn is entirely less than p; create a new set before p.
137 	 * 2. tsn borders p from less; coalesce p with tsn.
138 	 * 3. tsn is withing p; do nothing.
139 	 * 4. tsn borders p from greater; coalesce p with tsn.
140 	 * 4a. p may now border p->next from less; if so, coalesce those
141 	 *    two sets.
142 	 * 5. tsn is entirely greater then all sets; add a new set at
143 	 *    the end.
144 	 */
145 	for (p = *head; ; p = p->next) {
146 		if (SEQ_LT(tsn + 1, p->begin)) {
147 			/* 1: add a new set before p. */
148 			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
149 			if (t == NULL)
150 				return;
151 			t->next = p;
152 			t->prev = NULL;
153 			t->begin = tsn;
154 			t->end = tsn;
155 			if (p->prev) {
156 				t->prev = p->prev;
157 				p->prev->next = t;
158 			}
159 			p->prev = t;
160 			(*num)++;
161 			return;
162 		}
163 
164 		if ((tsn + 1) == p->begin) {
165 			/* 2: adjust p->begin */
166 			p->begin = tsn;
167 			return;
168 		}
169 
170 		if (SEQ_GEQ(tsn, p->begin) && SEQ_LEQ(tsn, p->end)) {
171 			/* 3; do nothing */
172 			return;
173 		}
174 
175 		if ((p->end + 1) == tsn) {
176 			/* 4; adjust p->end */
177 			p->end = tsn;
178 
179 			if (p->next != NULL && (tsn + 1) == p->next->begin) {
180 				/* 4a: coalesce p and p->next */
181 				t = p->next;
182 				p->end = t->end;
183 				p->next = t->next;
184 				if (t->next != NULL)
185 					t->next->prev = p;
186 				kmem_cache_free(sctp_kmem_set_cache, t);
187 				(*num)--;
188 			}
189 			return;
190 		}
191 
192 		if (p->next == NULL) {
193 			/* 5: add new set at the end */
194 			t = kmem_cache_alloc(sctp_kmem_set_cache, KM_NOSLEEP);
195 			if (t == NULL)
196 				return;
197 			t->next = NULL;
198 			t->prev = p;
199 			t->begin = tsn;
200 			t->end = tsn;
201 			p->next = t;
202 			(*num)++;
203 			return;
204 		}
205 
206 		if (SEQ_GT(tsn, p->end + 1))
207 			continue;
208 	}
209 }
210 
211 static void
212 sctp_ack_rem(sctp_set_t **head, uint32_t end, int *num)
213 {
214 	sctp_set_t *p, *t;
215 
216 	if (head == NULL || *head == NULL || num == NULL)
217 		return;
218 
219 	/* Nothing to remove */
220 	if (SEQ_LT(end, (*head)->begin))
221 		return;
222 
223 	/* Find out where to start removing sets */
224 	for (p = *head; p->next; p = p->next) {
225 		if (SEQ_LEQ(end, p->end))
226 			break;
227 	}
228 
229 	if (SEQ_LT(end, p->end) && SEQ_GEQ(end, p->begin)) {
230 		/* adjust p */
231 		p->begin = end + 1;
232 		/* all done */
233 		if (p == *head)
234 			return;
235 	} else if (SEQ_GEQ(end, p->end)) {
236 		/* remove this set too */
237 		p = p->next;
238 	}
239 
240 	/* unlink everything before this set */
241 	t = *head;
242 	*head = p;
243 	if (p != NULL && p->prev != NULL) {
244 		p->prev->next = NULL;
245 		p->prev = NULL;
246 	}
247 
248 	sctp_free_set(t);
249 
250 	/* recount the number of sets */
251 	*num = 0;
252 
253 	for (p = *head; p != NULL; p = p->next)
254 		(*num)++;
255 }
256 
257 void
258 sctp_sets_init()
259 {
260 	sctp_kmem_set_cache = kmem_cache_create("sctp_set_cache",
261 	    sizeof (sctp_set_t), 0, NULL, NULL, NULL, NULL,
262 	    NULL, 0);
263 }
264 
265 void
266 sctp_sets_fini()
267 {
268 	kmem_cache_destroy(sctp_kmem_set_cache);
269 }
270 
271 sctp_chunk_hdr_t *
272 sctp_first_chunk(uchar_t *rptr, ssize_t remaining)
273 {
274 	sctp_chunk_hdr_t *ch;
275 	uint16_t ch_len;
276 
277 	if (remaining < sizeof (*ch)) {
278 		return (NULL);
279 	}
280 
281 	ch = (sctp_chunk_hdr_t *)rptr;
282 	ch_len = ntohs(ch->sch_len);
283 
284 	if (ch_len < sizeof (*ch) || remaining < ch_len) {
285 		return (NULL);
286 	}
287 
288 	return (ch);
289 }
290 
291 sctp_chunk_hdr_t *
292 sctp_next_chunk(sctp_chunk_hdr_t *ch, ssize_t *remaining)
293 {
294 	int pad;
295 	uint16_t ch_len;
296 
297 	if (!ch) {
298 		return (NULL);
299 	}
300 
301 	ch_len = ntohs(ch->sch_len);
302 
303 	if ((pad = ch_len & (SCTP_ALIGN - 1)) != 0) {
304 		pad = SCTP_ALIGN - pad;
305 	}
306 
307 	*remaining -= (ch_len + pad);
308 	ch = (sctp_chunk_hdr_t *)((char *)ch + ch_len + pad);
309 
310 	return (sctp_first_chunk((uchar_t *)ch, *remaining));
311 }
312 
313 /*
314  * Attach ancillary data to a received SCTP segments.
315  * If the source address (fp) is not the primary, send up a
316  * unitdata_ind so recvfrom() can populate the msg_name field.
317  * If ancillary data is also requested, we append it to the
318  * unitdata_req. Otherwise, we just send up an optdata_ind.
319  */
320 static int
321 sctp_input_add_ancillary(sctp_t *sctp, mblk_t **mp, sctp_data_hdr_t *dcp,
322     sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
323 {
324 	struct T_unitdata_ind	*tudi;
325 	int			optlen;
326 	int			hdrlen;
327 	uchar_t			*optptr;
328 	struct cmsghdr		*cmsg;
329 	mblk_t			*mp1;
330 	struct sockaddr_in6	sin_buf[1];
331 	struct sockaddr_in6	*sin6;
332 	struct sockaddr_in	*sin4;
333 	crb_t			 addflag;	/* Which pieces to add */
334 	conn_t			*connp = sctp->sctp_connp;
335 
336 	sin4 = NULL;
337 	sin6 = NULL;
338 
339 	optlen = hdrlen = 0;
340 	addflag.crb_all = 0;
341 
342 	/* Figure out address size */
343 	if (connp->conn_family == AF_INET) {
344 		sin4 = (struct sockaddr_in *)sin_buf;
345 		sin4->sin_family = AF_INET;
346 		sin4->sin_port = connp->conn_fport;
347 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, sin4->sin_addr.s_addr);
348 		hdrlen = sizeof (*tudi) + sizeof (*sin4);
349 	} else {
350 		sin6 = sin_buf;
351 		sin6->sin6_family = AF_INET6;
352 		sin6->sin6_port = connp->conn_fport;
353 		sin6->sin6_addr = fp->faddr;
354 		hdrlen = sizeof (*tudi) + sizeof (*sin6);
355 	}
356 	/* If app asked to receive send / recv info */
357 	if (sctp->sctp_recvsndrcvinfo)
358 		optlen += sizeof (*cmsg) + sizeof (struct sctp_sndrcvinfo);
359 
360 	if (connp->conn_recv_ancillary.crb_all == 0)
361 		goto noancillary;
362 
363 	if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
364 	    ira->ira_ruifindex != sctp->sctp_recvifindex) {
365 		optlen += sizeof (*cmsg) + sizeof (struct in6_pktinfo);
366 		if (hdrlen == 0)
367 			hdrlen = sizeof (struct T_unitdata_ind);
368 		addflag.crb_ip_recvpktinfo = 1;
369 	}
370 	/* If app asked for hoplimit and it has changed ... */
371 	if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
372 	    ipp->ipp_hoplimit != sctp->sctp_recvhops) {
373 		optlen += sizeof (*cmsg) + sizeof (uint_t);
374 		if (hdrlen == 0)
375 			hdrlen = sizeof (struct T_unitdata_ind);
376 		addflag.crb_ipv6_recvhoplimit = 1;
377 	}
378 	/* If app asked for tclass and it has changed ... */
379 	if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
380 	    ipp->ipp_tclass != sctp->sctp_recvtclass) {
381 		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
382 		if (hdrlen == 0)
383 			hdrlen = sizeof (struct T_unitdata_ind);
384 		addflag.crb_ipv6_recvtclass = 1;
385 	}
386 	/* If app asked for hopbyhop headers and it has changed ... */
387 	if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
388 	    ip_cmpbuf(sctp->sctp_hopopts, sctp->sctp_hopoptslen,
389 	    (ipp->ipp_fields & IPPF_HOPOPTS),
390 	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
391 		optlen += sizeof (*cmsg) + ipp->ipp_hopoptslen -
392 		    sctp->sctp_v6label_len;
393 		if (hdrlen == 0)
394 			hdrlen = sizeof (struct T_unitdata_ind);
395 		addflag.crb_ipv6_recvhopopts = 1;
396 		if (!ip_allocbuf((void **)&sctp->sctp_hopopts,
397 		    &sctp->sctp_hopoptslen,
398 		    (ipp->ipp_fields & IPPF_HOPOPTS),
399 		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
400 			return (-1);
401 	}
402 	/* If app asked for dst headers before routing headers ... */
403 	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
404 	    ip_cmpbuf(sctp->sctp_rthdrdstopts, sctp->sctp_rthdrdstoptslen,
405 	    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
406 	    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
407 		optlen += sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
408 		if (hdrlen == 0)
409 			hdrlen = sizeof (struct T_unitdata_ind);
410 		addflag.crb_ipv6_recvrthdrdstopts = 1;
411 		if (!ip_allocbuf((void **)&sctp->sctp_rthdrdstopts,
412 		    &sctp->sctp_rthdrdstoptslen,
413 		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
414 		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
415 			return (-1);
416 	}
417 	/* If app asked for routing headers and it has changed ... */
418 	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
419 	    ip_cmpbuf(sctp->sctp_rthdr, sctp->sctp_rthdrlen,
420 	    (ipp->ipp_fields & IPPF_RTHDR),
421 	    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
422 		optlen += sizeof (*cmsg) + ipp->ipp_rthdrlen;
423 		if (hdrlen == 0)
424 			hdrlen = sizeof (struct T_unitdata_ind);
425 		addflag.crb_ipv6_recvrthdr = 1;
426 		if (!ip_allocbuf((void **)&sctp->sctp_rthdr,
427 		    &sctp->sctp_rthdrlen,
428 		    (ipp->ipp_fields & IPPF_RTHDR),
429 		    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
430 			return (-1);
431 	}
432 	/* If app asked for dest headers and it has changed ... */
433 	if (connp->conn_recv_ancillary.crb_ipv6_recvdstopts &&
434 	    ip_cmpbuf(sctp->sctp_dstopts, sctp->sctp_dstoptslen,
435 	    (ipp->ipp_fields & IPPF_DSTOPTS),
436 	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
437 		optlen += sizeof (*cmsg) + ipp->ipp_dstoptslen;
438 		if (hdrlen == 0)
439 			hdrlen = sizeof (struct T_unitdata_ind);
440 		addflag.crb_ipv6_recvdstopts = 1;
441 		if (!ip_allocbuf((void **)&sctp->sctp_dstopts,
442 		    &sctp->sctp_dstoptslen,
443 		    (ipp->ipp_fields & IPPF_DSTOPTS),
444 		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
445 			return (-1);
446 	}
447 noancillary:
448 	/* Nothing to add */
449 	if (hdrlen == 0)
450 		return (-1);
451 
452 	mp1 = allocb(hdrlen + optlen + sizeof (void *), BPRI_MED);
453 	if (mp1 == NULL)
454 		return (-1);
455 	mp1->b_cont = *mp;
456 	*mp = mp1;
457 	mp1->b_rptr += sizeof (void *);  /* pointer worth of padding */
458 	mp1->b_wptr = mp1->b_rptr + hdrlen + optlen;
459 	DB_TYPE(mp1) = M_PROTO;
460 	tudi = (struct T_unitdata_ind *)mp1->b_rptr;
461 	tudi->PRIM_type = T_UNITDATA_IND;
462 	tudi->SRC_length = sin4 ? sizeof (*sin4) : sizeof (*sin6);
463 	tudi->SRC_offset = sizeof (*tudi);
464 	tudi->OPT_offset = sizeof (*tudi) + tudi->SRC_length;
465 	tudi->OPT_length = optlen;
466 	if (sin4) {
467 		bcopy(sin4, tudi + 1, sizeof (*sin4));
468 	} else {
469 		bcopy(sin6, tudi + 1, sizeof (*sin6));
470 	}
471 	optptr = (uchar_t *)tudi + tudi->OPT_offset;
472 
473 	if (sctp->sctp_recvsndrcvinfo) {
474 		/* XXX need backout method if memory allocation fails. */
475 		struct sctp_sndrcvinfo *sri;
476 
477 		cmsg = (struct cmsghdr *)optptr;
478 		cmsg->cmsg_level = IPPROTO_SCTP;
479 		cmsg->cmsg_type = SCTP_SNDRCV;
480 		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*sri);
481 		optptr += sizeof (*cmsg);
482 
483 		sri = (struct sctp_sndrcvinfo *)(cmsg + 1);
484 		ASSERT(OK_32PTR(sri));
485 		sri->sinfo_stream = ntohs(dcp->sdh_sid);
486 		sri->sinfo_ssn = ntohs(dcp->sdh_ssn);
487 		if (SCTP_DATA_GET_UBIT(dcp)) {
488 			sri->sinfo_flags = MSG_UNORDERED;
489 		} else {
490 			sri->sinfo_flags = 0;
491 		}
492 		sri->sinfo_ppid = dcp->sdh_payload_id;
493 		sri->sinfo_context = 0;
494 		sri->sinfo_timetolive = 0;
495 		sri->sinfo_tsn = ntohl(dcp->sdh_tsn);
496 		sri->sinfo_cumtsn = sctp->sctp_ftsn;
497 		sri->sinfo_assoc_id = 0;
498 
499 		optptr += sizeof (*sri);
500 	}
501 
502 	/*
503 	 * If app asked for pktinfo and the index has changed ...
504 	 * Note that the local address never changes for the connection.
505 	 */
506 	if (addflag.crb_ip_recvpktinfo) {
507 		struct in6_pktinfo *pkti;
508 		uint_t ifindex;
509 
510 		ifindex = ira->ira_ruifindex;
511 		cmsg = (struct cmsghdr *)optptr;
512 		cmsg->cmsg_level = IPPROTO_IPV6;
513 		cmsg->cmsg_type = IPV6_PKTINFO;
514 		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (*pkti);
515 		optptr += sizeof (*cmsg);
516 
517 		pkti = (struct in6_pktinfo *)optptr;
518 		if (connp->conn_family == AF_INET6)
519 			pkti->ipi6_addr = sctp->sctp_ip6h->ip6_src;
520 		else
521 			IN6_IPADDR_TO_V4MAPPED(sctp->sctp_ipha->ipha_src,
522 			    &pkti->ipi6_addr);
523 
524 		pkti->ipi6_ifindex = ifindex;
525 		optptr += sizeof (*pkti);
526 		ASSERT(OK_32PTR(optptr));
527 		/* Save as "last" value */
528 		sctp->sctp_recvifindex = ifindex;
529 	}
530 	/* If app asked for hoplimit and it has changed ... */
531 	if (addflag.crb_ipv6_recvhoplimit) {
532 		cmsg = (struct cmsghdr *)optptr;
533 		cmsg->cmsg_level = IPPROTO_IPV6;
534 		cmsg->cmsg_type = IPV6_HOPLIMIT;
535 		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
536 		optptr += sizeof (*cmsg);
537 
538 		*(uint_t *)optptr = ipp->ipp_hoplimit;
539 		optptr += sizeof (uint_t);
540 		ASSERT(OK_32PTR(optptr));
541 		/* Save as "last" value */
542 		sctp->sctp_recvhops = ipp->ipp_hoplimit;
543 	}
544 	/* If app asked for tclass and it has changed ... */
545 	if (addflag.crb_ipv6_recvtclass) {
546 		cmsg = (struct cmsghdr *)optptr;
547 		cmsg->cmsg_level = IPPROTO_IPV6;
548 		cmsg->cmsg_type = IPV6_TCLASS;
549 		cmsg->cmsg_len = sizeof (*cmsg) + sizeof (uint_t);
550 		optptr += sizeof (*cmsg);
551 
552 		*(uint_t *)optptr = ipp->ipp_tclass;
553 		optptr += sizeof (uint_t);
554 		ASSERT(OK_32PTR(optptr));
555 		/* Save as "last" value */
556 		sctp->sctp_recvtclass = ipp->ipp_tclass;
557 	}
558 	if (addflag.crb_ipv6_recvhopopts) {
559 		cmsg = (struct cmsghdr *)optptr;
560 		cmsg->cmsg_level = IPPROTO_IPV6;
561 		cmsg->cmsg_type = IPV6_HOPOPTS;
562 		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_hopoptslen;
563 		optptr += sizeof (*cmsg);
564 
565 		bcopy(ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
566 		optptr += ipp->ipp_hopoptslen;
567 		ASSERT(OK_32PTR(optptr));
568 		/* Save as last value */
569 		ip_savebuf((void **)&sctp->sctp_hopopts,
570 		    &sctp->sctp_hopoptslen,
571 		    (ipp->ipp_fields & IPPF_HOPOPTS),
572 		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
573 	}
574 	if (addflag.crb_ipv6_recvrthdrdstopts) {
575 		cmsg = (struct cmsghdr *)optptr;
576 		cmsg->cmsg_level = IPPROTO_IPV6;
577 		cmsg->cmsg_type = IPV6_RTHDRDSTOPTS;
578 		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrdstoptslen;
579 		optptr += sizeof (*cmsg);
580 
581 		bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen);
582 		optptr += ipp->ipp_rthdrdstoptslen;
583 		ASSERT(OK_32PTR(optptr));
584 		/* Save as last value */
585 		ip_savebuf((void **)&sctp->sctp_rthdrdstopts,
586 		    &sctp->sctp_rthdrdstoptslen,
587 		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
588 		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
589 	}
590 	if (addflag.crb_ipv6_recvrthdr) {
591 		cmsg = (struct cmsghdr *)optptr;
592 		cmsg->cmsg_level = IPPROTO_IPV6;
593 		cmsg->cmsg_type = IPV6_RTHDR;
594 		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_rthdrlen;
595 		optptr += sizeof (*cmsg);
596 
597 		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
598 		optptr += ipp->ipp_rthdrlen;
599 		ASSERT(OK_32PTR(optptr));
600 		/* Save as last value */
601 		ip_savebuf((void **)&sctp->sctp_rthdr,
602 		    &sctp->sctp_rthdrlen,
603 		    (ipp->ipp_fields & IPPF_RTHDR),
604 		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
605 	}
606 	if (addflag.crb_ipv6_recvdstopts) {
607 		cmsg = (struct cmsghdr *)optptr;
608 		cmsg->cmsg_level = IPPROTO_IPV6;
609 		cmsg->cmsg_type = IPV6_DSTOPTS;
610 		cmsg->cmsg_len = sizeof (*cmsg) + ipp->ipp_dstoptslen;
611 		optptr += sizeof (*cmsg);
612 
613 		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
614 		optptr += ipp->ipp_dstoptslen;
615 		ASSERT(OK_32PTR(optptr));
616 		/* Save as last value */
617 		ip_savebuf((void **)&sctp->sctp_dstopts,
618 		    &sctp->sctp_dstoptslen,
619 		    (ipp->ipp_fields & IPPF_DSTOPTS),
620 		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
621 	}
622 
623 	ASSERT(optptr == mp1->b_wptr);
624 
625 	return (0);
626 }
627 
628 void
629 sctp_free_reass(sctp_instr_t *sip)
630 {
631 	mblk_t *mp, *mpnext, *mctl;
632 
633 	for (mp = sip->istr_reass; mp != NULL; mp = mpnext) {
634 		mpnext = mp->b_next;
635 		mp->b_next = NULL;
636 		mp->b_prev = NULL;
637 		if (DB_TYPE(mp) == M_CTL) {
638 			mctl = mp;
639 			ASSERT(mp->b_cont != NULL);
640 			mp = mp->b_cont;
641 			mctl->b_cont = NULL;
642 			freeb(mctl);
643 		}
644 		freemsg(mp);
645 	}
646 }
647 
648 /*
649  * If the series of data fragments of which dmp is a part is successfully
650  * reassembled, the first mblk in the series is returned. dc is adjusted
651  * to point at the data chunk in the lead mblk, and b_rptr also points to
652  * the data chunk; the following mblk's b_rptr's point at the actual payload.
653  *
654  * If the series is not yet reassembled, NULL is returned. dc is not changed.
655  * XXX should probably move this up into the state machine.
656  */
657 
658 /* Fragment list for un-ordered messages. Partial delivery is not supported */
659 static mblk_t *
660 sctp_uodata_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc)
661 {
662 	mblk_t		*hmp;
663 	mblk_t		*begin = NULL;
664 	mblk_t		*end = NULL;
665 	sctp_data_hdr_t	*qdc;
666 	uint32_t	ntsn;
667 	uint32_t	tsn = ntohl((*dc)->sdh_tsn);
668 #ifdef	DEBUG
669 	mblk_t		*mp1;
670 #endif
671 
672 	/* First frag. */
673 	if (sctp->sctp_uo_frags == NULL) {
674 		sctp->sctp_uo_frags = dmp;
675 		return (NULL);
676 	}
677 	hmp = sctp->sctp_uo_frags;
678 	/*
679 	 * Insert the segment according to the TSN, fragmented unordered
680 	 * chunks are sequenced by TSN.
681 	 */
682 	while (hmp != NULL) {
683 		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
684 		ntsn = ntohl(qdc->sdh_tsn);
685 		if (SEQ_GT(ntsn, tsn)) {
686 			if (hmp->b_prev == NULL) {
687 				dmp->b_next = hmp;
688 				hmp->b_prev = dmp;
689 				sctp->sctp_uo_frags = dmp;
690 			} else {
691 				dmp->b_next = hmp;
692 				dmp->b_prev = hmp->b_prev;
693 				hmp->b_prev->b_next = dmp;
694 				hmp->b_prev = dmp;
695 			}
696 			break;
697 		}
698 		if (hmp->b_next == NULL) {
699 			hmp->b_next = dmp;
700 			dmp->b_prev = hmp;
701 			break;
702 		}
703 		hmp = hmp->b_next;
704 	}
705 	/* check if we completed a msg */
706 	if (SCTP_DATA_GET_BBIT(*dc)) {
707 		begin = dmp;
708 	} else if (SCTP_DATA_GET_EBIT(*dc)) {
709 		end = dmp;
710 	}
711 	/*
712 	 * We walk consecutive TSNs backwards till we get a seg. with
713 	 * the B bit
714 	 */
715 	if (begin == NULL) {
716 		for (hmp = dmp->b_prev; hmp != NULL; hmp = hmp->b_prev) {
717 			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
718 			ntsn = ntohl(qdc->sdh_tsn);
719 			if ((int32_t)(tsn - ntsn) > 1) {
720 				return (NULL);
721 			}
722 			if (SCTP_DATA_GET_BBIT(qdc)) {
723 				begin = hmp;
724 				break;
725 			}
726 			tsn = ntsn;
727 		}
728 	}
729 	tsn = ntohl((*dc)->sdh_tsn);
730 	/*
731 	 * We walk consecutive TSNs till we get a seg. with the E bit
732 	 */
733 	if (end == NULL) {
734 		for (hmp = dmp->b_next; hmp != NULL; hmp = hmp->b_next) {
735 			qdc = (sctp_data_hdr_t *)hmp->b_rptr;
736 			ntsn = ntohl(qdc->sdh_tsn);
737 			if ((int32_t)(ntsn - tsn) > 1) {
738 				return (NULL);
739 			}
740 			if (SCTP_DATA_GET_EBIT(qdc)) {
741 				end = hmp;
742 				break;
743 			}
744 			tsn = ntsn;
745 		}
746 	}
747 	if (begin == NULL || end == NULL) {
748 		return (NULL);
749 	}
750 	/* Got one!, Remove the msg from the list */
751 	if (sctp->sctp_uo_frags == begin) {
752 		ASSERT(begin->b_prev == NULL);
753 		sctp->sctp_uo_frags = end->b_next;
754 		if (end->b_next != NULL)
755 			end->b_next->b_prev = NULL;
756 	} else {
757 		begin->b_prev->b_next = end->b_next;
758 		if (end->b_next != NULL)
759 			end->b_next->b_prev = begin->b_prev;
760 	}
761 	begin->b_prev = NULL;
762 	end->b_next = NULL;
763 
764 	/*
765 	 * Null out b_next and b_prev and chain using b_cont.
766 	 */
767 	dmp = end = begin;
768 	hmp = begin->b_next;
769 	*dc = (sctp_data_hdr_t *)begin->b_rptr;
770 	begin->b_next = NULL;
771 	while (hmp != NULL) {
772 		qdc = (sctp_data_hdr_t *)hmp->b_rptr;
773 		hmp->b_rptr = (uchar_t *)(qdc + 1);
774 		end = hmp->b_next;
775 		dmp->b_cont = hmp;
776 		dmp = hmp;
777 
778 		if (end != NULL)
779 			hmp->b_next = NULL;
780 		hmp->b_prev = NULL;
781 		hmp = end;
782 	}
783 	BUMP_LOCAL(sctp->sctp_reassmsgs);
784 #ifdef	DEBUG
785 	mp1 = begin;
786 	while (mp1 != NULL) {
787 		ASSERT(mp1->b_next == NULL);
788 		ASSERT(mp1->b_prev == NULL);
789 		mp1 = mp1->b_cont;
790 	}
791 #endif
792 	return (begin);
793 }
794 
795 /*
796  * Try partial delivery.
797  */
798 static mblk_t *
799 sctp_try_partial_delivery(sctp_t *sctp, mblk_t *hmp, sctp_reass_t *srp,
800     sctp_data_hdr_t **dc)
801 {
802 	mblk_t		*mp;
803 	mblk_t		*dmp;
804 	mblk_t		*qmp;
805 	mblk_t		*prev;
806 	sctp_data_hdr_t	*qdc;
807 	uint32_t	tsn;
808 
809 	ASSERT(DB_TYPE(hmp) == M_CTL);
810 
811 	dprint(4, ("trypartial: got=%d, needed=%d\n",
812 	    (int)(srp->got), (int)(srp->needed)));
813 
814 	mp = hmp->b_cont;
815 	qdc = (sctp_data_hdr_t *)mp->b_rptr;
816 
817 	ASSERT(SCTP_DATA_GET_BBIT(qdc) && srp->hasBchunk);
818 
819 	tsn = ntohl(qdc->sdh_tsn) + 1;
820 
821 	/*
822 	 * This loop has two exit conditions: the
823 	 * end of received chunks has been reached, or
824 	 * there is a break in the sequence. We want
825 	 * to chop the reassembly list as follows (the
826 	 * numbers are TSNs):
827 	 *   10 -> 11 -> 	(end of chunks)
828 	 *   10 -> 11 -> | 13   (break in sequence)
829 	 */
830 	prev = mp;
831 	mp = mp->b_cont;
832 	while (mp != NULL) {
833 		qdc = (sctp_data_hdr_t *)mp->b_rptr;
834 		if (ntohl(qdc->sdh_tsn) != tsn)
835 			break;
836 		prev = mp;
837 		mp = mp->b_cont;
838 		tsn++;
839 	}
840 	/*
841 	 * We are sending all the fragments upstream, we have to retain
842 	 * the srp info for further fragments.
843 	 */
844 	if (mp == NULL) {
845 		dmp = hmp->b_cont;
846 		hmp->b_cont = NULL;
847 		srp->nexttsn = tsn;
848 		srp->msglen = 0;
849 		srp->needed = 0;
850 		srp->got = 0;
851 		srp->partial_delivered = B_TRUE;
852 		srp->tail = NULL;
853 	} else {
854 		dmp = hmp->b_cont;
855 		hmp->b_cont = mp;
856 	}
857 	srp->hasBchunk = B_FALSE;
858 	/*
859 	 * mp now points at the last chunk in the sequence,
860 	 * and prev points to mp's previous in the list.
861 	 * We chop the list at prev, and convert mp into the
862 	 * new list head by setting the B bit. Subsequence
863 	 * fragment deliveries will follow the normal reassembly
864 	 * path.
865 	 */
866 	prev->b_cont = NULL;
867 	srp->partial_delivered = B_TRUE;
868 
869 	dprint(4, ("trypartial: got some, got=%d, needed=%d\n",
870 	    (int)(srp->got), (int)(srp->needed)));
871 
872 	/*
873 	 * Adjust all mblk's except the lead so their rptr's point to the
874 	 * payload. sctp_data_chunk() will need to process the lead's
875 	 * data chunk section, so leave it's rptr pointing at the data chunk.
876 	 */
877 	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
878 	if (srp->tail != NULL) {
879 		srp->got--;
880 		ASSERT(srp->got != 0);
881 		if (srp->needed != 0) {
882 			srp->needed--;
883 			ASSERT(srp->needed != 0);
884 		}
885 		srp->msglen -= ntohs((*dc)->sdh_len);
886 	}
887 	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
888 		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
889 		qmp->b_rptr = (uchar_t *)(qdc + 1);
890 
891 		/*
892 		 * Deduct the balance from got and needed here, now that
893 		 * we know we are actually delivering these data.
894 		 */
895 		if (srp->tail != NULL) {
896 			srp->got--;
897 			ASSERT(srp->got != 0);
898 			if (srp->needed != 0) {
899 				srp->needed--;
900 				ASSERT(srp->needed != 0);
901 			}
902 			srp->msglen -= ntohs(qdc->sdh_len);
903 		}
904 	}
905 	ASSERT(srp->msglen == 0);
906 	BUMP_LOCAL(sctp->sctp_reassmsgs);
907 
908 	return (dmp);
909 }
910 
911 /*
912  * Fragment list for ordered messages.
913  * If no error occures, error is set to 0. If we run out of memory, error
914  * is set to 1. If the peer commits a fatal error (like using different
915  * sequence numbers for the same data fragment series), the association is
916  * aborted and error is set to 2. tpfinished indicates whether we have
917  * assembled a complete message, this is used in sctp_data_chunk() to
918  * see if we can try to send any queued message for this stream.
919  */
920 static mblk_t *
921 sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error,
922     sctp_instr_t *sip, boolean_t *tpfinished)
923 {
924 	mblk_t		*hmp;
925 	mblk_t		*pmp;
926 	mblk_t		*qmp;
927 	mblk_t		*first_mp;
928 	sctp_reass_t	*srp;
929 	sctp_data_hdr_t	*qdc;
930 	sctp_data_hdr_t	*bdc;
931 	sctp_data_hdr_t	*edc;
932 	uint32_t	tsn;
933 	uint16_t	fraglen = 0;
934 
935 	*error = 0;
936 
937 	/* find the reassembly queue for this data chunk */
938 	hmp = qmp = sip->istr_reass;
939 	for (; hmp != NULL; hmp = hmp->b_next) {
940 		srp = (sctp_reass_t *)DB_BASE(hmp);
941 		if (ntohs((*dc)->sdh_ssn) == srp->ssn)
942 			goto foundit;
943 		else if (SSN_GT(srp->ssn, ntohs((*dc)->sdh_ssn)))
944 			break;
945 		qmp = hmp;
946 	}
947 
948 	/*
949 	 * Allocate a M_CTL that will contain information about this
950 	 * fragmented message.
951 	 */
952 	if ((pmp = allocb(sizeof (*srp), BPRI_MED)) == NULL) {
953 		*error = 1;
954 		return (NULL);
955 	}
956 	DB_TYPE(pmp) = M_CTL;
957 	srp = (sctp_reass_t *)DB_BASE(pmp);
958 	pmp->b_cont = dmp;
959 
960 	if (hmp != NULL) {
961 		if (sip->istr_reass == hmp) {
962 			sip->istr_reass = pmp;
963 			pmp->b_next = hmp;
964 			pmp->b_prev = NULL;
965 			hmp->b_prev = pmp;
966 		} else {
967 			qmp->b_next = pmp;
968 			pmp->b_prev = qmp;
969 			pmp->b_next = hmp;
970 			hmp->b_prev = pmp;
971 		}
972 	} else {
973 		/* make a new reass head and stick it on the end */
974 		if (sip->istr_reass == NULL) {
975 			sip->istr_reass = pmp;
976 			pmp->b_prev = NULL;
977 		} else {
978 			qmp->b_next = pmp;
979 			pmp->b_prev = qmp;
980 		}
981 		pmp->b_next = NULL;
982 	}
983 	srp->partial_delivered = B_FALSE;
984 	srp->ssn = ntohs((*dc)->sdh_ssn);
985 empty_srp:
986 	srp->needed = 0;
987 	srp->got = 1;
988 	srp->tail = dmp;
989 	if (SCTP_DATA_GET_BBIT(*dc)) {
990 		srp->msglen = ntohs((*dc)->sdh_len);
991 		srp->nexttsn = ntohl((*dc)->sdh_tsn) + 1;
992 		srp->hasBchunk = B_TRUE;
993 	} else if (srp->partial_delivered &&
994 	    srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
995 		SCTP_DATA_SET_BBIT(*dc);
996 		/* Last fragment */
997 		if (SCTP_DATA_GET_EBIT(*dc)) {
998 			srp->needed = 1;
999 			goto frag_done;
1000 		}
1001 		srp->hasBchunk = B_TRUE;
1002 		srp->msglen = ntohs((*dc)->sdh_len);
1003 		srp->nexttsn++;
1004 	}
1005 	return (NULL);
1006 foundit:
1007 	/*
1008 	 * else already have a reassembly queue. Insert the new data chunk
1009 	 * in the reassemble queue. Try the tail first, on the assumption
1010 	 * that the fragments are coming in in order.
1011 	 */
1012 	qmp = srp->tail;
1013 
1014 	/*
1015 	 * This means the message was partially delivered.
1016 	 */
1017 	if (qmp == NULL) {
1018 		ASSERT(srp->got == 0 && srp->needed == 0 &&
1019 		    srp->partial_delivered);
1020 		ASSERT(hmp->b_cont == NULL);
1021 		hmp->b_cont = dmp;
1022 		goto empty_srp;
1023 	}
1024 	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
1025 	ASSERT(qmp->b_cont == NULL);
1026 
1027 	/* XXXIs it fine to do this just here? */
1028 	if ((*dc)->sdh_sid != qdc->sdh_sid) {
1029 		/* our peer is fatally confused; XXX abort the assc */
1030 		*error = 2;
1031 		return (NULL);
1032 	}
1033 	if (SEQ_GT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
1034 		qmp->b_cont = dmp;
1035 		srp->tail = dmp;
1036 		dmp->b_cont = NULL;
1037 		if (srp->hasBchunk && srp->nexttsn == ntohl((*dc)->sdh_tsn)) {
1038 			srp->msglen += ntohs((*dc)->sdh_len);
1039 			srp->nexttsn++;
1040 		}
1041 		goto inserted;
1042 	}
1043 
1044 	/* Next check for insertion at the beginning */
1045 	qmp = hmp->b_cont;
1046 	qdc = (sctp_data_hdr_t *)qmp->b_rptr;
1047 	if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
1048 		dmp->b_cont = qmp;
1049 		hmp->b_cont = dmp;
1050 		if (SCTP_DATA_GET_BBIT(*dc)) {
1051 			srp->hasBchunk = B_TRUE;
1052 			srp->nexttsn = ntohl((*dc)->sdh_tsn);
1053 		}
1054 		goto preinserted;
1055 	}
1056 
1057 	/* Insert somewhere in the middle */
1058 	for (;;) {
1059 		/* Tail check above should have caught this */
1060 		ASSERT(qmp->b_cont != NULL);
1061 
1062 		qdc = (sctp_data_hdr_t *)qmp->b_cont->b_rptr;
1063 		if (SEQ_LT(ntohl((*dc)->sdh_tsn), ntohl(qdc->sdh_tsn))) {
1064 			/* insert here */
1065 			dmp->b_cont = qmp->b_cont;
1066 			qmp->b_cont = dmp;
1067 			break;
1068 		}
1069 		qmp = qmp->b_cont;
1070 	}
1071 preinserted:
1072 	if (!srp->hasBchunk || ntohl((*dc)->sdh_tsn) != srp->nexttsn)
1073 		goto inserted;
1074 	/*
1075 	 * fraglen contains the length of consecutive chunks of fragments.
1076 	 * starting from the chunk inserted recently.
1077 	 */
1078 	tsn = srp->nexttsn;
1079 	for (qmp = dmp; qmp != NULL; qmp = qmp->b_cont) {
1080 		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
1081 		if (tsn != ntohl(qdc->sdh_tsn))
1082 			break;
1083 		fraglen += ntohs(qdc->sdh_len);
1084 		tsn++;
1085 	}
1086 	srp->nexttsn = tsn;
1087 	srp->msglen += fraglen;
1088 inserted:
1089 	srp->got++;
1090 	first_mp = hmp->b_cont;
1091 	if (srp->needed == 0) {
1092 		/* check if we have the first and last fragments */
1093 		bdc = (sctp_data_hdr_t *)first_mp->b_rptr;
1094 		edc = (sctp_data_hdr_t *)srp->tail->b_rptr;
1095 
1096 		/* calculate how many fragments are needed, if possible  */
1097 		if (SCTP_DATA_GET_BBIT(bdc) && SCTP_DATA_GET_EBIT(edc)) {
1098 			srp->needed = ntohl(edc->sdh_tsn) -
1099 			    ntohl(bdc->sdh_tsn) + 1;
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * Try partial delivery if the message length has exceeded the
1105 	 * partial delivery point. Only do this if we can immediately
1106 	 * deliver the partially assembled message, and only partially
1107 	 * deliver one message at a time (i.e. messages cannot be
1108 	 * intermixed arriving at the upper layer). A simple way to
1109 	 * enforce this is to only try partial delivery if this TSN is
1110 	 * the next expected TSN. Partial Delivery not supported
1111 	 * for un-ordered message.
1112 	 */
1113 	if (srp->needed != srp->got) {
1114 		dmp = NULL;
1115 		if (ntohl((*dc)->sdh_tsn) == sctp->sctp_ftsn &&
1116 		    srp->msglen >= sctp->sctp_pd_point) {
1117 			dmp = sctp_try_partial_delivery(sctp, hmp, srp, dc);
1118 			*tpfinished = B_FALSE;
1119 		}
1120 		return (dmp);
1121 	}
1122 frag_done:
1123 	/*
1124 	 * else reassembly done; prepare the data for delivery.
1125 	 * First unlink hmp from the ssn list.
1126 	 */
1127 	if (sip->istr_reass == hmp) {
1128 		sip->istr_reass = hmp->b_next;
1129 		if (hmp->b_next)
1130 			hmp->b_next->b_prev = NULL;
1131 	} else {
1132 		ASSERT(hmp->b_prev != NULL);
1133 		hmp->b_prev->b_next = hmp->b_next;
1134 		if (hmp->b_next)
1135 			hmp->b_next->b_prev = hmp->b_prev;
1136 	}
1137 
1138 	/*
1139 	 * Using b_prev and b_next was a little sinful, but OK since
1140 	 * this mblk is never put*'d. However, freeb() will still
1141 	 * ASSERT that they are unused, so we need to NULL them out now.
1142 	 */
1143 	hmp->b_next = NULL;
1144 	hmp->b_prev = NULL;
1145 	dmp = hmp;
1146 	dmp = dmp->b_cont;
1147 	hmp->b_cont = NULL;
1148 	freeb(hmp);
1149 	*tpfinished = B_TRUE;
1150 
1151 	/*
1152 	 * Adjust all mblk's except the lead so their rptr's point to the
1153 	 * payload. sctp_data_chunk() will need to process the lead's
1154 	 * data chunk section, so leave it's rptr pointing at the data chunk.
1155 	 */
1156 	*dc = (sctp_data_hdr_t *)dmp->b_rptr;
1157 	for (qmp = dmp->b_cont; qmp != NULL; qmp = qmp->b_cont) {
1158 		qdc = (sctp_data_hdr_t *)qmp->b_rptr;
1159 		qmp->b_rptr = (uchar_t *)(qdc + 1);
1160 	}
1161 	BUMP_LOCAL(sctp->sctp_reassmsgs);
1162 
1163 	return (dmp);
1164 }
1165 static void
1166 sctp_add_dup(uint32_t tsn, mblk_t **dups)
1167 {
1168 	mblk_t *mp;
1169 	size_t bsize = SCTP_DUP_MBLK_SZ * sizeof (tsn);
1170 
1171 	if (dups == NULL) {
1172 		return;
1173 	}
1174 
1175 	/* first time? */
1176 	if (*dups == NULL) {
1177 		*dups = allocb(bsize, BPRI_MED);
1178 		if (*dups == NULL) {
1179 			return;
1180 		}
1181 	}
1182 
1183 	mp = *dups;
1184 	if ((mp->b_wptr - mp->b_rptr) >= bsize) {
1185 		/* maximum reached */
1186 		return;
1187 	}
1188 
1189 	/* add the duplicate tsn */
1190 	bcopy(&tsn, mp->b_wptr, sizeof (tsn));
1191 	mp->b_wptr += sizeof (tsn);
1192 	ASSERT((mp->b_wptr - mp->b_rptr) <= bsize);
1193 }
1194 
1195 static void
1196 sctp_data_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *mp, mblk_t **dups,
1197     sctp_faddr_t *fp, ip_pkt_t *ipp, ip_recv_attr_t *ira)
1198 {
1199 	sctp_data_hdr_t *dc;
1200 	mblk_t *dmp, *pmp;
1201 	sctp_instr_t *instr;
1202 	int ubit;
1203 	int isfrag;
1204 	uint16_t ssn;
1205 	uint32_t oftsn;
1206 	boolean_t can_deliver = B_TRUE;
1207 	uint32_t tsn;
1208 	int dlen;
1209 	boolean_t tpfinished = B_TRUE;
1210 	int32_t new_rwnd;
1211 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1212 	int	error;
1213 
1214 	/* The following are used multiple times, so we inline them */
1215 #define	SCTP_ACK_IT(sctp, tsn)						\
1216 	if (tsn == sctp->sctp_ftsn) {					\
1217 		dprint(2, ("data_chunk: acking next %x\n", tsn));	\
1218 		(sctp)->sctp_ftsn++;					\
1219 		if ((sctp)->sctp_sack_gaps > 0)				\
1220 			(sctp)->sctp_force_sack = 1;			\
1221 	} else if (SEQ_GT(tsn, sctp->sctp_ftsn)) {			\
1222 		/* Got a gap; record it */				\
1223 		BUMP_LOCAL(sctp->sctp_outseqtsns);			\
1224 		dprint(2, ("data_chunk: acking gap %x\n", tsn));	\
1225 		sctp_ack_add(&sctp->sctp_sack_info, tsn,		\
1226 		    &sctp->sctp_sack_gaps);				\
1227 		sctp->sctp_force_sack = 1;				\
1228 	}
1229 
1230 	dmp = NULL;
1231 
1232 	dc = (sctp_data_hdr_t *)ch;
1233 	tsn = ntohl(dc->sdh_tsn);
1234 
1235 	dprint(3, ("sctp_data_chunk: mp=%p tsn=%x\n", (void *)mp, tsn));
1236 
1237 	/* Check for duplicates */
1238 	if (SEQ_LT(tsn, sctp->sctp_ftsn)) {
1239 		dprint(4, ("sctp_data_chunk: dropping duplicate\n"));
1240 		BUMP_LOCAL(sctp->sctp_idupchunks);
1241 		sctp->sctp_force_sack = 1;
1242 		sctp_add_dup(dc->sdh_tsn, dups);
1243 		return;
1244 	}
1245 
1246 	if (sctp->sctp_sack_info != NULL) {
1247 		sctp_set_t *sp;
1248 
1249 		for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
1250 			if (SEQ_GEQ(tsn, sp->begin) && SEQ_LEQ(tsn, sp->end)) {
1251 				dprint(4,
1252 				    ("sctp_data_chunk: dropping dup > "
1253 				    "cumtsn\n"));
1254 				BUMP_LOCAL(sctp->sctp_idupchunks);
1255 				sctp->sctp_force_sack = 1;
1256 				sctp_add_dup(dc->sdh_tsn, dups);
1257 				return;
1258 			}
1259 		}
1260 	}
1261 
1262 	/* We cannot deliver anything up now but we still need to handle it. */
1263 	if (SCTP_IS_DETACHED(sctp)) {
1264 		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
1265 		can_deliver = B_FALSE;
1266 	}
1267 
1268 	dlen = ntohs(dc->sdh_len) - sizeof (*dc);
1269 
1270 	/*
1271 	 * Check for buffer space. Note if this is the next expected TSN
1272 	 * we have to take it to avoid deadlock because we cannot deliver
1273 	 * later queued TSNs and thus clear buffer space without it.
1274 	 * We drop anything that is purely zero window probe data here.
1275 	 */
1276 	if ((sctp->sctp_rwnd - sctp->sctp_rxqueued < dlen) &&
1277 	    (tsn != sctp->sctp_ftsn || sctp->sctp_rwnd == 0)) {
1278 		/* Drop and SACK, but don't advance the cumulative TSN. */
1279 		sctp->sctp_force_sack = 1;
1280 		dprint(0, ("sctp_data_chunk: exceed rwnd %d rxqueued %d "
1281 		    "dlen %d ssn %d tsn %x\n", sctp->sctp_rwnd,
1282 		    sctp->sctp_rxqueued, dlen, ntohs(dc->sdh_ssn),
1283 		    ntohl(dc->sdh_tsn)));
1284 		return;
1285 	}
1286 
1287 	if (ntohs(dc->sdh_sid) >= sctp->sctp_num_istr) {
1288 		sctp_bsc_t	inval_parm;
1289 
1290 		/* Will populate the CAUSE block in the ERROR chunk. */
1291 		inval_parm.bsc_sid = dc->sdh_sid;
1292 		/* RESERVED, ignored at the receiving end */
1293 		inval_parm.bsc_pad = 0;
1294 
1295 		/* ack and drop it */
1296 		sctp_add_err(sctp, SCTP_ERR_BAD_SID, (void *)&inval_parm,
1297 		    sizeof (sctp_bsc_t), fp);
1298 		SCTP_ACK_IT(sctp, tsn);
1299 		return;
1300 	}
1301 
1302 	ubit = SCTP_DATA_GET_UBIT(dc);
1303 	ASSERT(sctp->sctp_instr != NULL);
1304 	instr = &sctp->sctp_instr[ntohs(dc->sdh_sid)];
1305 	/* Initialize the stream, if not yet used */
1306 	if (instr->sctp == NULL)
1307 		instr->sctp = sctp;
1308 
1309 	isfrag = !(SCTP_DATA_GET_BBIT(dc) && SCTP_DATA_GET_EBIT(dc));
1310 	ssn = ntohs(dc->sdh_ssn);
1311 
1312 	dmp = dupb(mp);
1313 	if (dmp == NULL) {
1314 		/* drop it and don't ack it, causing the peer to retransmit */
1315 		return;
1316 	}
1317 	dmp->b_wptr = (uchar_t *)ch + ntohs(ch->sch_len);
1318 
1319 	sctp->sctp_rxqueued += dlen;
1320 
1321 	oftsn = sctp->sctp_ftsn;
1322 
1323 	if (isfrag) {
1324 
1325 		error = 0;
1326 		/* fragmented data chunk */
1327 		dmp->b_rptr = (uchar_t *)dc;
1328 		if (ubit) {
1329 			dmp = sctp_uodata_frag(sctp, dmp, &dc);
1330 #if	DEBUG
1331 			if (dmp != NULL) {
1332 				ASSERT(instr ==
1333 				    &sctp->sctp_instr[ntohs(dc->sdh_sid)]);
1334 			}
1335 #endif
1336 		} else {
1337 			dmp = sctp_data_frag(sctp, dmp, &dc, &error, instr,
1338 			    &tpfinished);
1339 		}
1340 		if (error != 0) {
1341 			sctp->sctp_rxqueued -= dlen;
1342 			if (error == 1) {
1343 				/*
1344 				 * out of memory; don't ack it so
1345 				 * the peer retransmits
1346 				 */
1347 				return;
1348 			} else if (error == 2) {
1349 				/*
1350 				 * fatal error (i.e. peer used different
1351 				 * ssn's for same fragmented data) --
1352 				 * the association has been aborted.
1353 				 * XXX need to return errval so state
1354 				 * machine can also abort processing.
1355 				 */
1356 				dprint(0, ("error 2: must not happen!\n"));
1357 				return;
1358 			}
1359 		}
1360 
1361 		if (dmp == NULL) {
1362 			/*
1363 			 * Can't process this data now, but the cumulative
1364 			 * TSN may be advanced, so do the checks at done.
1365 			 */
1366 			SCTP_ACK_IT(sctp, tsn);
1367 			goto done;
1368 		}
1369 	}
1370 
1371 	/*
1372 	 * Insert complete messages in correct order for ordered delivery.
1373 	 * tpfinished is true when the incoming chunk contains a complete
1374 	 * message or is the final missing fragment which completed a message.
1375 	 */
1376 	if (!ubit && tpfinished && ssn != instr->nextseq) {
1377 		/* Adjust rptr to point at the data chunk for compares */
1378 		dmp->b_rptr = (uchar_t *)dc;
1379 
1380 		dprint(2,
1381 		    ("data_chunk: inserted %x in pq (ssn %d expected %d)\n",
1382 		    ntohl(dc->sdh_tsn), (int)(ssn), (int)(instr->nextseq)));
1383 
1384 		if (instr->istr_msgs == NULL) {
1385 			instr->istr_msgs = dmp;
1386 			ASSERT(dmp->b_prev == NULL && dmp->b_next == NULL);
1387 		} else {
1388 			mblk_t			*imblk = instr->istr_msgs;
1389 			sctp_data_hdr_t		*idc;
1390 
1391 			/*
1392 			 * XXXNeed to take sequence wraps into account,
1393 			 * ... and a more efficient insertion algo.
1394 			 */
1395 			for (;;) {
1396 				idc = (sctp_data_hdr_t *)imblk->b_rptr;
1397 				if (SSN_GT(ntohs(idc->sdh_ssn),
1398 				    ntohs(dc->sdh_ssn))) {
1399 					if (instr->istr_msgs == imblk) {
1400 						instr->istr_msgs = dmp;
1401 						dmp->b_next = imblk;
1402 						imblk->b_prev = dmp;
1403 					} else {
1404 						ASSERT(imblk->b_prev != NULL);
1405 						imblk->b_prev->b_next = dmp;
1406 						dmp->b_prev = imblk->b_prev;
1407 						imblk->b_prev = dmp;
1408 						dmp->b_next = imblk;
1409 					}
1410 					break;
1411 				}
1412 				if (imblk->b_next == NULL) {
1413 					imblk->b_next = dmp;
1414 					dmp->b_prev = imblk;
1415 					break;
1416 				}
1417 				imblk = imblk->b_next;
1418 			}
1419 		}
1420 		(instr->istr_nmsgs)++;
1421 		(sctp->sctp_istr_nmsgs)++;
1422 		SCTP_ACK_IT(sctp, tsn);
1423 		return;
1424 	}
1425 
1426 	/*
1427 	 * Else we can deliver the data directly. Recalculate
1428 	 * dlen now since we may have reassembled data.
1429 	 */
1430 	dlen = dmp->b_wptr - (uchar_t *)dc - sizeof (*dc);
1431 	for (pmp = dmp->b_cont; pmp != NULL; pmp = pmp->b_cont)
1432 		dlen += MBLKL(pmp);
1433 	ASSERT(sctp->sctp_rxqueued >= dlen);
1434 
1435 	/* Deliver the message. */
1436 	sctp->sctp_rxqueued -= dlen;
1437 
1438 	if (can_deliver) {
1439 
1440 		dmp->b_rptr = (uchar_t *)(dc + 1);
1441 		if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
1442 		    ipp, ira) == 0) {
1443 			dprint(1, ("sctp_data_chunk: delivering %lu bytes\n",
1444 			    msgdsize(dmp)));
1445 			sctp->sctp_rwnd -= dlen;
1446 			/*
1447 			 * Override b_flag for SCTP sockfs internal use
1448 			 */
1449 			dmp->b_flag = tpfinished ? 0 : SCTP_PARTIAL_DATA;
1450 			new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd, dmp,
1451 			    msgdsize(dmp), 0, &error, NULL);
1452 			/*
1453 			 * Since we always deliver the next TSN data chunk,
1454 			 * we may buffer a little more than allowed. In
1455 			 * that case, just mark the window as 0.
1456 			 */
1457 			if (new_rwnd < 0)
1458 				sctp->sctp_rwnd = 0;
1459 			else if (new_rwnd > sctp->sctp_rwnd)
1460 				sctp->sctp_rwnd = new_rwnd;
1461 			SCTP_ACK_IT(sctp, tsn);
1462 		} else {
1463 			/* Just free the message if we don't have memory. */
1464 			freemsg(dmp);
1465 			return;
1466 		}
1467 	} else {
1468 		/* About to free the data */
1469 		freemsg(dmp);
1470 		SCTP_ACK_IT(sctp, tsn);
1471 	}
1472 
1473 	/*
1474 	 * data, now enqueued, may already have been processed and free'd
1475 	 * by the ULP (or we may have just freed it above, if we could not
1476 	 * deliver it), so we must not reference it (this is why we kept
1477 	 * the ssn and ubit above).
1478 	 */
1479 	if (ubit != 0) {
1480 		BUMP_LOCAL(sctp->sctp_iudchunks);
1481 		goto done;
1482 	}
1483 	BUMP_LOCAL(sctp->sctp_idchunks);
1484 
1485 	/*
1486 	 * If there was a partial delivery and it has not finished,
1487 	 * don't pull anything from the pqueues.
1488 	 */
1489 	if (!tpfinished) {
1490 		goto done;
1491 	}
1492 
1493 	instr->nextseq = ssn + 1;
1494 	/* Deliver any successive data chunks in the instr queue */
1495 	while (instr->istr_nmsgs > 0) {
1496 		dmp = (mblk_t *)instr->istr_msgs;
1497 		dc = (sctp_data_hdr_t *)dmp->b_rptr;
1498 		ssn = ntohs(dc->sdh_ssn);
1499 		/* Gap in the sequence */
1500 		if (ssn != instr->nextseq)
1501 			break;
1502 
1503 		/* Else deliver the data */
1504 		(instr->istr_nmsgs)--;
1505 		(instr->nextseq)++;
1506 		(sctp->sctp_istr_nmsgs)--;
1507 
1508 		instr->istr_msgs = instr->istr_msgs->b_next;
1509 		if (instr->istr_msgs != NULL)
1510 			instr->istr_msgs->b_prev = NULL;
1511 		dmp->b_next = dmp->b_prev = NULL;
1512 
1513 		dprint(2, ("data_chunk: pulling %x from pq (ssn %d)\n",
1514 		    ntohl(dc->sdh_tsn), (int)ssn));
1515 
1516 		/*
1517 		 * If this chunk was reassembled, each b_cont represents
1518 		 * another TSN; advance ftsn now.
1519 		 */
1520 		dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
1521 		for (pmp = dmp->b_cont; pmp; pmp = pmp->b_cont)
1522 			dlen += MBLKL(pmp);
1523 
1524 		ASSERT(sctp->sctp_rxqueued >= dlen);
1525 
1526 		sctp->sctp_rxqueued -= dlen;
1527 		if (can_deliver) {
1528 			dmp->b_rptr = (uchar_t *)(dc + 1);
1529 			if (sctp_input_add_ancillary(sctp, &dmp, dc, fp,
1530 			    ipp, ira) == 0) {
1531 				dprint(1, ("sctp_data_chunk: delivering %lu "
1532 				    "bytes\n", msgdsize(dmp)));
1533 				sctp->sctp_rwnd -= dlen;
1534 				/*
1535 				 * Override b_flag for SCTP sockfs internal use
1536 				 */
1537 				dmp->b_flag = tpfinished ?
1538 				    0 : SCTP_PARTIAL_DATA;
1539 				new_rwnd = sctp->sctp_ulp_recv(sctp->sctp_ulpd,
1540 				    dmp, msgdsize(dmp), 0, &error, NULL);
1541 				if (new_rwnd < 0)
1542 					sctp->sctp_rwnd = 0;
1543 				else if (new_rwnd > sctp->sctp_rwnd)
1544 					sctp->sctp_rwnd = new_rwnd;
1545 				SCTP_ACK_IT(sctp, tsn);
1546 			} else {
1547 				freemsg(dmp);
1548 				return;
1549 			}
1550 		} else {
1551 			/* About to free the data */
1552 			freemsg(dmp);
1553 			SCTP_ACK_IT(sctp, tsn);
1554 		}
1555 	}
1556 
1557 done:
1558 
1559 	/*
1560 	 * If there are gap reports pending, check if advancing
1561 	 * the ftsn here closes a gap. If so, we can advance
1562 	 * ftsn to the end of the set.
1563 	 */
1564 	if (sctp->sctp_sack_info != NULL &&
1565 	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
1566 		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
1567 	}
1568 	/*
1569 	 * If ftsn has moved forward, maybe we can remove gap reports.
1570 	 * NB: dmp may now be NULL, so don't dereference it here.
1571 	 */
1572 	if (oftsn != sctp->sctp_ftsn && sctp->sctp_sack_info != NULL) {
1573 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
1574 		    &sctp->sctp_sack_gaps);
1575 		dprint(2, ("data_chunk: removed acks before %x (num=%d)\n",
1576 		    sctp->sctp_ftsn - 1, sctp->sctp_sack_gaps));
1577 	}
1578 
1579 #ifdef	DEBUG
1580 	if (sctp->sctp_sack_info != NULL) {
1581 		ASSERT(sctp->sctp_ftsn != sctp->sctp_sack_info->begin);
1582 	}
1583 #endif
1584 
1585 #undef	SCTP_ACK_IT
1586 }
1587 
1588 void
1589 sctp_fill_sack(sctp_t *sctp, unsigned char *dst, int sacklen)
1590 {
1591 	sctp_chunk_hdr_t *sch;
1592 	sctp_sack_chunk_t *sc;
1593 	sctp_sack_frag_t *sf;
1594 	uint16_t num_gaps = sctp->sctp_sack_gaps;
1595 	sctp_set_t *sp;
1596 
1597 	/* Chunk hdr */
1598 	sch = (sctp_chunk_hdr_t *)dst;
1599 	sch->sch_id = CHUNK_SACK;
1600 	sch->sch_flags = 0;
1601 	sch->sch_len = htons(sacklen);
1602 
1603 	/* SACK chunk */
1604 	sctp->sctp_lastacked = sctp->sctp_ftsn - 1;
1605 
1606 	sc = (sctp_sack_chunk_t *)(sch + 1);
1607 	sc->ssc_cumtsn = htonl(sctp->sctp_lastacked);
1608 	if (sctp->sctp_rxqueued < sctp->sctp_rwnd) {
1609 		sc->ssc_a_rwnd = htonl(sctp->sctp_rwnd - sctp->sctp_rxqueued);
1610 	} else {
1611 		sc->ssc_a_rwnd = 0;
1612 	}
1613 	sc->ssc_numfrags = htons(num_gaps);
1614 	sc->ssc_numdups = 0;
1615 
1616 	/* lay in gap reports */
1617 	sf = (sctp_sack_frag_t *)(sc + 1);
1618 	for (sp = sctp->sctp_sack_info; sp; sp = sp->next) {
1619 		uint16_t offset;
1620 
1621 		/* start */
1622 		if (sp->begin > sctp->sctp_lastacked) {
1623 			offset = (uint16_t)(sp->begin - sctp->sctp_lastacked);
1624 		} else {
1625 			/* sequence number wrap */
1626 			offset = (uint16_t)(UINT32_MAX - sctp->sctp_lastacked +
1627 			    sp->begin);
1628 		}
1629 		sf->ssf_start = htons(offset);
1630 
1631 		/* end */
1632 		if (sp->end >= sp->begin) {
1633 			offset += (uint16_t)(sp->end - sp->begin);
1634 		} else {
1635 			/* sequence number wrap */
1636 			offset += (uint16_t)(UINT32_MAX - sp->begin + sp->end);
1637 		}
1638 		sf->ssf_end = htons(offset);
1639 
1640 		sf++;
1641 		/* This is just for debugging (a la the following assertion) */
1642 		num_gaps--;
1643 	}
1644 
1645 	ASSERT(num_gaps == 0);
1646 
1647 	/* If the SACK timer is running, stop it */
1648 	if (sctp->sctp_ack_timer_running) {
1649 		sctp_timer_stop(sctp->sctp_ack_mp);
1650 		sctp->sctp_ack_timer_running = B_FALSE;
1651 	}
1652 
1653 	BUMP_LOCAL(sctp->sctp_obchunks);
1654 	BUMP_LOCAL(sctp->sctp_osacks);
1655 }
1656 
1657 mblk_t *
1658 sctp_make_sack(sctp_t *sctp, sctp_faddr_t *sendto, mblk_t *dups)
1659 {
1660 	mblk_t *smp;
1661 	size_t slen;
1662 	sctp_chunk_hdr_t *sch;
1663 	sctp_sack_chunk_t *sc;
1664 	int32_t acks_max;
1665 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1666 	uint32_t	dups_len;
1667 	sctp_faddr_t	*fp;
1668 
1669 	ASSERT(sendto != NULL);
1670 
1671 	if (sctp->sctp_force_sack) {
1672 		sctp->sctp_force_sack = 0;
1673 		goto checks_done;
1674 	}
1675 
1676 	acks_max = sctps->sctps_deferred_acks_max;
1677 	if (sctp->sctp_state == SCTPS_ESTABLISHED) {
1678 		if (sctp->sctp_sack_toggle < acks_max) {
1679 			/* no need to SACK right now */
1680 			dprint(2, ("sctp_make_sack: %p no sack (toggle)\n",
1681 			    (void *)sctp));
1682 			return (NULL);
1683 		} else if (sctp->sctp_sack_toggle >= acks_max) {
1684 			sctp->sctp_sack_toggle = 0;
1685 		}
1686 	}
1687 
1688 	if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
1689 		dprint(2, ("sctp_make_sack: %p no sack (already)\n",
1690 		    (void *)sctp));
1691 		return (NULL);
1692 	}
1693 
1694 checks_done:
1695 	dprint(2, ("sctp_make_sack: acking %x\n", sctp->sctp_ftsn - 1));
1696 
1697 	if (dups != NULL)
1698 		dups_len = MBLKL(dups);
1699 	else
1700 		dups_len = 0;
1701 	slen = sizeof (*sch) + sizeof (*sc) +
1702 	    (sizeof (sctp_sack_frag_t) * sctp->sctp_sack_gaps);
1703 
1704 	/*
1705 	 * If there are error chunks, check and see if we can send the
1706 	 * SACK chunk and error chunks together in one packet.  If not,
1707 	 * send the error chunks out now.
1708 	 */
1709 	if (sctp->sctp_err_chunks != NULL) {
1710 		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
1711 		if (sctp->sctp_err_len + slen + dups_len > fp->sfa_pmss) {
1712 			if ((smp = sctp_make_mp(sctp, fp, 0)) == NULL) {
1713 				SCTP_KSTAT(sctps, sctp_send_err_failed);
1714 				SCTP_KSTAT(sctps, sctp_send_sack_failed);
1715 				freemsg(sctp->sctp_err_chunks);
1716 				sctp->sctp_err_chunks = NULL;
1717 				sctp->sctp_err_len = 0;
1718 				return (NULL);
1719 			}
1720 			smp->b_cont = sctp->sctp_err_chunks;
1721 			sctp_set_iplen(sctp, smp, fp->ixa);
1722 			(void) conn_ip_output(smp, fp->ixa);
1723 			BUMP_LOCAL(sctp->sctp_opkts);
1724 			sctp->sctp_err_chunks = NULL;
1725 			sctp->sctp_err_len = 0;
1726 		}
1727 	}
1728 	smp = sctp_make_mp(sctp, sendto, slen);
1729 	if (smp == NULL) {
1730 		SCTP_KSTAT(sctps, sctp_send_sack_failed);
1731 		return (NULL);
1732 	}
1733 	sch = (sctp_chunk_hdr_t *)smp->b_wptr;
1734 
1735 	sctp_fill_sack(sctp, smp->b_wptr, slen);
1736 	smp->b_wptr += slen;
1737 	if (dups != NULL) {
1738 		sc = (sctp_sack_chunk_t *)(sch + 1);
1739 		sc->ssc_numdups = htons(MBLKL(dups) / sizeof (uint32_t));
1740 		sch->sch_len = htons(slen + dups_len);
1741 		smp->b_cont = dups;
1742 	}
1743 
1744 	if (sctp->sctp_err_chunks != NULL) {
1745 		linkb(smp, sctp->sctp_err_chunks);
1746 		sctp->sctp_err_chunks = NULL;
1747 		sctp->sctp_err_len = 0;
1748 	}
1749 	return (smp);
1750 }
1751 
1752 /*
1753  * Check and see if we need to send a SACK chunk.  If it is needed,
1754  * send it out.  Return true if a SACK chunk is sent, false otherwise.
1755  */
1756 boolean_t
1757 sctp_sack(sctp_t *sctp, mblk_t *dups)
1758 {
1759 	mblk_t *smp;
1760 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1761 
1762 	/* If we are shutting down, let send_shutdown() bundle the SACK */
1763 	if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
1764 		sctp_send_shutdown(sctp, 0);
1765 	}
1766 
1767 	ASSERT(sctp->sctp_lastdata != NULL);
1768 
1769 	if ((smp = sctp_make_sack(sctp, sctp->sctp_lastdata, dups)) == NULL) {
1770 		/* The caller of sctp_sack() will not free the dups mblk. */
1771 		if (dups != NULL)
1772 			freeb(dups);
1773 		return (B_FALSE);
1774 	}
1775 	dprint(2, ("sctp_sack: sending to %p %x:%x:%x:%x\n",
1776 	    (void *)sctp->sctp_lastdata,
1777 	    SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));
1778 
1779 	sctp->sctp_active = ddi_get_lbolt64();
1780 
1781 	BUMP_MIB(&sctps->sctps_mib, sctpOutAck);
1782 
1783 	sctp_set_iplen(sctp, smp, sctp->sctp_lastdata->ixa);
1784 	(void) conn_ip_output(smp, sctp->sctp_lastdata->ixa);
1785 	BUMP_LOCAL(sctp->sctp_opkts);
1786 	return (B_TRUE);
1787 }
1788 
1789 /*
1790  * This is called if we have a message that was partially sent and is
1791  * abandoned. The cum TSN will be the last chunk sent for this message,
1792  * subsequent chunks will be marked ABANDONED. We send a Forward TSN
1793  * chunk in this case with the TSN of the last sent chunk so that the
1794  * peer can clean up its fragment list for this message. This message
1795  * will be removed from the transmit list when the peer sends a SACK
1796  * back.
1797  */
1798 int
1799 sctp_check_abandoned_msg(sctp_t *sctp, mblk_t *meta)
1800 {
1801 	sctp_data_hdr_t	*dh;
1802 	mblk_t		*nmp;
1803 	mblk_t		*head;
1804 	int32_t		unsent = 0;
1805 	mblk_t		*mp1 = meta->b_cont;
1806 	uint32_t	adv_pap = sctp->sctp_adv_pap;
1807 	sctp_faddr_t	*fp = sctp->sctp_current;
1808 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1809 
1810 	dh = (sctp_data_hdr_t *)mp1->b_rptr;
1811 	if (SEQ_GEQ(sctp->sctp_lastack_rxd, ntohl(dh->sdh_tsn))) {
1812 		sctp_ftsn_set_t	*sets = NULL;
1813 		uint_t		nsets = 0;
1814 		uint32_t	seglen = sizeof (uint32_t);
1815 		boolean_t	ubit = SCTP_DATA_GET_UBIT(dh);
1816 
1817 		while (mp1->b_next != NULL && SCTP_CHUNK_ISSENT(mp1->b_next))
1818 			mp1 = mp1->b_next;
1819 		dh = (sctp_data_hdr_t *)mp1->b_rptr;
1820 		sctp->sctp_adv_pap = ntohl(dh->sdh_tsn);
1821 		if (!ubit &&
1822 		    !sctp_add_ftsn_set(&sets, fp, meta, &nsets, &seglen)) {
1823 			sctp->sctp_adv_pap = adv_pap;
1824 			return (ENOMEM);
1825 		}
1826 		nmp = sctp_make_ftsn_chunk(sctp, fp, sets, nsets, seglen);
1827 		sctp_free_ftsn_set(sets);
1828 		if (nmp == NULL) {
1829 			sctp->sctp_adv_pap = adv_pap;
1830 			return (ENOMEM);
1831 		}
1832 		head = sctp_add_proto_hdr(sctp, fp, nmp, 0, NULL);
1833 		if (head == NULL) {
1834 			sctp->sctp_adv_pap = adv_pap;
1835 			freemsg(nmp);
1836 			SCTP_KSTAT(sctps, sctp_send_ftsn_failed);
1837 			return (ENOMEM);
1838 		}
1839 		SCTP_MSG_SET_ABANDONED(meta);
1840 		sctp_set_iplen(sctp, head, fp->ixa);
1841 		(void) conn_ip_output(head, fp->ixa);
1842 		BUMP_LOCAL(sctp->sctp_opkts);
1843 		if (!fp->timer_running)
1844 			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
1845 		mp1 = mp1->b_next;
1846 		while (mp1 != NULL) {
1847 			ASSERT(!SCTP_CHUNK_ISSENT(mp1));
1848 			ASSERT(!SCTP_CHUNK_ABANDONED(mp1));
1849 			SCTP_ABANDON_CHUNK(mp1);
1850 			dh = (sctp_data_hdr_t *)mp1->b_rptr;
1851 			unsent += ntohs(dh->sdh_len) - sizeof (*dh);
1852 			mp1 = mp1->b_next;
1853 		}
1854 		ASSERT(sctp->sctp_unsent >= unsent);
1855 		sctp->sctp_unsent -= unsent;
1856 		/*
1857 		 * Update ULP the amount of queued data, which is
1858 		 * sent-unack'ed + unsent.
1859 		 */
1860 		if (!SCTP_IS_DETACHED(sctp))
1861 			SCTP_TXQ_UPDATE(sctp);
1862 		return (0);
1863 	}
1864 	return (-1);
1865 }
1866 
1867 uint32_t
1868 sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked)
1869 {
1870 	mblk_t *ump, *nump, *mp = NULL;
1871 	uint16_t chunklen;
1872 	uint32_t xtsn;
1873 	sctp_faddr_t *fp;
1874 	sctp_data_hdr_t *sdc;
1875 	uint32_t cumack_forward = 0;
1876 	sctp_msg_hdr_t	*mhdr;
1877 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1878 
1879 	ump = sctp->sctp_xmit_head;
1880 
1881 	/*
1882 	 * Free messages only when they're completely acked.
1883 	 */
1884 	while (ump != NULL) {
1885 		mhdr = (sctp_msg_hdr_t *)ump->b_rptr;
1886 		for (mp = ump->b_cont; mp != NULL; mp = mp->b_next) {
1887 			if (SCTP_CHUNK_ABANDONED(mp)) {
1888 				ASSERT(SCTP_IS_MSG_ABANDONED(ump));
1889 				mp = NULL;
1890 				break;
1891 			}
1892 			/*
1893 			 * We check for abandoned message if we are PR-SCTP
1894 			 * aware, if this is not the first chunk in the
1895 			 * message (b_cont) and if the message is marked
1896 			 * abandoned.
1897 			 */
1898 			if (!SCTP_CHUNK_ISSENT(mp)) {
1899 				if (sctp->sctp_prsctp_aware &&
1900 				    mp != ump->b_cont &&
1901 				    (SCTP_IS_MSG_ABANDONED(ump) ||
1902 				    SCTP_MSG_TO_BE_ABANDONED(ump, mhdr,
1903 				    sctp))) {
1904 					(void) sctp_check_abandoned_msg(sctp,
1905 					    ump);
1906 				}
1907 				goto cum_ack_done;
1908 			}
1909 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
1910 			xtsn = ntohl(sdc->sdh_tsn);
1911 			if (SEQ_GEQ(sctp->sctp_lastack_rxd, xtsn))
1912 				continue;
1913 			if (SEQ_GEQ(tsn, xtsn)) {
1914 				fp = SCTP_CHUNK_DEST(mp);
1915 				chunklen = ntohs(sdc->sdh_len);
1916 
1917 				if (sctp->sctp_out_time != 0 &&
1918 				    xtsn == sctp->sctp_rtt_tsn) {
1919 					/* Got a new RTT measurement */
1920 					sctp_update_rtt(sctp, fp,
1921 					    ddi_get_lbolt64() -
1922 					    sctp->sctp_out_time);
1923 					sctp->sctp_out_time = 0;
1924 				}
1925 				if (SCTP_CHUNK_ISACKED(mp))
1926 					continue;
1927 				SCTP_CHUNK_SET_SACKCNT(mp, 0);
1928 				SCTP_CHUNK_ACKED(mp);
1929 				ASSERT(fp->suna >= chunklen);
1930 				fp->suna -= chunklen;
1931 				fp->acked += chunklen;
1932 				cumack_forward += chunklen;
1933 				ASSERT(sctp->sctp_unacked >=
1934 				    (chunklen - sizeof (*sdc)));
1935 				sctp->sctp_unacked -=
1936 				    (chunklen - sizeof (*sdc));
1937 				if (fp->suna == 0) {
1938 					/* all outstanding data acked */
1939 					fp->pba = 0;
1940 					SCTP_FADDR_TIMER_STOP(fp);
1941 				} else {
1942 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
1943 					    fp->rto);
1944 				}
1945 			} else {
1946 				goto cum_ack_done;
1947 			}
1948 		}
1949 		nump = ump->b_next;
1950 		if (nump != NULL)
1951 			nump->b_prev = NULL;
1952 		if (ump == sctp->sctp_xmit_tail)
1953 			sctp->sctp_xmit_tail = nump;
1954 		if (SCTP_IS_MSG_ABANDONED(ump)) {
1955 			BUMP_LOCAL(sctp->sctp_prsctpdrop);
1956 			ump->b_next = NULL;
1957 			sctp_sendfail_event(sctp, ump, 0, B_TRUE);
1958 		} else {
1959 			sctp_free_msg(ump);
1960 		}
1961 		sctp->sctp_xmit_head = ump = nump;
1962 	}
1963 cum_ack_done:
1964 	*first_unacked = mp;
1965 	if (cumack_forward > 0) {
1966 		BUMP_MIB(&sctps->sctps_mib, sctpInAck);
1967 		if (SEQ_GT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn)) {
1968 			sctp->sctp_recovery_tsn = sctp->sctp_lastack_rxd;
1969 		}
1970 
1971 		/*
1972 		 * Update ULP the amount of queued data, which is
1973 		 * sent-unack'ed + unsent.
1974 		 */
1975 		if (!SCTP_IS_DETACHED(sctp))
1976 			SCTP_TXQ_UPDATE(sctp);
1977 
1978 		/* Time to send a shutdown? */
1979 		if (sctp->sctp_state == SCTPS_SHUTDOWN_PENDING) {
1980 			sctp_send_shutdown(sctp, 0);
1981 		}
1982 		sctp->sctp_xmit_unacked = mp;
1983 	} else {
1984 		/* dup ack */
1985 		BUMP_MIB(&sctps->sctps_mib, sctpInDupAck);
1986 	}
1987 	sctp->sctp_lastack_rxd = tsn;
1988 	if (SEQ_LT(sctp->sctp_adv_pap, sctp->sctp_lastack_rxd))
1989 		sctp->sctp_adv_pap = sctp->sctp_lastack_rxd;
1990 	ASSERT(sctp->sctp_xmit_head || sctp->sctp_unacked == 0);
1991 
1992 	return (cumack_forward);
1993 }
1994 
1995 static int
1996 sctp_set_frwnd(sctp_t *sctp, uint32_t frwnd)
1997 {
1998 	uint32_t orwnd;
1999 
2000 	if (sctp->sctp_unacked > frwnd) {
2001 		sctp->sctp_frwnd = 0;
2002 		return (0);
2003 	}
2004 	orwnd = sctp->sctp_frwnd;
2005 	sctp->sctp_frwnd = frwnd - sctp->sctp_unacked;
2006 	if (orwnd < sctp->sctp_frwnd) {
2007 		return (1);
2008 	} else {
2009 		return (0);
2010 	}
2011 }
2012 
2013 /*
2014  * For un-ordered messages.
2015  * Walk the sctp->sctp_uo_frag list and remove any fragments with TSN
2016  * less than/equal to ftsn. Fragments for un-ordered messages are
2017  * strictly in sequence (w.r.t TSN).
2018  */
2019 static int
2020 sctp_ftsn_check_uo_frag(sctp_t *sctp, uint32_t ftsn)
2021 {
2022 	mblk_t		*hmp;
2023 	mblk_t		*hmp_next;
2024 	sctp_data_hdr_t	*dc;
2025 	int		dlen = 0;
2026 
2027 	hmp = sctp->sctp_uo_frags;
2028 	while (hmp != NULL) {
2029 		hmp_next = hmp->b_next;
2030 		dc = (sctp_data_hdr_t *)hmp->b_rptr;
2031 		if (SEQ_GT(ntohl(dc->sdh_tsn), ftsn))
2032 			return (dlen);
2033 		sctp->sctp_uo_frags = hmp_next;
2034 		if (hmp_next != NULL)
2035 			hmp_next->b_prev = NULL;
2036 		hmp->b_next = NULL;
2037 		dlen += ntohs(dc->sdh_len) - sizeof (*dc);
2038 		freeb(hmp);
2039 		hmp = hmp_next;
2040 	}
2041 	return (dlen);
2042 }
2043 
2044 /*
2045  * For ordered messages.
2046  * Check for existing fragments for an sid-ssn pair reported as abandoned,
2047  * hence will not receive, in the Forward TSN. If there are fragments, then
2048  * we just nuke them. If and when Partial Delivery API is supported, we
2049  * would need to send a notification to the upper layer about this.
2050  */
2051 static int
2052 sctp_ftsn_check_frag(sctp_t *sctp, uint16_t ssn, sctp_instr_t *sip)
2053 {
2054 	sctp_reass_t	*srp;
2055 	mblk_t		*hmp;
2056 	mblk_t		*dmp;
2057 	mblk_t		*hmp_next;
2058 	sctp_data_hdr_t	*dc;
2059 	int		dlen = 0;
2060 
2061 	hmp = sip->istr_reass;
2062 	while (hmp != NULL) {
2063 		hmp_next = hmp->b_next;
2064 		srp = (sctp_reass_t *)DB_BASE(hmp);
2065 		if (SSN_GT(srp->ssn, ssn))
2066 			return (dlen);
2067 		/*
2068 		 * If we had sent part of this message up, send a partial
2069 		 * delivery event. Since this is ordered delivery, we should
2070 		 * have sent partial message only for the next in sequence,
2071 		 * hence the ASSERT. See comments in sctp_data_chunk() for
2072 		 * trypartial.
2073 		 */
2074 		if (srp->partial_delivered) {
2075 			ASSERT(sip->nextseq == srp->ssn);
2076 			sctp_partial_delivery_event(sctp);
2077 		}
2078 		/* Take it out of the reass queue */
2079 		sip->istr_reass = hmp_next;
2080 		if (hmp_next != NULL)
2081 			hmp_next->b_prev = NULL;
2082 		hmp->b_next = NULL;
2083 		ASSERT(hmp->b_prev == NULL);
2084 		dmp = hmp;
2085 		ASSERT(DB_TYPE(hmp) == M_CTL);
2086 		dmp = hmp->b_cont;
2087 		hmp->b_cont = NULL;
2088 		freeb(hmp);
2089 		hmp = dmp;
2090 		while (dmp != NULL) {
2091 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
2092 			dlen += ntohs(dc->sdh_len) - sizeof (*dc);
2093 			dmp = dmp->b_cont;
2094 		}
2095 		freemsg(hmp);
2096 		hmp = hmp_next;
2097 	}
2098 	return (dlen);
2099 }
2100 
2101 /*
2102  * Update sctp_ftsn to the cumulative TSN from the Forward TSN chunk. Remove
2103  * any SACK gaps less than the newly updated sctp_ftsn. Walk through the
2104  * sid-ssn pair in the Forward TSN and for each, clean the fragment list
2105  * for this pair, if needed, and check if we can deliver subsequent
2106  * messages, if any, from the instream queue (that were waiting for this
2107  * sid-ssn message to show up). Once we are done try to update the SACK
2108  * info. We could get a duplicate Forward TSN, in which case just send
2109  * a SACK. If any of the sid values in the Forward TSN is invalid,
2110  * send back an "Invalid Stream Identifier" error and continue processing
2111  * the rest.
2112  */
2113 static void
2114 sctp_process_forward_tsn(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp,
2115     ip_pkt_t *ipp, ip_recv_attr_t *ira)
2116 {
2117 	uint32_t	*ftsn = (uint32_t *)(ch + 1);
2118 	ftsn_entry_t	*ftsn_entry;
2119 	sctp_instr_t	*instr;
2120 	boolean_t	can_deliver = B_TRUE;
2121 	size_t		dlen;
2122 	int		flen;
2123 	mblk_t		*dmp;
2124 	mblk_t		*pmp;
2125 	sctp_data_hdr_t	*dc;
2126 	ssize_t		remaining;
2127 	sctp_stack_t	*sctps = sctp->sctp_sctps;
2128 
2129 	*ftsn = ntohl(*ftsn);
2130 	remaining =  ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*ftsn);
2131 
2132 	if (SCTP_IS_DETACHED(sctp)) {
2133 		BUMP_MIB(&sctps->sctps_mib, sctpInClosed);
2134 		can_deliver = B_FALSE;
2135 	}
2136 	/*
2137 	 * un-ordered messages don't have SID-SSN pair entries, we check
2138 	 * for any fragments (for un-ordered message) to be discarded using
2139 	 * the cumulative FTSN.
2140 	 */
2141 	flen = sctp_ftsn_check_uo_frag(sctp, *ftsn);
2142 	if (flen > 0) {
2143 		ASSERT(sctp->sctp_rxqueued >= flen);
2144 		sctp->sctp_rxqueued -= flen;
2145 	}
2146 	ftsn_entry = (ftsn_entry_t *)(ftsn + 1);
2147 	while (remaining >= sizeof (*ftsn_entry)) {
2148 		ftsn_entry->ftsn_sid = ntohs(ftsn_entry->ftsn_sid);
2149 		ftsn_entry->ftsn_ssn = ntohs(ftsn_entry->ftsn_ssn);
2150 		if (ftsn_entry->ftsn_sid >= sctp->sctp_num_istr) {
2151 			sctp_bsc_t	inval_parm;
2152 
2153 			/* Will populate the CAUSE block in the ERROR chunk. */
2154 			inval_parm.bsc_sid = htons(ftsn_entry->ftsn_sid);
2155 			/* RESERVED, ignored at the receiving end */
2156 			inval_parm.bsc_pad = 0;
2157 
2158 			sctp_add_err(sctp, SCTP_ERR_BAD_SID,
2159 			    (void *)&inval_parm, sizeof (sctp_bsc_t), fp);
2160 			ftsn_entry++;
2161 			remaining -= sizeof (*ftsn_entry);
2162 			continue;
2163 		}
2164 		instr = &sctp->sctp_instr[ftsn_entry->ftsn_sid];
2165 		flen = sctp_ftsn_check_frag(sctp, ftsn_entry->ftsn_ssn, instr);
2166 		/* Indicates frags were nuked, update rxqueued */
2167 		if (flen > 0) {
2168 			ASSERT(sctp->sctp_rxqueued >= flen);
2169 			sctp->sctp_rxqueued -= flen;
2170 		}
2171 		/*
2172 		 * It is possible to receive an FTSN chunk with SSN smaller
2173 		 * than then nextseq if this chunk is a retransmission because
2174 		 * of incomplete processing when it was first processed.
2175 		 */
2176 		if (SSN_GE(ftsn_entry->ftsn_ssn, instr->nextseq))
2177 			instr->nextseq = ftsn_entry->ftsn_ssn + 1;
2178 		while (instr->istr_nmsgs > 0) {
2179 			mblk_t	*next;
2180 
2181 			dmp = (mblk_t *)instr->istr_msgs;
2182 			dc = (sctp_data_hdr_t *)dmp->b_rptr;
2183 			if (ntohs(dc->sdh_ssn) != instr->nextseq)
2184 				break;
2185 
2186 			next = dmp->b_next;
2187 			dlen = dmp->b_wptr - dmp->b_rptr - sizeof (*dc);
2188 			for (pmp = dmp->b_cont; pmp != NULL;
2189 			    pmp = pmp->b_cont) {
2190 				dlen += MBLKL(pmp);
2191 			}
2192 			if (can_deliver) {
2193 				int32_t	nrwnd;
2194 				int error;
2195 
2196 				dmp->b_rptr = (uchar_t *)(dc + 1);
2197 				dmp->b_next = NULL;
2198 				ASSERT(dmp->b_prev == NULL);
2199 				if (sctp_input_add_ancillary(sctp,
2200 				    &dmp, dc, fp, ipp, ira) == 0) {
2201 					sctp->sctp_rxqueued -= dlen;
2202 					sctp->sctp_rwnd -= dlen;
2203 					/*
2204 					 * Override b_flag for SCTP sockfs
2205 					 * internal use
2206 					 */
2207 
2208 					dmp->b_flag = 0;
2209 					nrwnd = sctp->sctp_ulp_recv(
2210 					    sctp->sctp_ulpd, dmp, msgdsize(dmp),
2211 					    0, &error, NULL);
2212 					if (nrwnd < 0)
2213 						sctp->sctp_rwnd = 0;
2214 					else if (nrwnd > sctp->sctp_rwnd)
2215 						sctp->sctp_rwnd = nrwnd;
2216 				} else {
2217 					/*
2218 					 * We will resume processing when
2219 					 * the FTSN chunk is re-xmitted.
2220 					 */
2221 					dmp->b_rptr = (uchar_t *)dc;
2222 					dmp->b_next = next;
2223 					dprint(0,
2224 					    ("FTSN dequeuing %u failed\n",
2225 					    ntohs(dc->sdh_ssn)));
2226 					return;
2227 				}
2228 			} else {
2229 				sctp->sctp_rxqueued -= dlen;
2230 				ASSERT(dmp->b_prev == NULL);
2231 				dmp->b_next = NULL;
2232 				freemsg(dmp);
2233 			}
2234 			instr->istr_nmsgs--;
2235 			instr->nextseq++;
2236 			sctp->sctp_istr_nmsgs--;
2237 			if (next != NULL)
2238 				next->b_prev = NULL;
2239 			instr->istr_msgs = next;
2240 		}
2241 		ftsn_entry++;
2242 		remaining -= sizeof (*ftsn_entry);
2243 	}
2244 	/* Duplicate FTSN */
2245 	if (*ftsn <= (sctp->sctp_ftsn - 1)) {
2246 		sctp->sctp_force_sack = 1;
2247 		return;
2248 	}
2249 	/* Advance cum TSN to that reported in the Forward TSN chunk */
2250 	sctp->sctp_ftsn = *ftsn + 1;
2251 
2252 	/* Remove all the SACK gaps before the new cum TSN */
2253 	if (sctp->sctp_sack_info != NULL) {
2254 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
2255 		    &sctp->sctp_sack_gaps);
2256 	}
2257 	/*
2258 	 * If there are gap reports pending, check if advancing
2259 	 * the ftsn here closes a gap. If so, we can advance
2260 	 * ftsn to the end of the set.
2261 	 * If ftsn has moved forward, maybe we can remove gap reports.
2262 	 */
2263 	if (sctp->sctp_sack_info != NULL &&
2264 	    sctp->sctp_ftsn == sctp->sctp_sack_info->begin) {
2265 		sctp->sctp_ftsn = sctp->sctp_sack_info->end + 1;
2266 		sctp_ack_rem(&sctp->sctp_sack_info, sctp->sctp_ftsn - 1,
2267 		    &sctp->sctp_sack_gaps);
2268 	}
2269 }
2270 
2271 /*
2272  * When we have processed a SACK we check to see if we can advance the
2273  * cumulative TSN if there are abandoned chunks immediately following
2274  * the updated cumulative TSN. If there are, we attempt to send a
2275  * Forward TSN chunk.
2276  */
2277 static void
2278 sctp_check_abandoned_data(sctp_t *sctp, sctp_faddr_t *fp)
2279 {
2280 	mblk_t		*meta = sctp->sctp_xmit_head;
2281 	mblk_t		*mp;
2282 	mblk_t		*nmp;
2283 	uint32_t	seglen;
2284 	uint32_t	adv_pap = sctp->sctp_adv_pap;
2285 
2286 	/*
2287 	 * We only check in the first meta since otherwise we can't
2288 	 * advance the cumulative ack point. We just look for chunks
2289 	 * marked for retransmission, else we might prematurely
2290 	 * send an FTSN for a sent, but unacked, chunk.
2291 	 */
2292 	for (mp = meta->b_cont; mp != NULL; mp = mp->b_next) {
2293 		if (!SCTP_CHUNK_ISSENT(mp))
2294 			return;
2295 		if (SCTP_CHUNK_WANT_REXMIT(mp))
2296 			break;
2297 	}
2298 	if (mp == NULL)
2299 		return;
2300 	sctp_check_adv_ack_pt(sctp, meta, mp);
2301 	if (SEQ_GT(sctp->sctp_adv_pap, adv_pap)) {
2302 		sctp_make_ftsns(sctp, meta, mp, &nmp, fp, &seglen);
2303 		if (nmp == NULL) {
2304 			sctp->sctp_adv_pap = adv_pap;
2305 			if (!fp->timer_running)
2306 				SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
2307 			return;
2308 		}
2309 		sctp_set_iplen(sctp, nmp, fp->ixa);
2310 		(void) conn_ip_output(nmp, fp->ixa);
2311 		BUMP_LOCAL(sctp->sctp_opkts);
2312 		if (!fp->timer_running)
2313 			SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
2314 	}
2315 }
2316 
2317 /*
2318  * The processing here follows the same logic in sctp_got_sack(), the reason
2319  * we do this separately is because, usually, gap blocks are ordered and
2320  * we can process it in sctp_got_sack(). However if they aren't we would
2321  * need to do some additional non-optimal stuff when we start processing the
2322  * unordered gaps. To that effect sctp_got_sack() does the processing in the
2323  * simple case and this does the same in the more involved case.
2324  */
2325 static uint32_t
2326 sctp_process_uo_gaps(sctp_t *sctp, uint32_t ctsn, sctp_sack_frag_t *ssf,
2327     int num_gaps, mblk_t *umphead, mblk_t *mphead, int *trysend,
2328     boolean_t *fast_recovery, uint32_t fr_xtsn)
2329 {
2330 	uint32_t		xtsn;
2331 	uint32_t		gapstart = 0;
2332 	uint32_t		gapend = 0;
2333 	int			gapcnt;
2334 	uint16_t		chunklen;
2335 	sctp_data_hdr_t		*sdc;
2336 	int			gstart;
2337 	mblk_t			*ump = umphead;
2338 	mblk_t			*mp = mphead;
2339 	sctp_faddr_t		*fp;
2340 	uint32_t		acked = 0;
2341 	sctp_stack_t		*sctps = sctp->sctp_sctps;
2342 
2343 	/*
2344 	 * gstart tracks the last (in the order of TSN) gapstart that
2345 	 * we process in this SACK gaps walk.
2346 	 */
2347 	gstart = ctsn;
2348 
2349 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2350 	xtsn = ntohl(sdc->sdh_tsn);
2351 	for (gapcnt = 0; gapcnt < num_gaps; gapcnt++, ssf++) {
2352 		if (gapstart != 0) {
2353 			/*
2354 			 * If we have reached the end of the transmit list or
2355 			 * hit an unsent chunk or encountered an unordered gap
2356 			 * block start from the ctsn again.
2357 			 */
2358 			if (ump == NULL || !SCTP_CHUNK_ISSENT(mp) ||
2359 			    SEQ_LT(ctsn + ntohs(ssf->ssf_start), xtsn)) {
2360 				ump = umphead;
2361 				mp = mphead;
2362 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2363 				xtsn = ntohl(sdc->sdh_tsn);
2364 			}
2365 		}
2366 
2367 		gapstart = ctsn + ntohs(ssf->ssf_start);
2368 		gapend = ctsn + ntohs(ssf->ssf_end);
2369 
2370 		/*
2371 		 * Sanity checks:
2372 		 *
2373 		 * 1. SACK for TSN we have not sent - ABORT
2374 		 * 2. Invalid or spurious gaps, ignore all gaps
2375 		 */
2376 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2377 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2378 			BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2379 			*trysend = -1;
2380 			return (acked);
2381 		} else if (SEQ_LT(gapend, gapstart) ||
2382 		    SEQ_LEQ(gapstart, ctsn)) {
2383 			break;
2384 		}
2385 		/*
2386 		 * The xtsn can be the TSN processed for the last gap
2387 		 * (gapend) or it could be the cumulative TSN. We continue
2388 		 * with the last xtsn as long as the gaps are ordered, when
2389 		 * we hit an unordered gap, we re-start from the cumulative
2390 		 * TSN. For the first gap it is always the cumulative TSN.
2391 		 */
2392 		while (xtsn != gapstart) {
2393 			/*
2394 			 * We can't reliably check for reneged chunks
2395 			 * when walking the unordered list, so we don't.
2396 			 * In case the peer reneges then we will end up
2397 			 * sending the reneged chunk via timeout.
2398 			 */
2399 			mp = mp->b_next;
2400 			if (mp == NULL) {
2401 				ump = ump->b_next;
2402 				/*
2403 				 * ump can't be NULL because of the sanity
2404 				 * check above.
2405 				 */
2406 				ASSERT(ump != NULL);
2407 				mp = ump->b_cont;
2408 			}
2409 			/*
2410 			 * mp can't be unsent because of the sanity check
2411 			 * above.
2412 			 */
2413 			ASSERT(SCTP_CHUNK_ISSENT(mp));
2414 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2415 			xtsn = ntohl(sdc->sdh_tsn);
2416 		}
2417 		/*
2418 		 * Now that we have found the chunk with TSN == 'gapstart',
2419 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2420 		 * All intermediate chunks will be marked ACKED, if they
2421 		 * haven't already been.
2422 		 */
2423 		while (SEQ_LEQ(xtsn, gapend)) {
2424 			/*
2425 			 * SACKed
2426 			 */
2427 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
2428 			if (!SCTP_CHUNK_ISACKED(mp)) {
2429 				SCTP_CHUNK_ACKED(mp);
2430 
2431 				fp = SCTP_CHUNK_DEST(mp);
2432 				chunklen = ntohs(sdc->sdh_len);
2433 				ASSERT(fp->suna >= chunklen);
2434 				fp->suna -= chunklen;
2435 				if (fp->suna == 0) {
2436 					/* All outstanding data acked. */
2437 					fp->pba = 0;
2438 					SCTP_FADDR_TIMER_STOP(fp);
2439 				}
2440 				fp->acked += chunklen;
2441 				acked += chunklen;
2442 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
2443 				ASSERT(sctp->sctp_unacked >= 0);
2444 			}
2445 			/*
2446 			 * Move to the next message in the transmit list
2447 			 * if we are done with all the chunks from the current
2448 			 * message. Note, it is possible to hit the end of the
2449 			 * transmit list here, i.e. if we have already completed
2450 			 * processing the gap block.
2451 			 */
2452 			mp = mp->b_next;
2453 			if (mp == NULL) {
2454 				ump = ump->b_next;
2455 				if (ump == NULL) {
2456 					ASSERT(xtsn == gapend);
2457 					break;
2458 				}
2459 				mp = ump->b_cont;
2460 			}
2461 			/*
2462 			 * Likewise, we can hit an unsent chunk once we have
2463 			 * completed processing the gap block.
2464 			 */
2465 			if (!SCTP_CHUNK_ISSENT(mp)) {
2466 				ASSERT(xtsn == gapend);
2467 				break;
2468 			}
2469 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2470 			xtsn = ntohl(sdc->sdh_tsn);
2471 		}
2472 		/*
2473 		 * We keep track of the last gap we successfully processed
2474 		 * so that we can terminate the walk below for incrementing
2475 		 * the SACK count.
2476 		 */
2477 		if (SEQ_LT(gstart, gapstart))
2478 			gstart = gapstart;
2479 	}
2480 	/*
2481 	 * Check if have incremented the SACK count for all unacked TSNs in
2482 	 * sctp_got_sack(), if so we are done.
2483 	 */
2484 	if (SEQ_LEQ(gstart, fr_xtsn))
2485 		return (acked);
2486 
2487 	ump = umphead;
2488 	mp = mphead;
2489 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2490 	xtsn = ntohl(sdc->sdh_tsn);
2491 	while (SEQ_LT(xtsn, gstart)) {
2492 		/*
2493 		 * We have incremented SACK count for TSNs less than fr_tsn
2494 		 * in sctp_got_sack(), so don't increment them again here.
2495 		 */
2496 		if (SEQ_GT(xtsn, fr_xtsn) && !SCTP_CHUNK_ISACKED(mp)) {
2497 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
2498 			if (SCTP_CHUNK_SACKCNT(mp) ==
2499 			    sctps->sctps_fast_rxt_thresh) {
2500 				SCTP_CHUNK_REXMIT(mp);
2501 				sctp->sctp_chk_fast_rexmit = B_TRUE;
2502 				*trysend = 1;
2503 				if (!*fast_recovery) {
2504 					/*
2505 					 * Entering fast recovery.
2506 					 */
2507 					fp = SCTP_CHUNK_DEST(mp);
2508 					fp->ssthresh = fp->cwnd / 2;
2509 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
2510 						fp->ssthresh =
2511 						    2 * fp->sfa_pmss;
2512 					}
2513 					fp->cwnd = fp->ssthresh;
2514 					fp->pba = 0;
2515 					sctp->sctp_recovery_tsn =
2516 					    sctp->sctp_ltsn - 1;
2517 					*fast_recovery = B_TRUE;
2518 				}
2519 			}
2520 		}
2521 		mp = mp->b_next;
2522 		if (mp == NULL) {
2523 			ump = ump->b_next;
2524 			/* We can't get to the end of the transmit list here */
2525 			ASSERT(ump != NULL);
2526 			mp = ump->b_cont;
2527 		}
2528 		/* We can't hit an unsent chunk here */
2529 		ASSERT(SCTP_CHUNK_ISSENT(mp));
2530 		sdc = (sctp_data_hdr_t *)mp->b_rptr;
2531 		xtsn = ntohl(sdc->sdh_tsn);
2532 	}
2533 	return (acked);
2534 }
2535 
2536 static int
2537 sctp_got_sack(sctp_t *sctp, sctp_chunk_hdr_t *sch)
2538 {
2539 	sctp_sack_chunk_t	*sc;
2540 	sctp_data_hdr_t		*sdc;
2541 	sctp_sack_frag_t	*ssf;
2542 	mblk_t			*ump;
2543 	mblk_t			*mp;
2544 	mblk_t			*mp1;
2545 	uint32_t		cumtsn;
2546 	uint32_t		xtsn;
2547 	uint32_t		gapstart = 0;
2548 	uint32_t		gapend = 0;
2549 	uint32_t		acked = 0;
2550 	uint16_t		chunklen;
2551 	sctp_faddr_t		*fp;
2552 	int			num_gaps;
2553 	int			trysend = 0;
2554 	int			i;
2555 	boolean_t		fast_recovery = B_FALSE;
2556 	boolean_t		cumack_forward = B_FALSE;
2557 	boolean_t		fwd_tsn = B_FALSE;
2558 	sctp_stack_t		*sctps = sctp->sctp_sctps;
2559 
2560 	BUMP_LOCAL(sctp->sctp_ibchunks);
2561 	BUMP_LOCAL(sctp->sctp_isacks);
2562 	chunklen = ntohs(sch->sch_len);
2563 	if (chunklen < (sizeof (*sch) + sizeof (*sc)))
2564 		return (0);
2565 
2566 	sc = (sctp_sack_chunk_t *)(sch + 1);
2567 	cumtsn = ntohl(sc->ssc_cumtsn);
2568 
2569 	dprint(2, ("got sack cumtsn %x -> %x\n", sctp->sctp_lastack_rxd,
2570 	    cumtsn));
2571 
2572 	/* out of order */
2573 	if (SEQ_LT(cumtsn, sctp->sctp_lastack_rxd))
2574 		return (0);
2575 
2576 	if (SEQ_GT(cumtsn, sctp->sctp_ltsn - 1)) {
2577 		BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2578 		/* Send an ABORT */
2579 		return (-1);
2580 	}
2581 
2582 	/*
2583 	 * Cwnd only done when not in fast recovery mode.
2584 	 */
2585 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_recovery_tsn))
2586 		fast_recovery = B_TRUE;
2587 
2588 	/*
2589 	 * .. and if the cum TSN is not moving ahead on account Forward TSN
2590 	 */
2591 	if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_adv_pap))
2592 		fwd_tsn = B_TRUE;
2593 
2594 	if (cumtsn == sctp->sctp_lastack_rxd &&
2595 	    (sctp->sctp_xmit_unacked == NULL ||
2596 	    !SCTP_CHUNK_ABANDONED(sctp->sctp_xmit_unacked))) {
2597 		if (sctp->sctp_xmit_unacked != NULL)
2598 			mp = sctp->sctp_xmit_unacked;
2599 		else if (sctp->sctp_xmit_head != NULL)
2600 			mp = sctp->sctp_xmit_head->b_cont;
2601 		else
2602 			mp = NULL;
2603 		BUMP_MIB(&sctps->sctps_mib, sctpInDupAck);
2604 		/*
2605 		 * If we were doing a zero win probe and the win
2606 		 * has now opened to at least MSS, re-transmit the
2607 		 * zero win probe via sctp_rexmit_packet().
2608 		 */
2609 		if (mp != NULL && sctp->sctp_zero_win_probe &&
2610 		    ntohl(sc->ssc_a_rwnd) >= sctp->sctp_current->sfa_pmss) {
2611 			mblk_t	*pkt;
2612 			uint_t	pkt_len;
2613 			mblk_t	*mp1 = mp;
2614 			mblk_t	*meta = sctp->sctp_xmit_head;
2615 
2616 			/*
2617 			 * Reset the RTO since we have been backing-off
2618 			 * to send the ZWP.
2619 			 */
2620 			fp = sctp->sctp_current;
2621 			fp->rto = fp->srtt + 4 * fp->rttvar;
2622 			SCTP_MAX_RTO(sctp, fp);
2623 			/* Resend the ZWP */
2624 			pkt = sctp_rexmit_packet(sctp, &meta, &mp1, fp,
2625 			    &pkt_len);
2626 			if (pkt == NULL) {
2627 				SCTP_KSTAT(sctps, sctp_ss_rexmit_failed);
2628 				return (0);
2629 			}
2630 			ASSERT(pkt_len <= fp->sfa_pmss);
2631 			sctp->sctp_zero_win_probe = B_FALSE;
2632 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2633 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2634 			sctp_set_iplen(sctp, pkt, fp->ixa);
2635 			(void) conn_ip_output(pkt, fp->ixa);
2636 			BUMP_LOCAL(sctp->sctp_opkts);
2637 		}
2638 	} else {
2639 		if (sctp->sctp_zero_win_probe) {
2640 			/*
2641 			 * Reset the RTO since we have been backing-off
2642 			 * to send the ZWP.
2643 			 */
2644 			fp = sctp->sctp_current;
2645 			fp->rto = fp->srtt + 4 * fp->rttvar;
2646 			SCTP_MAX_RTO(sctp, fp);
2647 			sctp->sctp_zero_win_probe = B_FALSE;
2648 			/* This is probably not required */
2649 			if (!sctp->sctp_rexmitting) {
2650 				sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
2651 				sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
2652 			}
2653 		}
2654 		acked = sctp_cumack(sctp, cumtsn, &mp);
2655 		sctp->sctp_xmit_unacked = mp;
2656 		if (acked > 0) {
2657 			trysend = 1;
2658 			cumack_forward = B_TRUE;
2659 			if (fwd_tsn && SEQ_GEQ(sctp->sctp_lastack_rxd,
2660 			    sctp->sctp_adv_pap)) {
2661 				cumack_forward = B_FALSE;
2662 			}
2663 		}
2664 	}
2665 	num_gaps = ntohs(sc->ssc_numfrags);
2666 	UPDATE_LOCAL(sctp->sctp_gapcnt, num_gaps);
2667 	if (num_gaps == 0 || mp == NULL || !SCTP_CHUNK_ISSENT(mp) ||
2668 	    chunklen < (sizeof (*sch) + sizeof (*sc) +
2669 	    num_gaps * sizeof (*ssf))) {
2670 		goto ret;
2671 	}
2672 #ifdef	DEBUG
2673 	/*
2674 	 * Since we delete any message that has been acked completely,
2675 	 * the unacked chunk must belong to sctp_xmit_head (as
2676 	 * we don't have a back pointer from the mp to the meta data
2677 	 * we do this).
2678 	 */
2679 	{
2680 		mblk_t	*mp2 = sctp->sctp_xmit_head->b_cont;
2681 
2682 		while (mp2 != NULL) {
2683 			if (mp2 == mp)
2684 				break;
2685 			mp2 = mp2->b_next;
2686 		}
2687 		ASSERT(mp2 != NULL);
2688 	}
2689 #endif
2690 	ump = sctp->sctp_xmit_head;
2691 
2692 	/*
2693 	 * Just remember where we started from, in case we need to call
2694 	 * sctp_process_uo_gaps() if the gap blocks are unordered.
2695 	 */
2696 	mp1 = mp;
2697 
2698 	sdc = (sctp_data_hdr_t *)mp->b_rptr;
2699 	xtsn = ntohl(sdc->sdh_tsn);
2700 	ASSERT(xtsn == cumtsn + 1);
2701 
2702 	/*
2703 	 * Go through SACK gaps. They are ordered based on start TSN.
2704 	 */
2705 	ssf = (sctp_sack_frag_t *)(sc + 1);
2706 	for (i = 0; i < num_gaps; i++, ssf++) {
2707 		if (gapstart != 0) {
2708 			/* check for unordered gap */
2709 			if (SEQ_LEQ(cumtsn + ntohs(ssf->ssf_start), gapstart)) {
2710 				acked += sctp_process_uo_gaps(sctp,
2711 				    cumtsn, ssf, num_gaps - i,
2712 				    sctp->sctp_xmit_head, mp1,
2713 				    &trysend, &fast_recovery, gapstart);
2714 				if (trysend < 0) {
2715 					BUMP_MIB(&sctps->sctps_mib,
2716 					    sctpInAckUnsent);
2717 					return (-1);
2718 				}
2719 				break;
2720 			}
2721 		}
2722 		gapstart = cumtsn + ntohs(ssf->ssf_start);
2723 		gapend = cumtsn + ntohs(ssf->ssf_end);
2724 
2725 		/*
2726 		 * Sanity checks:
2727 		 *
2728 		 * 1. SACK for TSN we have not sent - ABORT
2729 		 * 2. Invalid or spurious gaps, ignore all gaps
2730 		 */
2731 		if (SEQ_GT(gapstart, sctp->sctp_ltsn - 1) ||
2732 		    SEQ_GT(gapend, sctp->sctp_ltsn - 1)) {
2733 			BUMP_MIB(&sctps->sctps_mib, sctpInAckUnsent);
2734 			return (-1);
2735 		} else if (SEQ_LT(gapend, gapstart) ||
2736 		    SEQ_LEQ(gapstart, cumtsn)) {
2737 			break;
2738 		}
2739 		/*
2740 		 * Let's start at the current TSN (for the 1st gap we start
2741 		 * from the cumulative TSN, for subsequent ones we start from
2742 		 * where the previous gapend was found - second while loop
2743 		 * below) and walk the transmit list till we find the TSN
2744 		 * corresponding to gapstart. All the unacked chunks till we
2745 		 * get to the chunk with TSN == gapstart will have their
2746 		 * SACKCNT incremented by 1. Note since the gap blocks are
2747 		 * ordered, we won't be incrementing the SACKCNT for an
2748 		 * unacked chunk by more than one while processing the gap
2749 		 * blocks. If the SACKCNT for any unacked chunk exceeds
2750 		 * the fast retransmit threshold, we will fast retransmit
2751 		 * after processing all the gap blocks.
2752 		 */
2753 		ASSERT(SEQ_LEQ(xtsn, gapstart));
2754 		while (xtsn != gapstart) {
2755 			SCTP_CHUNK_SET_SACKCNT(mp, SCTP_CHUNK_SACKCNT(mp) + 1);
2756 			if (SCTP_CHUNK_SACKCNT(mp) ==
2757 			    sctps->sctps_fast_rxt_thresh) {
2758 				SCTP_CHUNK_REXMIT(mp);
2759 				sctp->sctp_chk_fast_rexmit = B_TRUE;
2760 				trysend = 1;
2761 				if (!fast_recovery) {
2762 					/*
2763 					 * Entering fast recovery.
2764 					 */
2765 					fp = SCTP_CHUNK_DEST(mp);
2766 					fp->ssthresh = fp->cwnd / 2;
2767 					if (fp->ssthresh < 2 * fp->sfa_pmss) {
2768 						fp->ssthresh =
2769 						    2 * fp->sfa_pmss;
2770 					}
2771 					fp->cwnd = fp->ssthresh;
2772 					fp->pba = 0;
2773 					sctp->sctp_recovery_tsn =
2774 					    sctp->sctp_ltsn - 1;
2775 					fast_recovery = B_TRUE;
2776 				}
2777 			}
2778 
2779 			/*
2780 			 * Peer may have reneged on this chunk, so un-sack
2781 			 * it now. If the peer did renege, we need to
2782 			 * readjust unacked.
2783 			 */
2784 			if (SCTP_CHUNK_ISACKED(mp)) {
2785 				chunklen = ntohs(sdc->sdh_len);
2786 				fp = SCTP_CHUNK_DEST(mp);
2787 				fp->suna += chunklen;
2788 				sctp->sctp_unacked += chunklen - sizeof (*sdc);
2789 				SCTP_CHUNK_CLEAR_ACKED(mp);
2790 				if (!fp->timer_running) {
2791 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
2792 					    fp->rto);
2793 				}
2794 			}
2795 
2796 			mp = mp->b_next;
2797 			if (mp == NULL) {
2798 				ump = ump->b_next;
2799 				/*
2800 				 * ump can't be NULL given the sanity check
2801 				 * above.  But if it is NULL, it means that
2802 				 * there is a data corruption.  We'd better
2803 				 * panic.
2804 				 */
2805 				if (ump == NULL) {
2806 					panic("Memory corruption detected: gap "
2807 					    "start TSN 0x%x missing from the "
2808 					    "xmit list: %p", gapstart,
2809 					    (void *)sctp);
2810 				}
2811 				mp = ump->b_cont;
2812 			}
2813 			/*
2814 			 * mp can't be unsent given the sanity check above.
2815 			 */
2816 			ASSERT(SCTP_CHUNK_ISSENT(mp));
2817 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2818 			xtsn = ntohl(sdc->sdh_tsn);
2819 		}
2820 		/*
2821 		 * Now that we have found the chunk with TSN == 'gapstart',
2822 		 * let's walk till we hit the chunk with TSN == 'gapend'.
2823 		 * All intermediate chunks will be marked ACKED, if they
2824 		 * haven't already been.
2825 		 */
2826 		while (SEQ_LEQ(xtsn, gapend)) {
2827 			/*
2828 			 * SACKed
2829 			 */
2830 			SCTP_CHUNK_SET_SACKCNT(mp, 0);
2831 			if (!SCTP_CHUNK_ISACKED(mp)) {
2832 				SCTP_CHUNK_ACKED(mp);
2833 
2834 				fp = SCTP_CHUNK_DEST(mp);
2835 				chunklen = ntohs(sdc->sdh_len);
2836 				ASSERT(fp->suna >= chunklen);
2837 				fp->suna -= chunklen;
2838 				if (fp->suna == 0) {
2839 					/* All outstanding data acked. */
2840 					fp->pba = 0;
2841 					SCTP_FADDR_TIMER_STOP(fp);
2842 				}
2843 				fp->acked += chunklen;
2844 				acked += chunklen;
2845 				sctp->sctp_unacked -= chunklen - sizeof (*sdc);
2846 				ASSERT(sctp->sctp_unacked >= 0);
2847 			}
2848 			/* Go to the next chunk of the current message */
2849 			mp = mp->b_next;
2850 			/*
2851 			 * Move to the next message in the transmit list
2852 			 * if we are done with all the chunks from the current
2853 			 * message. Note, it is possible to hit the end of the
2854 			 * transmit list here, i.e. if we have already completed
2855 			 * processing the gap block.  But the TSN must be equal
2856 			 * to the gapend because of the above sanity check.
2857 			 * If it is not equal, it means that some data is
2858 			 * missing.
2859 			 * Also, note that we break here, which means we
2860 			 * continue processing gap blocks, if any. In case of
2861 			 * ordered gap blocks there can't be any following
2862 			 * this (if there is it will fail the sanity check
2863 			 * above). In case of un-ordered gap blocks we will
2864 			 * switch to sctp_process_uo_gaps().  In either case
2865 			 * it should be fine to continue with NULL ump/mp,
2866 			 * but we just reset it to xmit_head.
2867 			 */
2868 			if (mp == NULL) {
2869 				ump = ump->b_next;
2870 				if (ump == NULL) {
2871 					if (xtsn != gapend) {
2872 						panic("Memory corruption "
2873 						    "detected: gap end TSN "
2874 						    "0x%x missing from the "
2875 						    "xmit list: %p", gapend,
2876 						    (void *)sctp);
2877 					}
2878 					ump = sctp->sctp_xmit_head;
2879 					mp = mp1;
2880 					sdc = (sctp_data_hdr_t *)mp->b_rptr;
2881 					xtsn = ntohl(sdc->sdh_tsn);
2882 					break;
2883 				}
2884 				mp = ump->b_cont;
2885 			}
2886 			/*
2887 			 * Likewise, we could hit an unsent chunk once we have
2888 			 * completed processing the gap block. Again, it is
2889 			 * fine to continue processing gap blocks with mp
2890 			 * pointing to the unsent chunk, because if there
2891 			 * are more ordered gap blocks, they will fail the
2892 			 * sanity check, and if there are un-ordered gap blocks,
2893 			 * we will continue processing in sctp_process_uo_gaps()
2894 			 * We just reset the mp to the one we started with.
2895 			 */
2896 			if (!SCTP_CHUNK_ISSENT(mp)) {
2897 				ASSERT(xtsn == gapend);
2898 				ump = sctp->sctp_xmit_head;
2899 				mp = mp1;
2900 				sdc = (sctp_data_hdr_t *)mp->b_rptr;
2901 				xtsn = ntohl(sdc->sdh_tsn);
2902 				break;
2903 			}
2904 			sdc = (sctp_data_hdr_t *)mp->b_rptr;
2905 			xtsn = ntohl(sdc->sdh_tsn);
2906 		}
2907 	}
2908 	if (sctp->sctp_prsctp_aware)
2909 		sctp_check_abandoned_data(sctp, sctp->sctp_current);
2910 	if (sctp->sctp_chk_fast_rexmit)
2911 		sctp_fast_rexmit(sctp);
2912 ret:
2913 	trysend += sctp_set_frwnd(sctp, ntohl(sc->ssc_a_rwnd));
2914 
2915 	/*
2916 	 * If receive window is closed while there is unsent data,
2917 	 * set a timer for doing zero window probes.
2918 	 */
2919 	if (sctp->sctp_frwnd == 0 && sctp->sctp_unacked == 0 &&
2920 	    sctp->sctp_unsent != 0) {
2921 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
2922 		    sctp->sctp_current->rto);
2923 	}
2924 
2925 	/*
2926 	 * Set cwnd for all destinations.
2927 	 * Congestion window gets increased only when cumulative
2928 	 * TSN moves forward, we're not in fast recovery, and
2929 	 * cwnd has been fully utilized (almost fully, need to allow
2930 	 * some leeway due to non-MSS sized messages).
2931 	 */
2932 	if (sctp->sctp_current->acked == acked) {
2933 		/*
2934 		 * Fast-path, only data sent to sctp_current got acked.
2935 		 */
2936 		fp = sctp->sctp_current;
2937 		if (cumack_forward && !fast_recovery &&
2938 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
2939 			if (fp->cwnd < fp->ssthresh) {
2940 				/*
2941 				 * Slow start
2942 				 */
2943 				if (fp->acked > fp->sfa_pmss) {
2944 					fp->cwnd += fp->sfa_pmss;
2945 				} else {
2946 					fp->cwnd += fp->acked;
2947 				}
2948 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
2949 			} else {
2950 				/*
2951 				 * Congestion avoidance
2952 				 */
2953 				fp->pba += fp->acked;
2954 				if (fp->pba >= fp->cwnd) {
2955 					fp->pba -= fp->cwnd;
2956 					fp->cwnd += fp->sfa_pmss;
2957 					fp->cwnd = MIN(fp->cwnd,
2958 					    sctp->sctp_cwnd_max);
2959 				}
2960 			}
2961 		}
2962 		/*
2963 		 * Limit the burst of transmitted data segments.
2964 		 */
2965 		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
2966 		    fp->cwnd) {
2967 			fp->cwnd = fp->suna + sctps->sctps_maxburst *
2968 			    fp->sfa_pmss;
2969 		}
2970 		fp->acked = 0;
2971 		goto check_ss_rxmit;
2972 	}
2973 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
2974 		if (cumack_forward && fp->acked && !fast_recovery &&
2975 		    (fp->acked + fp->suna > fp->cwnd - fp->sfa_pmss)) {
2976 			if (fp->cwnd < fp->ssthresh) {
2977 				if (fp->acked > fp->sfa_pmss) {
2978 					fp->cwnd += fp->sfa_pmss;
2979 				} else {
2980 					fp->cwnd += fp->acked;
2981 				}
2982 				fp->cwnd = MIN(fp->cwnd, sctp->sctp_cwnd_max);
2983 			} else {
2984 				fp->pba += fp->acked;
2985 				if (fp->pba >= fp->cwnd) {
2986 					fp->pba -= fp->cwnd;
2987 					fp->cwnd += fp->sfa_pmss;
2988 					fp->cwnd = MIN(fp->cwnd,
2989 					    sctp->sctp_cwnd_max);
2990 				}
2991 			}
2992 		}
2993 		if (fp->suna + sctps->sctps_maxburst * fp->sfa_pmss <
2994 		    fp->cwnd) {
2995 			fp->cwnd = fp->suna + sctps->sctps_maxburst *
2996 			    fp->sfa_pmss;
2997 		}
2998 		fp->acked = 0;
2999 	}
3000 	fp = sctp->sctp_current;
3001 check_ss_rxmit:
3002 	/*
3003 	 * If this is a SACK following a timeout, check if there are
3004 	 * still unacked chunks (sent before the timeout) that we can
3005 	 * send.
3006 	 */
3007 	if (sctp->sctp_rexmitting) {
3008 		if (SEQ_LT(sctp->sctp_lastack_rxd, sctp->sctp_rxt_maxtsn)) {
3009 			/*
3010 			 * As we are in retransmission phase, we may get a
3011 			 * SACK which indicates some new chunks are received
3012 			 * but cum_tsn does not advance.  During this
3013 			 * phase, the other side advances cum_tsn only because
3014 			 * it receives our retransmitted chunks.  Only
3015 			 * this signals that some chunks are still
3016 			 * missing.
3017 			 */
3018 			if (cumack_forward) {
3019 				fp->rxt_unacked -= acked;
3020 				sctp_ss_rexmit(sctp);
3021 			}
3022 		} else {
3023 			sctp->sctp_rexmitting = B_FALSE;
3024 			sctp->sctp_rxt_nxttsn = sctp->sctp_ltsn;
3025 			sctp->sctp_rxt_maxtsn = sctp->sctp_ltsn;
3026 			fp->rxt_unacked = 0;
3027 		}
3028 	}
3029 	return (trysend);
3030 }
3031 
3032 /*
3033  * Returns 0 if the caller should stop processing any more chunks,
3034  * 1 if the caller should skip this chunk and continue processing.
3035  */
3036 static int
3037 sctp_strange_chunk(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
3038 {
3039 	size_t len;
3040 
3041 	BUMP_LOCAL(sctp->sctp_ibchunks);
3042 	/* check top two bits for action required */
3043 	if (ch->sch_id & 0x40) {	/* also matches 0xc0 */
3044 		len = ntohs(ch->sch_len);
3045 		sctp_add_err(sctp, SCTP_ERR_UNREC_CHUNK, ch, len, fp);
3046 
3047 		if ((ch->sch_id & 0xc0) == 0xc0) {
3048 			/* skip and continue */
3049 			return (1);
3050 		} else {
3051 			/* stop processing */
3052 			return (0);
3053 		}
3054 	}
3055 	if (ch->sch_id & 0x80) {
3056 		/* skip and continue, no error */
3057 		return (1);
3058 	}
3059 	/* top two bits are clear; stop processing and no error */
3060 	return (0);
3061 }
3062 
3063 /*
3064  * Basic sanity checks on all input chunks and parameters: they must
3065  * be of legitimate size for their purported type, and must follow
3066  * ordering conventions as defined in rfc2960.
3067  *
3068  * Returns 1 if the chunk and all encloded params are legitimate,
3069  * 0 otherwise.
3070  */
3071 /*ARGSUSED*/
3072 static int
3073 sctp_check_input(sctp_t *sctp, sctp_chunk_hdr_t *ch, ssize_t len, int first)
3074 {
3075 	sctp_parm_hdr_t	*ph;
3076 	void		*p = NULL;
3077 	ssize_t		clen;
3078 	uint16_t	ch_len;
3079 
3080 	ch_len = ntohs(ch->sch_len);
3081 	if (ch_len > len) {
3082 		return (0);
3083 	}
3084 
3085 	switch (ch->sch_id) {
3086 	case CHUNK_DATA:
3087 		if (ch_len < sizeof (sctp_data_hdr_t)) {
3088 			return (0);
3089 		}
3090 		return (1);
3091 	case CHUNK_INIT:
3092 	case CHUNK_INIT_ACK:
3093 		{
3094 			ssize_t	remlen = len;
3095 
3096 			/*
3097 			 * INIT and INIT-ACK chunks must not be bundled with
3098 			 * any other.
3099 			 */
3100 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
3101 			    (ch_len < (sizeof (*ch) +
3102 			    sizeof (sctp_init_chunk_t)))) {
3103 				return (0);
3104 			}
3105 			/* may have params that need checking */
3106 			p = (char *)(ch + 1) + sizeof (sctp_init_chunk_t);
3107 			clen = ch_len - (sizeof (*ch) +
3108 			    sizeof (sctp_init_chunk_t));
3109 		}
3110 		break;
3111 	case CHUNK_SACK:
3112 		if (ch_len < (sizeof (*ch) + sizeof (sctp_sack_chunk_t))) {
3113 			return (0);
3114 		}
3115 		/* dup and gap reports checked by got_sack() */
3116 		return (1);
3117 	case CHUNK_SHUTDOWN:
3118 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t))) {
3119 			return (0);
3120 		}
3121 		return (1);
3122 	case CHUNK_ABORT:
3123 	case CHUNK_ERROR:
3124 		if (ch_len < sizeof (*ch)) {
3125 			return (0);
3126 		}
3127 		/* may have params that need checking */
3128 		p = ch + 1;
3129 		clen = ch_len - sizeof (*ch);
3130 		break;
3131 	case CHUNK_ECNE:
3132 	case CHUNK_CWR:
3133 	case CHUNK_HEARTBEAT:
3134 	case CHUNK_HEARTBEAT_ACK:
3135 	/* Full ASCONF chunk and parameter checks are in asconf.c */
3136 	case CHUNK_ASCONF:
3137 	case CHUNK_ASCONF_ACK:
3138 		if (ch_len < sizeof (*ch)) {
3139 			return (0);
3140 		}
3141 		/* heartbeat data checked by process_heartbeat() */
3142 		return (1);
3143 	case CHUNK_SHUTDOWN_COMPLETE:
3144 		{
3145 			ssize_t remlen = len;
3146 
3147 			/*
3148 			 * SHUTDOWN-COMPLETE chunk must not be bundled with any
3149 			 * other
3150 			 */
3151 			if (!first || sctp_next_chunk(ch, &remlen) != NULL ||
3152 			    ch_len < sizeof (*ch)) {
3153 				return (0);
3154 			}
3155 		}
3156 		return (1);
3157 	case CHUNK_COOKIE:
3158 	case CHUNK_COOKIE_ACK:
3159 	case CHUNK_SHUTDOWN_ACK:
3160 		if (ch_len < sizeof (*ch) || !first) {
3161 			return (0);
3162 		}
3163 		return (1);
3164 	case CHUNK_FORWARD_TSN:
3165 		if (ch_len < (sizeof (*ch) + sizeof (uint32_t)))
3166 			return (0);
3167 		return (1);
3168 	default:
3169 		return (1);	/* handled by strange_chunk() */
3170 	}
3171 
3172 	/* check and byteorder parameters */
3173 	if (clen <= 0) {
3174 		return (1);
3175 	}
3176 	ASSERT(p != NULL);
3177 
3178 	ph = p;
3179 	while (ph != NULL && clen > 0) {
3180 		ch_len = ntohs(ph->sph_len);
3181 		if (ch_len > len || ch_len < sizeof (*ph)) {
3182 			return (0);
3183 		}
3184 		ph = sctp_next_parm(ph, &clen);
3185 	}
3186 
3187 	/* All OK */
3188 	return (1);
3189 }
3190 
3191 static mblk_t *
3192 sctp_check_in_policy(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
3193 {
3194 	boolean_t policy_present;
3195 	ipha_t *ipha;
3196 	ip6_t *ip6h;
3197 	netstack_t	*ns = ipst->ips_netstack;
3198 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3199 
3200 	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
3201 		policy_present = ipss->ipsec_inbound_v4_policy_present;
3202 		ipha = (ipha_t *)mp->b_rptr;
3203 		ip6h = NULL;
3204 	} else {
3205 		policy_present = ipss->ipsec_inbound_v6_policy_present;
3206 		ipha = NULL;
3207 		ip6h = (ip6_t *)mp->b_rptr;
3208 	}
3209 
3210 	if (policy_present) {
3211 		/*
3212 		 * The conn_t parameter is NULL because we already know
3213 		 * nobody's home.
3214 		 */
3215 		mp = ipsec_check_global_policy(mp, (conn_t *)NULL,
3216 		    ipha, ip6h, ira, ns);
3217 		if (mp == NULL)
3218 			return (NULL);
3219 	}
3220 	return (mp);
3221 }
3222 
3223 /* Handle out-of-the-blue packets */
3224 void
3225 sctp_ootb_input(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
3226 {
3227 	sctp_t			*sctp;
3228 	sctp_chunk_hdr_t	*ch;
3229 	sctp_hdr_t		*sctph;
3230 	in6_addr_t		src, dst;
3231 	uint_t			ip_hdr_len = ira->ira_ip_hdr_length;
3232 	ssize_t			mlen;
3233 	sctp_stack_t		*sctps;
3234 	boolean_t		secure;
3235 	zoneid_t		zoneid = ira->ira_zoneid;
3236 	uchar_t			*rptr;
3237 
3238 	ASSERT(ira->ira_ill == NULL);
3239 
3240 	secure = ira->ira_flags & IRAF_IPSEC_SECURE;
3241 
3242 	sctps = ipst->ips_netstack->netstack_sctp;
3243 
3244 	BUMP_MIB(&sctps->sctps_mib, sctpOutOfBlue);
3245 	BUMP_MIB(&sctps->sctps_mib, sctpInSCTPPkts);
3246 
3247 	if (mp->b_cont != NULL) {
3248 		/*
3249 		 * All subsequent code is vastly simplified if it can
3250 		 * assume a single contiguous chunk of data.
3251 		 */
3252 		if (pullupmsg(mp, -1) == 0) {
3253 			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3254 			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3255 			freemsg(mp);
3256 			return;
3257 		}
3258 	}
3259 
3260 	rptr = mp->b_rptr;
3261 	sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]);
3262 	if (ira->ira_flags & IRAF_IS_IPV4) {
3263 		ipha_t *ipha;
3264 
3265 		ipha = (ipha_t *)rptr;
3266 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src);
3267 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst);
3268 	} else {
3269 		ip6_t *ip6h;
3270 
3271 		ip6h = (ip6_t *)rptr;
3272 		src = ip6h->ip6_src;
3273 		dst = ip6h->ip6_dst;
3274 	}
3275 
3276 	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
3277 	if ((ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen)) == NULL) {
3278 		dprint(3, ("sctp_ootb_input: invalid packet\n"));
3279 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3280 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3281 		freemsg(mp);
3282 		return;
3283 	}
3284 
3285 	switch (ch->sch_id) {
3286 	case CHUNK_INIT:
3287 		/* no listener; send abort  */
3288 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3289 			return;
3290 		sctp_ootb_send_abort(sctp_init2vtag(ch), 0,
3291 		    NULL, 0, mp, 0, B_TRUE, ira, ipst);
3292 		break;
3293 	case CHUNK_INIT_ACK:
3294 		/* check for changed src addr */
3295 		sctp = sctp_addrlist2sctp(mp, sctph, ch, zoneid, sctps);
3296 		if (sctp != NULL) {
3297 			/* success; proceed to normal path */
3298 			mutex_enter(&sctp->sctp_lock);
3299 			if (sctp->sctp_running) {
3300 				sctp_add_recvq(sctp, mp, B_FALSE, ira);
3301 				mutex_exit(&sctp->sctp_lock);
3302 			} else {
3303 				/*
3304 				 * If the source address is changed, we
3305 				 * don't need to worry too much about
3306 				 * out of order processing.  So we don't
3307 				 * check if the recvq is empty or not here.
3308 				 */
3309 				sctp->sctp_running = B_TRUE;
3310 				mutex_exit(&sctp->sctp_lock);
3311 				sctp_input_data(sctp, mp, ira);
3312 				WAKE_SCTP(sctp);
3313 			}
3314 			SCTP_REFRELE(sctp);
3315 			return;
3316 		}
3317 		/* else bogus init ack; drop it */
3318 		break;
3319 	case CHUNK_SHUTDOWN_ACK:
3320 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3321 			return;
3322 		sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst);
3323 		return;
3324 	case CHUNK_ERROR:
3325 	case CHUNK_ABORT:
3326 	case CHUNK_COOKIE_ACK:
3327 	case CHUNK_SHUTDOWN_COMPLETE:
3328 		break;
3329 	default:
3330 		if (secure && sctp_check_in_policy(mp, ira, ipst) == NULL)
3331 			return;
3332 		sctp_ootb_send_abort(sctph->sh_verf, 0,
3333 		    NULL, 0, mp, 0, B_TRUE, ira, ipst);
3334 		break;
3335 	}
3336 	freemsg(mp);
3337 }
3338 
3339 /*
3340  * Handle sctp packets.
3341  * Note that we rele the sctp_t (the caller got a reference on it).
3342  */
3343 void
3344 sctp_input(conn_t *connp, ipha_t *ipha, ip6_t *ip6h, mblk_t *mp,
3345     ip_recv_attr_t *ira)
3346 {
3347 	sctp_t		*sctp = CONN2SCTP(connp);
3348 	boolean_t	secure;
3349 	ill_t		*ill = ira->ira_ill;
3350 	ip_stack_t	*ipst = ill->ill_ipst;
3351 	ipsec_stack_t	*ipss = ipst->ips_netstack->netstack_ipsec;
3352 	iaflags_t	iraflags = ira->ira_flags;
3353 	ill_t		*rill = ira->ira_rill;
3354 
3355 	secure = iraflags & IRAF_IPSEC_SECURE;
3356 
3357 	/*
3358 	 * We check some fields in conn_t without holding a lock.
3359 	 * This should be fine.
3360 	 */
3361 	if (((iraflags & IRAF_IS_IPV4) ?
3362 	    CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
3363 	    CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
3364 	    secure) {
3365 		mp = ipsec_check_inbound_policy(mp, connp, ipha,
3366 		    ip6h, ira);
3367 		if (mp == NULL) {
3368 			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
3369 			/* Note that mp is NULL */
3370 			ip_drop_input("ipIfStatsInDiscards", mp, ill);
3371 			SCTP_REFRELE(sctp);
3372 			return;
3373 		}
3374 	}
3375 
3376 	ira->ira_ill = ira->ira_rill = NULL;
3377 
3378 	mutex_enter(&sctp->sctp_lock);
3379 	if (sctp->sctp_running) {
3380 		sctp_add_recvq(sctp, mp, B_FALSE, ira);
3381 		mutex_exit(&sctp->sctp_lock);
3382 		goto done;
3383 	} else {
3384 		sctp->sctp_running = B_TRUE;
3385 		mutex_exit(&sctp->sctp_lock);
3386 
3387 		mutex_enter(&sctp->sctp_recvq_lock);
3388 		if (sctp->sctp_recvq != NULL) {
3389 			sctp_add_recvq(sctp, mp, B_TRUE, ira);
3390 			mutex_exit(&sctp->sctp_recvq_lock);
3391 			WAKE_SCTP(sctp);
3392 			goto done;
3393 		}
3394 	}
3395 	mutex_exit(&sctp->sctp_recvq_lock);
3396 	if (ira->ira_flags & IRAF_ICMP_ERROR)
3397 		sctp_icmp_error(sctp, mp);
3398 	else
3399 		sctp_input_data(sctp, mp, ira);
3400 	WAKE_SCTP(sctp);
3401 
3402 done:
3403 	SCTP_REFRELE(sctp);
3404 	ira->ira_ill = ill;
3405 	ira->ira_rill = rill;
3406 }
3407 
3408 static void
3409 sctp_process_abort(sctp_t *sctp, sctp_chunk_hdr_t *ch, int err)
3410 {
3411 	sctp_stack_t	*sctps = sctp->sctp_sctps;
3412 
3413 	BUMP_MIB(&sctps->sctps_mib, sctpAborted);
3414 	BUMP_LOCAL(sctp->sctp_ibchunks);
3415 
3416 	sctp_assoc_event(sctp, SCTP_COMM_LOST,
3417 	    ntohs(((sctp_parm_hdr_t *)(ch + 1))->sph_type), ch);
3418 	sctp_clean_death(sctp, err);
3419 }
3420 
3421 void
3422 sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
3423 {
3424 	sctp_chunk_hdr_t	*ch;
3425 	ssize_t			mlen;
3426 	int			gotdata;
3427 	int			trysend;
3428 	sctp_faddr_t		*fp;
3429 	sctp_init_chunk_t	*iack;
3430 	uint32_t		tsn;
3431 	sctp_data_hdr_t		*sdc;
3432 	ip_pkt_t		ipp;
3433 	in6_addr_t		src;
3434 	in6_addr_t		dst;
3435 	uint_t			ifindex;
3436 	sctp_hdr_t		*sctph;
3437 	uint_t			ip_hdr_len = ira->ira_ip_hdr_length;
3438 	mblk_t			*dups = NULL;
3439 	int			recv_adaptation;
3440 	boolean_t		wake_eager = B_FALSE;
3441 	in6_addr_t		peer_src;
3442 	int64_t			now;
3443 	sctp_stack_t		*sctps = sctp->sctp_sctps;
3444 	ip_stack_t		*ipst = sctps->sctps_netstack->netstack_ip;
3445 	boolean_t		hb_already = B_FALSE;
3446 	cred_t			*cr;
3447 	pid_t			cpid;
3448 	uchar_t			*rptr;
3449 	conn_t			*connp = sctp->sctp_connp;
3450 
3451 	ASSERT(DB_TYPE(mp) == M_DATA);
3452 	ASSERT(ira->ira_ill == NULL);
3453 
3454 	if (mp->b_cont != NULL) {
3455 		/*
3456 		 * All subsequent code is vastly simplified if it can
3457 		 * assume a single contiguous chunk of data.
3458 		 */
3459 		if (pullupmsg(mp, -1) == 0) {
3460 			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3461 			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3462 			freemsg(mp);
3463 			return;
3464 		}
3465 	}
3466 
3467 	BUMP_LOCAL(sctp->sctp_ipkts);
3468 	ifindex = ira->ira_ruifindex;
3469 
3470 	rptr = mp->b_rptr;
3471 
3472 	ipp.ipp_fields = 0;
3473 	if (connp->conn_recv_ancillary.crb_all != 0) {
3474 		/*
3475 		 * Record packet information in the ip_pkt_t
3476 		 */
3477 		if (ira->ira_flags & IRAF_IS_IPV4) {
3478 			(void) ip_find_hdr_v4((ipha_t *)rptr, &ipp,
3479 			    B_FALSE);
3480 		} else {
3481 			uint8_t nexthdrp;
3482 
3483 			/*
3484 			 * IPv6 packets can only be received by applications
3485 			 * that are prepared to receive IPv6 addresses.
3486 			 * The IP fanout must ensure this.
3487 			 */
3488 			ASSERT(connp->conn_family == AF_INET6);
3489 
3490 			(void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp,
3491 			    &nexthdrp);
3492 			ASSERT(nexthdrp == IPPROTO_SCTP);
3493 
3494 			/* Could have caused a pullup? */
3495 			rptr = mp->b_rptr;
3496 		}
3497 	}
3498 
3499 	sctph = ((sctp_hdr_t *)&rptr[ip_hdr_len]);
3500 
3501 	if (ira->ira_flags & IRAF_IS_IPV4) {
3502 		ipha_t *ipha;
3503 
3504 		ipha = (ipha_t *)rptr;
3505 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &src);
3506 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &dst);
3507 	} else {
3508 		ip6_t *ip6h;
3509 
3510 		ip6h = (ip6_t *)rptr;
3511 		src = ip6h->ip6_src;
3512 		dst = ip6h->ip6_dst;
3513 	}
3514 
3515 	mlen = mp->b_wptr - (uchar_t *)(sctph + 1);
3516 	ch = sctp_first_chunk((uchar_t *)(sctph + 1), mlen);
3517 	if (ch == NULL) {
3518 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3519 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3520 		freemsg(mp);
3521 		return;
3522 	}
3523 
3524 	if (!sctp_check_input(sctp, ch, mlen, 1)) {
3525 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
3526 		ip_drop_input("ipIfStatsInDiscards", mp, NULL);
3527 		goto done;
3528 	}
3529 	/*
3530 	 * Check verfication tag (special handling for INIT,
3531 	 * COOKIE, SHUTDOWN_COMPLETE and SHUTDOWN_ACK chunks).
3532 	 * ABORTs are handled in the chunk processing loop, since
3533 	 * may not appear first. All other checked chunks must
3534 	 * appear first, or will have been dropped by check_input().
3535 	 */
3536 	switch (ch->sch_id) {
3537 	case CHUNK_INIT:
3538 		if (sctph->sh_verf != 0) {
3539 			/* drop it */
3540 			goto done;
3541 		}
3542 		break;
3543 	case CHUNK_SHUTDOWN_COMPLETE:
3544 		if (sctph->sh_verf == sctp->sctp_lvtag)
3545 			break;
3546 		if (sctph->sh_verf == sctp->sctp_fvtag &&
3547 		    SCTP_GET_TBIT(ch)) {
3548 			break;
3549 		}
3550 		/* else drop it */
3551 		goto done;
3552 	case CHUNK_ABORT:
3553 	case CHUNK_COOKIE:
3554 		/* handled below */
3555 		break;
3556 	case CHUNK_SHUTDOWN_ACK:
3557 		if (sctp->sctp_state > SCTPS_BOUND &&
3558 		    sctp->sctp_state < SCTPS_ESTABLISHED) {
3559 			/* treat as OOTB */
3560 			sctp_ootb_shutdown_ack(mp, ip_hdr_len, ira, ipst);
3561 			return;
3562 		}
3563 		/* else fallthru */
3564 	default:
3565 		/*
3566 		 * All other packets must have a valid
3567 		 * verification tag, however if this is a
3568 		 * listener, we use a refined version of
3569 		 * out-of-the-blue logic.
3570 		 */
3571 		if (sctph->sh_verf != sctp->sctp_lvtag &&
3572 		    sctp->sctp_state != SCTPS_LISTEN) {
3573 			/* drop it */
3574 			goto done;
3575 		}
3576 		break;
3577 	}
3578 
3579 	/* Have a valid sctp for this packet */
3580 	fp = sctp_lookup_faddr(sctp, &src);
3581 	dprint(2, ("sctp_dispatch_rput: mp=%p fp=%p sctp=%p\n", (void *)mp,
3582 	    (void *)fp, (void *)sctp));
3583 
3584 	gotdata = 0;
3585 	trysend = 0;
3586 
3587 	now = ddi_get_lbolt64();
3588 	/* Process the chunks */
3589 	do {
3590 		dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n",
3591 		    sctp->sctp_state, (int)(ch->sch_id)));
3592 
3593 		if (ch->sch_id == CHUNK_ABORT) {
3594 			if (sctph->sh_verf != sctp->sctp_lvtag &&
3595 			    sctph->sh_verf != sctp->sctp_fvtag) {
3596 				/* drop it */
3597 				goto done;
3598 			}
3599 		}
3600 
3601 		switch (sctp->sctp_state) {
3602 
3603 		case SCTPS_ESTABLISHED:
3604 		case SCTPS_SHUTDOWN_PENDING:
3605 		case SCTPS_SHUTDOWN_SENT:
3606 			switch (ch->sch_id) {
3607 			case CHUNK_DATA:
3608 				/* 0-length data chunks are not allowed */
3609 				if (ntohs(ch->sch_len) == sizeof (*sdc)) {
3610 					sdc = (sctp_data_hdr_t *)ch;
3611 					tsn = sdc->sdh_tsn;
3612 					sctp_send_abort(sctp, sctp->sctp_fvtag,
3613 					    SCTP_ERR_NO_USR_DATA, (char *)&tsn,
3614 					    sizeof (tsn), mp, 0, B_FALSE, ira);
3615 					sctp_assoc_event(sctp, SCTP_COMM_LOST,
3616 					    0, NULL);
3617 					sctp_clean_death(sctp, ECONNABORTED);
3618 					goto done;
3619 				}
3620 
3621 				ASSERT(fp != NULL);
3622 				sctp->sctp_lastdata = fp;
3623 				sctp_data_chunk(sctp, ch, mp, &dups, fp,
3624 				    &ipp, ira);
3625 				gotdata = 1;
3626 				/* Restart shutdown timer if shutting down */
3627 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
3628 					/*
3629 					 * If we have exceeded our max
3630 					 * wait bound for waiting for a
3631 					 * shutdown ack from the peer,
3632 					 * abort the association.
3633 					 */
3634 					if (sctps->sctps_shutack_wait_bound !=
3635 					    0 &&
3636 					    TICK_TO_MSEC(now -
3637 					    sctp->sctp_out_time) >
3638 					    sctps->sctps_shutack_wait_bound) {
3639 						sctp_send_abort(sctp,
3640 						    sctp->sctp_fvtag, 0, NULL,
3641 						    0, mp, 0, B_FALSE, ira);
3642 						sctp_assoc_event(sctp,
3643 						    SCTP_COMM_LOST, 0, NULL);
3644 						sctp_clean_death(sctp,
3645 						    ECONNABORTED);
3646 						goto done;
3647 					}
3648 					SCTP_FADDR_TIMER_RESTART(sctp, fp,
3649 					    fp->rto);
3650 				}
3651 				break;
3652 			case CHUNK_SACK:
3653 				ASSERT(fp != NULL);
3654 				/*
3655 				 * Peer is real and alive if it can ack our
3656 				 * data.
3657 				 */
3658 				sctp_faddr_alive(sctp, fp);
3659 				trysend = sctp_got_sack(sctp, ch);
3660 				if (trysend < 0) {
3661 					sctp_send_abort(sctp, sctph->sh_verf,
3662 					    0, NULL, 0, mp, 0, B_FALSE, ira);
3663 					sctp_assoc_event(sctp,
3664 					    SCTP_COMM_LOST, 0, NULL);
3665 					sctp_clean_death(sctp,
3666 					    ECONNABORTED);
3667 					goto done;
3668 				}
3669 				break;
3670 			case CHUNK_HEARTBEAT:
3671 				if (!hb_already) {
3672 					/*
3673 					 * In any one packet, there should
3674 					 * only be one heartbeat chunk.  So
3675 					 * we should not process more than
3676 					 * once.
3677 					 */
3678 					sctp_return_heartbeat(sctp, ch, mp);
3679 					hb_already = B_TRUE;
3680 				}
3681 				break;
3682 			case CHUNK_HEARTBEAT_ACK:
3683 				sctp_process_heartbeat(sctp, ch);
3684 				break;
3685 			case CHUNK_SHUTDOWN:
3686 				sctp_shutdown_event(sctp);
3687 				trysend = sctp_shutdown_received(sctp, ch,
3688 				    B_FALSE, B_FALSE, fp);
3689 				BUMP_LOCAL(sctp->sctp_ibchunks);
3690 				break;
3691 			case CHUNK_SHUTDOWN_ACK:
3692 				BUMP_LOCAL(sctp->sctp_ibchunks);
3693 				if (sctp->sctp_state == SCTPS_SHUTDOWN_SENT) {
3694 					sctp_shutdown_complete(sctp);
3695 					BUMP_MIB(&sctps->sctps_mib,
3696 					    sctpShutdowns);
3697 					sctp_assoc_event(sctp,
3698 					    SCTP_SHUTDOWN_COMP, 0, NULL);
3699 					sctp_clean_death(sctp, 0);
3700 					goto done;
3701 				}
3702 				break;
3703 			case CHUNK_ABORT: {
3704 				sctp_saddr_ipif_t *sp;
3705 
3706 				/* Ignore if delete pending */
3707 				sp = sctp_saddr_lookup(sctp, &dst, 0);
3708 				ASSERT(sp != NULL);
3709 				if (sp->saddr_ipif_delete_pending) {
3710 					BUMP_LOCAL(sctp->sctp_ibchunks);
3711 					break;
3712 				}
3713 
3714 				sctp_process_abort(sctp, ch, ECONNRESET);
3715 				goto done;
3716 			}
3717 			case CHUNK_INIT:
3718 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3719 				break;
3720 			case CHUNK_COOKIE:
3721 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3722 				    sctph, &recv_adaptation, NULL, ira) != -1) {
3723 					sctp_send_cookie_ack(sctp);
3724 					sctp_assoc_event(sctp, SCTP_RESTART,
3725 					    0, NULL);
3726 					if (recv_adaptation) {
3727 						sctp->sctp_recv_adaptation = 1;
3728 						sctp_adaptation_event(sctp);
3729 					}
3730 				} else {
3731 					BUMP_MIB(&sctps->sctps_mib,
3732 					    sctpInInvalidCookie);
3733 				}
3734 				break;
3735 			case CHUNK_ERROR: {
3736 				int error;
3737 
3738 				BUMP_LOCAL(sctp->sctp_ibchunks);
3739 				error = sctp_handle_error(sctp, sctph, ch, mp,
3740 				    ira);
3741 				if (error != 0) {
3742 					sctp_assoc_event(sctp, SCTP_COMM_LOST,
3743 					    0, NULL);
3744 					sctp_clean_death(sctp, error);
3745 					goto done;
3746 				}
3747 				break;
3748 			}
3749 			case CHUNK_ASCONF:
3750 				ASSERT(fp != NULL);
3751 				sctp_input_asconf(sctp, ch, fp);
3752 				BUMP_LOCAL(sctp->sctp_ibchunks);
3753 				break;
3754 			case CHUNK_ASCONF_ACK:
3755 				ASSERT(fp != NULL);
3756 				sctp_faddr_alive(sctp, fp);
3757 				sctp_input_asconf_ack(sctp, ch, fp);
3758 				BUMP_LOCAL(sctp->sctp_ibchunks);
3759 				break;
3760 			case CHUNK_FORWARD_TSN:
3761 				ASSERT(fp != NULL);
3762 				sctp->sctp_lastdata = fp;
3763 				sctp_process_forward_tsn(sctp, ch, fp,
3764 				    &ipp, ira);
3765 				gotdata = 1;
3766 				BUMP_LOCAL(sctp->sctp_ibchunks);
3767 				break;
3768 			default:
3769 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
3770 					goto nomorechunks;
3771 				} /* else skip and continue processing */
3772 				break;
3773 			}
3774 			break;
3775 
3776 		case SCTPS_LISTEN:
3777 			switch (ch->sch_id) {
3778 			case CHUNK_INIT:
3779 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3780 				break;
3781 			case CHUNK_COOKIE: {
3782 				sctp_t *eager;
3783 
3784 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3785 				    sctph, &recv_adaptation, &peer_src,
3786 				    ira) == -1) {
3787 					BUMP_MIB(&sctps->sctps_mib,
3788 					    sctpInInvalidCookie);
3789 					goto done;
3790 				}
3791 
3792 				/*
3793 				 * The cookie is good; ensure that
3794 				 * the peer used the verification
3795 				 * tag from the init ack in the header.
3796 				 */
3797 				if (iack->sic_inittag != sctph->sh_verf)
3798 					goto done;
3799 
3800 				eager = sctp_conn_request(sctp, mp, ifindex,
3801 				    ip_hdr_len, iack, ira);
3802 				if (eager == NULL) {
3803 					sctp_send_abort(sctp, sctph->sh_verf,
3804 					    SCTP_ERR_NO_RESOURCES, NULL, 0, mp,
3805 					    0, B_FALSE, ira);
3806 					goto done;
3807 				}
3808 
3809 				/*
3810 				 * If there were extra chunks
3811 				 * bundled with the cookie,
3812 				 * they must be processed
3813 				 * on the eager's queue. We
3814 				 * accomplish this by refeeding
3815 				 * the whole packet into the
3816 				 * state machine on the right
3817 				 * q. The packet (mp) gets
3818 				 * there via the eager's
3819 				 * cookie_mp field (overloaded
3820 				 * with the active open role).
3821 				 * This is picked up when
3822 				 * processing the null bind
3823 				 * request put on the eager's
3824 				 * q by sctp_accept(). We must
3825 				 * first revert the cookie
3826 				 * chunk's length field to network
3827 				 * byteorder so it can be
3828 				 * properly reprocessed on the
3829 				 * eager's queue.
3830 				 */
3831 				BUMP_MIB(&sctps->sctps_mib, sctpPassiveEstab);
3832 				if (mlen > ntohs(ch->sch_len)) {
3833 					eager->sctp_cookie_mp = dupb(mp);
3834 					/*
3835 					 * If no mem, just let
3836 					 * the peer retransmit.
3837 					 */
3838 				}
3839 				sctp_assoc_event(eager, SCTP_COMM_UP, 0, NULL);
3840 				if (recv_adaptation) {
3841 					eager->sctp_recv_adaptation = 1;
3842 					eager->sctp_rx_adaptation_code =
3843 					    sctp->sctp_rx_adaptation_code;
3844 					sctp_adaptation_event(eager);
3845 				}
3846 
3847 				eager->sctp_active = now;
3848 				sctp_send_cookie_ack(eager);
3849 
3850 				wake_eager = B_TRUE;
3851 
3852 				/*
3853 				 * Process rest of the chunks with eager.
3854 				 */
3855 				sctp = eager;
3856 				fp = sctp_lookup_faddr(sctp, &peer_src);
3857 				/*
3858 				 * Confirm peer's original source.  fp can
3859 				 * only be NULL if peer does not use the
3860 				 * original source as one of its addresses...
3861 				 */
3862 				if (fp == NULL)
3863 					fp = sctp_lookup_faddr(sctp, &src);
3864 				else
3865 					sctp_faddr_alive(sctp, fp);
3866 
3867 				/*
3868 				 * Validate the peer addresses.  It also starts
3869 				 * the heartbeat timer.
3870 				 */
3871 				sctp_validate_peer(sctp);
3872 				break;
3873 			}
3874 			/* Anything else is considered out-of-the-blue */
3875 			case CHUNK_ERROR:
3876 			case CHUNK_ABORT:
3877 			case CHUNK_COOKIE_ACK:
3878 			case CHUNK_SHUTDOWN_COMPLETE:
3879 				BUMP_LOCAL(sctp->sctp_ibchunks);
3880 				goto done;
3881 			default:
3882 				BUMP_LOCAL(sctp->sctp_ibchunks);
3883 				sctp_send_abort(sctp, sctph->sh_verf, 0, NULL,
3884 				    0, mp, 0, B_TRUE, ira);
3885 				goto done;
3886 			}
3887 			break;
3888 
3889 		case SCTPS_COOKIE_WAIT:
3890 			switch (ch->sch_id) {
3891 			case CHUNK_INIT_ACK:
3892 				sctp_stop_faddr_timers(sctp);
3893 				sctp_faddr_alive(sctp, sctp->sctp_current);
3894 				sctp_send_cookie_echo(sctp, ch, mp, ira);
3895 				BUMP_LOCAL(sctp->sctp_ibchunks);
3896 				break;
3897 			case CHUNK_ABORT:
3898 				sctp_process_abort(sctp, ch, ECONNREFUSED);
3899 				goto done;
3900 			case CHUNK_INIT:
3901 				sctp_send_initack(sctp, sctph, ch, mp, ira);
3902 				break;
3903 			case CHUNK_COOKIE:
3904 				cr = ira->ira_cred;
3905 				cpid = ira->ira_cpid;
3906 
3907 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3908 				    sctph, &recv_adaptation, NULL, ira) == -1) {
3909 					BUMP_MIB(&sctps->sctps_mib,
3910 					    sctpInInvalidCookie);
3911 					break;
3912 				}
3913 				sctp_send_cookie_ack(sctp);
3914 				sctp_stop_faddr_timers(sctp);
3915 				if (!SCTP_IS_DETACHED(sctp)) {
3916 					sctp->sctp_ulp_connected(
3917 					    sctp->sctp_ulpd, 0, cr, cpid);
3918 					sctp_set_ulp_prop(sctp);
3919 
3920 				}
3921 				sctp->sctp_state = SCTPS_ESTABLISHED;
3922 				sctp->sctp_assoc_start_time =
3923 				    (uint32_t)ddi_get_lbolt();
3924 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
3925 				if (sctp->sctp_cookie_mp) {
3926 					freemsg(sctp->sctp_cookie_mp);
3927 					sctp->sctp_cookie_mp = NULL;
3928 				}
3929 
3930 				/* Validate the peer addresses. */
3931 				sctp->sctp_active = now;
3932 				sctp_validate_peer(sctp);
3933 
3934 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
3935 				if (recv_adaptation) {
3936 					sctp->sctp_recv_adaptation = 1;
3937 					sctp_adaptation_event(sctp);
3938 				}
3939 				/* Try sending queued data, or ASCONFs */
3940 				trysend = 1;
3941 				break;
3942 			default:
3943 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
3944 					goto nomorechunks;
3945 				} /* else skip and continue processing */
3946 				break;
3947 			}
3948 			break;
3949 
3950 		case SCTPS_COOKIE_ECHOED:
3951 			switch (ch->sch_id) {
3952 			case CHUNK_COOKIE_ACK:
3953 				cr = ira->ira_cred;
3954 				cpid = ira->ira_cpid;
3955 
3956 				if (!SCTP_IS_DETACHED(sctp)) {
3957 					sctp->sctp_ulp_connected(
3958 					    sctp->sctp_ulpd, 0, cr, cpid);
3959 					sctp_set_ulp_prop(sctp);
3960 				}
3961 				if (sctp->sctp_unacked == 0)
3962 					sctp_stop_faddr_timers(sctp);
3963 				sctp->sctp_state = SCTPS_ESTABLISHED;
3964 				sctp->sctp_assoc_start_time =
3965 				    (uint32_t)ddi_get_lbolt();
3966 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
3967 				BUMP_LOCAL(sctp->sctp_ibchunks);
3968 				if (sctp->sctp_cookie_mp) {
3969 					freemsg(sctp->sctp_cookie_mp);
3970 					sctp->sctp_cookie_mp = NULL;
3971 				}
3972 				sctp_faddr_alive(sctp, fp);
3973 				/* Validate the peer addresses. */
3974 				sctp->sctp_active = now;
3975 				sctp_validate_peer(sctp);
3976 
3977 				/* Try sending queued data, or ASCONFs */
3978 				trysend = 1;
3979 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
3980 				sctp_adaptation_event(sctp);
3981 				break;
3982 			case CHUNK_ABORT:
3983 				sctp_process_abort(sctp, ch, ECONNREFUSED);
3984 				goto done;
3985 			case CHUNK_COOKIE:
3986 				cr = ira->ira_cred;
3987 				cpid = ira->ira_cpid;
3988 
3989 				if (sctp_process_cookie(sctp, ch, mp, &iack,
3990 				    sctph, &recv_adaptation, NULL, ira) == -1) {
3991 					BUMP_MIB(&sctps->sctps_mib,
3992 					    sctpInInvalidCookie);
3993 					break;
3994 				}
3995 				sctp_send_cookie_ack(sctp);
3996 
3997 				if (!SCTP_IS_DETACHED(sctp)) {
3998 					sctp->sctp_ulp_connected(
3999 					    sctp->sctp_ulpd, 0, cr, cpid);
4000 					sctp_set_ulp_prop(sctp);
4001 
4002 				}
4003 				if (sctp->sctp_unacked == 0)
4004 					sctp_stop_faddr_timers(sctp);
4005 				sctp->sctp_state = SCTPS_ESTABLISHED;
4006 				sctp->sctp_assoc_start_time =
4007 				    (uint32_t)ddi_get_lbolt();
4008 				BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
4009 				if (sctp->sctp_cookie_mp) {
4010 					freemsg(sctp->sctp_cookie_mp);
4011 					sctp->sctp_cookie_mp = NULL;
4012 				}
4013 				/* Validate the peer addresses. */
4014 				sctp->sctp_active = now;
4015 				sctp_validate_peer(sctp);
4016 
4017 				sctp_assoc_event(sctp, SCTP_COMM_UP, 0, NULL);
4018 				if (recv_adaptation) {
4019 					sctp->sctp_recv_adaptation = 1;
4020 					sctp_adaptation_event(sctp);
4021 				}
4022 				/* Try sending queued data, or ASCONFs */
4023 				trysend = 1;
4024 				break;
4025 			case CHUNK_INIT:
4026 				sctp_send_initack(sctp, sctph, ch, mp, ira);
4027 				break;
4028 			case CHUNK_ERROR: {
4029 				sctp_parm_hdr_t *p;
4030 
4031 				BUMP_LOCAL(sctp->sctp_ibchunks);
4032 				/* check for a stale cookie */
4033 				if (ntohs(ch->sch_len) >=
4034 				    (sizeof (*p) + sizeof (*ch)) +
4035 				    sizeof (uint32_t)) {
4036 
4037 					p = (sctp_parm_hdr_t *)(ch + 1);
4038 					if (p->sph_type ==
4039 					    htons(SCTP_ERR_STALE_COOKIE)) {
4040 						BUMP_MIB(&sctps->sctps_mib,
4041 						    sctpAborted);
4042 						sctp_error_event(sctp, ch);
4043 						sctp_assoc_event(sctp,
4044 						    SCTP_COMM_LOST, 0, NULL);
4045 						sctp_clean_death(sctp,
4046 						    ECONNREFUSED);
4047 						goto done;
4048 					}
4049 				}
4050 				break;
4051 			}
4052 			case CHUNK_HEARTBEAT:
4053 				if (!hb_already) {
4054 					sctp_return_heartbeat(sctp, ch, mp);
4055 					hb_already = B_TRUE;
4056 				}
4057 				break;
4058 			default:
4059 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4060 					goto nomorechunks;
4061 				} /* else skip and continue processing */
4062 			} /* switch (ch->sch_id) */
4063 			break;
4064 
4065 		case SCTPS_SHUTDOWN_ACK_SENT:
4066 			switch (ch->sch_id) {
4067 			case CHUNK_ABORT:
4068 				/* Pass gathered wisdom to IP for keeping */
4069 				sctp_update_dce(sctp);
4070 				sctp_process_abort(sctp, ch, 0);
4071 				goto done;
4072 			case CHUNK_SHUTDOWN_COMPLETE:
4073 				BUMP_LOCAL(sctp->sctp_ibchunks);
4074 				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
4075 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
4076 				    NULL);
4077 
4078 				/* Pass gathered wisdom to IP for keeping */
4079 				sctp_update_dce(sctp);
4080 				sctp_clean_death(sctp, 0);
4081 				goto done;
4082 			case CHUNK_SHUTDOWN_ACK:
4083 				sctp_shutdown_complete(sctp);
4084 				BUMP_LOCAL(sctp->sctp_ibchunks);
4085 				BUMP_MIB(&sctps->sctps_mib, sctpShutdowns);
4086 				sctp_assoc_event(sctp, SCTP_SHUTDOWN_COMP, 0,
4087 				    NULL);
4088 				sctp_clean_death(sctp, 0);
4089 				goto done;
4090 			case CHUNK_COOKIE:
4091 				(void) sctp_shutdown_received(sctp, NULL,
4092 				    B_TRUE, B_FALSE, fp);
4093 				BUMP_LOCAL(sctp->sctp_ibchunks);
4094 				break;
4095 			case CHUNK_HEARTBEAT:
4096 				if (!hb_already) {
4097 					sctp_return_heartbeat(sctp, ch, mp);
4098 					hb_already = B_TRUE;
4099 				}
4100 				break;
4101 			default:
4102 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4103 					goto nomorechunks;
4104 				} /* else skip and continue processing */
4105 				break;
4106 			}
4107 			break;
4108 
4109 		case SCTPS_SHUTDOWN_RECEIVED:
4110 			switch (ch->sch_id) {
4111 			case CHUNK_SHUTDOWN:
4112 				trysend = sctp_shutdown_received(sctp, ch,
4113 				    B_FALSE, B_FALSE, fp);
4114 				break;
4115 			case CHUNK_SACK:
4116 				trysend = sctp_got_sack(sctp, ch);
4117 				if (trysend < 0) {
4118 					sctp_send_abort(sctp, sctph->sh_verf,
4119 					    0, NULL, 0, mp, 0, B_FALSE, ira);
4120 					sctp_assoc_event(sctp,
4121 					    SCTP_COMM_LOST, 0, NULL);
4122 					sctp_clean_death(sctp,
4123 					    ECONNABORTED);
4124 					goto done;
4125 				}
4126 				break;
4127 			case CHUNK_ABORT:
4128 				sctp_process_abort(sctp, ch, ECONNRESET);
4129 				goto done;
4130 			case CHUNK_HEARTBEAT:
4131 				if (!hb_already) {
4132 					sctp_return_heartbeat(sctp, ch, mp);
4133 					hb_already = B_TRUE;
4134 				}
4135 				break;
4136 			default:
4137 				if (sctp_strange_chunk(sctp, ch, fp) == 0) {
4138 					goto nomorechunks;
4139 				} /* else skip and continue processing */
4140 				break;
4141 			}
4142 			break;
4143 
4144 		default:
4145 			/*
4146 			 * The only remaining states are SCTPS_IDLE and
4147 			 * SCTPS_BOUND, and we should not be getting here
4148 			 * for these.
4149 			 */
4150 			ASSERT(0);
4151 		} /* switch (sctp->sctp_state) */
4152 
4153 		ch = sctp_next_chunk(ch, &mlen);
4154 		if (ch != NULL && !sctp_check_input(sctp, ch, mlen, 0))
4155 			goto done;
4156 	} while (ch != NULL);
4157 
4158 	/* Finished processing all chunks in packet */
4159 
4160 nomorechunks:
4161 	/* SACK if necessary */
4162 	if (gotdata) {
4163 		boolean_t sack_sent;
4164 
4165 		(sctp->sctp_sack_toggle)++;
4166 		sack_sent = sctp_sack(sctp, dups);
4167 		dups = NULL;
4168 
4169 		/* If a SACK is sent, no need to restart the timer. */
4170 		if (!sack_sent && !sctp->sctp_ack_timer_running) {
4171 			sctp->sctp_ack_timer_running = B_TRUE;
4172 			sctp_timer(sctp, sctp->sctp_ack_mp,
4173 			    MSEC_TO_TICK(sctps->sctps_deferred_ack_interval));
4174 		}
4175 	}
4176 
4177 	if (trysend) {
4178 		sctp_output(sctp, UINT_MAX);
4179 		if (sctp->sctp_cxmit_list != NULL)
4180 			sctp_wput_asconf(sctp, NULL);
4181 	}
4182 	/* If there is unsent data, make sure a timer is running */
4183 	if (sctp->sctp_unsent > 0 && !sctp->sctp_current->timer_running) {
4184 		SCTP_FADDR_TIMER_RESTART(sctp, sctp->sctp_current,
4185 		    sctp->sctp_current->rto);
4186 	}
4187 
4188 done:
4189 	if (dups != NULL)
4190 		freeb(dups);
4191 	freemsg(mp);
4192 
4193 	if (sctp->sctp_err_chunks != NULL)
4194 		sctp_process_err(sctp);
4195 
4196 	if (wake_eager) {
4197 		/*
4198 		 * sctp points to newly created control block, need to
4199 		 * release it before exiting.
4200 		 */
4201 		WAKE_SCTP(sctp);
4202 	}
4203 }
4204 
4205 /*
4206  * Some amount of data got removed from rx q.
4207  * Check if we should send a window update.
4208  *
4209  * Due to way sctp_rwnd updates are made, ULP can give reports out-of-order.
4210  * To keep from dropping incoming data due to this, we only update
4211  * sctp_rwnd when if it's larger than what we've reported to peer earlier.
4212  */
4213 void
4214 sctp_recvd(sctp_t *sctp, int len)
4215 {
4216 	int32_t old, new;
4217 	sctp_stack_t	*sctps = sctp->sctp_sctps;
4218 
4219 	ASSERT(sctp != NULL);
4220 	RUN_SCTP(sctp);
4221 
4222 	if (len < sctp->sctp_rwnd) {
4223 		WAKE_SCTP(sctp);
4224 		return;
4225 	}
4226 	ASSERT(sctp->sctp_rwnd >= sctp->sctp_rxqueued);
4227 	old = sctp->sctp_rwnd - sctp->sctp_rxqueued;
4228 	new = len - sctp->sctp_rxqueued;
4229 	sctp->sctp_rwnd = len;
4230 
4231 	if (sctp->sctp_state >= SCTPS_ESTABLISHED &&
4232 	    ((old <= new >> 1) || (old < sctp->sctp_mss))) {
4233 		sctp->sctp_force_sack = 1;
4234 		BUMP_MIB(&sctps->sctps_mib, sctpOutWinUpdate);
4235 		(void) sctp_sack(sctp, NULL);
4236 	}
4237 	WAKE_SCTP(sctp);
4238 }
4239