1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_indata.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctp_bsd_addr.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_crc32.h>
50 #include <netinet/udp.h>
51 
52 
53 
54 static void
55 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
56 {
57 	struct sctp_nets *net;
58 
59 	/*
60 	 * This now not only stops all cookie timers, it also stops any
61 	 * INIT timers. This will make sure that the timers are stopped
62 	 * in all collision cases.
63 	 */
64 	SCTP_TCB_LOCK_ASSERT(stcb);
65 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
66 		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
67 			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
68 			    stcb->sctp_ep,
69 			    stcb,
70 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
71 		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
72 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
73 			    stcb->sctp_ep,
74 			    stcb,
75 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
76 		}
77 	}
78 }
79 
80 /* INIT handler */
81 static void
82 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
83     struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
84     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
85 {
86 	struct sctp_init *init;
87 	struct mbuf *op_err;
88 	uint32_t init_limit;
89 
90 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
91 	    stcb);
92 	if (stcb == NULL) {
93 		SCTP_INP_RLOCK(inp);
94 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
95 			goto outnow;
96 		}
97 	}
98 	op_err = NULL;
99 	init = &cp->init;
100 	/* First are we accepting? */
101 	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
102 		SCTPDBG(SCTP_DEBUG_INPUT2,
103 		    "sctp_handle_init: Abort, so_qlimit:%d\n",
104 		    inp->sctp_socket->so_qlimit);
105 		/*
106 		 * FIX ME ?? What about the TCP model when we have a
107 		 * match/restart case? Actually no fix is needed; the lookup
108 		 * will always find the existing assoc, so stcb would not be
109 		 * NULL. It may be questionable to do this since we COULD
110 		 * just send back the INIT-ACK and hope that the app did
111 		 * accept()'s by the time the COOKIE was sent. But there is
112 		 * a price to pay for COOKIE generation and I don't want to
113 		 * pay it on the chance that the app will actually do some
114 		 * accept()s. The app just loses and should NOT be in this
115 		 * state :-)
116 		 */
117 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
118 		    vrf_id, port);
119 		if (stcb)
120 			*abort_no_unlock = 1;
121 		goto outnow;
122 	}
123 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
124 		/* Invalid length */
125 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
126 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
127 		    vrf_id, port);
128 		if (stcb)
129 			*abort_no_unlock = 1;
130 		goto outnow;
131 	}
132 	/* validate parameters */
133 	if (init->initiate_tag == 0) {
134 		/* protocol error... send abort */
135 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
136 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
137 		    vrf_id, port);
138 		if (stcb)
139 			*abort_no_unlock = 1;
140 		goto outnow;
141 	}
142 	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
143 		/* invalid parameter... send abort */
144 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
145 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
146 		    vrf_id, port);
147 		if (stcb)
148 			*abort_no_unlock = 1;
149 		goto outnow;
150 	}
151 	if (init->num_inbound_streams == 0) {
152 		/* protocol error... send abort */
153 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
154 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
155 		    vrf_id, port);
156 		if (stcb)
157 			*abort_no_unlock = 1;
158 		goto outnow;
159 	}
160 	if (init->num_outbound_streams == 0) {
161 		/* protocol error... send abort */
162 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
163 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
164 		    vrf_id, port);
165 		if (stcb)
166 			*abort_no_unlock = 1;
167 		goto outnow;
168 	}
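	/*
	 * Validate any AUTH-related parameters carried in the INIT before
	 * we spend cycles building a cookie; the scan is bounded by the
	 * advertised chunk length.
	 */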
169 	init_limit = offset + ntohs(cp->ch.chunk_length);
170 	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
171 	    init_limit)) {
172 		/* auth parameter(s) error... send abort */
173 		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
174 		if (stcb)
175 			*abort_no_unlock = 1;
176 		goto outnow;
177 	}
178 	/* send an INIT-ACK w/cookie */
179 	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
180 	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
181 	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
182 outnow:
183 	if (stcb == NULL) {
184 		SCTP_INP_RUNLOCK(inp);
185 	}
186 }
187 
188 /*
189  * process peer "INIT/INIT-ACK" chunk; returns value < 0 on error
190  */
191 
192 int
193 sctp_is_there_unsent_data(struct sctp_tcb *stcb)
194 {
195 	int unsent_data = 0;
196 	struct sctp_stream_queue_pending *sp;
197 	struct sctp_stream_out *strq;
198 	struct sctp_association *asoc;
199 
200 	/*
201 	 * This function returns the number of streams that have true unsent
202 	 * data on them. Note that as it looks through them it will clean up
203 	 * any places that have old data that has been sent but left at the
204 	 * top of the stream queue.
205 	 */
206 	asoc = &stcb->asoc;
207 	SCTP_TCB_SEND_LOCK(stcb);
208 	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
209 		/* Check to see if some data queued */
210 		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
211 	is_there_another:
212 			/* sa_ignore FREED_MEMORY */
213 			sp = TAILQ_FIRST(&strq->outqueue);
214 			if (sp == NULL) {
215 				continue;
216 			}
217 			if ((sp->msg_is_complete) &&
218 			    (sp->length == 0) &&
219 			    (sp->sender_all_done)) {
220 				/*
221 				 * We are doing deferred cleanup. Last time
222 				 * through, when we took all the data,
223 				 * sender_all_done was not set.
224 				 */
225 				if (sp->put_last_out == 0) {
226 					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
227 					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
228 					    sp->sender_all_done,
229 					    sp->length,
230 					    sp->msg_is_complete,
231 					    sp->put_last_out);
232 				}
233 				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
234 				TAILQ_REMOVE(&strq->outqueue, sp, next);
235 				sctp_free_remote_addr(sp->net);
236 				if (sp->data) {
237 					sctp_m_freem(sp->data);
238 					sp->data = NULL;
239 				}
240 				sctp_free_a_strmoq(stcb, sp);
241 				goto is_there_another;
242 			} else {
243 				unsent_data++;
244 				continue;
245 			}
246 		}
247 	}
248 	SCTP_TCB_SEND_UNLOCK(stcb);
249 	return (unsent_data);
250 }
251 
252 static int
253 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
254     struct sctp_nets *net)
255 {
256 	struct sctp_init *init;
257 	struct sctp_association *asoc;
258 	struct sctp_nets *lnet;
259 	unsigned int i;
260 
261 	init = &cp->init;
262 	asoc = &stcb->asoc;
263 	/* save off parameters */
264 	asoc->peer_vtag = ntohl(init->initiate_tag);
265 	asoc->peers_rwnd = ntohl(init->a_rwnd);
266 	if (TAILQ_FIRST(&asoc->nets)) {
267 		/* update any ssthresh's that may have a default */
268 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
269 			lnet->ssthresh = asoc->peers_rwnd;
270 
271 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
272 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
273 			}
274 		}
275 	}
276 	SCTP_TCB_SEND_LOCK(stcb);
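	/*
	 * If the peer accepts fewer inbound streams than we pre-opened,
	 * drop any queued data destined for streams that no longer exist
	 * and shrink our outbound stream count to match.
	 */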
277 	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
278 		unsigned int newcnt;
279 		struct sctp_stream_out *outs;
280 		struct sctp_stream_queue_pending *sp;
281 		struct sctp_tmit_chunk *chk, *chk_next;
282 
283 		/* abandon the upper streams */
284 		newcnt = ntohs(init->num_inbound_streams);
285 		if (!TAILQ_EMPTY(&asoc->send_queue)) {
286 			chk = TAILQ_FIRST(&asoc->send_queue);
287 			while (chk) {
288 				chk_next = TAILQ_NEXT(chk, sctp_next);
289 				if (chk->rec.data.stream_number >= newcnt) {
290 					TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
291 					asoc->send_queue_cnt--;
292 					if (chk->data != NULL) {
293 						sctp_free_bufspace(stcb, asoc, chk, 1);
294 						sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
295 						    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
296 						if (chk->data) {
297 							sctp_m_freem(chk->data);
298 							chk->data = NULL;
299 						}
300 					}
301 					sctp_free_a_chunk(stcb, chk);
302 					/* sa_ignore FREED_MEMORY */
303 				}
304 				chk = chk_next;
305 			}
306 		}
307 		if (asoc->strmout) {
308 			for (i = newcnt; i < asoc->pre_open_streams; i++) {
309 				outs = &asoc->strmout[i];
310 				sp = TAILQ_FIRST(&outs->outqueue);
311 				while (sp) {
312 					TAILQ_REMOVE(&outs->outqueue, sp, next);
313 					asoc->stream_queue_cnt--;
314 					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
315 					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
316 					    sp, SCTP_SO_NOT_LOCKED);
317 					if (sp->data) {
318 						sctp_m_freem(sp->data);
319 						sp->data = NULL;
320 					}
321 					sctp_free_remote_addr(sp->net);
322 					sp->net = NULL;
323 					/* Free the chunk */
324 					sctp_free_a_strmoq(stcb, sp);
325 					/* sa_ignore FREED_MEMORY */
326 					sp = TAILQ_FIRST(&outs->outqueue);
327 				}
328 			}
329 		}
330 		/* cut back the count */
331 		asoc->pre_open_streams = newcnt;
332 	}
333 	SCTP_TCB_SEND_UNLOCK(stcb);
334 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
335 	/* init tsn's */
336 	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
337 	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
338 	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
339 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
340 		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
341 	}
342 	/* This is the next one we expect */
343 	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
344 
345 	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
346 	/*
347 	 * EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
348 	 * like above
349 	 */
350 	asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
351 	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
352 	asoc->last_echo_tsn = asoc->asconf_seq_in;
353 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
354 	/* open the requested streams */
355 
356 	if (asoc->strmin != NULL) {
357 		/* Free the old ones */
358 		struct sctp_queued_to_read *ctl;
359 
360 		for (i = 0; i < asoc->streamincnt; i++) {
361 			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
362 			while (ctl) {
363 				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
364 				sctp_free_remote_addr(ctl->whoFrom);
365 				ctl->whoFrom = NULL;
366 				sctp_m_freem(ctl->data);
367 				ctl->data = NULL;
368 				sctp_free_a_readq(stcb, ctl);
369 				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
370 			}
371 		}
372 		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
373 	}
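	/*
	 * The number of inbound streams we track mirrors the peer's
	 * advertised outbound stream count, clamped to MAX_SCTP_STREAMS.
	 */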
374 	asoc->streamincnt = ntohs(init->num_outbound_streams);
375 	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
376 		asoc->streamincnt = MAX_SCTP_STREAMS;
377 	}
378 	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
379 	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
380 	if (asoc->strmin == NULL) {
381 		/* we didn't get memory for the streams! */
382 		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
383 		return (-1);
384 	}
385 	for (i = 0; i < asoc->streamincnt; i++) {
386 		asoc->strmin[i].stream_no = i;
387 		asoc->strmin[i].last_sequence_delivered = 0xffff;
388 		/*
389 		 * U-stream ranges will be set when the cookie is unpacked,
390 		 * or, for the INIT sender, they are unset (if pr-sctp is not
391 		 * supported) when the INIT-ACK arrives.
392 		 */
393 		TAILQ_INIT(&asoc->strmin[i].inqueue);
394 		asoc->strmin[i].delivery_started = 0;
395 	}
396 	/*
397 	 * load_address_from_init will put the addresses into the
398 	 * association when the COOKIE is processed or the INIT-ACK is
399 	 * processed. Both types of COOKIEs, existing and new, call this
400 	 * routine. It will remove addresses that are no longer in the
401 	 * association (for the restarting case where addresses are
402 	 * removed). Up front when the INIT arrives we will discard it if it
403 	 * is a restart and new addresses have been added.
404 	 */
405 	/* sa_ignore MEMLEAK */
406 	return (0);
407 }
408 
409 /*
410  * INIT-ACK message processing/consumption; returns value < 0 on error
411  */
412 static int
413 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
414     struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
415     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
416 {
417 	struct sctp_association *asoc;
418 	struct mbuf *op_err;
419 	int retval, abort_flag;
420 	uint32_t initack_limit;
421 	int nat_friendly = 0;
422 
423 	/* First verify that we have no illegal params */
424 	abort_flag = 0;
425 	op_err = NULL;
426 
427 	op_err = sctp_arethere_unrecognized_parameters(m,
428 	    (offset + sizeof(struct sctp_init_chunk)),
429 	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
430 	if (abort_flag) {
431 		/* Send an abort and notify peer */
432 		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
433 		*abort_no_unlock = 1;
434 		return (-1);
435 	}
436 	asoc = &stcb->asoc;
437 	asoc->peer_supports_nat = (uint8_t) nat_friendly;
438 	/* process the peer's parameters in the INIT-ACK */
439 	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
440 	if (retval < 0) {
441 		return (retval);
442 	}
443 	initack_limit = offset + ntohs(cp->ch.chunk_length);
444 	/* load all addresses */
445 	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
446 	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
447 	    NULL))) {
448 		/* Huh, we should abort */
449 		SCTPDBG(SCTP_DEBUG_INPUT1,
450 		    "Load addresses from INIT causes an abort %d\n",
451 		    retval);
452 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
453 		    NULL, 0, net->port);
454 		*abort_no_unlock = 1;
455 		return (-1);
456 	}
457 	/* if the peer doesn't support asconf, flush the asconf queue */
458 	if (asoc->peer_supports_asconf == 0) {
459 		struct sctp_asconf_addr *aparam;
460 
461 		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
462 			/* sa_ignore FREED_MEMORY */
463 			aparam = TAILQ_FIRST(&asoc->asconf_queue);
464 			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
465 			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
466 		}
467 	}
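	/*
	 * Settle on the HMAC identifier to use with this peer, based on
	 * the peer's advertised list and our locally configured list.
	 */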
468 	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
469 	    stcb->asoc.local_hmacs);
470 	if (op_err) {
471 		sctp_queue_op_err(stcb, op_err);
472 		/* queuing will steal away the mbuf chain to the out queue */
473 		op_err = NULL;
474 	}
475 	/* extract the cookie and queue it to "echo" it back... */
476 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
477 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
478 		    stcb->asoc.overall_error_count,
479 		    0,
480 		    SCTP_FROM_SCTP_INPUT,
481 		    __LINE__);
482 	}
483 	stcb->asoc.overall_error_count = 0;
484 	net->error_count = 0;
485 
486 	/*
487 	 * Cancel the INIT timer. We do this first, before queueing the
488 	 * cookie. We always cancel at the primary to assure that we are
489 	 * canceling the timer started by the INIT, which always goes to the
490 	 * primary.
491 	 */
492 	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
493 	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
494 
495 	/* calculate the RTO */
496 	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
497 
498 	retval = sctp_send_cookie_echo(m, offset, stcb, net);
499 	if (retval < 0) {
500 		/*
501 		 * No cookie, we probably should send an op error. But in any
502 		 * case if there is no cookie in the INIT-ACK, we can
503 		 * abandon the peer, it's broken.
504 		 */
505 		if (retval == -3) {
506 			/* We abort with an error of missing mandatory param */
507 			op_err =
508 			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
509 			if (op_err) {
510 				/*
511 				 * Expand beyond to include the mandatory
512 				 * param cookie
513 				 */
514 				struct sctp_inv_mandatory_param *mp;
515 
516 				SCTP_BUF_LEN(op_err) =
517 				    sizeof(struct sctp_inv_mandatory_param);
518 				mp = mtod(op_err,
519 				    struct sctp_inv_mandatory_param *);
520 				/* Subtract the reserved param */
521 				mp->length =
522 				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
523 				mp->num_param = htonl(1);
524 				mp->param = htons(SCTP_STATE_COOKIE);
525 				mp->resv = 0;
526 			}
527 			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
528 			    sh, op_err, 0, net->port);
529 			*abort_no_unlock = 1;
530 		}
531 		return (retval);
532 	}
533 	return (0);
534 }
535 
536 static void
537 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
538     struct sctp_tcb *stcb, struct sctp_nets *net)
539 {
540 	struct sockaddr_storage store;
541 	struct sockaddr_in *sin;
542 	struct sockaddr_in6 *sin6;
543 	struct sctp_nets *r_net;
544 	struct timeval tv;
545 	int req_prim = 0;
546 
547 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
548 		/* Invalid length */
549 		return;
550 	}
551 	sin = (struct sockaddr_in *)&store;
552 	sin6 = (struct sockaddr_in6 *)&store;
553 
554 	memset(&store, 0, sizeof(store));
555 	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
556 	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
557 		sin->sin_family = cp->heartbeat.hb_info.addr_family;
558 		sin->sin_len = cp->heartbeat.hb_info.addr_len;
559 		sin->sin_port = stcb->rport;
560 		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
561 		    sizeof(sin->sin_addr));
562 	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
563 	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
564 		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
565 		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
566 		sin6->sin6_port = stcb->rport;
567 		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
568 		    sizeof(sin6->sin6_addr));
569 	} else {
570 		return;
571 	}
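	/*
	 * Look up the net entry for the address echoed back in the
	 * HEARTBEAT-ACK; if we never sent to that address, discard.
	 */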
572 	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
573 	if (r_net == NULL) {
574 		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
575 		return;
576 	}
577 	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
578 	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
579 	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
580 		/*
581 		 * If it's a HB and its random value is correct, we can
582 		 * confirm the destination.
583 		 */
584 		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
585 		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
586 			stcb->asoc.primary_destination = r_net;
587 			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
588 			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
589 			r_net = TAILQ_FIRST(&stcb->asoc.nets);
590 			if (r_net != stcb->asoc.primary_destination) {
591 				/*
592 				 * The first one on the list is NOT the
593 				 * primary. sctp_cmpaddr() is much more
594 				 * efficient if the primary is first on the
595 				 * list, so make it so.
596 				 */
597 				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
598 				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
599 			}
600 			req_prim = 1;
601 		}
602 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
603 		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
604 	}
605 	r_net->error_count = 0;
606 	r_net->hb_responded = 1;
607 	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
608 	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
609 	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
610 		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
611 		r_net->dest_state |= SCTP_ADDR_REACHABLE;
612 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
613 		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
614 		/* now was it the primary? if so restore */
615 		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
616 			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
617 		}
618 	}
619 	/*
620 	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
621 	 * set the destination to active state and set the cwnd to one or
622 	 * two MTUs based on whether PF1 or PF2 is being used. If a T3
623 	 * timer is running for the destination, stop the timer because a
624 	 * PF-heartbeat was received.
625 	 */
626 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
627 	    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
628 	    (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
629 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
630 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
631 			    stcb, net,
632 			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
633 		}
634 		net->dest_state &= ~SCTP_ADDR_PF;
635 		net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
636 		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
637 		    net, net->cwnd);
638 	}
639 	/* Now lets do a RTO with this */
640 	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
641 	/* Mobility adaptation */
642 	if (req_prim) {
643 		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
644 		    SCTP_MOBILITY_BASE) ||
645 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
646 		    SCTP_MOBILITY_FASTHANDOFF)) &&
647 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
648 		    SCTP_MOBILITY_PRIM_DELETED)) {
649 
650 			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
651 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
652 			    SCTP_MOBILITY_FASTHANDOFF)) {
653 				sctp_assoc_immediate_retrans(stcb,
654 				    stcb->asoc.primary_destination);
655 			}
656 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
657 			    SCTP_MOBILITY_BASE)) {
658 				sctp_move_chunks_from_deleted_prim(stcb,
659 				    stcb->asoc.primary_destination);
660 			}
661 			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
662 			    stcb->asoc.deleted_primary);
663 		}
664 	}
665 }
666 
667 static int
668 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
669 {
670 	/*
671 	 * A return of 0 means we want you to proceed with the abort;
672 	 * non-zero means no abort processing.
673 	 */
674 	struct sctpasochead *head;
675 
676 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
677 		/* generate a new vtag and send init */
678 		LIST_REMOVE(stcb, sctp_asocs);
679 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
680 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
681 		/*
682 		 * put it in the bucket in the vtag hash of assoc's for the
683 		 * system
684 		 */
685 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
686 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
687 		return (1);
688 	}
689 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
690 		/*
691 		 * treat like a case where the cookie expired, i.e.: dump the
692 		 * current cookie, generate a new vtag, and resend the init.
693 		 */
694 		/* generate a new vtag and send init */
695 		LIST_REMOVE(stcb, sctp_asocs);
696 		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
697 		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
698 		sctp_stop_all_cookie_timers(stcb);
699 		sctp_toss_old_cookies(stcb, &stcb->asoc);
700 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
701 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
702 		/*
703 		 * put it in the bucket in the vtag hash of assoc's for the
704 		 * system
705 		 */
706 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
707 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
708 		return (1);
709 	}
710 	return (0);
711 }
712 
713 static int
714 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
715     struct sctp_nets *net)
716 {
717 	/*
718 	 * A return of 0 means we want you to proceed with the abort;
719 	 * non-zero means no abort processing.
720 	 */
721 	if (stcb->asoc.peer_supports_auth == 0) {
722 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
723 		return (0);
724 	}
725 	sctp_asconf_send_nat_state_update(stcb, net);
726 	return (1);
727 }
728 
729 
730 static void
731 sctp_handle_abort(struct sctp_abort_chunk *cp,
732     struct sctp_tcb *stcb, struct sctp_nets *net)
733 {
734 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
735 	struct socket *so;
736 
737 #endif
738 	uint16_t len;
739 
740 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
741 	if (stcb == NULL)
742 		return;
743 
744 	len = ntohs(cp->ch.chunk_length);
745 	if (len > sizeof(struct sctp_chunkhdr)) {
746 		/*
747 		 * Need to check the cause codes for our two magic NAT
748 		 * aborts, which don't necessarily kill the assoc.
749 		 */
750 		struct sctp_abort_chunk *cpnext;
751 		struct sctp_missing_nat_state *natc;
752 		uint16_t cause;
753 
754 		cpnext = cp;
755 		cpnext++;
756 		natc = (struct sctp_missing_nat_state *)cpnext;
757 		cause = ntohs(natc->cause);
758 		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
759 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
760 			    cp->ch.chunk_flags);
761 			if (sctp_handle_nat_colliding_state(stcb)) {
762 				return;
763 			}
764 		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
765 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
766 			    cp->ch.chunk_flags);
767 			if (sctp_handle_nat_missing_state(stcb, net)) {
768 				return;
769 			}
770 		}
771 	}
772 	/* stop any receive timers */
773 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
774 	/* notify user of the abort and clean up... */
775 	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
776 	/* free the tcb */
777 #if defined(SCTP_PANIC_ON_ABORT)
778 	printf("stcb:%p state:%d rport:%d net:%p\n",
779 	    stcb, stcb->asoc.state, stcb->rport, net);
780 	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
781 		panic("Received an ABORT");
782 	} else {
783 		printf("No panic its in state %x closed\n", stcb->asoc.state);
784 	}
785 #endif
786 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
787 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
788 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
789 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
790 	}
791 #ifdef SCTP_ASOCLOG_OF_TSNS
792 	sctp_print_out_track_log(stcb);
793 #endif
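	/*
	 * Lock-order note: hold a reference so the TCB cannot go away
	 * while the TCB lock is briefly dropped to take the socket lock
	 * ahead of it, then re-take the TCB lock before freeing the
	 * association below.
	 */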
794 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
795 	so = SCTP_INP_SO(stcb->sctp_ep);
796 	atomic_add_int(&stcb->asoc.refcnt, 1);
797 	SCTP_TCB_UNLOCK(stcb);
798 	SCTP_SOCKET_LOCK(so, 1);
799 	SCTP_TCB_LOCK(stcb);
800 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
801 #endif
802 	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
803 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
804 	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
805 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
806 	SCTP_SOCKET_UNLOCK(so, 1);
807 #endif
808 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
809 }
810 
811 static void
812 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
813     struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
814 {
815 	struct sctp_association *asoc;
816 	int some_on_streamwheel;
817 
818 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
819 	struct socket *so;
820 
821 #endif
822 
823 	SCTPDBG(SCTP_DEBUG_INPUT2,
824 	    "sctp_handle_shutdown: handling SHUTDOWN\n");
825 	if (stcb == NULL)
826 		return;
827 	asoc = &stcb->asoc;
828 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
829 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
830 		return;
831 	}
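	/*
	 * A SHUTDOWN chunk carries a cumulative TSN ack; apply it first so
	 * the unsent/sent queue checks further down see an up-to-date
	 * picture.
	 */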
832 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
833 		/* Shutdown NOT the expected size */
834 		return;
835 	} else {
836 		sctp_update_acked(stcb, cp, net, abort_flag);
837 		if (*abort_flag) {
838 			return;
839 		}
840 	}
841 	if (asoc->control_pdapi) {
842 		/*
843 		 * With a normal shutdown we assume the end of the last record.
844 		 */
845 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
846 		asoc->control_pdapi->end_added = 1;
847 		asoc->control_pdapi->pdapi_aborted = 1;
848 		asoc->control_pdapi = NULL;
849 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
850 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
851 		so = SCTP_INP_SO(stcb->sctp_ep);
852 		atomic_add_int(&stcb->asoc.refcnt, 1);
853 		SCTP_TCB_UNLOCK(stcb);
854 		SCTP_SOCKET_LOCK(so, 1);
855 		SCTP_TCB_LOCK(stcb);
856 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
857 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
858 			/* assoc was freed while we were unlocked */
859 			SCTP_SOCKET_UNLOCK(so, 1);
860 			return;
861 		}
862 #endif
863 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
864 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
865 		SCTP_SOCKET_UNLOCK(so, 1);
866 #endif
867 	}
868 	/* goto SHUTDOWN_RECEIVED state to block new requests */
869 	if (stcb->sctp_socket) {
870 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
871 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
872 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
873 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
874 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
875 			/*
876 			 * notify upper layer that peer has initiated a
877 			 * shutdown
878 			 */
879 			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
880 
881 			/* reset time */
882 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
883 		}
884 	}
885 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
886 		/*
887 		 * stop the shutdown timer, since we WILL move to
888 		 * SHUTDOWN-ACK-SENT.
889 		 */
890 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
891 	}
892 	/* Now is there unsent data on a stream somewhere? */
893 	some_on_streamwheel = sctp_is_there_unsent_data(stcb);
894 
895 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
896 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
897 	    some_on_streamwheel) {
898 		/* By returning we will push more data out */
899 		return;
900 	} else {
901 		/* no outstanding data to send, so move on... */
902 		/* send SHUTDOWN-ACK */
903 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
904 		/* move to SHUTDOWN-ACK-SENT state */
905 		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
906 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
907 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
908 		}
909 		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
910 		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
911 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
912 		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
913 		/* start SHUTDOWN timer */
914 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
915 		    stcb, net);
916 	}
917 }
918 
919 static void
920 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
921     struct sctp_tcb *stcb,
922     struct sctp_nets *net)
923 {
924 	struct sctp_association *asoc;
925 
926 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
927 	struct socket *so;
928 
929 	so = SCTP_INP_SO(stcb->sctp_ep);
930 #endif
931 	SCTPDBG(SCTP_DEBUG_INPUT2,
932 	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
933 	if (stcb == NULL)
934 		return;
935 
936 	asoc = &stcb->asoc;
937 	/* process according to association state */
938 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
939 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
940 		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
941 		sctp_send_shutdown_complete(stcb, net, 1);
942 		SCTP_TCB_UNLOCK(stcb);
943 		return;
944 	}
945 	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
946 	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
947 		/* unexpected SHUTDOWN-ACK... so ignore... */
948 		SCTP_TCB_UNLOCK(stcb);
949 		return;
950 	}
951 	if (asoc->control_pdapi) {
952 		/*
953 		 * With a normal shutdown we assume the end of the last record.
954 		 */
955 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
956 		asoc->control_pdapi->end_added = 1;
957 		asoc->control_pdapi->pdapi_aborted = 1;
958 		asoc->control_pdapi = NULL;
959 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
960 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
961 		atomic_add_int(&stcb->asoc.refcnt, 1);
962 		SCTP_TCB_UNLOCK(stcb);
963 		SCTP_SOCKET_LOCK(so, 1);
964 		SCTP_TCB_LOCK(stcb);
965 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
966 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
967 			/* assoc was freed while we were unlocked */
968 			SCTP_SOCKET_UNLOCK(so, 1);
969 			return;
970 		}
971 #endif
972 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
973 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
974 		SCTP_SOCKET_UNLOCK(so, 1);
975 #endif
976 	}
977 	/* are the queues empty? */
978 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
979 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
980 	    !TAILQ_EMPTY(&asoc->out_wheel)) {
981 		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
982 	}
983 	/* stop the timer */
984 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
985 	/* send SHUTDOWN-COMPLETE */
986 	sctp_send_shutdown_complete(stcb, net, 0);
987 	/* notify upper layer protocol */
988 	if (stcb->sctp_socket) {
989 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
990 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
991 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
992 			/* Set the connected flag to disconnected */
993 			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
994 		}
995 	}
996 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
997 	/* free the TCB but first save off the ep */
998 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
999 	atomic_add_int(&stcb->asoc.refcnt, 1);
1000 	SCTP_TCB_UNLOCK(stcb);
1001 	SCTP_SOCKET_LOCK(so, 1);
1002 	SCTP_TCB_LOCK(stcb);
1003 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1004 #endif
1005 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1006 	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1007 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1008 	SCTP_SOCKET_UNLOCK(so, 1);
1009 #endif
1010 }
1011 
1012 /*
1013  * Skip past the param header and then we will find the chunk that caused the
1014  * problem. There are two possibilities, ASCONF or FWD-TSN; anything other
1015  * than that and our peer must be broken.
1016  */
1017 static void
1018 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1019     struct sctp_nets *net)
1020 {
1021 	struct sctp_chunkhdr *chk;
1022 
1023 	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1024 	switch (chk->chunk_type) {
1025 	case SCTP_ASCONF_ACK:
1026 	case SCTP_ASCONF:
1027 		sctp_asconf_cleanup(stcb, net);
1028 		break;
1029 	case SCTP_FORWARD_CUM_TSN:
1030 		stcb->asoc.peer_supports_prsctp = 0;
1031 		break;
1032 	default:
1033 		SCTPDBG(SCTP_DEBUG_INPUT2,
1034 		    "Peer does not support chunk type %d(%x)??\n",
1035 		    chk->chunk_type, (uint32_t) chk->chunk_type);
1036 		break;
1037 	}
1038 }
1039 
1040 /*
1041  * Skip past the param header and then we will find the param that caused the
1042  * problem. There are a number of params in an ASCONF or the pr-sctp param;
1043  * these will turn off specific features.
1044  */
1045 static void
1046 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1047 {
1048 	struct sctp_paramhdr *pbad;
1049 
1050 	pbad = phdr + 1;
1051 	switch (ntohs(pbad->param_type)) {
1052 		/* pr-sctp draft */
1053 	case SCTP_PRSCTP_SUPPORTED:
1054 		stcb->asoc.peer_supports_prsctp = 0;
1055 		break;
1056 	case SCTP_SUPPORTED_CHUNK_EXT:
1057 		break;
1058 		/* draft-ietf-tsvwg-addip-sctp */
1059 	case SCTP_HAS_NAT_SUPPORT:
1060 		stcb->asoc.peer_supports_nat = 0;
1061 		break;
1062 	case SCTP_ECN_NONCE_SUPPORTED:
1063 		stcb->asoc.peer_supports_ecn_nonce = 0;
1064 		stcb->asoc.ecn_nonce_allowed = 0;
1065 		stcb->asoc.ecn_allowed = 0;
1066 		break;
1067 	case SCTP_ADD_IP_ADDRESS:
1068 	case SCTP_DEL_IP_ADDRESS:
1069 	case SCTP_SET_PRIM_ADDR:
1070 		stcb->asoc.peer_supports_asconf = 0;
1071 		break;
1072 	case SCTP_SUCCESS_REPORT:
1073 	case SCTP_ERROR_CAUSE_IND:
1074 		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1075 		SCTPDBG(SCTP_DEBUG_INPUT2,
1076 		    "Turning off ASCONF to this strange peer\n");
1077 		stcb->asoc.peer_supports_asconf = 0;
1078 		break;
1079 	default:
1080 		SCTPDBG(SCTP_DEBUG_INPUT2,
1081 		    "Peer does not support param type %d(%x)??\n",
1082 		    pbad->param_type, (uint32_t) pbad->param_type);
1083 		break;
1084 	}
1085 }
1086 
1087 static int
1088 sctp_handle_error(struct sctp_chunkhdr *ch,
1089     struct sctp_tcb *stcb, struct sctp_nets *net)
1090 {
1091 	int chklen;
1092 	struct sctp_paramhdr *phdr;
1093 	uint16_t error_type;
1094 	uint16_t error_len;
1095 	struct sctp_association *asoc;
1096 	int adjust;
1097 
1098 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1099 	struct socket *so;
1100 
1101 #endif
1102 
1103 	/* parse through all of the errors and process */
1104 	asoc = &stcb->asoc;
1105 	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1106 	    sizeof(struct sctp_chunkhdr));
1107 	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
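	/*
	 * Walk the list of error causes; each cause is a TLV (param header
	 * plus value) and the cursor advances by the 32-bit padded length.
	 */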
1108 	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1109 		/* Process an Error Cause */
1110 		error_type = ntohs(phdr->param_type);
1111 		error_len = ntohs(phdr->param_length);
1112 		if ((error_len > chklen) || (error_len == 0)) {
1113 			/* invalid param length for this param */
1114 			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1115 			    chklen, error_len);
1116 			return (0);
1117 		}
1118 		switch (error_type) {
1119 		case SCTP_CAUSE_INVALID_STREAM:
1120 		case SCTP_CAUSE_MISSING_PARAM:
1121 		case SCTP_CAUSE_INVALID_PARAM:
1122 		case SCTP_CAUSE_NO_USER_DATA:
1123 			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1124 			    error_type);
1125 			break;
1126 		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1127 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1128 			    ch->chunk_flags);
1129 			if (sctp_handle_nat_colliding_state(stcb)) {
1130 				return (0);
1131 			}
1132 			break;
1133 		case SCTP_CAUSE_NAT_MISSING_STATE:
1134 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1135 			    ch->chunk_flags);
1136 			if (sctp_handle_nat_missing_state(stcb, net)) {
1137 				return (0);
1138 			}
1139 			break;
1140 		case SCTP_CAUSE_STALE_COOKIE:
1141 			/*
1142 			 * We only act if we have echoed a cookie and are
1143 			 * waiting.
1144 			 */
1145 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1146 				int *p;
1147 
1148 				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1149 				/* Save the time doubled */
1150 				asoc->cookie_preserve_req = ntohl(*p) << 1;
1151 				asoc->stale_cookie_count++;
1152 				if (asoc->stale_cookie_count >
1153 				    asoc->max_init_times) {
1154 					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1155 					/* now free the asoc */
1156 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1157 					so = SCTP_INP_SO(stcb->sctp_ep);
1158 					atomic_add_int(&stcb->asoc.refcnt, 1);
1159 					SCTP_TCB_UNLOCK(stcb);
1160 					SCTP_SOCKET_LOCK(so, 1);
1161 					SCTP_TCB_LOCK(stcb);
1162 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1163 #endif
1164 					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1165 					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1166 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1167 					SCTP_SOCKET_UNLOCK(so, 1);
1168 #endif
1169 					return (-1);
1170 				}
1171 				/* blast back to INIT state */
1172 				sctp_toss_old_cookies(stcb, &stcb->asoc);
1173 				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1174 				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1175 				sctp_stop_all_cookie_timers(stcb);
1176 				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1177 			}
1178 			break;
1179 		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1180 			/*
1181 			 * Nothing we can do here, we don't do hostname
1182 			 * addresses so if the peer does not like my IPv6
1183 			 * (or IPv4 for that matter) it does not matter. If
1184 			 * they don't support that type of address, they can
1185 			 * NOT possibly get that packet type... i.e. with no
1186 			 * IPv6 you can't receive an IPv6 packet. So we can
1187 			 * safely ignore this one. If we ever added support
1188 			 * for HOSTNAME Addresses, then we would need to do
1189 			 * something here.
1190 			 */
1191 			break;
1192 		case SCTP_CAUSE_UNRECOG_CHUNK:
1193 			sctp_process_unrecog_chunk(stcb, phdr, net);
1194 			break;
1195 		case SCTP_CAUSE_UNRECOG_PARAM:
1196 			sctp_process_unrecog_param(stcb, phdr);
1197 			break;
1198 		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1199 			/*
1200 			 * We ignore this since the timer will drive out a
1201 			 * new cookie anyway and their timer will drive us
1202 			 * to send a SHUTDOWN_COMPLETE. We can't send one
1203 			 * here since we don't have their tag.
1204 			 */
1205 			break;
1206 		case SCTP_CAUSE_DELETING_LAST_ADDR:
1207 		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1208 		case SCTP_CAUSE_DELETING_SRC_ADDR:
1209 			/*
1210 			 * We should NOT get these here, but in an
1211 			 * ASCONF-ACK.
1212 			 */
1213 			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1214 			    error_type);
1215 			break;
1216 		case SCTP_CAUSE_OUT_OF_RESC:
1217 			/*
1218 			 * And what, pray tell do we do with the fact that
1219 			 * the peer is out of resources? Not really sure we
1220 			 * could do anything but abort. I suspect this
1221 			 * should have come WITH an abort instead of in an
1222 			 * OP-ERROR.
1223 			 */
1224 			break;
1225 		default:
1226 			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1227 			    error_type);
1228 			break;
1229 		}
1230 		adjust = SCTP_SIZE32(error_len);
1231 		chklen -= adjust;
1232 		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1233 	}
1234 	return (0);
1235 }
1236 
1237 static int
1238 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1239     struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1240     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1241 {
1242 	struct sctp_init_ack *init_ack;
1243 	struct mbuf *op_err;
1244 
1245 	SCTPDBG(SCTP_DEBUG_INPUT2,
1246 	    "sctp_handle_init_ack: handling INIT-ACK\n");
1247 
1248 	if (stcb == NULL) {
1249 		SCTPDBG(SCTP_DEBUG_INPUT2,
1250 		    "sctp_handle_init_ack: TCB is null\n");
1251 		return (-1);
1252 	}
1253 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1254 		/* Invalid length */
1255 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1256 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1257 		    op_err, 0, net->port);
1258 		*abort_no_unlock = 1;
1259 		return (-1);
1260 	}
1261 	init_ack = &cp->init;
1262 	/* validate parameters */
1263 	if (init_ack->initiate_tag == 0) {
1264 		/* protocol error... send an abort */
1265 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1266 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1267 		    op_err, 0, net->port);
1268 		*abort_no_unlock = 1;
1269 		return (-1);
1270 	}
1271 	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1272 		/* protocol error... send an abort */
1273 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1274 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1275 		    op_err, 0, net->port);
1276 		*abort_no_unlock = 1;
1277 		return (-1);
1278 	}
1279 	if (init_ack->num_inbound_streams == 0) {
1280 		/* protocol error... send an abort */
1281 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1282 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1283 		    op_err, 0, net->port);
1284 		*abort_no_unlock = 1;
1285 		return (-1);
1286 	}
1287 	if (init_ack->num_outbound_streams == 0) {
1288 		/* protocol error... send an abort */
1289 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1290 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1291 		    op_err, 0, net->port);
1292 		*abort_no_unlock = 1;
1293 		return (-1);
1294 	}
1295 	/* process according to association state... */
1296 	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1297 	case SCTP_STATE_COOKIE_WAIT:
1298 		/* this is the expected state for this chunk */
1299 		/* process the INIT-ACK parameters */
1300 		if (stcb->asoc.primary_destination->dest_state &
1301 		    SCTP_ADDR_UNCONFIRMED) {
1302 			/*
1303 			 * The primary is where we sent the INIT, we can
1304 			 * always consider it confirmed when the INIT-ACK is
1305 			 * returned. Do this before we load addresses
1306 			 * though.
1307 			 */
1308 			stcb->asoc.primary_destination->dest_state &=
1309 			    ~SCTP_ADDR_UNCONFIRMED;
1310 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1311 			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1312 		}
1313 		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1314 		    net, abort_no_unlock, vrf_id) < 0) {
1315 			/* error in parsing parameters */
1316 			return (-1);
1317 		}
1318 		/* update our state */
1319 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1320 		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1321 
1322 		/* reset the RTO calc */
1323 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1324 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1325 			    stcb->asoc.overall_error_count,
1326 			    0,
1327 			    SCTP_FROM_SCTP_INPUT,
1328 			    __LINE__);
1329 		}
1330 		stcb->asoc.overall_error_count = 0;
1331 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1332 		/*
1333 		 * collapse the init timer back in case of an exponential
1334 		 * backoff
1335 		 */
1336 		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1337 		    stcb, net);
1338 		/*
1339 		 * the send at the end of the inbound data processing will
1340 		 * cause the cookie to be sent
1341 		 */
1342 		break;
1343 	case SCTP_STATE_SHUTDOWN_SENT:
1344 		/* incorrect state... discard */
1345 		break;
1346 	case SCTP_STATE_COOKIE_ECHOED:
1347 		/* incorrect state... discard */
1348 		break;
1349 	case SCTP_STATE_OPEN:
1350 		/* incorrect state... discard */
1351 		break;
1352 	case SCTP_STATE_EMPTY:
1353 	case SCTP_STATE_INUSE:
1354 	default:
1355 		/* incorrect state... discard */
1356 		return (-1);
1357 		break;
1358 	}
1359 	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1360 	return (0);
1361 }
1362 
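/*
 * Forward declaration: sctp_process_cookie_new() builds a brand new
 * association from a valid state cookie and is used by the cookie-echo
 * handling later in this file.
 */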
1363 static struct sctp_tcb *
1364 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1365     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1366     struct sctp_inpcb *inp, struct sctp_nets **netp,
1367     struct sockaddr *init_src, int *notification,
1368     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1369     uint32_t vrf_id, uint16_t port);
1370 
1371 
1372 /*
1373  * handle a state cookie for an existing association
1374  * m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
1375  *    chunk; note: this is a "split" mbuf and the cookie signature does not exist
1376  * offset: offset into mbuf to the cookie-echo chunk
1377  */
1378 static struct sctp_tcb *
1379 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1380     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1381     struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1382     struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1383     uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1384 {
1385 	struct sctp_association *asoc;
1386 	struct sctp_init_chunk *init_cp, init_buf;
1387 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1388 	struct sctp_nets *net;
1389 	struct mbuf *op_err;
1390 	struct sctp_paramhdr *ph;
1391 	int chk_length;
1392 	int init_offset, initack_offset, i;
1393 	int retval;
1394 	int spec_flag = 0;
1395 	uint32_t how_indx;
1396 
1397 	net = *netp;
1398 	/* I know that the TCB is non-NULL from the caller */
1399 	asoc = &stcb->asoc;
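	/*
	 * cookie_how[] keeps a short trace of which collision case handled
	 * each COOKIE-ECHO seen on this association, which helps when
	 * debugging restart scenarios.
	 */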
1400 	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1401 		if (asoc->cookie_how[how_indx] == 0)
1402 			break;
1403 	}
1404 	if (how_indx < sizeof(asoc->cookie_how)) {
1405 		asoc->cookie_how[how_indx] = 1;
1406 	}
1407 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1408 		/* SHUTDOWN came in after sending INIT-ACK */
1409 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1410 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1411 		    0, M_DONTWAIT, 1, MT_DATA);
1412 		if (op_err == NULL) {
1413 			/* FOOBAR */
1414 			return (NULL);
1415 		}
1416 		/* Set the len */
1417 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1418 		ph = mtod(op_err, struct sctp_paramhdr *);
1419 		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1420 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1421 		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1422 		    vrf_id, net->port);
1423 		if (how_indx < sizeof(asoc->cookie_how))
1424 			asoc->cookie_how[how_indx] = 2;
1425 		return (NULL);
1426 	}
1427 	/*
1428 	 * find and validate the INIT chunk in the cookie (peer's info) the
1429 	 * INIT should start after the cookie-echo header struct (chunk
1430 	 * header, state cookie header struct)
1431 	 */
1432 	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1433 
1434 	init_cp = (struct sctp_init_chunk *)
1435 	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1436 	    (uint8_t *) & init_buf);
1437 	if (init_cp == NULL) {
1438 		/* could not pull a INIT chunk in cookie */
1439 		return (NULL);
1440 	}
1441 	chk_length = ntohs(init_cp->ch.chunk_length);
1442 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1443 		return (NULL);
1444 	}
1445 	/*
1446 	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1447 	 * INIT-ACK follows the INIT chunk
1448 	 */
1449 	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1450 	initack_cp = (struct sctp_init_ack_chunk *)
1451 	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1452 	    (uint8_t *) & initack_buf);
1453 	if (initack_cp == NULL) {
1454 		/* could not pull INIT-ACK chunk in cookie */
1455 		return (NULL);
1456 	}
1457 	chk_length = ntohs(initack_cp->ch.chunk_length);
1458 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1459 		return (NULL);
1460 	}
1461 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1462 	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1463 		/*
1464 		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1465 		 * to get into the OPEN state
1466 		 */
1467 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1468 			/*-
1469 			 * Oops, this means that we somehow generated two vtags
1470 			 * the same. I.e. we did:
1471 			 *  Us               Peer
1472 			 *   <---INIT(tag=a)------
1473 			 *   ----INIT-ACK(tag=t)-->
1474 			 *   ----INIT(tag=t)------> *1
1475 			 *   <---INIT-ACK(tag=a)---
1476 			 *   <----CE(tag=t)------------- *2
1477 			 *
1478 			 * At point *1 we should be generating a different
1479 			 * tag t'. Which means we would throw away the CE and send
1480 			 * ours instead. Basically this is case C (throw away side).
1481 			 */
1482 			if (how_indx < sizeof(asoc->cookie_how))
1483 				asoc->cookie_how[how_indx] = 17;
1484 			return (NULL);
1485 
1486 		}
1487 		switch (SCTP_GET_STATE(asoc)) {
1489 		case SCTP_STATE_COOKIE_WAIT:
1490 		case SCTP_STATE_COOKIE_ECHOED:
1491 			/*
1492 			 * INIT was sent but got a COOKIE_ECHO with the
1493 			 * correct tags... just accept it...but we must
1494 			 * process the init so that we can make sure we have
1495 			 * the right seq no's.
1496 			 */
1497 			/* First we must process the INIT !! */
1498 			retval = sctp_process_init(init_cp, stcb, net);
1499 			if (retval < 0) {
1500 				if (how_indx < sizeof(asoc->cookie_how))
1501 					asoc->cookie_how[how_indx] = 3;
1502 				return (NULL);
1503 			}
1504 			/* we have already processed the INIT so no problem */
1505 			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1506 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1507 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1508 			/* update current state */
1509 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1510 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1511 			else
1512 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1513 
1514 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1515 			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1516 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1517 				    stcb->sctp_ep, stcb, asoc->primary_destination);
1518 			}
1519 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1520 			sctp_stop_all_cookie_timers(stcb);
1521 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1522 			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1523 			    (inp->sctp_socket->so_qlimit == 0)
1524 			    ) {
1525 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1526 				struct socket *so;
1527 
1528 #endif
1529 				/*
1530 				 * Here is where collision would go if we
1531 				 * did a connect() and instead got an
1532 				 * init/init-ack/cookie done before the
1533 				 * init-ack came back..
1534 				 */
1535 				stcb->sctp_ep->sctp_flags |=
1536 				    SCTP_PCB_FLAGS_CONNECTED;
1537 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1538 				so = SCTP_INP_SO(stcb->sctp_ep);
1539 				atomic_add_int(&stcb->asoc.refcnt, 1);
1540 				SCTP_TCB_UNLOCK(stcb);
1541 				SCTP_SOCKET_LOCK(so, 1);
1542 				SCTP_TCB_LOCK(stcb);
1543 				atomic_add_int(&stcb->asoc.refcnt, -1);
1544 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1545 					SCTP_SOCKET_UNLOCK(so, 1);
1546 					return (NULL);
1547 				}
1548 #endif
1549 				soisconnected(stcb->sctp_socket);
1550 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1551 				SCTP_SOCKET_UNLOCK(so, 1);
1552 #endif
1553 			}
1554 			/* notify upper layer */
1555 			*notification = SCTP_NOTIFY_ASSOC_UP;
1556 			/*
1557 			 * since we did not send a HB make sure we don't
1558 			 * double things
1559 			 */
1560 			net->hb_responded = 1;
1561 			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1562 			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1563 
1564 			if (stcb->asoc.sctp_autoclose_ticks &&
1565 			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1566 				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1567 				    inp, stcb, NULL);
1568 			}
1569 			break;
1570 		default:
1571 			/*
1572 			 * we're in the OPEN state (or beyond), so peer must
1573 			 * have simply lost the COOKIE-ACK
1574 			 */
1575 			break;
1576 		}	/* end switch */
1577 		sctp_stop_all_cookie_timers(stcb);
1578 		/*
1579 		 * If loading the addresses fails we bail out.. not sure if we
1580 		 * should somehow abort.. but we do have an existing asoc, so
1581 		 * this really should not fail.
1582 		 */
1583 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1584 		    init_offset + sizeof(struct sctp_init_chunk),
1585 		    initack_offset, sh, init_src)) {
1586 			if (how_indx < sizeof(asoc->cookie_how))
1587 				asoc->cookie_how[how_indx] = 4;
1588 			return (NULL);
1589 		}
1590 		/* respond with a COOKIE-ACK */
1591 		sctp_toss_old_cookies(stcb, asoc);
1592 		sctp_send_cookie_ack(stcb);
1593 		if (how_indx < sizeof(asoc->cookie_how))
1594 			asoc->cookie_how[how_indx] = 5;
1595 		return (stcb);
1596 	}
1597 	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1598 	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1599 	    cookie->tie_tag_my_vtag == 0 &&
1600 	    cookie->tie_tag_peer_vtag == 0) {
1601 		/*
1602 		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1603 		 */
1604 		if (how_indx < sizeof(asoc->cookie_how))
1605 			asoc->cookie_how[how_indx] = 6;
1606 		return (NULL);
1607 	}
1608 	/*
1609 	 * If the peer supports NAT, the tags below collide, and the stcb is
1610 	 * established, send back an ABORT(colliding state).
1611 	 */
1612 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1613 	    (asoc->peer_supports_nat) &&
1614 	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1615 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1616 	    (asoc->peer_vtag == 0)))) {
1617 		/*
1618 		 * Special case - the peer supports NAT. We may have handed out
1619 		 * the same tag on two INITs since one was not established..
1620 		 * i.e. we get an INIT from host-1 behind the NAT and we
1621 		 * respond with tag-a, then we get an INIT from host-2 behind
1622 		 * the NAT and we hand out tag-a again. Then we bring up
1623 		 * host-1's (or 2's) assoc, and then comes the cookie from host-2 (or 1).
1624 		 * Now we have colliding state. We must send an abort here
1625 		 * with colliding state indication.
1626 		 */
1627 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1628 		    0, M_DONTWAIT, 1, MT_DATA);
1629 		if (op_err == NULL) {
1630 			/* FOOBAR */
1631 			return (NULL);
1632 		}
1633 		/* pre-reserve some space */
1634 #ifdef INET6
1635 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1636 #else
1637 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1638 #endif
1639 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1640 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1641 		/* Set the len */
1642 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1643 		ph = mtod(op_err, struct sctp_paramhdr *);
1644 		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1645 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1646 		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1647 		return (NULL);
1648 	}
1649 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1650 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1651 	    (asoc->peer_vtag == 0))) {
1652 		/*
1653 		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1654 		 * should be ok, re-accept peer info
1655 		 */
1656 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1657 			/*
1658 			 * Extension of case C. If we hit this, then the
1659 			 * random number generator returned the same vtag
1660 			 * when we first sent our INIT-ACK and when we later
1661 			 * sent our INIT. The side with the seq numbers that
1662 			 * are different will be the one that normally
1663 			 * would have hit case C. This in effect "extends"
1664 			 * our vtags in this collision case to be 64 bits.
1665 			 * The same collision could still occur, i.e. you get
1666 			 * both vtag and seq number the same twice in a row,
1667 			 * but it is much less likely. If it did happen then we
1668 			 * would proceed through and bring up the assoc.. we
1669 			 * may end up with the wrong stream setup however..
1670 			 * which would be bad.. but there is no way to
1671 			 * tell.. until we send on a stream that does not
1672 			 * exist :-)
1673 			 */
1674 			if (how_indx < sizeof(asoc->cookie_how))
1675 				asoc->cookie_how[how_indx] = 7;
1676 
1677 			return (NULL);
1678 		}
1679 		if (how_indx < sizeof(asoc->cookie_how))
1680 			asoc->cookie_how[how_indx] = 8;
1681 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1682 		sctp_stop_all_cookie_timers(stcb);
1683 		/*
1684 		 * since we did not send a HB make sure we don't double
1685 		 * things
1686 		 */
1687 		net->hb_responded = 1;
1688 		if (stcb->asoc.sctp_autoclose_ticks &&
1689 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1690 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1691 			    NULL);
1692 		}
1693 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1694 		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1695 
1696 		/* Note last_cwr_tsn? where is this used? */
1697 		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1698 		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1699 			/*
1700 			 * Ok, the peer probably discarded our data (if we
1701 			 * echoed a cookie+data), so anything on the
1702 			 * sent_queue should be marked for retransmit. We
1703 			 * may not get anything to kick us, so it COULD
1704 			 * still take a timeout to move these.. but it can't
1705 			 * hurt to mark them.
1706 			 */
1707 			struct sctp_tmit_chunk *chk;
1708 
1709 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1710 				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1711 					chk->sent = SCTP_DATAGRAM_RESEND;
1712 					sctp_flight_size_decrease(chk);
1713 					sctp_total_flight_decrease(stcb, chk);
1714 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1715 					spec_flag++;
1716 				}
1717 			}
1718 
1719 		}
1720 		/* process the INIT info (peer's info) */
1721 		retval = sctp_process_init(init_cp, stcb, net);
1722 		if (retval < 0) {
1723 			if (how_indx < sizeof(asoc->cookie_how))
1724 				asoc->cookie_how[how_indx] = 9;
1725 			return (NULL);
1726 		}
1727 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1728 		    init_offset + sizeof(struct sctp_init_chunk),
1729 		    initack_offset, sh, init_src)) {
1730 			if (how_indx < sizeof(asoc->cookie_how))
1731 				asoc->cookie_how[how_indx] = 10;
1732 			return (NULL);
1733 		}
1734 		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1735 		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1736 			*notification = SCTP_NOTIFY_ASSOC_UP;
1737 
1738 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1739 			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1740 			    (inp->sctp_socket->so_qlimit == 0)) {
1741 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1742 				struct socket *so;
1743 
1744 #endif
1745 				stcb->sctp_ep->sctp_flags |=
1746 				    SCTP_PCB_FLAGS_CONNECTED;
1747 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1748 				so = SCTP_INP_SO(stcb->sctp_ep);
1749 				atomic_add_int(&stcb->asoc.refcnt, 1);
1750 				SCTP_TCB_UNLOCK(stcb);
1751 				SCTP_SOCKET_LOCK(so, 1);
1752 				SCTP_TCB_LOCK(stcb);
1753 				atomic_add_int(&stcb->asoc.refcnt, -1);
1754 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1755 					SCTP_SOCKET_UNLOCK(so, 1);
1756 					return (NULL);
1757 				}
1758 #endif
1759 				soisconnected(stcb->sctp_socket);
1760 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1761 				SCTP_SOCKET_UNLOCK(so, 1);
1762 #endif
1763 			}
1764 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1765 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1766 			else
1767 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1768 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1769 		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1770 			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1771 		} else {
1772 			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1773 		}
1774 		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1775 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1776 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1777 			    stcb->sctp_ep, stcb, asoc->primary_destination);
1778 		}
1779 		sctp_stop_all_cookie_timers(stcb);
1780 		sctp_toss_old_cookies(stcb, asoc);
1781 		sctp_send_cookie_ack(stcb);
1782 		if (spec_flag) {
1783 			/*
1784 			 * We only do this if we have retransmits marked.
1785 			 * What this call does is get only the COOKIE-ACK
1786 			 * out; then, when we return, the normal call to
1787 			 * sctp_chunk_output will get the retransmissions
1788 			 * out behind it.
1789 			 */
1790 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1791 		}
1792 		if (how_indx < sizeof(asoc->cookie_how))
1793 			asoc->cookie_how[how_indx] = 11;
1794 
1795 		return (stcb);
1796 	}
1797 	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1798 	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1799 	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1800 	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1801 	    cookie->tie_tag_peer_vtag != 0) {
1802 		struct sctpasochead *head;
1803 
1804 		if (asoc->peer_supports_nat) {
1805 			/*
1806 			 * This is a gross gross hack. just call the
1807 			 * cookie_new code since we are allowing a duplicate
1808 			 * association. I hope this works...
1809 			 */
1810 			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1811 			    inp, netp, init_src, notification,
1812 			    auth_skipped, auth_offset, auth_len,
1813 			    vrf_id, port));
1814 		}
1815 		/*
1816 		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1817 		 */
1818 		/* temp code */
1819 		if (how_indx < sizeof(asoc->cookie_how))
1820 			asoc->cookie_how[how_indx] = 12;
1821 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1822 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1823 
1824 		*sac_assoc_id = sctp_get_associd(stcb);
1825 		/* notify upper layer */
1826 		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1827 		atomic_add_int(&stcb->asoc.refcnt, 1);
1828 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1829 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1830 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1831 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1832 		}
1833 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1834 			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1835 		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1836 			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1837 		}
1838 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1839 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1840 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1841 			    stcb->sctp_ep, stcb, asoc->primary_destination);
1842 
1843 		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1844 			/* move to OPEN state, if not in SHUTDOWN_SENT */
1845 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1846 		}
1847 		asoc->pre_open_streams =
1848 		    ntohs(initack_cp->init.num_outbound_streams);
1849 		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1850 		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1851 		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1852 
1853 		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1854 		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1855 
1856 		asoc->str_reset_seq_in = asoc->init_seq_number;
1857 
1858 		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1859 		if (asoc->mapping_array) {
1860 			memset(asoc->mapping_array, 0,
1861 			    asoc->mapping_array_size);
1862 		}
1863 		/* EY 05/13/08 - nr_sack version of the above if statement */
1864 		if (asoc->nr_mapping_array && SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)
1865 		    && asoc->peer_supports_nr_sack) {
1866 			memset(asoc->nr_mapping_array, 0,
1867 			    asoc->nr_mapping_array_size);
1868 		}
1869 		SCTP_TCB_UNLOCK(stcb);
1870 		SCTP_INP_INFO_WLOCK();
1871 		SCTP_INP_WLOCK(stcb->sctp_ep);
1872 		SCTP_TCB_LOCK(stcb);
1873 		atomic_add_int(&stcb->asoc.refcnt, -1);
1874 		/* send up all the data */
1875 		SCTP_TCB_SEND_LOCK(stcb);
1876 
1877 		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1878 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1879 			stcb->asoc.strmout[i].stream_no = i;
1880 			stcb->asoc.strmout[i].next_sequence_sent = 0;
1881 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1882 		}
1883 		/* process the INIT-ACK info (my info) */
1884 		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1885 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1886 
1887 		/* pull from vtag hash */
1888 		LIST_REMOVE(stcb, sctp_asocs);
1889 		/* re-insert to new vtag position */
1890 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1891 		    SCTP_BASE_INFO(hashasocmark))];
1892 		/*
1893 		 * put it in the bucket in the vtag hash of assoc's for the
1894 		 * system
1895 		 */
1896 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1897 
1898 		/* process the INIT info (peer's info) */
1899 		SCTP_TCB_SEND_UNLOCK(stcb);
1900 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1901 		SCTP_INP_INFO_WUNLOCK();
1902 
1903 		retval = sctp_process_init(init_cp, stcb, net);
1904 		if (retval < 0) {
1905 			if (how_indx < sizeof(asoc->cookie_how))
1906 				asoc->cookie_how[how_indx] = 13;
1907 
1908 			return (NULL);
1909 		}
1910 		/*
1911 		 * since we did not send a HB make sure we don't double
1912 		 * things
1913 		 */
1914 		net->hb_responded = 1;
1915 
1916 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1917 		    init_offset + sizeof(struct sctp_init_chunk),
1918 		    initack_offset, sh, init_src)) {
1919 			if (how_indx < sizeof(asoc->cookie_how))
1920 				asoc->cookie_how[how_indx] = 14;
1921 
1922 			return (NULL);
1923 		}
1924 		/* respond with a COOKIE-ACK */
1925 		sctp_stop_all_cookie_timers(stcb);
1926 		sctp_toss_old_cookies(stcb, asoc);
1927 		sctp_send_cookie_ack(stcb);
1928 		if (how_indx < sizeof(asoc->cookie_how))
1929 			asoc->cookie_how[how_indx] = 15;
1930 
1931 		return (stcb);
1932 	}
1933 	if (how_indx < sizeof(asoc->cookie_how))
1934 		asoc->cookie_how[how_indx] = 16;
1935 	/* all other cases... */
1936 	return (NULL);
1937 }
1938 
1939 
1940 /*
1941  * handle a state cookie for a new association
1942  * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
1943  *    (note: this is a "split" mbuf and the cookie signature does not exist)
1944  * offset: offset into mbuf to the cookie-echo chunk
1945  * length: length of the cookie chunk;  to: where the init was from;  returns a new TCB
1946  */
1947 struct sctp_tcb *
1948 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1949     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1950     struct sctp_inpcb *inp, struct sctp_nets **netp,
1951     struct sockaddr *init_src, int *notification,
1952     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1953     uint32_t vrf_id, uint16_t port)
1954 {
1955 	struct sctp_tcb *stcb;
1956 	struct sctp_init_chunk *init_cp, init_buf;
1957 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1958 	struct sockaddr_storage sa_store;
1959 	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1960 	struct sockaddr_in *sin;
1961 	struct sockaddr_in6 *sin6;
1962 	struct sctp_association *asoc;
1963 	int chk_length;
1964 	int init_offset, initack_offset, initack_limit;
1965 	int retval;
1966 	int error = 0;
1967 	uint32_t old_tag;
1968 	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1969 
1970 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1971 	struct socket *so;
1972 
1973 	so = SCTP_INP_SO(inp);
1974 #endif
1975 
1976 	/*
1977 	 * find and validate the INIT chunk in the cookie (peer's info) the
1978 	 * INIT should start after the cookie-echo header struct (chunk
1979 	 * header, state cookie header struct)
1980 	 */
1981 	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1982 	init_cp = (struct sctp_init_chunk *)
1983 	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1984 	    (uint8_t *) & init_buf);
1985 	if (init_cp == NULL) {
1986 		/* could not pull an INIT chunk in cookie */
1987 		SCTPDBG(SCTP_DEBUG_INPUT1,
1988 		    "process_cookie_new: could not pull INIT chunk hdr\n");
1989 		return (NULL);
1990 	}
1991 	chk_length = ntohs(init_cp->ch.chunk_length);
1992 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1993 		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1994 		return (NULL);
1995 	}
1996 	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1997 	/*
1998 	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1999 	 * INIT-ACK follows the INIT chunk
2000 	 */
2001 	initack_cp = (struct sctp_init_ack_chunk *)
2002 	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
2003 	    (uint8_t *) & initack_buf);
2004 	if (initack_cp == NULL) {
2005 		/* could not pull INIT-ACK chunk in cookie */
2006 		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
2007 		return (NULL);
2008 	}
2009 	chk_length = ntohs(initack_cp->ch.chunk_length);
2010 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2011 		return (NULL);
2012 	}
2013 	/*
2014 	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2015 	 * "initack_limit" value.  This is because the chk_length field
2016 	 * includes the length of the cookie, but the cookie is omitted when
2017 	 * the INIT and INIT_ACK are tacked onto the cookie...
2018 	 */
2019 	initack_limit = offset + cookie_len;
2020 
2021 	/*
2022 	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2023 	 * and populate it
2024 	 */
2025 
2026 	/*
2027 	 * Here we do a trick, we set in NULL for the proc/thread argument.
2028 	 * We do this since in effect we only use the p argument when the
2029 	 * socket is unbound and we must do an implicit bind. Since we are
2030 	 * getting a cookie, we cannot be unbound.
2031 	 */
2032 	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
2033 	    ntohl(initack_cp->init.initiate_tag), vrf_id,
2034 	    (struct thread *)NULL
2035 	    );
2036 	if (stcb == NULL) {
2037 		struct mbuf *op_err;
2038 
2039 		/* memory problem? */
2040 		SCTPDBG(SCTP_DEBUG_INPUT1,
2041 		    "process_cookie_new: no room for another TCB!\n");
2042 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2043 
2044 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2045 		    sh, op_err, vrf_id, port);
2046 		return (NULL);
2047 	}
2048 	/* get the correct sctp_nets */
2049 	if (netp)
2050 		*netp = sctp_findnet(stcb, init_src);
2051 
2052 	asoc = &stcb->asoc;
2053 	/* get scope variables out of cookie */
2054 	asoc->ipv4_local_scope = cookie->ipv4_scope;
2055 	asoc->site_scope = cookie->site_scope;
2056 	asoc->local_scope = cookie->local_scope;
2057 	asoc->loopback_scope = cookie->loopback_scope;
2058 
2059 	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2060 	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2061 		struct mbuf *op_err;
2062 
2063 		/*
2064 		 * Houston we have a problem. The EP changed while the
2065 		 * cookie was in flight. Only recourse is to abort the
2066 		 * association.
2067 		 */
2068 		atomic_add_int(&stcb->asoc.refcnt, 1);
2069 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2070 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2071 		    sh, op_err, vrf_id, port);
2072 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2073 		SCTP_TCB_UNLOCK(stcb);
2074 		SCTP_SOCKET_LOCK(so, 1);
2075 		SCTP_TCB_LOCK(stcb);
2076 #endif
2077 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2078 		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2079 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2080 		SCTP_SOCKET_UNLOCK(so, 1);
2081 #endif
2082 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2083 		return (NULL);
2084 	}
2085 	/* process the INIT-ACK info (my info) */
2086 	old_tag = asoc->my_vtag;
2087 	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2088 	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2089 	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2090 	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2091 	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2092 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2093 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
2094 	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2095 	asoc->str_reset_seq_in = asoc->init_seq_number;
2096 
2097 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2098 
2099 	/* process the INIT info (peer's info) */
2100 	if (netp)
2101 		retval = sctp_process_init(init_cp, stcb, *netp);
2102 	else
2103 		retval = 0;
2104 	if (retval < 0) {
2105 		atomic_add_int(&stcb->asoc.refcnt, 1);
2106 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2107 		SCTP_TCB_UNLOCK(stcb);
2108 		SCTP_SOCKET_LOCK(so, 1);
2109 		SCTP_TCB_LOCK(stcb);
2110 #endif
2111 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2112 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2113 		SCTP_SOCKET_UNLOCK(so, 1);
2114 #endif
2115 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2116 		return (NULL);
2117 	}
2118 	/* load all addresses */
2119 	if (sctp_load_addresses_from_init(stcb, m, iphlen,
2120 	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
2121 	    init_src)) {
2122 		atomic_add_int(&stcb->asoc.refcnt, 1);
2123 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2124 		SCTP_TCB_UNLOCK(stcb);
2125 		SCTP_SOCKET_LOCK(so, 1);
2126 		SCTP_TCB_LOCK(stcb);
2127 #endif
2128 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2129 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2130 		SCTP_SOCKET_UNLOCK(so, 1);
2131 #endif
2132 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2133 		return (NULL);
2134 	}
2135 	/*
2136 	 * verify any preceding AUTH chunk that was skipped
2137 	 */
2138 	/* pull the local authentication parameters from the cookie/init-ack */
2139 	sctp_auth_get_cookie_params(stcb, m,
2140 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2141 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2142 	if (auth_skipped) {
2143 		struct sctp_auth_chunk *auth;
2144 
2145 		auth = (struct sctp_auth_chunk *)
2146 		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2147 		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2148 			/* auth HMAC failed, dump the assoc and packet */
2149 			SCTPDBG(SCTP_DEBUG_AUTH1,
2150 			    "COOKIE-ECHO: AUTH failed\n");
2151 			atomic_add_int(&stcb->asoc.refcnt, 1);
2152 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2153 			SCTP_TCB_UNLOCK(stcb);
2154 			SCTP_SOCKET_LOCK(so, 1);
2155 			SCTP_TCB_LOCK(stcb);
2156 #endif
2157 			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2158 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2159 			SCTP_SOCKET_UNLOCK(so, 1);
2160 #endif
2161 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2162 			return (NULL);
2163 		} else {
2164 			/* remaining chunks checked... good to go */
2165 			stcb->asoc.authenticated = 1;
2166 		}
2167 	}
2168 	/* update current state */
2169 	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2170 	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2171 	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2172 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2173 		    stcb->sctp_ep, stcb, asoc->primary_destination);
2174 	}
2175 	sctp_stop_all_cookie_timers(stcb);
2176 	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2177 	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2178 
2179 	/*
2180 	 * if we're doing ASCONFs, check to see if we have any new local
2181 	 * addresses that need to get added to the peer (e.g. addresses
2182 	 * changed while the cookie echo was in flight).  This needs to be
2183 	 * done after we go to the OPEN state to do the correct ASCONF
2184 	 * processing. Otherwise, just make sure we have the correct
2185 	 * addresses in our lists.
2186 	 */
2187 
2188 	/* warning, we re-use sin, sin6, sa_store here! */
2189 	/* pull in local_address (our "from" address) */
2190 	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
2191 		/* source addr is IPv4 */
2192 		sin = (struct sockaddr_in *)initack_src;
2193 		memset(sin, 0, sizeof(*sin));
2194 		sin->sin_family = AF_INET;
2195 		sin->sin_len = sizeof(struct sockaddr_in);
2196 		sin->sin_addr.s_addr = cookie->laddress[0];
2197 	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
2198 		/* source addr is IPv6 */
2199 		sin6 = (struct sockaddr_in6 *)initack_src;
2200 		memset(sin6, 0, sizeof(*sin6));
2201 		sin6->sin6_family = AF_INET6;
2202 		sin6->sin6_len = sizeof(struct sockaddr_in6);
2203 		sin6->sin6_scope_id = cookie->scope_id;
2204 		memcpy(&sin6->sin6_addr, cookie->laddress,
2205 		    sizeof(sin6->sin6_addr));
2206 	} else {
2207 		atomic_add_int(&stcb->asoc.refcnt, 1);
2208 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2209 		SCTP_TCB_UNLOCK(stcb);
2210 		SCTP_SOCKET_LOCK(so, 1);
2211 		SCTP_TCB_LOCK(stcb);
2212 #endif
2213 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2214 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2215 		SCTP_SOCKET_UNLOCK(so, 1);
2216 #endif
2217 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2218 		return (NULL);
2219 	}
2220 
2221 	/* set up to notify upper layer */
2222 	*notification = SCTP_NOTIFY_ASSOC_UP;
2223 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2224 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2225 	    (inp->sctp_socket->so_qlimit == 0)) {
2226 		/*
2227 		 * This is an endpoint that called connect(); how it got a
2228 		 * cookie that is NEW is a bit of a mystery. It must be that
2229 		 * the INIT was sent, but before it got there.. a complete
2230 		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2231 		 * should have gone to the other code.. not here.. oh well..
2232 		 * a bit of protection is worth having..
2233 		 */
2234 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2235 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2236 		atomic_add_int(&stcb->asoc.refcnt, 1);
2237 		SCTP_TCB_UNLOCK(stcb);
2238 		SCTP_SOCKET_LOCK(so, 1);
2239 		SCTP_TCB_LOCK(stcb);
2240 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2241 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2242 			SCTP_SOCKET_UNLOCK(so, 1);
2243 			return (NULL);
2244 		}
2245 #endif
2246 		soisconnected(stcb->sctp_socket);
2247 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2248 		SCTP_SOCKET_UNLOCK(so, 1);
2249 #endif
2250 	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2251 	    (inp->sctp_socket->so_qlimit)) {
2252 		/*
2253 		 * We don't want to do anything with this one, since it is
2254 		 * the listening endpoint. The timer will get started for
2255 		 * accepted connections in the caller.
2256 		 */
2257 		;
2258 	}
2259 	/* since we did not send a HB make sure we don't double things */
2260 	if ((netp) && (*netp))
2261 		(*netp)->hb_responded = 1;
2262 
2263 	if (stcb->asoc.sctp_autoclose_ticks &&
2264 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2265 		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2266 	}
2267 	/* calculate the RTT */
2268 	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2269 	if ((netp) && (*netp)) {
2270 		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2271 		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2272 	}
2273 	/* respond with a COOKIE-ACK */
2274 	sctp_send_cookie_ack(stcb);
2275 
2276 	/*
2277 	 * check the address lists for any ASCONFs that need to be sent
2278 	 * AFTER the cookie-ack is sent
2279 	 */
2280 	sctp_check_address_list(stcb, m,
2281 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2282 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2283 	    initack_src, cookie->local_scope, cookie->site_scope,
2284 	    cookie->ipv4_scope, cookie->loopback_scope);
2285 
2286 
2287 	return (stcb);
2288 }
2289 
2290 /*
2291  * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
2292  * we NEED to make sure we are not already using the vtag. If we are, we
2293  * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle box bit!):
2294 	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2295 							    SCTP_BASE_INFO(hashasocmark))];
2296 	LIST_FOREACH(stcb, head, sctp_asocs) {
2297 	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2298 		       -- SEND ABORT - TRY AGAIN --
2299 		}
2300 	}
2301 */
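
/*
 * A minimal, illustrative sketch of the check described above -- it is NOT
 * wired in here.  It assumes "tag", "rport", "inp", "m", "iphlen", "sh",
 * "vrf_id" and "port" are placeholders taken from the incoming packet's
 * context, that the appropriate INP/INFO locks are already held for the hash
 * walk, and it simply mirrors the NAT colliding-state ABORT construction
 * used earlier in this file:
 *
 *	struct sctpasochead *head;
 *	struct sctp_tcb *stcb;
 *	struct mbuf *op_err;
 *	struct sctp_paramhdr *ph;
 *
 *	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
 *	    SCTP_BASE_INFO(hashasocmark))];
 *	LIST_FOREACH(stcb, head, sctp_asocs) {
 *		if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) &&
 *		    (inp == stcb->sctp_ep)) {
 *			op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
 *			    0, M_DONTWAIT, 1, MT_DATA);
 *			if (op_err == NULL)
 *				break;
 *			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
 *			ph = mtod(op_err, struct sctp_paramhdr *);
 *			ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
 *			ph->param_length = htons(sizeof(struct sctp_paramhdr));
 *			sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
 *			break;
 *		}
 *	}
 */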
2302 
2303 /*
2304  * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2305  * existing (non-NULL) TCB
2306  */
2307 static struct mbuf *
2308 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2309     struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2310     struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2311     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2312     struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2313 {
2314 	struct sctp_state_cookie *cookie;
2315 	struct sockaddr_in6 sin6;
2316 	struct sockaddr_in sin;
2317 	struct sctp_tcb *l_stcb = *stcb;
2318 	struct sctp_inpcb *l_inp;
2319 	struct sockaddr *to;
2320 	sctp_assoc_t sac_restart_id;
2321 	struct sctp_pcb *ep;
2322 	struct mbuf *m_sig;
2323 	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2324 	uint8_t *sig;
2325 	uint8_t cookie_ok = 0;
2326 	unsigned int size_of_pkt, sig_offset, cookie_offset;
2327 	unsigned int cookie_len;
2328 	struct timeval now;
2329 	struct timeval time_expires;
2330 	struct sockaddr_storage dest_store;
2331 	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2332 	struct ip *iph;
2333 	int notification = 0;
2334 	struct sctp_nets *netl;
2335 	int had_a_existing_tcb = 0;
2336 
2337 	SCTPDBG(SCTP_DEBUG_INPUT2,
2338 	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2339 
2340 	if (inp_p == NULL) {
2341 		return (NULL);
2342 	}
2343 	/* First get the destination address setup too. */
2344 	iph = mtod(m, struct ip *);
2345 	switch (iph->ip_v) {
2346 	case IPVERSION:
2347 		{
2348 			/* its IPv4 */
2349 			struct sockaddr_in *lsin;
2350 
2351 			lsin = (struct sockaddr_in *)(localep_sa);
2352 			memset(lsin, 0, sizeof(*lsin));
2353 			lsin->sin_family = AF_INET;
2354 			lsin->sin_len = sizeof(*lsin);
2355 			lsin->sin_port = sh->dest_port;
2356 			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2357 			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2358 			break;
2359 		}
2360 #ifdef INET6
2361 	case IPV6_VERSION >> 4:
2362 		{
2363 			/* its IPv6 */
2364 			struct ip6_hdr *ip6;
2365 			struct sockaddr_in6 *lsin6;
2366 
2367 			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2368 			memset(lsin6, 0, sizeof(*lsin6));
2369 			lsin6->sin6_family = AF_INET6;
2370 			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2371 			ip6 = mtod(m, struct ip6_hdr *);
2372 			lsin6->sin6_port = sh->dest_port;
2373 			lsin6->sin6_addr = ip6->ip6_dst;
2374 			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2375 			break;
2376 		}
2377 #endif
2378 	default:
2379 		return (NULL);
2380 	}
2381 
2382 	cookie = &cp->cookie;
2383 	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2384 	cookie_len = ntohs(cp->ch.chunk_length);
2385 
2386 	if ((cookie->peerport != sh->src_port) ||
2387 	    (cookie->myport != sh->dest_port) ||
2388 	    (cookie->my_vtag != sh->v_tag)) {
2389 		/*
2390 		 * invalid ports or bad tag.  Note that we always leave the
2391 		 * v_tag in the header in network order and when we stored
2392 		 * it in the my_vtag slot we also left it in network order.
2393 		 * This maintains the match even though it may be in the
2394 		 * opposite byte order of the machine :->
2395 		 */
2396 		return (NULL);
2397 	}
2398 	if (cookie_len > size_of_pkt ||
2399 	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2400 	    sizeof(struct sctp_init_chunk) +
2401 	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2402 		/* cookie too long!  or too small */
2403 		return (NULL);
2404 	}
2405 	/*
2406 	 * split off the signature into its own mbuf (since it should not be
2407 	 * calculated in the sctp_hmac_m() call).
2408 	 */
2409 	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2410 	if (sig_offset > size_of_pkt) {
2411 		/* packet not correct size! */
2412 		/* XXX this may already be accounted for earlier... */
2413 		return (NULL);
2414 	}
2415 	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2416 	if (m_sig == NULL) {
2417 		/* out of memory or ?? */
2418 		return (NULL);
2419 	}
2420 #ifdef SCTP_MBUF_LOGGING
2421 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2422 		struct mbuf *mat;
2423 
2424 		mat = m_sig;
2425 		while (mat) {
2426 			if (SCTP_BUF_IS_EXTENDED(mat)) {
2427 				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2428 			}
2429 			mat = SCTP_BUF_NEXT(mat);
2430 		}
2431 	}
2432 #endif
2433 
2434 	/*
2435 	 * compute the signature/digest for the cookie
2436 	 */
2437 	ep = &(*inp_p)->sctp_ep;
2438 	l_inp = *inp_p;
2439 	if (l_stcb) {
2440 		SCTP_TCB_UNLOCK(l_stcb);
2441 	}
2442 	SCTP_INP_RLOCK(l_inp);
2443 	if (l_stcb) {
2444 		SCTP_TCB_LOCK(l_stcb);
2445 	}
2446 	/* which cookie is it? */
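	/*
	 * If the cookie was created before the most recent secret change
	 * (and the secret has actually changed), it was signed with the
	 * previous secret, so verify it with that one; otherwise use the
	 * current secret.
	 */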
2447 	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2448 	    (ep->current_secret_number != ep->last_secret_number)) {
2449 		/* it's the old cookie */
2450 		(void)sctp_hmac_m(SCTP_HMAC,
2451 		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2452 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2453 	} else {
2454 		/* it's the current cookie */
2455 		(void)sctp_hmac_m(SCTP_HMAC,
2456 		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2457 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2458 	}
2459 	/* get the signature */
2460 	SCTP_INP_RUNLOCK(l_inp);
2461 	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2462 	if (sig == NULL) {
2463 		/* couldn't find signature */
2464 		sctp_m_freem(m_sig);
2465 		return (NULL);
2466 	}
2467 	/* compare the received digest with the computed digest */
2468 	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2469 		/* try the old cookie? */
2470 		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2471 		    (ep->current_secret_number != ep->last_secret_number)) {
2472 			/* compute digest with old */
2473 			(void)sctp_hmac_m(SCTP_HMAC,
2474 			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2475 			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2476 			/* compare */
2477 			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2478 				cookie_ok = 1;
2479 		}
2480 	} else {
2481 		cookie_ok = 1;
2482 	}
2483 
2484 	/*
2485 	 * Now before we continue we must reconstruct our mbuf so that
2486 	 * normal processing of any other chunks will work.
2487 	 */
2488 	{
2489 		struct mbuf *m_at;
2490 
2491 		m_at = m;
2492 		while (SCTP_BUF_NEXT(m_at) != NULL) {
2493 			m_at = SCTP_BUF_NEXT(m_at);
2494 		}
2495 		SCTP_BUF_NEXT(m_at) = m_sig;
2496 	}
2497 
2498 	if (cookie_ok == 0) {
2499 		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2500 		SCTPDBG(SCTP_DEBUG_INPUT2,
2501 		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2502 		    (uint32_t) offset, cookie_offset, sig_offset);
2503 		return (NULL);
2504 	}
2505 	/*
2506 	 * check the cookie timestamps to be sure it's not stale
2507 	 */
2508 	(void)SCTP_GETTIME_TIMEVAL(&now);
2509 	/* Expire time is in Ticks, so we convert to seconds */
2510 	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2511 	time_expires.tv_usec = cookie->time_entered.tv_usec;
2512 	/*
2513 	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2514 	 * is undefined.
2515 	 */
2516 	if (timevalcmp(&now, &time_expires, >)) {
2517 		/* cookie is stale! */
2518 		struct mbuf *op_err;
2519 		struct sctp_stale_cookie_msg *scm;
2520 		uint32_t tim;
2521 
2522 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2523 		    0, M_DONTWAIT, 1, MT_DATA);
2524 		if (op_err == NULL) {
2525 			/* FOOBAR */
2526 			return (NULL);
2527 		}
2528 		/* Set the len */
2529 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2530 		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2531 		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2532 		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2533 		    (sizeof(uint32_t))));
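		/*
		 * The error cause carries a "measure of staleness": how far
		 * past its lifetime the cookie arrived, in microseconds.
		 */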
2534 		/* seconds to usec */
2535 		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2536 		/* add in usec */
2537 		if (tim == 0)
2538 			tim = now.tv_usec - cookie->time_entered.tv_usec;
2539 		scm->time_usec = htonl(tim);
2540 		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2541 		    vrf_id, port);
2542 		return (NULL);
2543 	}
2544 	/*
2545 	 * Now we must use the lookup address to see if we have an existing
2546 	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2547 	 * and an INIT collided with us and somewhere the peer sent the
2548 	 * cookie on another address besides the single address our assoc
2549 	 * had for it. In this case we will have at least one of the tie-tags
2550 	 * set AND the address field in the cookie can be used to look it
2551 	 * up.
2552 	 */
2553 	to = NULL;
2554 	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2555 		memset(&sin6, 0, sizeof(sin6));
2556 		sin6.sin6_family = AF_INET6;
2557 		sin6.sin6_len = sizeof(sin6);
2558 		sin6.sin6_port = sh->src_port;
2559 		sin6.sin6_scope_id = cookie->scope_id;
2560 		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2561 		    sizeof(sin6.sin6_addr.s6_addr));
2562 		to = (struct sockaddr *)&sin6;
2563 	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2564 		memset(&sin, 0, sizeof(sin));
2565 		sin.sin_family = AF_INET;
2566 		sin.sin_len = sizeof(sin);
2567 		sin.sin_port = sh->src_port;
2568 		sin.sin_addr.s_addr = cookie->address[0];
2569 		to = (struct sockaddr *)&sin;
2570 	} else {
2571 		/* This should not happen */
2572 		return (NULL);
2573 	}
2574 	if ((*stcb == NULL) && to) {
2575 		/* Yep, lets check */
2576 		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2577 		if (*stcb == NULL) {
2578 			/*
2579 			 * We should have only gotten back the same inp. If
2580 			 * we got back a different ep we have a problem: the
2581 			 * original findep got back l_inp, yet this one differs.
2582 			 */
2583 			if (l_inp != *inp_p) {
2584 				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2585 			}
2586 		} else {
2587 			if (*locked_tcb == NULL) {
2588 				/*
2589 				 * In this case we found the assoc only
2590 				 * after we locked the create lock. This
2591 				 * means we are in a colliding case and we
2592 				 * must make sure that we unlock the tcb if
2593 				 * its one of the cases where we throw away
2594 				 * the incoming packets.
2595 				 */
2596 				*locked_tcb = *stcb;
2597 
2598 				/*
2599 				 * We must also increment the inp ref count
2600 				 * since the ref_count flag was set when we
2601 				 * did not find the TCB; now that we found it,
2602 				 * which reduces the refcount, we must
2603 				 * raise it back up to balance it all :-)
2604 				 */
2605 				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2606 				if ((*stcb)->sctp_ep != l_inp) {
2607 					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2608 					    (*stcb)->sctp_ep, l_inp);
2609 				}
2610 			}
2611 		}
2612 	}
2613 	if (to == NULL) {
2614 		return (NULL);
2615 	}
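	/*
	 * The signature has been verified and split off above, so exclude it
	 * from the cookie length handed to the cookie processing routines.
	 */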
2616 	cookie_len -= SCTP_SIGNATURE_SIZE;
2617 	if (*stcb == NULL) {
2618 		/* this is the "normal" case... get a new TCB */
2619 		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2620 		    cookie_len, *inp_p, netp, to, &notification,
2621 		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2622 	} else {
2623 		/* this is abnormal... cookie-echo on existing TCB */
2624 		had_a_existing_tcb = 1;
2625 		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2626 		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2627 		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2628 	}
2629 
2630 	if (*stcb == NULL) {
2631 		/* still no TCB... must be bad cookie-echo */
2632 		return (NULL);
2633 	}
2634 	/*
2635 	 * Ok, we built an association so confirm the address we sent the
2636 	 * INIT-ACK to.
2637 	 */
2638 	netl = sctp_findnet(*stcb, to);
2639 	/*
2640 	 * This code should in theory NOT run, but just in case it does:
2641 	 */
2642 	if (netl == NULL) {
2643 		/* TSNH! Huh, why do I need to add this address here? */
2644 		int ret;
2645 
2646 		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2647 		    SCTP_IN_COOKIE_PROC);
2648 		netl = sctp_findnet(*stcb, to);
2649 	}
2650 	if (netl) {
2651 		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2652 			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2653 			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2654 			    netl);
2655 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2656 			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2657 		}
2658 	}
2659 	if (*stcb) {
2660 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2661 		    *stcb, NULL);
2662 	}
2663 	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2664 		if (!had_a_existing_tcb ||
2665 		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2666 			/*
2667 			 * If we have a NEW cookie or the connect never
2668 			 * reached the connected state during collision we
2669 			 * must do the TCP accept thing.
2670 			 */
2671 			struct socket *so, *oso;
2672 			struct sctp_inpcb *inp;
2673 
2674 			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2675 				/*
2676 				 * For a restart we will keep the same
2677 				 * socket, no need to do anything. I THINK!!
2678 				 */
2679 				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2680 				return (m);
2681 			}
2682 			oso = (*inp_p)->sctp_socket;
2683 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2684 			SCTP_TCB_UNLOCK((*stcb));
2685 			so = sonewconn(oso, 0
2686 			    );
2687 			SCTP_TCB_LOCK((*stcb));
2688 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2689 
2690 			if (so == NULL) {
2691 				struct mbuf *op_err;
2692 
2693 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2694 				struct socket *pcb_so;
2695 
2696 #endif
2697 				/* Too many sockets */
2698 				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2699 				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2700 				sctp_abort_association(*inp_p, NULL, m, iphlen,
2701 				    sh, op_err, vrf_id, port);
2702 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2703 				pcb_so = SCTP_INP_SO(*inp_p);
2704 				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2705 				SCTP_TCB_UNLOCK((*stcb));
2706 				SCTP_SOCKET_LOCK(pcb_so, 1);
2707 				SCTP_TCB_LOCK((*stcb));
2708 				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2709 #endif
2710 				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2711 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2712 				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2713 #endif
2714 				return (NULL);
2715 			}
2716 			inp = (struct sctp_inpcb *)so->so_pcb;
2717 			SCTP_INP_INCR_REF(inp);
2718 			/*
2719 			 * We add the unbound flag here so that if we get an
2720 			 * soabort() before we get the move_pcb done, we
2721 			 * will properly cleanup.
2722 			 */
2723 			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2724 			    SCTP_PCB_FLAGS_CONNECTED |
2725 			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2726 			    SCTP_PCB_FLAGS_UNBOUND |
2727 			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2728 			    SCTP_PCB_FLAGS_DONT_WAKE);
2729 			inp->sctp_features = (*inp_p)->sctp_features;
2730 			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2731 			inp->sctp_socket = so;
2732 			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2733 			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2734 			inp->sctp_context = (*inp_p)->sctp_context;
2735 			inp->inp_starting_point_for_iterator = NULL;
2736 			/*
2737 			 * copy in the authentication parameters from the
2738 			 * original endpoint
2739 			 */
2740 			if (inp->sctp_ep.local_hmacs)
2741 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2742 			inp->sctp_ep.local_hmacs =
2743 			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2744 			if (inp->sctp_ep.local_auth_chunks)
2745 				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2746 			inp->sctp_ep.local_auth_chunks =
2747 			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2748 
2749 			/*
2750 			 * Now we must move it from one hash table to
2751 			 * another and get the tcb in the right place.
2752 			 */
2753 			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2754 
2755 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2756 			SCTP_TCB_UNLOCK((*stcb));
2757 
2758 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2759 			    0);
2760 			SCTP_TCB_LOCK((*stcb));
2761 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2762 
2763 
2764 			/*
2765 			 * now we must check to see if we were aborted while
2766 			 * the move was going on and the lock/unlock
2767 			 * happened.
2768 			 */
2769 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2770 				/*
2771 				 * yep it was, we leave the assoc attached
2772 				 * to the socket since the sctp_inpcb_free()
2773 				 * call will send an abort for us.
2774 				 */
2775 				SCTP_INP_DECR_REF(inp);
2776 				return (NULL);
2777 			}
2778 			SCTP_INP_DECR_REF(inp);
2779 			/* Switch over to the new guy */
2780 			*inp_p = inp;
2781 			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2782 
2783 			/*
2784 			 * Pull it from the incomplete queue and wake the
2785 			 * guy
2786 			 */
2787 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2788 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2789 			SCTP_TCB_UNLOCK((*stcb));
2790 			SCTP_SOCKET_LOCK(so, 1);
2791 #endif
2792 			soisconnected(so);
2793 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2794 			SCTP_TCB_LOCK((*stcb));
2795 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2796 			SCTP_SOCKET_UNLOCK(so, 1);
2797 #endif
2798 			return (m);
2799 		}
2800 	}
2801 	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2802 		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2803 	}
2804 	return (m);
2805 }
2806 
2807 static void
2808 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2809     struct sctp_tcb *stcb, struct sctp_nets *net)
2810 {
2811 	/* cp must not be used, others call this without a c-ack :-) */
2812 	struct sctp_association *asoc;
2813 
2814 	SCTPDBG(SCTP_DEBUG_INPUT2,
2815 	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2816 	if (stcb == NULL)
2817 		return;
2818 
2819 	asoc = &stcb->asoc;
2820 
2821 	sctp_stop_all_cookie_timers(stcb);
2822 	/* process according to association state */
2823 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2824 		/* state change only needed when I am in right state */
2825 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2826 		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2827 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2828 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2829 			    stcb->sctp_ep, stcb, asoc->primary_destination);
2830 
2831 		}
2832 		/* update RTO */
2833 		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2834 		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2835 		if (asoc->overall_error_count == 0) {
2836 			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2837 			    &asoc->time_entered, sctp_align_safe_nocopy);
2838 		}
2839 		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2840 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2841 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2842 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2843 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2844 			struct socket *so;
2845 
2846 #endif
2847 			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2848 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2849 			so = SCTP_INP_SO(stcb->sctp_ep);
2850 			atomic_add_int(&stcb->asoc.refcnt, 1);
2851 			SCTP_TCB_UNLOCK(stcb);
2852 			SCTP_SOCKET_LOCK(so, 1);
2853 			SCTP_TCB_LOCK(stcb);
2854 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2855 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2856 				SCTP_SOCKET_UNLOCK(so, 1);
2857 				return;
2858 			}
2859 #endif
2860 			soisconnected(stcb->sctp_socket);
2861 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2862 			SCTP_SOCKET_UNLOCK(so, 1);
2863 #endif
2864 		}
2865 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2866 		    stcb, net);
2867 		/*
2868 		 * since we did not send a HB make sure we don't double
2869 		 * things
2870 		 */
2871 		net->hb_responded = 1;
2872 
2873 		if (stcb->asoc.sctp_autoclose_ticks &&
2874 		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2875 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2876 			    stcb->sctp_ep, stcb, NULL);
2877 		}
2878 		/*
2879 		 * send ASCONF if parameters are pending and ASCONFs are
2880 		 * allowed (eg. addresses changed when init/cookie echo were
2881 		 * in flight)
2882 		 */
2883 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2884 		    (stcb->asoc.peer_supports_asconf) &&
2885 		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2886 #ifdef SCTP_TIMER_BASED_ASCONF
2887 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2888 			    stcb->sctp_ep, stcb,
2889 			    stcb->asoc.primary_destination);
2890 #else
2891 			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
2892 			    SCTP_ADDR_NOT_LOCKED);
2893 #endif
2894 		}
2895 	}
2896 	/* Toss the cookie if I can */
2897 	sctp_toss_old_cookies(stcb, asoc);
2898 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2899 		/* Restart the timer if we have pending data */
2900 		struct sctp_tmit_chunk *chk;
2901 
2902 		chk = TAILQ_FIRST(&asoc->sent_queue);
2903 		if (chk) {
2904 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2905 			    stcb, chk->whoTo);
2906 		}
2907 	}
2908 }
2909 
2910 static void
2911 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2912     struct sctp_tcb *stcb)
2913 {
2914 	struct sctp_nets *net;
2915 	struct sctp_tmit_chunk *lchk;
2916 	uint32_t tsn;
2917 
2918 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2919 		return;
2920 	}
2921 	SCTP_STAT_INCR(sctps_recvecne);
2922 	tsn = ntohl(cp->tsn);
2923 	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2924 	/* Also we make sure we disable the nonce_wait */
2925 	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2926 	if (lchk == NULL) {
2927 		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2928 	} else {
2929 		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2930 	}
2931 	stcb->asoc.nonce_wait_for_ecne = 0;
2932 	stcb->asoc.nonce_sum_check = 0;
2933 
2934 	/* Find where it was sent, if possible */
2935 	net = NULL;
2936 	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2937 	while (lchk) {
2938 		if (lchk->rec.data.TSN_seq == tsn) {
2939 			net = lchk->whoTo;
2940 			break;
2941 		}
2942 		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2943 			break;
2944 		lchk = TAILQ_NEXT(lchk, sctp_next);
2945 	}
2946 	if (net == NULL)
2947 		/* default is we use the primary */
2948 		net = stcb->asoc.primary_destination;
2949 
2950 	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2951 		/*
2952 		 * JRS - Use the congestion control given in the pluggable
2953 		 * CC module
2954 		 */
2955 		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
2956 		/*
2957 		 * we reduce once every RTT. So we will only lower cwnd at
2958 		 * the next sending seq i.e. the resync_tsn.
2959 		 */
2960 		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2961 	}
2962 	/*
2963 	 * We always send a CWR this way: if our previous one was lost our
2964 	 * peer will get an update, and if it is not yet time to reduce again
2965 	 * the peer still gets the CWR.
2966 	 */
2967 	sctp_send_cwr(stcb, net, tsn);
2968 }
2969 
2970 static void
2971 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2972 {
2973 	/*
2974 	 * Here we get a CWR from the peer. We must look in the outqueue and
2975 	 * make sure that we have a covered ECNE in the control chunk part.
2976 	 * If so, remove it.
2977 	 */
2978 	struct sctp_tmit_chunk *chk;
2979 	struct sctp_ecne_chunk *ecne;
2980 
2981 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2982 		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2983 			continue;
2984 		}
2985 		/*
2986 		 * Look for and remove if it is the right TSN. Since there
2987 		 * is only ONE ECNE on the control queue at any one time we
2988 		 * don't need to worry about more than one!
2989 		 */
2990 		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2991 		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2992 		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2993 			/* this covers this ECNE, we can remove it */
2994 			stcb->asoc.ecn_echo_cnt_onq--;
2995 			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2996 			    sctp_next);
2997 			if (chk->data) {
2998 				sctp_m_freem(chk->data);
2999 				chk->data = NULL;
3000 			}
3001 			stcb->asoc.ctrl_queue_cnt--;
3002 			sctp_free_a_chunk(stcb, chk);
3003 			break;
3004 		}
3005 	}
3006 }
3007 
3008 static void
3009 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
3010     struct sctp_tcb *stcb, struct sctp_nets *net)
3011 {
3012 	struct sctp_association *asoc;
3013 
3014 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3015 	struct socket *so;
3016 
3017 #endif
3018 
3019 	SCTPDBG(SCTP_DEBUG_INPUT2,
3020 	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
3021 	if (stcb == NULL)
3022 		return;
3023 
3024 	asoc = &stcb->asoc;
3025 	/* process according to association state */
3026 	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
3027 		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
3028 		SCTPDBG(SCTP_DEBUG_INPUT2,
3029 		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
3030 		SCTP_TCB_UNLOCK(stcb);
3031 		return;
3032 	}
3033 	/* notify upper layer protocol */
3034 	if (stcb->sctp_socket) {
3035 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3036 		/* are the queues empty? they should be */
3037 		if (!TAILQ_EMPTY(&asoc->send_queue) ||
3038 		    !TAILQ_EMPTY(&asoc->sent_queue) ||
3039 		    !TAILQ_EMPTY(&asoc->out_wheel)) {
3040 			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
3041 		}
3042 	}
3043 	/* stop the timer */
3044 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
3045 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
3046 	/* free the TCB */
3047 	SCTPDBG(SCTP_DEBUG_INPUT2,
3048 	    "sctp_handle_shutdown_complete: calls free-asoc\n");
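	/*
	 * On platforms that also require the socket lock here, hold a
	 * reference on the asoc, drop the TCB lock, take the socket lock
	 * and then re-acquire the TCB lock, so sctp_free_assoc() runs with
	 * both locks held in the proper order.
	 */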
3049 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3050 	so = SCTP_INP_SO(stcb->sctp_ep);
3051 	atomic_add_int(&stcb->asoc.refcnt, 1);
3052 	SCTP_TCB_UNLOCK(stcb);
3053 	SCTP_SOCKET_LOCK(so, 1);
3054 	SCTP_TCB_LOCK(stcb);
3055 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
3056 #endif
3057 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
3058 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3059 	SCTP_SOCKET_UNLOCK(so, 1);
3060 #endif
3061 	return;
3062 }
3063 
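/*
 * Helper for PACKET-DROPPED processing: given one chunk descriptor pulled
 * from the drop report, decide what (if anything) to retransmit. A DATA
 * chunk is looked up on the sent queue and marked for resend; most control
 * chunks are simply sent again; the remaining types are ignored.
 */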
3064 static int
3065 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
3066     struct sctp_nets *net, uint8_t flg)
3067 {
3068 	switch (desc->chunk_type) {
3069 		case SCTP_DATA:
3070 		/* find the TSN to resend (possibly) */
3071 		{
3072 			uint32_t tsn;
3073 			struct sctp_tmit_chunk *tp1;
3074 
3075 			tsn = ntohl(desc->tsn_ifany);
3076 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3077 			while (tp1) {
3078 				if (tp1->rec.data.TSN_seq == tsn) {
3079 					/* found it */
3080 					break;
3081 				}
3082 				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
3083 				    MAX_TSN)) {
3084 					/* not found */
3085 					tp1 = NULL;
3086 					break;
3087 				}
3088 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3089 			}
3090 			if (tp1 == NULL) {
3091 				/*
3092 				 * Do it the other way, i.e. without paying
3093 				 * attention to queue seq order.
3094 				 */
3095 				SCTP_STAT_INCR(sctps_pdrpdnfnd);
3096 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3097 				while (tp1) {
3098 					if (tp1->rec.data.TSN_seq == tsn) {
3099 						/* found it */
3100 						break;
3101 					}
3102 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3103 				}
3104 			}
3105 			if (tp1 == NULL) {
3106 				SCTP_STAT_INCR(sctps_pdrptsnnf);
3107 			}
3108 			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
3109 				uint8_t *ddp;
3110 
3111 				if ((stcb->asoc.peers_rwnd == 0) &&
3112 				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3113 					SCTP_STAT_INCR(sctps_pdrpdiwnp);
3114 					return (0);
3115 				}
3116 				if (stcb->asoc.peers_rwnd == 0 &&
3117 				    (flg & SCTP_FROM_MIDDLE_BOX)) {
3118 					SCTP_STAT_INCR(sctps_pdrpdizrw);
3119 					return (0);
3120 				}
3121 				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
3122 				    sizeof(struct sctp_data_chunk));
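				/*
				 * Verify the payload bytes echoed in the
				 * drop report against our own copy; on a
				 * mismatch the report is considered corrupt
				 * and is ignored.
				 */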
3123 				{
3124 					unsigned int iii;
3125 
3126 					for (iii = 0; iii < sizeof(desc->data_bytes);
3127 					    iii++) {
3128 						if (ddp[iii] != desc->data_bytes[iii]) {
3129 							SCTP_STAT_INCR(sctps_pdrpbadd);
3130 							return (-1);
3131 						}
3132 					}
3133 				}
3134 				/*
3135 				 * We zero out the nonce so a resync is
3136 				 * not needed
3137 				 */
3138 				tp1->rec.data.ect_nonce = 0;
3139 
3140 				if (tp1->do_rtt) {
3141 					/*
3142 					 * this guy had an RTO calculation
3143 					 * pending on it, cancel it
3144 					 */
3145 					tp1->do_rtt = 0;
3146 				}
3147 				SCTP_STAT_INCR(sctps_pdrpmark);
3148 				if (tp1->sent != SCTP_DATAGRAM_RESEND)
3149 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3150 				tp1->sent = SCTP_DATAGRAM_RESEND;
3151 				/*
3152 				 * mark it as if we were doing a FR, since
3153 				 * we will be getting gap ack reports behind
3154 				 * the info from the router.
3155 				 */
3156 				tp1->rec.data.doing_fast_retransmit = 1;
3157 				/*
3158 				 * mark the tsn with what sequences can
3159 				 * cause a new FR.
3160 				 */
3161 				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
3162 					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
3163 				} else {
3164 					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
3165 				}
3166 
3167 				/* restart the timer */
3168 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3169 				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
3170 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3171 				    stcb, tp1->whoTo);
3172 
3173 				/* fix counts and things */
3174 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3175 					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
3176 					    tp1->whoTo->flight_size,
3177 					    tp1->book_size,
3178 					    (uintptr_t) stcb,
3179 					    tp1->rec.data.TSN_seq);
3180 				}
3181 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3182 					sctp_flight_size_decrease(tp1);
3183 					sctp_total_flight_decrease(stcb, tp1);
3184 				}
3185 			} {
3186 				/* audit code */
3187 				unsigned int audit;
3188 
3189 				audit = 0;
3190 				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3191 					if (tp1->sent == SCTP_DATAGRAM_RESEND)
3192 						audit++;
3193 				}
3194 				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
3195 				    sctp_next) {
3196 					if (tp1->sent == SCTP_DATAGRAM_RESEND)
3197 						audit++;
3198 				}
3199 				if (audit != stcb->asoc.sent_queue_retran_cnt) {
3200 					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3201 					    audit, stcb->asoc.sent_queue_retran_cnt);
3202 #ifndef SCTP_AUDITING_ENABLED
3203 					stcb->asoc.sent_queue_retran_cnt = audit;
3204 #endif
3205 				}
3206 			}
3207 		}
3208 		break;
3209 	case SCTP_ASCONF:
3210 		{
3211 			struct sctp_tmit_chunk *asconf;
3212 
3213 			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3214 			    sctp_next) {
3215 				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3216 					break;
3217 				}
3218 			}
3219 			if (asconf) {
3220 				if (asconf->sent != SCTP_DATAGRAM_RESEND)
3221 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3222 				asconf->sent = SCTP_DATAGRAM_RESEND;
3223 				asconf->snd_count--;
3224 			}
3225 		}
3226 		break;
3227 	case SCTP_INITIATION:
3228 		/* resend the INIT */
3229 		stcb->asoc.dropped_special_cnt++;
3230 		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3231 			/*
3232 			 * If we can get it in within a few attempts we do
3233 			 * this; otherwise we let the timer fire.
3234 			 */
3235 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3236 			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
3237 			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3238 		}
3239 		break;
3240 	case SCTP_SELECTIVE_ACK:
3241 		/* resend the sack */
3242 		sctp_send_sack(stcb);
3243 		break;
3244 		/* EY for nr_sacks */
3245 	case SCTP_NR_SELECTIVE_ACK:
3246 		sctp_send_nr_sack(stcb);	/* EY resend the nr-sack */
3247 		break;
3248 	case SCTP_HEARTBEAT_REQUEST:
3249 		/* resend a demand HB */
3250 		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3251 			/*
3252 			 * Only retransmit if we KNOW we won't destroy the
3253 			 * tcb
3254 			 */
3255 			(void)sctp_send_hb(stcb, 1, net);
3256 		}
3257 		break;
3258 	case SCTP_SHUTDOWN:
3259 		sctp_send_shutdown(stcb, net);
3260 		break;
3261 	case SCTP_SHUTDOWN_ACK:
3262 		sctp_send_shutdown_ack(stcb, net);
3263 		break;
3264 	case SCTP_COOKIE_ECHO:
3265 		{
3266 			struct sctp_tmit_chunk *cookie;
3267 
3268 			cookie = NULL;
3269 			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3270 			    sctp_next) {
3271 				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3272 					break;
3273 				}
3274 			}
3275 			if (cookie) {
3276 				if (cookie->sent != SCTP_DATAGRAM_RESEND)
3277 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3278 				cookie->sent = SCTP_DATAGRAM_RESEND;
3279 				sctp_stop_all_cookie_timers(stcb);
3280 			}
3281 		}
3282 		break;
3283 	case SCTP_COOKIE_ACK:
3284 		sctp_send_cookie_ack(stcb);
3285 		break;
3286 	case SCTP_ASCONF_ACK:
3287 		/* resend last asconf ack */
3288 		sctp_send_asconf_ack(stcb);
3289 		break;
3290 	case SCTP_FORWARD_CUM_TSN:
3291 		send_forward_tsn(stcb, &stcb->asoc);
3292 		break;
3293 		/* can't do anything with these */
3294 	case SCTP_PACKET_DROPPED:
3295 	case SCTP_INITIATION_ACK:	/* this should not happen */
3296 	case SCTP_HEARTBEAT_ACK:
3297 	case SCTP_ABORT_ASSOCIATION:
3298 	case SCTP_OPERATION_ERROR:
3299 	case SCTP_SHUTDOWN_COMPLETE:
3300 	case SCTP_ECN_ECHO:
3301 	case SCTP_ECN_CWR:
3302 	default:
3303 		break;
3304 	}
3305 	return (0);
3306 }
3307 
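/*
 * Reset the last-delivered sequence number of the listed incoming streams
 * (or of all of them when no list is given) and notify the ULP.
 */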
3308 void
3309 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3310 {
3311 	int i;
3312 	uint16_t temp;
3313 
3314 	/*
3315 	 * We set things to 0xffff since this is the last delivered sequence
3316 	 * and we will be sending in 0 after the reset.
3317 	 */
3318 
3319 	if (number_entries) {
3320 		for (i = 0; i < number_entries; i++) {
3321 			temp = ntohs(list[i]);
3322 			if (temp >= stcb->asoc.streamincnt) {
3323 				continue;
3324 			}
3325 			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3326 		}
3327 	} else {
3328 		list = NULL;
3329 		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3330 			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3331 		}
3332 	}
3333 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3334 }
3335 
3336 static void
3337 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3338 {
3339 	int i;
3340 
3341 	if (number_entries == 0) {
3342 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3343 			stcb->asoc.strmout[i].next_sequence_sent = 0;
3344 		}
3345 	} else if (number_entries) {
3346 		for (i = 0; i < number_entries; i++) {
3347 			uint16_t temp;
3348 
3349 			temp = ntohs(list[i]);
3350 			if (temp >= stcb->asoc.streamoutcnt) {
3351 				/* no such stream */
3352 				continue;
3353 			}
3354 			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3355 		}
3356 	}
3357 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3358 }
3359 
3360 
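/*
 * Locate the outstanding stream-reset request carrying sequence number
 * 'seq' in the pending stream-reset chunk (asoc->str_reset). A single
 * chunk holds at most two requests, so only the first parameter and, if
 * present, the one following it need to be examined.
 */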
3361 struct sctp_stream_reset_out_request *
3362 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3363 {
3364 	struct sctp_association *asoc;
3365 	struct sctp_stream_reset_out_req *req;
3366 	struct sctp_stream_reset_out_request *r;
3367 	struct sctp_tmit_chunk *chk;
3368 	int len, clen;
3369 
3370 	asoc = &stcb->asoc;
3371 	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3372 		asoc->stream_reset_outstanding = 0;
3373 		return (NULL);
3374 	}
3375 	if (stcb->asoc.str_reset == NULL) {
3376 		asoc->stream_reset_outstanding = 0;
3377 		return (NULL);
3378 	}
3379 	chk = stcb->asoc.str_reset;
3380 	if (chk->data == NULL) {
3381 		return (NULL);
3382 	}
3383 	if (bchk) {
3384 		/* the caller wants a copy of the chk pointer */
3385 		*bchk = chk;
3386 	}
3387 	clen = chk->send_size;
3388 	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3389 	r = &req->sr_req;
3390 	if (ntohl(r->request_seq) == seq) {
3391 		/* found it */
3392 		return (r);
3393 	}
3394 	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3395 	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3396 		/* move to the next one, there can only be a max of two */
3397 		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3398 		if (ntohl(r->request_seq) == seq) {
3399 			return (r);
3400 		}
3401 	}
3402 	/* that seq is not here */
3403 	return (NULL);
3404 }
3405 
3406 static void
3407 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3408 {
3409 	struct sctp_association *asoc;
3410 	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3411 
3412 	if (stcb->asoc.str_reset == NULL) {
3413 		return;
3414 	}
3415 	asoc = &stcb->asoc;
3416 
3417 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3418 	TAILQ_REMOVE(&asoc->control_send_queue,
3419 	    chk,
3420 	    sctp_next);
3421 	if (chk->data) {
3422 		sctp_m_freem(chk->data);
3423 		chk->data = NULL;
3424 	}
3425 	asoc->ctrl_queue_cnt--;
3426 	sctp_free_a_chunk(stcb, chk);
3427 	/* sa_ignore NO_NULL_CHK */
3428 	stcb->asoc.str_reset = NULL;
3429 }
3430 
3431 
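/*
 * Process the peer's response to one of our outstanding stream-reset
 * requests. Depending on the request type (out, in, add-streams or TSN
 * reset) we apply the reset locally, adjust the stream counts, or resync
 * the TSN/mapping state, and notify the ULP of success or failure.
 */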
3432 static int
3433 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3434     uint32_t seq, uint32_t action,
3435     struct sctp_stream_reset_response *respin)
3436 {
3437 	uint16_t type;
3438 	int lparm_len;
3439 	struct sctp_association *asoc = &stcb->asoc;
3440 	struct sctp_tmit_chunk *chk;
3441 	struct sctp_stream_reset_out_request *srparam;
3442 	int number_entries;
3443 
3444 	if (asoc->stream_reset_outstanding == 0) {
3445 		/* duplicate */
3446 		return (0);
3447 	}
3448 	if (seq == stcb->asoc.str_reset_seq_out) {
3449 		srparam = sctp_find_stream_reset(stcb, seq, &chk);
3450 		if (srparam) {
3451 			stcb->asoc.str_reset_seq_out++;
3452 			type = ntohs(srparam->ph.param_type);
3453 			lparm_len = ntohs(srparam->ph.param_length);
3454 			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3455 				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3456 				asoc->stream_reset_out_is_outstanding = 0;
3457 				if (asoc->stream_reset_outstanding)
3458 					asoc->stream_reset_outstanding--;
3459 				if (action == SCTP_STREAM_RESET_PERFORMED) {
3460 					/* do it */
3461 					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
3462 				} else {
3463 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3464 				}
3465 			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3466 				/* Answered my request */
3467 				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3468 				if (asoc->stream_reset_outstanding)
3469 					asoc->stream_reset_outstanding--;
3470 				if (action != SCTP_STREAM_RESET_PERFORMED) {
3471 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3472 				}
3473 			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
3474 				/* Ok we now may have more streams */
3475 				if (asoc->stream_reset_outstanding)
3476 					asoc->stream_reset_outstanding--;
3477 				if (action == SCTP_STREAM_RESET_PERFORMED) {
3478 					/* Put the new streams into effect */
3479 					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
3480 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
3481 					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
3482 				} else {
3483 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
3484 					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
3485 				}
3486 			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3487 				/**
3488 				 * a) Adopt the new in tsn.
3489 				 * b) reset the map
3490 				 * c) Adopt the new out-tsn
3491 				 */
3492 				struct sctp_stream_reset_response_tsn *resp;
3493 				struct sctp_forward_tsn_chunk fwdtsn;
3494 				int abort_flag = 0;
3495 
3496 				if (respin == NULL) {
3497 					/* huh ? */
3498 					return (0);
3499 				}
3500 				if (action == SCTP_STREAM_RESET_PERFORMED) {
3501 					resp = (struct sctp_stream_reset_response_tsn *)respin;
3502 					asoc->stream_reset_outstanding--;
3503 					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3504 					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3505 					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3506 					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3507 					if (abort_flag) {
3508 						return (1);
3509 					}
3510 					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3511 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3512 						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3513 					}
3514 					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3515 					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3516 					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3517 
3518 					/*
3519 					 * EY 05/13/08 - nr_sack: keep the
3520 					 * nr_mapping array consistent
3521 					 * with mapping_array
3522 					 */
3523 					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
3524 						stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3525 						stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
3526 						memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
3527 					}
3528 					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3529 					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3530 
3531 					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3532 					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3533 
3534 				}
3535 			}
3536 			/* get rid of the request and get the request flags */
3537 			if (asoc->stream_reset_outstanding == 0) {
3538 				sctp_clean_up_stream_reset(stcb);
3539 			}
3540 		}
3541 	}
3542 	return (0);
3543 }
3544 
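/*
 * Note on the str_reset_seq_in checks used by the request handlers below:
 * a request matching the expected sequence number is processed and the
 * sequence advanced, while a request that is one or two behind simply has
 * the cached result echoed back, since our earlier response was lost.
 */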
3545 static void
3546 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3547     struct sctp_tmit_chunk *chk,
3548     struct sctp_stream_reset_in_request *req, int trunc)
3549 {
3550 	uint32_t seq;
3551 	int len, i;
3552 	int number_entries;
3553 	uint16_t temp;
3554 
3555 	/*
3556 	 * peer wants me to send a str-reset to him for my outgoing seq's if
3557 	 * seq_in is right.
3558 	 */
3559 	struct sctp_association *asoc = &stcb->asoc;
3560 
3561 	seq = ntohl(req->request_seq);
3562 	if (asoc->str_reset_seq_in == seq) {
3563 		if (trunc) {
3564 			/* Can't do it, since they exceeded our buffer size */
3565 			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3566 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3567 			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3568 		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3569 			len = ntohs(req->ph.param_length);
3570 			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3571 			for (i = 0; i < number_entries; i++) {
3572 				temp = ntohs(req->list_of_streams[i]);
3573 				req->list_of_streams[i] = temp;
3574 			}
3575 			/* move the reset action back one */
3576 			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3577 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3578 			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3579 			    asoc->str_reset_seq_out,
3580 			    seq, (asoc->sending_seq - 1));
3581 			asoc->stream_reset_out_is_outstanding = 1;
3582 			asoc->str_reset = chk;
3583 			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3584 			stcb->asoc.stream_reset_outstanding++;
3585 		} else {
3586 			/* Can't do it, since we have sent one out */
3587 			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3588 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
3589 			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3590 		}
3591 		asoc->str_reset_seq_in++;
3592 	} else if (asoc->str_reset_seq_in - 1 == seq) {
3593 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3594 	} else if (asoc->str_reset_seq_in - 2 == seq) {
3595 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3596 	} else {
3597 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3598 	}
3599 }
3600 
3601 static int
3602 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
3603     struct sctp_tmit_chunk *chk,
3604     struct sctp_stream_reset_tsn_request *req)
3605 {
3606 	/* reset all in and out and update the tsn */
3607 	/*
3608 	 * A) Reset my str-seq's on in and out. B) Select a receive next,
3609 	 * and set cum-ack to it; also process this selected number as a
3610 	 * fwd-tsn. C) Set in the response my next sending seq.
3611 	 */
3612 	struct sctp_forward_tsn_chunk fwdtsn;
3613 	struct sctp_association *asoc = &stcb->asoc;
3614 	int abort_flag = 0;
3615 	uint32_t seq;
3616 
3617 	seq = ntohl(req->request_seq);
3618 	if (asoc->str_reset_seq_in == seq) {
3619 		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3620 		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3621 		fwdtsn.ch.chunk_flags = 0;
3622 		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
3623 		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3624 		if (abort_flag) {
3625 			return (1);
3626 		}
3627 		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
3628 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3629 			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3630 		}
3631 		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3632 		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
3633 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3634 		/*
3635 		 * EY 05/13/08 - nr_sack: keep the nr_mapping array consistent
3636 		 * with the mapping array
3637 		 */
3638 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
3639 			stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3640 			stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
3641 			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
3642 		}
3643 		atomic_add_int(&stcb->asoc.sending_seq, 1);
3644 		/* save off historical data for retrans */
3645 		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
3646 		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
3647 		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
3648 		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
3649 
3650 		sctp_add_stream_reset_result_tsn(chk,
3651 		    ntohl(req->request_seq),
3652 		    SCTP_STREAM_RESET_PERFORMED,
3653 		    stcb->asoc.sending_seq,
3654 		    stcb->asoc.mapping_array_base_tsn);
3655 		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3656 		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3657 		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3658 		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3659 
3660 		asoc->str_reset_seq_in++;
3661 	} else if (asoc->str_reset_seq_in - 1 == seq) {
3662 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3663 		    stcb->asoc.last_sending_seq[0],
3664 		    stcb->asoc.last_base_tsnsent[0]
3665 		    );
3666 	} else if (asoc->str_reset_seq_in - 2 == seq) {
3667 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
3668 		    stcb->asoc.last_sending_seq[1],
3669 		    stcb->asoc.last_base_tsnsent[1]
3670 		    );
3671 	} else {
3672 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3673 	}
3674 	return (0);
3675 }
3676 
3677 static void
3678 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3679     struct sctp_tmit_chunk *chk,
3680     struct sctp_stream_reset_out_request *req, int trunc)
3681 {
3682 	uint32_t seq, tsn;
3683 	int number_entries, len;
3684 	struct sctp_association *asoc = &stcb->asoc;
3685 
3686 	seq = ntohl(req->request_seq);
3687 
3688 	/* now if it's not a duplicate we process it */
3689 	if (asoc->str_reset_seq_in == seq) {
3690 		len = ntohs(req->ph.param_length);
3691 		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3692 		/*
3693 		 * The sender is resetting, handle the list issue: we must
3694 		 * a) verify that we can do the reset; if so, no problem.
3695 		 * b) If we can't do the reset now, we must copy the request.
3696 		 * c) Queue it, and set up the inbound data processing to
3697 		 * trigger it off when needed and dequeue all the queued data.
3698 		 */
3699 		tsn = ntohl(req->send_reset_at_tsn);
3700 
3701 		/* move the reset action back one */
3702 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3703 		if (trunc) {
3704 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3705 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3706 		} else if ((tsn == asoc->cumulative_tsn) ||
3707 		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3708 			/* we can do it now */
3709 			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3710 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3711 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3712 		} else {
3713 			/*
3714 			 * we must queue it up and thus wait for the TSN's
3715 			 * to arrive that are at or before tsn
3716 			 */
3717 			struct sctp_stream_reset_list *liste;
3718 			int siz;
3719 
3720 			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3721 			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3722 			    siz, SCTP_M_STRESET);
3723 			if (liste == NULL) {
3724 				/* gak out of memory */
3725 				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3726 				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3727 				return;
3728 			}
3729 			liste->tsn = tsn;
3730 			liste->number_entries = number_entries;
3731 			memcpy(&liste->req, req,
3732 			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3733 			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3734 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3735 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3736 		}
3737 		asoc->str_reset_seq_in++;
3738 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3739 		/*
3740 		 * one seq back, just echo back last action since my
3741 		 * response was lost.
3742 		 */
3743 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3744 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3745 		/*
3746 		 * two seq back, just echo back last action since my
3747 		 * response was lost.
3748 		 */
3749 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3750 	} else {
3751 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3752 	}
3753 }
3754 
3755 static void
3756 sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3757     struct sctp_stream_reset_add_strm *str_add)
3758 {
3759 	/*
3760 	 * Peer is requesting to add more streams. If it's within our
3761 	 * max-streams we will allow it.
3762 	 */
3763 	uint16_t num_stream, i;
3764 	uint32_t seq;
3765 	struct sctp_association *asoc = &stcb->asoc;
3766 	struct sctp_queued_to_read *ctl;
3767 
3768 	/* Get the number. */
3769 	seq = ntohl(str_add->request_seq);
3770 	num_stream = ntohs(str_add->number_of_streams);
3771 	/* Now what would be the new total? */
3772 	if (asoc->str_reset_seq_in == seq) {
3773 		num_stream += stcb->asoc.streamincnt;
3774 		if (num_stream > stcb->asoc.max_inbound_streams) {
3775 			/* We must reject it, they ask for too many */
3776 	denied:
3777 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3778 			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3779 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3780 		} else {
3781 			/* Ok, we can do that :-) */
3782 			struct sctp_stream_in *oldstrm;
3783 
3784 			/* save off the old */
3785 			oldstrm = stcb->asoc.strmin;
3786 			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3787 			    (num_stream * sizeof(struct sctp_stream_in)),
3788 			    SCTP_M_STRMI);
3789 			if (stcb->asoc.strmin == NULL) {
3790 				stcb->asoc.strmin = oldstrm;
3791 				goto denied;
3792 			}
3793 			/* copy off the old data */
3794 			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3795 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3796 				stcb->asoc.strmin[i].stream_no = i;
3797 				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
3798 				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3799 				/* now anything on those queues? */
3800 				while (TAILQ_EMPTY(&oldstrm[i].inqueue) == 0) {
3801 					ctl = TAILQ_FIRST(&oldstrm[i].inqueue);
3802 					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
3803 					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
3804 				}
3805 			}
3806 			/* Init the new streams */
3807 			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3808 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3809 				stcb->asoc.strmin[i].stream_no = i;
3810 				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3811 				stcb->asoc.strmin[i].delivery_started = 0;
3812 			}
3813 			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3814 			/* update the size */
3815 			stcb->asoc.streamincnt = num_stream;
3816 			/* Send the ack */
3817 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3818 			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3819 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3820 			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
3821 			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
3822 		}
3823 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3824 		/*
3825 		 * one seq back, just echo back last action since my
3826 		 * response was lost.
3827 		 */
3828 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3829 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3830 		/*
3831 		 * two seq back, just echo back last action since my
3832 		 * response was lost.
3833 		 */
3834 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3835 	} else {
3836 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3837 
3838 	}
3839 }
3840 
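/*
 * Top-level STREAM-RESET chunk handler: walk every parameter in the chunk,
 * dispatch to the per-type request/response handlers above, and build a
 * single response chunk that is queued on the control send queue.
 */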
3841 #ifdef __GNUC__
3842 __attribute__((noinline))
3843 #endif
3844 	static int
3845 	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3846         struct sctp_stream_reset_out_req *sr_req)
3847 {
3848 	int chk_length, param_len, ptype;
3849 	struct sctp_paramhdr pstore;
3850 	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3851 
3852 	uint32_t seq;
3853 	int num_req = 0;
3854 	int trunc = 0;
3855 	struct sctp_tmit_chunk *chk;
3856 	struct sctp_chunkhdr *ch;
3857 	struct sctp_paramhdr *ph;
3858 	int ret_code = 0;
3859 	int num_param = 0;
3860 
3861 	/* now it may be a reset or a reset-response */
3862 	chk_length = ntohs(sr_req->ch.chunk_length);
3863 
3864 	/* setup for adding the response */
3865 	sctp_alloc_a_chunk(stcb, chk);
3866 	if (chk == NULL) {
3867 		return (ret_code);
3868 	}
3869 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3870 	chk->rec.chunk_id.can_take_data = 0;
3871 	chk->asoc = &stcb->asoc;
3872 	chk->no_fr_allowed = 0;
3873 	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3874 	chk->book_size_scale = 0;
3875 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3876 	if (chk->data == NULL) {
3877 strres_nochunk:
3878 		if (chk->data) {
3879 			sctp_m_freem(chk->data);
3880 			chk->data = NULL;
3881 		}
3882 		sctp_free_a_chunk(stcb, chk);
3883 		return (ret_code);
3884 	}
3885 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3886 
3887 	/* setup chunk parameters */
3888 	chk->sent = SCTP_DATAGRAM_UNSENT;
3889 	chk->snd_count = 0;
3890 	chk->whoTo = stcb->asoc.primary_destination;
3891 	atomic_add_int(&chk->whoTo->ref_count, 1);
3892 
3893 	ch = mtod(chk->data, struct sctp_chunkhdr *);
3894 	ch->chunk_type = SCTP_STREAM_RESET;
3895 	ch->chunk_flags = 0;
3896 	ch->chunk_length = htons(chk->send_size);
3897 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3898 	offset += sizeof(struct sctp_chunkhdr);
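	/*
	 * Walk the parameters. Each one is copied into a bounded local
	 * buffer (cstore); a parameter larger than the buffer is flagged
	 * as truncated so the handlers can refuse requests they cannot
	 * see in full.
	 */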
3899 	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3900 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3901 		if (ph == NULL)
3902 			break;
3903 		param_len = ntohs(ph->param_length);
3904 		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3905 			/* bad param */
3906 			break;
3907 		}
3908 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3909 		    (uint8_t *) & cstore);
3910 		ptype = ntohs(ph->param_type);
3911 		num_param++;
3912 		if (param_len > (int)sizeof(cstore)) {
3913 			trunc = 1;
3914 		} else {
3915 			trunc = 0;
3916 		}
3917 
3918 		if (num_param > SCTP_MAX_RESET_PARAMS) {
3919 			/* hit the max number of parameters already, sorry */
3920 			break;
3921 		}
3922 		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3923 			struct sctp_stream_reset_out_request *req_out;
3924 
3925 			req_out = (struct sctp_stream_reset_out_request *)ph;
3926 			num_req++;
3927 			if (stcb->asoc.stream_reset_outstanding) {
3928 				seq = ntohl(req_out->response_seq);
3929 				if (seq == stcb->asoc.str_reset_seq_out) {
3930 					/* implicit ack */
3931 					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3932 				}
3933 			}
3934 			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3935 		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
3936 			struct sctp_stream_reset_add_strm *str_add;
3937 
3938 			str_add = (struct sctp_stream_reset_add_strm *)ph;
3939 			num_req++;
3940 			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
3941 		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3942 			struct sctp_stream_reset_in_request *req_in;
3943 
3944 			num_req++;
3945 
3946 			req_in = (struct sctp_stream_reset_in_request *)ph;
3947 
3948 			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3949 		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3950 			struct sctp_stream_reset_tsn_request *req_tsn;
3951 
3952 			num_req++;
3953 			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3954 
3955 			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3956 				ret_code = 1;
3957 				goto strres_nochunk;
3958 			}
3959 			/* no more */
3960 			break;
3961 		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3962 			struct sctp_stream_reset_response *resp;
3963 			uint32_t result;
3964 
3965 			resp = (struct sctp_stream_reset_response *)ph;
3966 			seq = ntohl(resp->response_seq);
3967 			result = ntohl(resp->result);
3968 			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3969 				ret_code = 1;
3970 				goto strres_nochunk;
3971 			}
3972 		} else {
3973 			break;
3974 		}
3975 		offset += SCTP_SIZE32(param_len);
3976 		chk_length -= SCTP_SIZE32(param_len);
3977 	}
3978 	if (num_req == 0) {
3979 		/* we have no response, free the stuff */
3980 		goto strres_nochunk;
3981 	}
3982 	/* ok we have a chunk to link in */
3983 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3984 	    chk,
3985 	    sctp_next);
3986 	stcb->asoc.ctrl_queue_cnt++;
3987 	return (ret_code);
3988 }
3989 
3990 /*
3991  * Handle a router's or endpoint's report of a packet loss. There are two
3992  * ways to handle this: either we get the whole packet and must dissect it
3993  * ourselves (possibly with truncation and/or corruption), or it is a summary
3994  * from a middle box that did the dissecting for us.
3995  */
3996 static void
3997 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3998     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3999 {
4000 	uint32_t bottle_bw, on_queue;
4001 	uint16_t trunc_len;
4002 	unsigned int chlen;
4003 	unsigned int at;
4004 	struct sctp_chunk_desc desc;
4005 	struct sctp_chunkhdr *ch;
4006 
4007 	chlen = ntohs(cp->ch.chunk_length);
4008 	chlen -= sizeof(struct sctp_pktdrop_chunk);
4009 	/* XXX possible chlen underflow */
4010 	if (chlen == 0) {
4011 		ch = NULL;
4012 		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4013 			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4014 	} else {
4015 		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4016 		chlen -= sizeof(struct sctphdr);
4017 		/* XXX possible chlen underflow */
4018 		memset(&desc, 0, sizeof(desc));
4019 	}
4020 	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4021 	if (trunc_len > limit) {
4022 		trunc_len = limit;
4023 	}
4024 	/* now the chunks themselves */
4025 	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4026 		desc.chunk_type = ch->chunk_type;
4027 		/* get amount we need to move */
4028 		at = ntohs(ch->chunk_length);
4029 		if (at < sizeof(struct sctp_chunkhdr)) {
4030 			/* corrupt chunk, maybe at the end? */
4031 			SCTP_STAT_INCR(sctps_pdrpcrupt);
4032 			break;
4033 		}
4034 		if (trunc_len == 0) {
4035 			/* we are supposed to have all of it */
4036 			if (at > chlen) {
4037 				/* corrupt, skip it */
4038 				SCTP_STAT_INCR(sctps_pdrpcrupt);
4039 				break;
4040 			}
4041 		} else {
4042 			/* is there enough of it left? */
4043 			if (desc.chunk_type == SCTP_DATA) {
4044 				if (chlen < (sizeof(struct sctp_data_chunk) +
4045 				    sizeof(desc.data_bytes))) {
4046 					break;
4047 				}
4048 			} else {
4049 				if (chlen < sizeof(struct sctp_chunkhdr)) {
4050 					break;
4051 				}
4052 			}
4053 		}
4054 		if (desc.chunk_type == SCTP_DATA) {
4055 			/* can we get out the tsn? */
4056 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4057 				SCTP_STAT_INCR(sctps_pdrpmbda);
4058 
4059 			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4060 				/* yep */
4061 				struct sctp_data_chunk *dcp;
4062 				uint8_t *ddp;
4063 				unsigned int iii;
4064 
4065 				dcp = (struct sctp_data_chunk *)ch;
4066 				ddp = (uint8_t *) (dcp + 1);
4067 				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4068 					desc.data_bytes[iii] = ddp[iii];
4069 				}
4070 				desc.tsn_ifany = dcp->dp.tsn;
4071 			} else {
4072 				/* nope we are done. */
4073 				SCTP_STAT_INCR(sctps_pdrpnedat);
4074 				break;
4075 			}
4076 		} else {
4077 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4078 				SCTP_STAT_INCR(sctps_pdrpmbct);
4079 		}
4080 
4081 		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4082 			SCTP_STAT_INCR(sctps_pdrppdbrk);
4083 			break;
4084 		}
4085 		if (SCTP_SIZE32(at) > chlen) {
4086 			break;
4087 		}
4088 		chlen -= SCTP_SIZE32(at);
4089 		if (chlen < sizeof(struct sctp_chunkhdr)) {
4090 			/* done, none left */
4091 			break;
4092 		}
4093 		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4094 	}
4095 	/* Now update any rwnd --- possibly */
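	/*
	 * A report coming from the peer itself carries its bottleneck
	 * bandwidth and the number of bytes it currently has queued; the
	 * difference serves as a rough estimate of the receive window
	 * still open to us.
	 */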
4096 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4097 		/* From a peer, we get a rwnd report */
4098 		uint32_t a_rwnd;
4099 
4100 		SCTP_STAT_INCR(sctps_pdrpfehos);
4101 
4102 		bottle_bw = ntohl(cp->bottle_bw);
4103 		on_queue = ntohl(cp->current_onq);
4104 		if (bottle_bw && on_queue) {
4105 			/* a rwnd report is in here */
4106 			if (bottle_bw > on_queue)
4107 				a_rwnd = bottle_bw - on_queue;
4108 			else
4109 				a_rwnd = 0;
4110 
4111 			if (a_rwnd == 0)
4112 				stcb->asoc.peers_rwnd = 0;
4113 			else {
4114 				if (a_rwnd > stcb->asoc.total_flight) {
4115 					stcb->asoc.peers_rwnd =
4116 					    a_rwnd - stcb->asoc.total_flight;
4117 				} else {
4118 					stcb->asoc.peers_rwnd = 0;
4119 				}
4120 				if (stcb->asoc.peers_rwnd <
4121 				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4122 					/* SWS sender side engages */
4123 					stcb->asoc.peers_rwnd = 0;
4124 				}
4125 			}
4126 		}
4127 	} else {
4128 		SCTP_STAT_INCR(sctps_pdrpfmbox);
4129 	}
4130 
4131 	/* now middle boxes in sat networks get a cwnd bump */
4132 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4133 	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4134 	    (stcb->asoc.sat_network)) {
4135 		/*
4136 		 * This is debatable, but for sat networks it makes sense.
4137 		 * Note that if a T3 timer has gone off, we will prohibit any
4138 		 * changes to cwnd until we exit the t3 loss recovery.
4139 		 */
4140 		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4141 		    net, cp, &bottle_bw, &on_queue);
4142 	}
4143 }
4144 
4145 /*
4146  * Handles all control chunks in a packet.
4147  * Inputs:  m - mbuf chain, assumed to still contain the IP/SCTP header;
4148  *          stcb - the tcb found for this packet; offset - offset into the
4149  *          mbuf chain to the first chunkhdr; length - complete packet length.
4150  * Outputs: length - modified to remaining length after control processing;
4151  *          netp - modified to new sctp_nets after cookie-echo processing.
4152  * Returns NULL to discard the packet (no asoc, bad packet, ...), else the tcb.
4153  */
4154 #ifdef __GNUC__
4155 __attribute__((noinline))
4156 #endif
4157 	static struct sctp_tcb *
4158 	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4159              struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4160              struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4161              uint32_t vrf_id, uint16_t port)
4162 {
4163 	struct sctp_association *asoc;
4164 	uint32_t vtag_in;
4165 	int num_chunks = 0;	/* number of control chunks processed */
4166 	uint32_t chk_length;
4167 	int ret;
4168 	int abort_no_unlock = 0;
4169 
4170 	/*
4171 	 * How big should this be, and should it be alloc'd? Let's try the
4172 	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4173 	 * until we get into jumbo grams and such.
4174 	 */
4175 	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4176 	struct sctp_tcb *locked_tcb = stcb;
4177 	int got_auth = 0;
4178 	uint32_t auth_offset = 0, auth_len = 0;
4179 	int auth_skipped = 0;
4180 	int asconf_cnt = 0;
4181 
4182 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4183 	struct socket *so;
4184 
4185 #endif
4186 
4187 	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4188 	    iphlen, *offset, length, stcb);
4189 
4190 	/* validate chunk header length... */
4191 	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4192 		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4193 		    ntohs(ch->chunk_length));
4194 		if (locked_tcb) {
4195 			SCTP_TCB_UNLOCK(locked_tcb);
4196 		}
4197 		return (NULL);
4198 	}
4199 	/*
4200 	 * validate the verification tag
4201 	 */
4202 	vtag_in = ntohl(sh->v_tag);
4203 
4204 	if (locked_tcb) {
4205 		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4206 	}
4207 	if (ch->chunk_type == SCTP_INITIATION) {
4208 		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4209 		    ntohs(ch->chunk_length), vtag_in);
4210 		if (vtag_in != 0) {
4211 			/* protocol error - silently discard... */
4212 			SCTP_STAT_INCR(sctps_badvtag);
4213 			if (locked_tcb) {
4214 				SCTP_TCB_UNLOCK(locked_tcb);
4215 			}
4216 			return (NULL);
4217 		}
4218 	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4219 		/*
4220 		 * If there is no stcb, skip the AUTH chunk and process it
4221 		 * later, after a stcb is found (to validate that the lookup
4222 		 * was valid).
4223 		 */
4224 		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4225 		    (stcb == NULL) &&
4226 		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4227 			/* save this chunk for later processing */
4228 			auth_skipped = 1;
4229 			auth_offset = *offset;
4230 			auth_len = ntohs(ch->chunk_length);
4231 
4232 			/* (temporarily) move past this chunk */
4233 			*offset += SCTP_SIZE32(auth_len);
4234 			if (*offset >= length) {
4235 				/* no more data left in the mbuf chain */
4236 				*offset = length;
4237 				if (locked_tcb) {
4238 					SCTP_TCB_UNLOCK(locked_tcb);
4239 				}
4240 				return (NULL);
4241 			}
4242 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4243 			    sizeof(struct sctp_chunkhdr), chunk_buf);
4244 		}
4245 		if (ch == NULL) {
4246 			/* Help */
4247 			*offset = length;
4248 			if (locked_tcb) {
4249 				SCTP_TCB_UNLOCK(locked_tcb);
4250 			}
4251 			return (NULL);
4252 		}
4253 		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4254 			goto process_control_chunks;
4255 		}
4256 		/*
4257 		 * first check if it's an ASCONF with an unknown src addr;
4258 		 * we need to look inside to find the association
4259 		 */
4260 		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4261 			struct sctp_chunkhdr *asconf_ch = ch;
4262 			uint32_t asconf_offset = 0, asconf_len = 0;
4263 
4264 			/* inp's refcount may be reduced */
4265 			SCTP_INP_INCR_REF(inp);
4266 
4267 			asconf_offset = *offset;
4268 			do {
4269 				asconf_len = ntohs(asconf_ch->chunk_length);
4270 				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4271 					break;
4272 				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4273 				    *offset, sh, &inp, netp, vrf_id);
4274 				if (stcb != NULL)
4275 					break;
4276 				asconf_offset += SCTP_SIZE32(asconf_len);
4277 				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4278 				    sizeof(struct sctp_chunkhdr), chunk_buf);
4279 			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4280 			if (stcb == NULL) {
4281 				/*
4282 				 * reduce inp's refcount if not reduced in
4283 				 * sctp_findassociation_ep_asconf().
4284 				 */
4285 				SCTP_INP_DECR_REF(inp);
4286 			} else {
4287 				locked_tcb = stcb;
4288 			}
4289 
4290 			/* now go back and verify any auth chunk to be sure */
4291 			if (auth_skipped && (stcb != NULL)) {
4292 				struct sctp_auth_chunk *auth;
4293 
4294 				auth = (struct sctp_auth_chunk *)
4295 				    sctp_m_getptr(m, auth_offset,
4296 				    auth_len, chunk_buf);
4297 				got_auth = 1;
4298 				auth_skipped = 0;
4299 				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4300 				    auth_offset)) {
4301 					/* auth HMAC failed so dump it */
4302 					*offset = length;
4303 					if (locked_tcb) {
4304 						SCTP_TCB_UNLOCK(locked_tcb);
4305 					}
4306 					return (NULL);
4307 				} else {
4308 					/* remaining chunks are HMAC checked */
4309 					stcb->asoc.authenticated = 1;
4310 				}
4311 			}
4312 		}
4313 		if (stcb == NULL) {
4314 			/* no association, so it's out of the blue... */
4315 			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4316 			    vrf_id, port);
4317 			*offset = length;
4318 			if (locked_tcb) {
4319 				SCTP_TCB_UNLOCK(locked_tcb);
4320 			}
4321 			return (NULL);
4322 		}
4323 		asoc = &stcb->asoc;
4324 		/* ABORT and SHUTDOWN can use either v_tag... */
4325 		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4326 		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4327 		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4328 			if ((vtag_in == asoc->my_vtag) ||
4329 			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4330 			    (vtag_in == asoc->peer_vtag))) {
4331 				/* this is valid */
4332 			} else {
4333 				/* drop this packet... */
4334 				SCTP_STAT_INCR(sctps_badvtag);
4335 				if (locked_tcb) {
4336 					SCTP_TCB_UNLOCK(locked_tcb);
4337 				}
4338 				return (NULL);
4339 			}
4340 		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4341 			if (vtag_in != asoc->my_vtag) {
4342 				/*
4343 				 * this could be a stale SHUTDOWN-ACK or the
4344 				 * peer never got the SHUTDOWN-COMPLETE and
4345 				 * is still hung; we have started a new asoc
4346 				 * but it won't complete until the shutdown
4347 				 * is completed
4348 				 */
4349 				if (locked_tcb) {
4350 					SCTP_TCB_UNLOCK(locked_tcb);
4351 				}
4352 				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4353 				    NULL, vrf_id, port);
4354 				return (NULL);
4355 			}
4356 		} else {
4357 			/* for all other chunks, vtag must match */
4358 			if (vtag_in != asoc->my_vtag) {
4359 				/* invalid vtag... */
4360 				SCTPDBG(SCTP_DEBUG_INPUT3,
4361 				    "invalid vtag: %xh, expect %xh\n",
4362 				    vtag_in, asoc->my_vtag);
4363 				SCTP_STAT_INCR(sctps_badvtag);
4364 				if (locked_tcb) {
4365 					SCTP_TCB_UNLOCK(locked_tcb);
4366 				}
4367 				*offset = length;
4368 				return (NULL);
4369 			}
4370 		}
4371 	}			/* end if !SCTP_COOKIE_ECHO */
4372 	/*
4373 	 * process all control chunks...
4374 	 */
4375 	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4376 	/* EY */
4377 	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4378 	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4379 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4380 		/* implied cookie-ack.. we must have lost the ack */
4381 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4382 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4383 			    stcb->asoc.overall_error_count,
4384 			    0,
4385 			    SCTP_FROM_SCTP_INPUT,
4386 			    __LINE__);
4387 		}
4388 		stcb->asoc.overall_error_count = 0;
4389 		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4390 		    *netp);
4391 	}
4392 process_control_chunks:
4393 	while (IS_SCTP_CONTROL(ch)) {
4394 		/* validate chunk length */
4395 		chk_length = ntohs(ch->chunk_length);
4396 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4397 		    ch->chunk_type, chk_length);
4398 		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4399 		if (chk_length < sizeof(*ch) ||
4400 		    (*offset + (int)chk_length) > length) {
4401 			*offset = length;
4402 			if (locked_tcb) {
4403 				SCTP_TCB_UNLOCK(locked_tcb);
4404 			}
4405 			return (NULL);
4406 		}
4407 		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4408 		/*
4409 		 * INIT-ACK only gets the init ack "header" portion,
4410 		 * because we don't have to process the peer's COOKIE. All
4411 		 * others get a complete chunk.
4412 		 */
4413 		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4414 		    (ch->chunk_type == SCTP_INITIATION)) {
4415 			/* get an init-ack chunk */
4416 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4417 			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4418 			if (ch == NULL) {
4419 				*offset = length;
4420 				if (locked_tcb) {
4421 					SCTP_TCB_UNLOCK(locked_tcb);
4422 				}
4423 				return (NULL);
4424 			}
4425 		} else {
4426 			/* For cookies and all other chunks. */
4427 			if (chk_length > sizeof(chunk_buf)) {
4428 				/*
4429 				 * use just the size of the chunk buffer so
4430 				 * the front part of our chunks fit in
4431 				 * contiguous space up to the chunk buffer
4432 				 * size (508 bytes). For chunks that need to
4433 				 * get more than that they must use the
4434 				 * sctp_m_getptr() function or other means
4435 				 * (e.g. know how to parse mbuf chains).
4436 				 * Cookies do this already.
4437 				 */
4438 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4439 				    (sizeof(chunk_buf) - 4),
4440 				    chunk_buf);
4441 				if (ch == NULL) {
4442 					*offset = length;
4443 					if (locked_tcb) {
4444 						SCTP_TCB_UNLOCK(locked_tcb);
4445 					}
4446 					return (NULL);
4447 				}
4448 			} else {
4449 				/* We can fit it all */
4450 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4451 				    chk_length, chunk_buf);
4452 				if (ch == NULL) {
4453 					SCTP_PRINTF("sctp_process_control: Can't get all the data...\n");
4454 					*offset = length;
4455 					if (locked_tcb) {
4456 						SCTP_TCB_UNLOCK(locked_tcb);
4457 					}
4458 					return (NULL);
4459 				}
4460 			}
4461 		}
4462 		num_chunks++;
4463 		/* Save off the last place we got a control from */
4464 		if (stcb != NULL) {
4465 			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4466 				/*
4467 				 * allow last_control to be NULL if
4468 				 * ASCONF... ASCONF processing will find the
4469 				 * right net later
4470 				 */
4471 				if ((netp != NULL) && (*netp != NULL))
4472 					stcb->asoc.last_control_chunk_from = *netp;
4473 			}
4474 		}
4475 #ifdef SCTP_AUDITING_ENABLED
4476 		sctp_audit_log(0xB0, ch->chunk_type);
4477 #endif
4478 
4479 		/* check to see if this chunk required auth, but isn't */
4480 		if ((stcb != NULL) &&
4481 		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4482 		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4483 		    !stcb->asoc.authenticated) {
4484 			/* "silently" ignore */
4485 			SCTP_STAT_INCR(sctps_recvauthmissing);
4486 			goto next_chunk;
4487 		}
4488 		switch (ch->chunk_type) {
4489 		case SCTP_INITIATION:
4490 			/* must be first and only chunk */
4491 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4492 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4493 				/* We are not interested anymore? */
4494 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4495 					/*
4496 					 * collision case where we are
4497 					 * sending to them too
4498 					 */
4499 					;
4500 				} else {
4501 					if (locked_tcb) {
4502 						SCTP_TCB_UNLOCK(locked_tcb);
4503 					}
4504 					*offset = length;
4505 					return (NULL);
4506 				}
4507 			}
4508 			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4509 			    (num_chunks > 1) ||
4510 			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4511 				*offset = length;
4512 				if (locked_tcb) {
4513 					SCTP_TCB_UNLOCK(locked_tcb);
4514 				}
4515 				return (NULL);
4516 			}
4517 			if ((stcb != NULL) &&
4518 			    (SCTP_GET_STATE(&stcb->asoc) ==
4519 			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4520 				sctp_send_shutdown_ack(stcb,
4521 				    stcb->asoc.primary_destination);
4522 				*offset = length;
4523 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4524 				if (locked_tcb) {
4525 					SCTP_TCB_UNLOCK(locked_tcb);
4526 				}
4527 				return (NULL);
4528 			}
4529 			if (netp) {
4530 				sctp_handle_init(m, iphlen, *offset, sh,
4531 				    (struct sctp_init_chunk *)ch, inp,
4532 				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4533 			}
4534 			if (abort_no_unlock)
4535 				return (NULL);
4536 
4537 			*offset = length;
4538 			if (locked_tcb) {
4539 				SCTP_TCB_UNLOCK(locked_tcb);
4540 			}
4541 			return (NULL);
4542 			break;
4543 		case SCTP_PAD_CHUNK:
4544 			break;
4545 		case SCTP_INITIATION_ACK:
4546 			/* must be first and only chunk */
4547 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4548 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4549 				/* We are not interested anymore */
4550 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4551 					;
4552 				} else {
4553 					if (locked_tcb) {
4554 						SCTP_TCB_UNLOCK(locked_tcb);
4555 					}
4556 					*offset = length;
4557 					if (stcb) {
4558 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4559 						so = SCTP_INP_SO(inp);
4560 						atomic_add_int(&stcb->asoc.refcnt, 1);
4561 						SCTP_TCB_UNLOCK(stcb);
4562 						SCTP_SOCKET_LOCK(so, 1);
4563 						SCTP_TCB_LOCK(stcb);
4564 						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4565 #endif
4566 						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4567 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4568 						SCTP_SOCKET_UNLOCK(so, 1);
4569 #endif
4570 					}
4571 					return (NULL);
4572 				}
4573 			}
4574 			if ((num_chunks > 1) ||
4575 			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4576 				*offset = length;
4577 				if (locked_tcb) {
4578 					SCTP_TCB_UNLOCK(locked_tcb);
4579 				}
4580 				return (NULL);
4581 			}
4582 			if ((netp) && (*netp)) {
4583 				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4584 				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4585 			} else {
4586 				ret = -1;
4587 			}
4588 			/*
4589 			 * Special case, I must call the output routine to
4590 			 * get the cookie echoed
4591 			 */
4592 			if (abort_no_unlock)
4593 				return (NULL);
4594 
4595 			if ((stcb) && ret == 0)
4596 				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4597 			*offset = length;
4598 			if (locked_tcb) {
4599 				SCTP_TCB_UNLOCK(locked_tcb);
4600 			}
4601 			return (NULL);
4602 			break;
4603 		case SCTP_SELECTIVE_ACK:
4604 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4605 			SCTP_STAT_INCR(sctps_recvsacks);
4606 			{
4607 				struct sctp_sack_chunk *sack;
4608 				int abort_now = 0;
4609 				uint32_t a_rwnd, cum_ack;
4610 				uint16_t num_seg, num_dup;
4611 				uint8_t flags;
4612 				int offset_seg, offset_dup;
4613 				int nonce_sum_flag;
4614 
4615 				if (stcb == NULL) {
4616 					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4617 					break;
4618 				}
4619 				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4620 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4621 					break;
4622 				}
4623 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4624 					/*-
4625 					 * If we have sent a shutdown-ack, we will pay no
4626 					 * attention to a sack sent in to us since
4627 					 * we don't care anymore.
4628 					 */
4629 					break;
4630 				}
4631 				sack = (struct sctp_sack_chunk *)ch;
4632 				flags = ch->chunk_flags;
4633 				nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4634 				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4635 				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4636 				num_dup = ntohs(sack->sack.num_dup_tsns);
4637 				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4638 				if (sizeof(struct sctp_sack_chunk) +
4639 				    num_seg * sizeof(struct sctp_gap_ack_block) +
4640 				    num_dup * sizeof(uint32_t) != chk_length) {
4641 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4642 					break;
4643 				}
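				/*
				 * Worked example of the length check above (a
				 * sketch, assuming the usual RFC 4960 wire
				 * sizes: 16-byte SACK chunk, 4-byte gap-ack
				 * blocks, 4-byte duplicate TSNs): a SACK
				 * carrying 2 gap-ack blocks and 1 duplicate
				 * TSN must have chk_length == 16 + 2*4 + 1*4
				 * == 28; anything else is dropped here.
				 */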
4644 				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4645 				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4646 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4647 				    cum_ack, num_seg, a_rwnd);
4648 				stcb->asoc.seen_a_sack_this_pkt = 1;
4649 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4650 				    (num_seg == 0) &&
4651 				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4652 				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4653 				    (stcb->asoc.saw_sack_with_frags == 0) &&
4654 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4655 				    ) {
4656 					/*
4657 					 * We have a SIMPLE sack having no
4658 					 * prior segments and data on sent
					 * queue to be acked. Use the
4660 					 * faster path sack processing. We
4661 					 * also allow window update sacks
4662 					 * with no missing segments to go
4663 					 * this way too.
4664 					 */
4665 					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4666 					    &abort_now);
4667 				} else {
4668 					if (netp && *netp)
4669 						sctp_handle_sack(m, offset_seg, offset_dup,
4670 						    stcb, *netp,
4671 						    num_seg, 0, num_dup, &abort_now, flags,
4672 						    cum_ack, a_rwnd);
4673 				}
4674 				if (abort_now) {
4675 					/* ABORT signal from sack processing */
4676 					*offset = length;
4677 					return (NULL);
4678 				}
4679 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4680 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4681 				    (stcb->asoc.stream_queue_cnt == 0)) {
4682 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4683 				}
4684 			}
4685 			break;
4686 			/*
4687 			 * EY - nr_sack:  If the received chunk is an
4688 			 * nr_sack chunk
4689 			 */
4690 		case SCTP_NR_SELECTIVE_ACK:
4691 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4692 			SCTP_STAT_INCR(sctps_recvsacks);
4693 			{
4694 				struct sctp_nr_sack_chunk *nr_sack;
4695 				int abort_now = 0;
4696 				uint32_t a_rwnd, cum_ack;
4697 				uint16_t num_seg, num_nr_seg, num_dup;
4698 				uint8_t flags;
4699 				int offset_seg, offset_dup;
4700 				int nonce_sum_flag;
4701 
4702 				if (stcb == NULL) {
4703 					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4704 					break;
4705 				}
4706 				/*
4707 				 * EY: NR-SACKs have not been negotiated but
4708 				 * the peer sent one anyway; silently discard
4709 				 * the chunk. (stcb is known non-NULL here.)
4710 				 */
4711 				if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
4712 				    stcb->asoc.peer_supports_nr_sack)) {
4713 					goto unknown_chunk;
4714 				}
4715 				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4716 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4717 					break;
4718 				}
4719 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4720 					/*-
4721 					 * If we have sent a shutdown-ack, we will pay no
4722 					 * attention to a sack sent in to us since
4723 					 * we don't care anymore.
4724 					 */
4725 					break;
4726 				}
4727 				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4728 				flags = ch->chunk_flags;
4729 				nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4730 
4731 				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4732 				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4733 				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4734 				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4735 				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4736 				if (sizeof(struct sctp_nr_sack_chunk) +
4737 				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4738 				    num_dup * sizeof(uint32_t) != chk_length) {
4739 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4740 					break;
4741 				}
4742 				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4743 				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4744 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4745 				    cum_ack, num_seg, a_rwnd);
4746 				stcb->asoc.seen_a_sack_this_pkt = 1;
4747 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4748 				    (num_seg == 0) && (num_nr_seg == 0) &&
4749 				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4750 				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4751 				    (stcb->asoc.saw_sack_with_frags == 0) &&
4752 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4753 					/*
4754 					 * We have a SIMPLE sack having no
4755 					 * prior segments and data on sent
4756 					 * queue to be acked. Use the faster
4757 					 * path sack processing. We also
4758 					 * allow window update sacks with no
4759 					 * missing segments to go this way
4760 					 * too.
4761 					 */
4762 					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4763 					    &abort_now);
4764 				} else {
4765 					if (netp && *netp)
4766 						sctp_handle_sack(m, offset_seg, offset_dup,
4767 						    stcb, *netp,
4768 						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4769 						    cum_ack, a_rwnd);
4770 				}
4771 				if (abort_now) {
4772 					/* ABORT signal from sack processing */
4773 					*offset = length;
4774 					return (NULL);
4775 				}
4776 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4777 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4778 				    (stcb->asoc.stream_queue_cnt == 0)) {
4779 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4780 				}
4781 			}
4782 			break;
4783 
4784 		case SCTP_HEARTBEAT_REQUEST:
4785 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4786 			if ((stcb) && netp && *netp) {
4787 				SCTP_STAT_INCR(sctps_recvheartbeat);
4788 				sctp_send_heartbeat_ack(stcb, m, *offset,
4789 				    chk_length, *netp);
4790 
4791 				/* He's alive so give him credit */
4792 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4793 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4794 					    stcb->asoc.overall_error_count,
4795 					    0,
4796 					    SCTP_FROM_SCTP_INPUT,
4797 					    __LINE__);
4798 				}
4799 				stcb->asoc.overall_error_count = 0;
4800 			}
4801 			break;
4802 		case SCTP_HEARTBEAT_ACK:
4803 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4804 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4805 				/* It's not ours */
4806 				*offset = length;
4807 				if (locked_tcb) {
4808 					SCTP_TCB_UNLOCK(locked_tcb);
4809 				}
4810 				return (NULL);
4811 			}
4812 			/* He's alive so give him credit */
4813 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4814 				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4815 				    stcb->asoc.overall_error_count,
4816 				    0,
4817 				    SCTP_FROM_SCTP_INPUT,
4818 				    __LINE__);
4819 			}
4820 			stcb->asoc.overall_error_count = 0;
4821 			SCTP_STAT_INCR(sctps_recvheartbeatack);
4822 			if (netp && *netp)
4823 				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4824 				    stcb, *netp);
4825 			break;
4826 		case SCTP_ABORT_ASSOCIATION:
4827 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4828 			    stcb);
4829 			if ((stcb) && netp && *netp)
4830 				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4831 				    stcb, *netp);
4832 			*offset = length;
4833 			return (NULL);
4834 			break;
4835 		case SCTP_SHUTDOWN:
4836 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4837 			    stcb);
4838 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4839 				*offset = length;
4840 				if (locked_tcb) {
4841 					SCTP_TCB_UNLOCK(locked_tcb);
4842 				}
4843 				return (NULL);
4844 			}
4845 			if (netp && *netp) {
4846 				int abort_flag = 0;
4847 
4848 				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4849 				    stcb, *netp, &abort_flag);
4850 				if (abort_flag) {
4851 					*offset = length;
4852 					return (NULL);
4853 				}
4854 			}
4855 			break;
4856 		case SCTP_SHUTDOWN_ACK:
4857 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4858 			if ((stcb) && (netp) && (*netp))
4859 				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4860 			*offset = length;
4861 			return (NULL);
4862 			break;
4863 
4864 		case SCTP_OPERATION_ERROR:
4865 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4866 			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4867 
4868 				*offset = length;
4869 				return (NULL);
4870 			}
4871 			break;
4872 		case SCTP_COOKIE_ECHO:
4873 			SCTPDBG(SCTP_DEBUG_INPUT3,
4874 			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4875 			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4876 				;
4877 			} else {
4878 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4879 					/* We are not interested anymore */
4880 					*offset = length;
4881 					return (NULL);
4882 				}
4883 			}
4884 			/*
4885 			 * First are we accepting? We do this again here
4886 			 * since it is possible that a previous endpoint
4887 			 * WAS listening, responded with an INIT-ACK and
4888 			 * then closed. We opened and bound, and are now no
4889 			 * longer listening.
4890 			 */
4891 
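			/*
			 * A rough sketch of the policy below (assuming
			 * so_qlen/so_qlimit carry the usual listen(2)
			 * backlog accounting): if the accept queue is
			 * already full, a 1-to-1 (TCP-style) endpoint may
			 * abort the setup with an out-of-resources cause
			 * when sctp_abort_if_one_2_one_hits_limit is set;
			 * otherwise the COOKIE-ECHO is dropped without a
			 * reply.
			 */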
4892 			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4893 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4894 				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4895 					struct mbuf *oper;
4896 					struct sctp_paramhdr *phdr;
4897 
4898 					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4899 					    0, M_DONTWAIT, 1, MT_DATA);
4900 					if (oper) {
4901 						SCTP_BUF_LEN(oper) =
4902 						    sizeof(struct sctp_paramhdr);
4903 						phdr = mtod(oper,
4904 						    struct sctp_paramhdr *);
4905 						phdr->param_type =
4906 						    htons(SCTP_CAUSE_OUT_OF_RESC);
4907 						phdr->param_length =
4908 						    htons(sizeof(struct sctp_paramhdr));
4909 					}
4910 					sctp_abort_association(inp, stcb, m,
4911 					    iphlen, sh, oper, vrf_id, port);
4912 				}
4913 				*offset = length;
4914 				return (NULL);
4915 			} else {
4916 				struct mbuf *ret_buf;
4917 				struct sctp_inpcb *linp;
4918 
4919 				if (stcb) {
4920 					linp = NULL;
4921 				} else {
4922 					linp = inp;
4923 				}
4924 
4925 				if (linp) {
4926 					SCTP_ASOC_CREATE_LOCK(linp);
4927 				}
4928 				if (netp) {
4929 					ret_buf =
4930 					    sctp_handle_cookie_echo(m, iphlen,
4931 					    *offset, sh,
4932 					    (struct sctp_cookie_echo_chunk *)ch,
4933 					    &inp, &stcb, netp,
4934 					    auth_skipped,
4935 					    auth_offset,
4936 					    auth_len,
4937 					    &locked_tcb,
4938 					    vrf_id,
4939 					    port);
4940 				} else {
4941 					ret_buf = NULL;
4942 				}
4943 				if (linp) {
4944 					SCTP_ASOC_CREATE_UNLOCK(linp);
4945 				}
4946 				if (ret_buf == NULL) {
4947 					if (locked_tcb) {
4948 						SCTP_TCB_UNLOCK(locked_tcb);
4949 					}
4950 					SCTPDBG(SCTP_DEBUG_INPUT3,
4951 					    "GAK, null buffer\n");
4952 					auth_skipped = 0;
4953 					*offset = length;
4954 					return (NULL);
4955 				}
4956 				/* if AUTH skipped, see if it verified... */
4957 				if (auth_skipped) {
4958 					got_auth = 1;
4959 					auth_skipped = 0;
4960 				}
4961 				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4962 					/*
4963 					 * Restart the timer if we have
4964 					 * pending data
4965 					 */
4966 					struct sctp_tmit_chunk *chk;
4967 
4968 					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4969 					if (chk) {
4970 						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4971 						    stcb->sctp_ep, stcb,
4972 						    chk->whoTo);
4973 					}
4974 				}
4975 			}
4976 			break;
4977 		case SCTP_COOKIE_ACK:
4978 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4979 			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4980 				if (locked_tcb) {
4981 					SCTP_TCB_UNLOCK(locked_tcb);
4982 				}
4983 				return (NULL);
4984 			}
4985 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4986 				/* We are not interested anymore */
4987 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4988 					;
4989 				} else if (stcb) {
4990 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4991 					so = SCTP_INP_SO(inp);
4992 					atomic_add_int(&stcb->asoc.refcnt, 1);
4993 					SCTP_TCB_UNLOCK(stcb);
4994 					SCTP_SOCKET_LOCK(so, 1);
4995 					SCTP_TCB_LOCK(stcb);
4996 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4997 #endif
4998 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4999 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5000 					SCTP_SOCKET_UNLOCK(so, 1);
5001 #endif
5002 					*offset = length;
5003 					return (NULL);
5004 				}
5005 			}
5006 			/* He's alive so give him credit */
5007 			if ((stcb) && netp && *netp) {
5008 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5009 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5010 					    stcb->asoc.overall_error_count,
5011 					    0,
5012 					    SCTP_FROM_SCTP_INPUT,
5013 					    __LINE__);
5014 				}
5015 				stcb->asoc.overall_error_count = 0;
5016 				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5017 			}
5018 			break;
5019 		case SCTP_ECN_ECHO:
5020 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5021 			/* He's alive so give him credit */
5022 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5023 				/* It's not ours */
5024 				if (locked_tcb) {
5025 					SCTP_TCB_UNLOCK(locked_tcb);
5026 				}
5027 				*offset = length;
5028 				return (NULL);
5029 			}
5030 			if (stcb) {
5031 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5032 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5033 					    stcb->asoc.overall_error_count,
5034 					    0,
5035 					    SCTP_FROM_SCTP_INPUT,
5036 					    __LINE__);
5037 				}
5038 				stcb->asoc.overall_error_count = 0;
5039 				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5040 				    stcb);
5041 			}
5042 			break;
5043 		case SCTP_ECN_CWR:
5044 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5045 			/* He's alive so give him credit */
5046 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5047 				/* It's not ours */
5048 				if (locked_tcb) {
5049 					SCTP_TCB_UNLOCK(locked_tcb);
5050 				}
5051 				*offset = length;
5052 				return (NULL);
5053 			}
5054 			if (stcb) {
5055 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5056 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5057 					    stcb->asoc.overall_error_count,
5058 					    0,
5059 					    SCTP_FROM_SCTP_INPUT,
5060 					    __LINE__);
5061 				}
5062 				stcb->asoc.overall_error_count = 0;
5063 				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
5064 			}
5065 			break;
5066 		case SCTP_SHUTDOWN_COMPLETE:
5067 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
5068 			/* must be first and only chunk */
5069 			if ((num_chunks > 1) ||
5070 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5071 				*offset = length;
5072 				if (locked_tcb) {
5073 					SCTP_TCB_UNLOCK(locked_tcb);
5074 				}
5075 				return (NULL);
5076 			}
5077 			if ((stcb) && netp && *netp) {
5078 				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5079 				    stcb, *netp);
5080 			}
5081 			*offset = length;
5082 			return (NULL);
5083 			break;
5084 		case SCTP_ASCONF:
5085 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5086 			/* He's alive so give him credit */
5087 			if (stcb) {
5088 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5089 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5090 					    stcb->asoc.overall_error_count,
5091 					    0,
5092 					    SCTP_FROM_SCTP_INPUT,
5093 					    __LINE__);
5094 				}
5095 				stcb->asoc.overall_error_count = 0;
5096 				sctp_handle_asconf(m, *offset,
5097 				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5098 				asconf_cnt++;
5099 			}
5100 			break;
5101 		case SCTP_ASCONF_ACK:
5102 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5103 			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5104 				/* It's not ours */
5105 				if (locked_tcb) {
5106 					SCTP_TCB_UNLOCK(locked_tcb);
5107 				}
5108 				*offset = length;
5109 				return (NULL);
5110 			}
5111 			if ((stcb) && netp && *netp) {
5112 				/* He's alive so give him credit */
5113 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5114 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5115 					    stcb->asoc.overall_error_count,
5116 					    0,
5117 					    SCTP_FROM_SCTP_INPUT,
5118 					    __LINE__);
5119 				}
5120 				stcb->asoc.overall_error_count = 0;
5121 				sctp_handle_asconf_ack(m, *offset,
5122 				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5123 				if (abort_no_unlock)
5124 					return (NULL);
5125 			}
5126 			break;
5127 		case SCTP_FORWARD_CUM_TSN:
5128 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5129 			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5130 				/* It's not ours */
5131 				if (locked_tcb) {
5132 					SCTP_TCB_UNLOCK(locked_tcb);
5133 				}
5134 				*offset = length;
5135 				return (NULL);
5136 			}
5137 			/* He's alive so give him credit */
5138 			if (stcb) {
5139 				int abort_flag = 0;
5140 
5141 				stcb->asoc.overall_error_count = 0;
5142 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5143 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5144 					    stcb->asoc.overall_error_count,
5145 					    0,
5146 					    SCTP_FROM_SCTP_INPUT,
5147 					    __LINE__);
5148 				}
5149 				*fwd_tsn_seen = 1;
5150 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5151 					/* We are not interested anymore */
5152 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5153 					so = SCTP_INP_SO(inp);
5154 					atomic_add_int(&stcb->asoc.refcnt, 1);
5155 					SCTP_TCB_UNLOCK(stcb);
5156 					SCTP_SOCKET_LOCK(so, 1);
5157 					SCTP_TCB_LOCK(stcb);
5158 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5159 #endif
5160 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5161 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5162 					SCTP_SOCKET_UNLOCK(so, 1);
5163 #endif
5164 					*offset = length;
5165 					return (NULL);
5166 				}
5167 				sctp_handle_forward_tsn(stcb,
5168 				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5169 				if (abort_flag) {
5170 					*offset = length;
5171 					return (NULL);
5172 				} else {
5173 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5174 						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5175 						    stcb->asoc.overall_error_count,
5176 						    0,
5177 						    SCTP_FROM_SCTP_INPUT,
5178 						    __LINE__);
5179 					}
5180 					stcb->asoc.overall_error_count = 0;
5181 				}
5182 
5183 			}
5184 			break;
5185 		case SCTP_STREAM_RESET:
5186 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5187 			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5188 				/* It's not ours */
5189 				if (locked_tcb) {
5190 					SCTP_TCB_UNLOCK(locked_tcb);
5191 				}
5192 				*offset = length;
5193 				return (NULL);
5194 			}
5195 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5196 				/* We are not interested anymore */
5197 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5198 				so = SCTP_INP_SO(inp);
5199 				atomic_add_int(&stcb->asoc.refcnt, 1);
5200 				SCTP_TCB_UNLOCK(stcb);
5201 				SCTP_SOCKET_LOCK(so, 1);
5202 				SCTP_TCB_LOCK(stcb);
5203 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5204 #endif
5205 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5206 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5207 				SCTP_SOCKET_UNLOCK(so, 1);
5208 #endif
5209 				*offset = length;
5210 				return (NULL);
5211 			}
5212 			if (stcb->asoc.peer_supports_strreset == 0) {
5213 				/*
5214 				 * hmm, peer should have announced this, but
5215 				 * we will turn it on since he is sending us
5216 				 * a stream reset.
5217 				 */
5218 				stcb->asoc.peer_supports_strreset = 1;
5219 			}
5220 			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5221 				/* stop processing */
5222 				*offset = length;
5223 				return (NULL);
5224 			}
5225 			break;
5226 		case SCTP_PACKET_DROPPED:
5227 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5228 			/* re-get it all please */
5229 			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5230 				/* It's not ours */
5231 				if (locked_tcb) {
5232 					SCTP_TCB_UNLOCK(locked_tcb);
5233 				}
5234 				*offset = length;
5235 				return (NULL);
5236 			}
5237 			if (ch && (stcb) && netp && (*netp)) {
5238 				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5239 				    stcb, *netp,
5240 				    min(chk_length, (sizeof(chunk_buf) - 4)));
5241 
5242 			}
5243 			break;
5244 
5245 		case SCTP_AUTHENTICATION:
5246 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5247 			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5248 				goto unknown_chunk;
5249 
5250 			if (stcb == NULL) {
5251 				/* save the first AUTH for later processing */
5252 				if (auth_skipped == 0) {
5253 					auth_offset = *offset;
5254 					auth_len = chk_length;
5255 					auth_skipped = 1;
5256 				}
5257 				/* skip this chunk (temporarily) */
5258 				goto next_chunk;
5259 			}
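			/*
			 * (The stashed auth_offset/auth_len are handed to
			 * the COOKIE-ECHO processing above, which can locate
			 * or create the association needed to check the
			 * deferred AUTH chunk against its keys.)
			 */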
5260 			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5261 			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5262 			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5263 				/* It's not ours */
5264 				if (locked_tcb) {
5265 					SCTP_TCB_UNLOCK(locked_tcb);
5266 				}
5267 				*offset = length;
5268 				return (NULL);
5269 			}
5270 			if (got_auth == 1) {
5271 				/* skip this chunk... it's already auth'd */
5272 				goto next_chunk;
5273 			}
5274 			got_auth = 1;
5275 			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5276 			    m, *offset)) {
5277 				/* auth HMAC failed so dump the packet */
5278 				*offset = length;
5279 				return (stcb);
5280 			} else {
5281 				/* remaining chunks are HMAC checked */
5282 				stcb->asoc.authenticated = 1;
5283 			}
5284 			break;
5285 
5286 		default:
5287 	unknown_chunk:
5288 			/* it's an unknown chunk! */
5289 			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5290 				struct mbuf *mm;
5291 				struct sctp_paramhdr *phd;
5292 
5293 				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5294 				    0, M_DONTWAIT, 1, MT_DATA);
5295 				if (mm) {
5296 					phd = mtod(mm, struct sctp_paramhdr *);
5297 					/*
5298 					 * We cheat and use param type since
5299 					 * we did not bother to define an
5300 					 * error cause struct. They are the
5301 					 * same basic format with different
5302 					 * names.
5303 					 */
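					/*
					 * Illustrative layout of the queued
					 * cause (assuming the standard
					 * RFC 4960 TLV format): a 2-byte
					 * cause code (Unrecognized Chunk
					 * Type), a 2-byte length covering
					 * this header plus the reported
					 * chunk, and then the offending
					 * chunk itself appended via
					 * SCTP_M_COPYM() below.
					 */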
5304 					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5305 					phd->param_length = htons(chk_length + sizeof(*phd));
5306 					SCTP_BUF_LEN(mm) = sizeof(*phd);
5307 					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5308 					    M_DONTWAIT);
5309 					if (SCTP_BUF_NEXT(mm)) {
5310 #ifdef SCTP_MBUF_LOGGING
5311 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5312 							struct mbuf *mat;
5313 
5314 							mat = SCTP_BUF_NEXT(mm);
5315 							while (mat) {
5316 								if (SCTP_BUF_IS_EXTENDED(mat)) {
5317 									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5318 								}
5319 								mat = SCTP_BUF_NEXT(mat);
5320 							}
5321 						}
5322 #endif
5323 						sctp_queue_op_err(stcb, mm);
5324 					} else {
5325 						sctp_m_freem(mm);
5326 					}
5327 				}
5328 			}
5329 			if ((ch->chunk_type & 0x80) == 0) {
5330 				/* discard this packet */
5331 				*offset = length;
5332 				return (stcb);
5333 			}	/* else skip this bad chunk and continue... */
5334 			break;
5335 		}		/* switch (ch->chunk_type) */
5336 
5337 
5338 next_chunk:
5339 		/* get the next chunk */
5340 		*offset += SCTP_SIZE32(chk_length);
5341 		if (*offset >= length) {
5342 			/* no more data left in the mbuf chain */
5343 			break;
5344 		}
5345 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5346 		    sizeof(struct sctp_chunkhdr), chunk_buf);
5347 		if (ch == NULL) {
5348 			if (locked_tcb) {
5349 				SCTP_TCB_UNLOCK(locked_tcb);
5350 			}
5351 			*offset = length;
5352 			return (NULL);
5353 		}
5354 	}			/* while */
5355 
5356 	if (asconf_cnt > 0 && stcb != NULL) {
5357 		sctp_send_asconf_ack(stcb);
5358 	}
5359 	return (stcb);
5360 }
5361 
5362 
5363 /*
5364  * Process the ECN bits we have something set so we must look to see if it is
5365  * ECN(0) or ECN(1) or CE
5366  */
5367 static void
5368 sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
5369     uint8_t ecn_bits)
5370 {
5371 	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5372 		;
5373 	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
5374 		/*
5375 		 * We only add to the nonce sum for ECT1; ECT0 does not
5376 		 * change the NS bit (which we have yet to find a way to
5377 		 * send).
5378 		 */
5379 
5380 		/* ECN Nonce stuff */
5381 		stcb->asoc.receiver_nonce_sum++;
5382 		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
5383 
5384 		/*
5385 		 * Drag up the last_echo point if cumack is larger since we
5386 		 * don't want the point falling way behind by more than
5387 		 * 2^31 and then having it be incorrect.
5388 		 */
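		/*
		 * (compare_with_wrap() does serial-number style comparison
		 * over the 32-bit TSN space; roughly, and assuming the usual
		 * definition used by this stack, a is "newer" than b when
		 * either a > b with a - b < 2^31, or a < b with
		 * b - a > 2^31.)
		 */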
5389 		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5390 		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5391 			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5392 		}
5393 	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
5394 		/*
5395 		 * Drag up the last_echo point if cumack is larger since we
5396 		 * don't want the point falling way behind by more than
5397 		 * 2^31 and then having it be incorrect.
5398 		 */
5399 		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5400 		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5401 			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5402 		}
5403 	}
5404 }
5405 
5406 static void
5407 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
5408     uint32_t high_tsn, uint8_t ecn_bits)
5409 {
5410 	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5411 		/*
5412 		 * We possibly must notify the sender that a congestion
5413 		 * window reduction is in order. We do this by adding an
5414 		 * ECNE chunk to the output chunk queue. The incoming CWR will
5415 		 * remove this chunk.
5416 		 */
5417 		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
5418 		    MAX_TSN)) {
5419 			/* Yep, we need to add an ECNE */
5420 			sctp_send_ecn_echo(stcb, net, high_tsn);
5421 			stcb->asoc.last_echo_tsn = high_tsn;
5422 		}
5423 	}
5424 }
5425 
5426 #ifdef INVARIANTS
5427 static void
5428 sctp_validate_no_locks(struct sctp_inpcb *inp)
5429 {
5430 	struct sctp_tcb *stcb;
5431 
5432 	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
5433 		if (mtx_owned(&stcb->tcb_mtx)) {
5434 			panic("Own lock on stcb at return from input");
5435 		}
5436 	}
5437 }
5438 
5439 #endif
5440 
5441 /*
5442  * common input chunk processing (v4 and v6)
5443  */
5444 void
5445 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
5446     int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
5447     struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
5448     uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
5449 {
5450 	/*
5451 	 * Control chunk processing
5452 	 */
5453 	uint32_t high_tsn;
5454 	int fwd_tsn_seen = 0, data_processed = 0;
5455 	struct mbuf *m = *mm;
5456 	int abort_flag = 0;
5457 	int un_sent;
5458 
5459 	SCTP_STAT_INCR(sctps_recvdatagrams);
5460 #ifdef SCTP_AUDITING_ENABLED
5461 	sctp_audit_log(0xE0, 1);
5462 	sctp_auditing(0, inp, stcb, net);
5463 #endif
5464 
5465 	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5466 	    m, iphlen, offset, length, stcb);
5467 	if (stcb) {
5468 		/* always clear this before beginning a packet */
5469 		stcb->asoc.authenticated = 0;
5470 		stcb->asoc.seen_a_sack_this_pkt = 0;
5471 		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5472 		    stcb, stcb->asoc.state);
5473 
5474 		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5475 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5476 			/*-
5477 			 * If we hit here, we had a ref count
5478 			 * up when the assoc was aborted and the
5479 			 * timer is clearing out the assoc, we should
5480 			 * NOT respond to any packet.. its OOTB.
5481 			 */
5482 			SCTP_TCB_UNLOCK(stcb);
5483 			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5484 			    vrf_id, port);
5485 			goto out_now;
5486 		}
5487 	}
5488 	if (IS_SCTP_CONTROL(ch)) {
5489 		/* process the control portion of the SCTP packet */
5490 		/* sa_ignore NO_NULL_CHK */
5491 		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
5492 		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
5493 		if (stcb) {
5494 			/*
5495 			 * This covers us if the cookie-echo was there and
5496 			 * it changes our INP.
5497 			 */
5498 			inp = stcb->sctp_ep;
5499 			if ((net) && (port)) {
5500 				if (net->port == 0) {
5501 					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5502 				}
5503 				net->port = port;
5504 			}
5505 		}
5506 	} else {
5507 		/*
5508 		 * no control chunks, so pre-process DATA chunks (these
5509 		 * checks are taken care of by control processing)
5510 		 */
5511 
5512 		/*
5513 		 * if DATA only packet, and auth is required, then punt...
5514 		 * can't have authenticated without any AUTH (control)
5515 		 * chunks
5516 		 */
5517 		if ((stcb != NULL) &&
5518 		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5519 		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5520 			/* "silently" ignore */
5521 			SCTP_STAT_INCR(sctps_recvauthmissing);
5522 			SCTP_TCB_UNLOCK(stcb);
5523 			goto out_now;
5524 		}
5525 		if (stcb == NULL) {
5526 			/* out of the blue DATA chunk */
5527 			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5528 			    vrf_id, port);
5529 			goto out_now;
5530 		}
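		/*
		 * (Per RFC 4960, section 8.5, a DATA packet whose
		 * Verification Tag does not match the association's own tag
		 * is silently discarded; only the sctps_badvtag counter
		 * records it.)
		 */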
5531 		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5532 			/* v_tag mismatch! */
5533 			SCTP_STAT_INCR(sctps_badvtag);
5534 			SCTP_TCB_UNLOCK(stcb);
5535 			goto out_now;
5536 		}
5537 	}
5538 
5539 	if (stcb == NULL) {
5540 		/*
5541 		 * no valid TCB for this packet, or we found it's a bad
5542 		 * packet while processing control, or we're done with this
5543 		 * packet (done or skip rest of data), so we drop it...
5544 		 */
5545 		goto out_now;
5546 	}
5547 	/*
5548 	 * DATA chunk processing
5549 	 */
5550 	/* plow through the data chunks while length > offset */
5551 
5552 	/*
5553 	 * Rest should be DATA only.  Check authentication state if AUTH for
5554 	 * DATA is required.
5555 	 */
5556 	if ((length > offset) &&
5557 	    (stcb != NULL) &&
5558 	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5559 	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5560 	    !stcb->asoc.authenticated) {
5561 		/* "silently" ignore */
5562 		SCTP_STAT_INCR(sctps_recvauthmissing);
5563 		SCTPDBG(SCTP_DEBUG_AUTH1,
5564 		    "Data chunk requires AUTH, skipped\n");
5565 		goto trigger_send;
5566 	}
5567 	if (length > offset) {
5568 		int retval;
5569 
5570 		/*
5571 		 * First check to make sure our state is correct. We would
5572 		 * not get here unless we really did have a tag, so we don't
5573 		 * abort if this happens, just dump the chunk silently.
5574 		 */
5575 		switch (SCTP_GET_STATE(&stcb->asoc)) {
5576 		case SCTP_STATE_COOKIE_ECHOED:
5577 			/*
5578 			 * We consider that data arriving with a valid tag in
5579 			 * this state shows the COOKIE-ACK was lost. Imply it
5580 			 * was there.
5581 			 */
5582 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5583 				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5584 				    stcb->asoc.overall_error_count,
5585 				    0,
5586 				    SCTP_FROM_SCTP_INPUT,
5587 				    __LINE__);
5588 			}
5589 			stcb->asoc.overall_error_count = 0;
5590 			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5591 			break;
5592 		case SCTP_STATE_COOKIE_WAIT:
5593 			/*
5594 			 * We consider OOTB any data sent during asoc setup.
5595 			 */
5596 			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5597 			    vrf_id, port);
5598 			SCTP_TCB_UNLOCK(stcb);
5599 			goto out_now;
5600 			/* sa_ignore NOTREACHED */
5601 			break;
5602 		case SCTP_STATE_EMPTY:	/* should not happen */
5603 		case SCTP_STATE_INUSE:	/* should not happen */
5604 		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5605 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5606 		default:
5607 			SCTP_TCB_UNLOCK(stcb);
5608 			goto out_now;
5609 			/* sa_ignore NOTREACHED */
5610 			break;
5611 		case SCTP_STATE_OPEN:
5612 		case SCTP_STATE_SHUTDOWN_SENT:
5613 			break;
5614 		}
5615 		/* take care of ECN, part 1. */
5616 		if (stcb->asoc.ecn_allowed &&
5617 		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5618 			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
5619 		}
5620 		/* plow through the data chunks while length > offset */
5621 		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
5622 		    inp, stcb, net, &high_tsn);
5623 		if (retval == 2) {
5624 			/*
5625 			 * The association aborted, NO UNLOCK needed since
5626 			 * the association is destroyed.
5627 			 */
5628 			goto out_now;
5629 		}
5630 		data_processed = 1;
5631 		if (retval == 0) {
5632 			/* take care of ecn part 2. */
5633 			if (stcb->asoc.ecn_allowed &&
5634 			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5635 				sctp_process_ecn_marked_b(stcb, net, high_tsn,
5636 				    ecn_bits);
5637 			}
5638 		}
5639 		/*
5640 		 * Anything important needs to have been m_copy'ed in
5641 		 * process_data
5642 		 */
5643 	}
5644 	if ((data_processed == 0) && (fwd_tsn_seen)) {
5645 		int was_a_gap = 0;
5646 
5647 		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
5648 		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
5649 			/* there was a gap before this data was processed */
5650 			was_a_gap = 1;
5651 		}
5652 		stcb->asoc.send_sack = 1;
5653 		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
5654 		if (abort_flag) {
5655 			/* Again, we aborted so NO UNLOCK needed */
5656 			goto out_now;
5657 		}
5658 	} else if (fwd_tsn_seen) {
5659 		stcb->asoc.send_sack = 1;
5660 	}
5661 	/* trigger send of any chunks in queue... */
5662 trigger_send:
5663 #ifdef SCTP_AUDITING_ENABLED
5664 	sctp_audit_log(0xE0, 2);
5665 	sctp_auditing(1, inp, stcb, net);
5666 #endif
5667 	SCTPDBG(SCTP_DEBUG_INPUT1,
5668 	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5669 	    stcb->asoc.peers_rwnd,
5670 	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5671 	    stcb->asoc.total_flight);
5672 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5673 
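	/*
	 * Kick the output side if any control chunks are queued, or if
	 * there is unsent data and either the peer still advertises window
	 * space or the window is closed with nothing in flight (so a probe
	 * can be sent to elicit a fresh a_rwnd).
	 */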
5674 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
5675 	    ((un_sent) &&
5676 	    (stcb->asoc.peers_rwnd > 0 ||
5677 	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5678 		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5679 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5680 		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5681 	}
5682 #ifdef SCTP_AUDITING_ENABLED
5683 	sctp_audit_log(0xE0, 3);
5684 	sctp_auditing(2, inp, stcb, net);
5685 #endif
5686 	SCTP_TCB_UNLOCK(stcb);
5687 out_now:
5688 #ifdef INVARIANTS
5689 	sctp_validate_no_locks(inp);
5690 #endif
5691 	return;
5692 }
5693 
5694 #if 0
5695 static void
5696 sctp_print_mbuf_chain(struct mbuf *m)
5697 {
5698 	for (; m; m = SCTP_BUF_NEXT(m)) {
5699 		printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
5700 		if (SCTP_BUF_IS_EXTENDED(m))
5701 			printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
5702 	}
5703 }
5704 
5705 #endif
5706 
5707 void
5708 sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
5709 {
5710 #ifdef SCTP_MBUF_LOGGING
5711 	struct mbuf *mat;
5712 
5713 #endif
5714 	struct mbuf *m;
5715 	int iphlen;
5716 	uint32_t vrf_id = 0;
5717 	uint8_t ecn_bits;
5718 	struct ip *ip;
5719 	struct sctphdr *sh;
5720 	struct sctp_inpcb *inp = NULL;
5721 
5722 	uint32_t check, calc_check;
5723 	struct sctp_nets *net;
5724 	struct sctp_tcb *stcb = NULL;
5725 	struct sctp_chunkhdr *ch;
5726 	int refcount_up = 0;
5727 	int length, mlen, offset;
5728 
5729 	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
5730 		SCTP_RELEASE_PKT(i_pak);
5731 		return;
5732 	}
5733 	mlen = SCTP_HEADER_LEN(i_pak);
5734 	iphlen = off;
5735 	m = SCTP_HEADER_TO_CHAIN(i_pak);
5736 
5737 	net = NULL;
5738 	SCTP_STAT_INCR(sctps_recvpackets);
5739 	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
5740 
5741 
5742 #ifdef SCTP_MBUF_LOGGING
5743 	/* Log in any input mbufs */
5744 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5745 		mat = m;
5746 		while (mat) {
5747 			if (SCTP_BUF_IS_EXTENDED(mat)) {
5748 				sctp_log_mb(mat, SCTP_MBUF_INPUT);
5749 			}
5750 			mat = SCTP_BUF_NEXT(mat);
5751 		}
5752 	}
5753 #endif
5754 #ifdef  SCTP_PACKET_LOGGING
5755 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
5756 		sctp_packet_log(m, mlen);
5757 #endif
5758 	/*
5759 	 * Must take out the iphlen, since mlen expects this (only affects
5760 	 * the loopback case).
5761 	 */
5762 	mlen -= iphlen;
5763 
5764 	/*
5765 	 * Get IP, SCTP, and first chunk header together in first mbuf.
5766 	 */
5767 	ip = mtod(m, struct ip *);
5768 	offset = iphlen + sizeof(*sh) + sizeof(*ch);
5769 	if (SCTP_BUF_LEN(m) < offset) {
5770 		if ((m = m_pullup(m, offset)) == 0) {
5771 			SCTP_STAT_INCR(sctps_hdrops);
5772 			return;
5773 		}
5774 		ip = mtod(m, struct ip *);
5775 	}
5776 	/* validate mbuf chain length with IP payload length */
5777 	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
5778 		SCTP_STAT_INCR(sctps_hdrops);
5779 		goto bad;
5780 	}
5781 	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
5782 	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
5783 	SCTPDBG(SCTP_DEBUG_INPUT1,
5784 	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);
5785 
5786 	/* SCTP does not allow broadcasts or multicasts */
5787 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
5788 		goto bad;
5789 	}
5790 	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
5791 		/*
5792 		 * We only look at a broadcast if it is in a front state; all
5793 		 * others we will not have a tcb for anyway.
5794 		 */
5795 		goto bad;
5796 	}
5797 	/* validate SCTP checksum */
5798 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
5799 	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
5800 	    m->m_pkthdr.len,
5801 	    if_name(m->m_pkthdr.rcvif),
5802 	    m->m_pkthdr.csum_flags);
5803 	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
5804 		SCTP_STAT_INCR(sctps_recvhwcrc);
5805 		goto sctp_skip_csum_4;
5806 	}
5807 	check = sh->checksum;	/* save incoming checksum */
5808 	if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
5809 	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
5810 	    (SCTP_IS_IT_LOOPBACK(m)))
5811 	    ) {
5812 		SCTP_STAT_INCR(sctps_recvnocrc);
5813 		goto sctp_skip_csum_4;
5814 	}
5815 	sh->checksum = 0;	/* prepare for calc */
5816 	calc_check = sctp_calculate_cksum(m, iphlen);
5817 	sh->checksum = check;
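	/*
	 * The software CRC32c is computed over the common header and all
	 * chunks with the checksum field zeroed; the wire value is restored
	 * above so the unmodified packet can still be reflected in a
	 * PACKET-DROPPED report on a mismatch.
	 */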
5818 	SCTP_STAT_INCR(sctps_recvswcrc);
5819 	if (calc_check != check) {
5820 		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5821 		    calc_check, check, m, mlen, iphlen);
5822 
5823 		stcb = sctp_findassociation_addr(m, iphlen,
5824 		    offset - sizeof(*ch),
5825 		    sh, ch, &inp, &net,
5826 		    vrf_id);
5827 		if ((net) && (port)) {
5828 			if (net->port == 0) {
5829 				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5830 			}
5831 			net->port = port;
5832 		}
5833 		if ((inp) && (stcb)) {
5834 			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
5835 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5836 		} else if ((inp != NULL) && (stcb == NULL)) {
5837 			refcount_up = 1;
5838 		}
5839 		SCTP_STAT_INCR(sctps_badsum);
5840 		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5841 		goto bad;
5842 	}
5843 sctp_skip_csum_4:
5844 	/* destination port of 0 is illegal, based on RFC2960. */
5845 	if (sh->dest_port == 0) {
5846 		SCTP_STAT_INCR(sctps_hdrops);
5847 		goto bad;
5848 	}
5849 	/*
5850 	 * Locate the pcb and tcb for the datagram; sctp_findassociation_addr()
5851 	 * wants the IP/SCTP/first chunk header...
5852 	 */
5853 	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
5854 	    sh, ch, &inp, &net, vrf_id);
5855 	if ((net) && (port)) {
5856 		if (net->port == 0) {
5857 			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5858 		}
5859 		net->port = port;
5860 	}
5861 	/* inp's ref-count increased && stcb locked */
5862 	if (inp == NULL) {
5863 		struct sctp_init_chunk *init_chk, chunk_buf;
5864 
5865 		SCTP_STAT_INCR(sctps_noport);
5866 #ifdef ICMP_BANDLIM
5867 		/*
5868 		 * we use the bandwidth limiting to protect against sending
5869 		 * too many ABORTS all at once. In this case these count the
5870 		 * same as an ICMP message.
5871 		 */
5872 		if (badport_bandlim(0) < 0)
5873 			goto bad;
5874 #endif				/* ICMP_BANDLIM */
5875 		SCTPDBG(SCTP_DEBUG_INPUT1,
5876 		    "Sending a ABORT from packet entry!\n");
5877 		if (ch->chunk_type == SCTP_INITIATION) {
5878 			/*
5879 			 * we do a trick here to get the INIT tag, dig in
5880 			 * and get the tag from the INIT and put it in the
5881 			 * common header.
5882 			 */
5883 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
5884 			    iphlen + sizeof(*sh), sizeof(*init_chk),
5885 			    (uint8_t *) & chunk_buf);
5886 			if (init_chk != NULL)
5887 				sh->v_tag = init_chk->init.initiate_tag;
5888 		}
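		/*
		 * (An ABORT sent in reply to an INIT is expected to carry
		 * the peer's Initiate Tag as its Verification Tag, per the
		 * RFC 4960 out-of-the-blue rules, which is why the tag is
		 * lifted into the common header here.)
		 */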
5889 		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5890 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
5891 			goto bad;
5892 		}
5893 		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5894 			goto bad;
5895 		}
5896 		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
5897 			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
5898 		goto bad;
5899 	} else if (stcb == NULL) {
5900 		refcount_up = 1;
5901 	}
5902 #ifdef IPSEC
5903 	/*
5904 	 * I very much doubt any of the IPSEC stuff will work but I have no
5905 	 * idea, so I will leave it in place.
5906 	 */
5907 	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5908 		MODULE_GLOBAL(ipsec4stat).in_polvio++;
5909 		SCTP_STAT_INCR(sctps_hdrops);
5910 		goto bad;
5911 	}
5912 #endif				/* IPSEC */
5913 
5914 	/*
5915 	 * common chunk processing
5916 	 */
5917 	length = ip->ip_len + iphlen;
5918 	offset -= sizeof(struct sctp_chunkhdr);
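	/*
	 * (length is the offset just past the end of the SCTP packet in the
	 * same iphlen-relative offset space the chunk walker uses; offset is
	 * rewound so it points at the first chunk header rather than just
	 * past it.)
	 */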
5919 
5920 	ecn_bits = ip->ip_tos;
5921 
5922 	/* sa_ignore NO_NULL_CHK */
5923 	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
5924 	    inp, stcb, net, ecn_bits, vrf_id, port);
5925 	/* inp's ref-count reduced && stcb unlocked */
5926 	if (m) {
5927 		sctp_m_freem(m);
5928 	}
5929 	if ((inp) && (refcount_up)) {
5930 		/* reduce ref-count */
5931 		SCTP_INP_DECR_REF(inp);
5932 	}
5933 	return;
5934 bad:
5935 	if (stcb) {
5936 		SCTP_TCB_UNLOCK(stcb);
5937 	}
5938 	if ((inp) && (refcount_up)) {
5939 		/* reduce ref-count */
5940 		SCTP_INP_DECR_REF(inp);
5941 	}
5942 	if (m) {
5943 		sctp_m_freem(m);
5944 	}
5945 	return;
5946 }
5947 
5948 void
5949 sctp_input(struct mbuf *i_pak, int off)
5950 {
5951 	sctp_input_with_port(i_pak, off, 0);
5952 }
5953 
5954