xref: /freebsd/sys/netinet/sctp_input.c (revision 54ebdd631db8c0bba2baab0155f603a8b5cf014a)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_indata.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctp_bsd_addr.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/udp.h>
50 
51 
52 
53 static void
54 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
55 {
56 	struct sctp_nets *net;
57 
58 	/*
59 	 * This now not only stops all cookie timers it also stops any INIT
60 	 * timers as well. This will make sure that the timers are stopped
61 	 * in all collision cases.
62 	 */
63 	SCTP_TCB_LOCK_ASSERT(stcb);
64 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
65 		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
66 			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
67 			    stcb->sctp_ep,
68 			    stcb,
69 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
70 		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
71 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
72 			    stcb->sctp_ep,
73 			    stcb,
74 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
75 		}
76 	}
77 }
78 
79 /* INIT handler */
/*
 * Handle an incoming INIT chunk.  Validates the chunk length and the
 * mandatory parameters (initiate tag, a_rwnd, stream counts, AUTH
 * parameters); on any violation an ABORT is sent via
 * sctp_abort_association() and, when a TCB exists, *abort_no_unlock is
 * set so the caller skips the unlock.  On success an INIT-ACK carrying
 * a state cookie is sent.  When stcb == NULL the INP read lock is held
 * across the body and released at "outnow".
 */
80 static void
81 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
82     struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
83     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
84 {
85 	struct sctp_init *init;
86 	struct mbuf *op_err;
87 	uint32_t init_limit;
88 
89 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
90 	    stcb);
	/* No association yet: take the INP read lock and bail if the socket is gone. */
91 	if (stcb == NULL) {
92 		SCTP_INP_RLOCK(inp);
93 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
94 			goto outnow;
95 		}
96 	}
97 	op_err = NULL;
98 	init = &cp->init;
99 	/* First are we accepting? */
100 	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
101 		SCTPDBG(SCTP_DEBUG_INPUT2,
102 		    "sctp_handle_init: Abort, so_qlimit:%d\n",
103 		    inp->sctp_socket->so_qlimit);
104 		/*
105 		 * FIX ME ?? What about TCP model and we have a
106 		 * match/restart case? Actually no fix is needed. the lookup
107 		 * will always find the existing assoc so stcb would not be
108 		 * NULL. It may be questionable to do this since we COULD
109 		 * just send back the INIT-ACK and hope that the app did
110 		 * accept()'s by the time the COOKIE was sent. But there is
111 		 * a price to pay for COOKIE generation and I don't want to
112 		 * pay it on the chance that the app will actually do some
113 		 * accepts(). The App just looses and should NOT be in this
114 		 * state :-)
115 		 */
116 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
117 		    vrf_id, port);
118 		if (stcb)
119 			*abort_no_unlock = 1;
120 		goto outnow;
121 	}
	/* Chunk must be at least as long as a full INIT chunk header. */
122 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
123 		/* Invalid length */
124 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
125 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
126 		    vrf_id, port);
127 		if (stcb)
128 			*abort_no_unlock = 1;
129 		goto outnow;
130 	}
131 	/* validate parameters */
132 	if (init->initiate_tag == 0) {
133 		/* protocol error... send abort */
134 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
136 		    vrf_id, port);
137 		if (stcb)
138 			*abort_no_unlock = 1;
139 		goto outnow;
140 	}
141 	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
142 		/* invalid parameter... send abort */
143 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
145 		    vrf_id, port);
146 		if (stcb)
147 			*abort_no_unlock = 1;
148 		goto outnow;
149 	}
150 	if (init->num_inbound_streams == 0) {
151 		/* protocol error... send abort */
152 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
153 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
154 		    vrf_id, port);
155 		if (stcb)
156 			*abort_no_unlock = 1;
157 		goto outnow;
158 	}
159 	if (init->num_outbound_streams == 0) {
160 		/* protocol error... send abort */
161 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
162 		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
163 		    vrf_id, port);
164 		if (stcb)
165 			*abort_no_unlock = 1;
166 		goto outnow;
167 	}
	/* Validate the AUTH-related parameters inside the INIT's parameter area. */
168 	init_limit = offset + ntohs(cp->ch.chunk_length);
169 	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
170 	    init_limit)) {
171 		/* auth parameter(s) error... send abort */
172 		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
173 		if (stcb)
174 			*abort_no_unlock = 1;
175 		goto outnow;
176 	}
177 	/* send an INIT-ACK w/cookie */
178 	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
179 	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
180 	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
181 outnow:
	/* Drop the INP read lock taken above when no TCB existed. */
182 	if (stcb == NULL) {
183 		SCTP_INP_RUNLOCK(inp);
184 	}
185 }
186 
187 /*
188  * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
189  */
190 
191 int
192 sctp_is_there_unsent_data(struct sctp_tcb *stcb)
193 {
194 	int unsent_data = 0;
195 	struct sctp_stream_queue_pending *sp;
196 	struct sctp_stream_out *strq;
197 	struct sctp_association *asoc;
198 
199 	/*
200 	 * This function returns the number of streams that have true unsent
201 	 * data on them. Note that as it looks through it will clean up any
202 	 * places that have old data that has been sent but left at top of
203 	 * stream queue.
204 	 */
205 	asoc = &stcb->asoc;
206 	SCTP_TCB_SEND_LOCK(stcb);
207 	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
208 		/* Check to see if some data queued */
209 		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
210 	is_there_another:
211 			/* sa_ignore FREED_MEMORY */
212 			sp = TAILQ_FIRST(&strq->outqueue);
213 			if (sp == NULL) {
214 				continue;
215 			}
216 			if ((sp->msg_is_complete) &&
217 			    (sp->length == 0) &&
218 			    (sp->sender_all_done)) {
219 				/*
220 				 * We are doing differed cleanup. Last time
221 				 * through when we took all the data the
222 				 * sender_all_done was not set.
223 				 */
224 				if (sp->put_last_out == 0) {
225 					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
226 					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
227 					    sp->sender_all_done,
228 					    sp->length,
229 					    sp->msg_is_complete,
230 					    sp->put_last_out);
231 				}
232 				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
233 				TAILQ_REMOVE(&strq->outqueue, sp, next);
234 				sctp_free_remote_addr(sp->net);
235 				if (sp->data) {
236 					sctp_m_freem(sp->data);
237 					sp->data = NULL;
238 				}
239 				sctp_free_a_strmoq(stcb, sp);
240 				goto is_there_another;
241 			} else {
242 				unsent_data++;
243 				continue;
244 			}
245 		}
246 	}
247 	SCTP_TCB_SEND_UNLOCK(stcb);
248 	return (unsent_data);
249 }
250 
/*
 * Absorb the peer's INIT (or INIT-ACK) parameters into the association:
 * record the peer vtag and rwnd, seed per-net ssthresh, cut back the
 * outbound stream count if the peer advertises fewer inbound streams,
 * seed the TSN tracking state (mapping arrays, cumulative TSN), and
 * (re)allocate the inbound stream array.  Returns 0 on success, -1 if
 * the inbound stream array cannot be allocated.
 */
251 static int
252 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
253     struct sctp_nets *net)
254 {
255 	struct sctp_init *init;
256 	struct sctp_association *asoc;
257 	struct sctp_nets *lnet;
258 	unsigned int i;
259 
260 	init = &cp->init;
261 	asoc = &stcb->asoc;
262 	/* save off parameters */
263 	asoc->peer_vtag = ntohl(init->initiate_tag);
264 	asoc->peers_rwnd = ntohl(init->a_rwnd);
265 	if (TAILQ_FIRST(&asoc->nets)) {
266 		/* update any ssthresh's that may have a default */
267 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
268 			lnet->ssthresh = asoc->peers_rwnd;
269 
270 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
271 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
272 			}
273 		}
274 	}
	/* Hold the send lock while trimming streams so senders can't race us. */
275 	SCTP_TCB_SEND_LOCK(stcb);
276 	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
277 		unsigned int newcnt;
278 		struct sctp_stream_out *outs;
279 		struct sctp_stream_queue_pending *sp;
280 
281 		/* cut back on number of streams */
282 		newcnt = ntohs(init->num_inbound_streams);
283 		/* This if is probably not needed but I am cautious */
284 		if (asoc->strmout) {
285 			/* First make sure no data chunks are trapped */
286 			for (i = newcnt; i < asoc->pre_open_streams; i++) {
287 				outs = &asoc->strmout[i];
288 				sp = TAILQ_FIRST(&outs->outqueue);
				/* Drain and notify for each message queued on a doomed stream. */
289 				while (sp) {
290 					TAILQ_REMOVE(&outs->outqueue, sp,
291 					    next);
292 					asoc->stream_queue_cnt--;
293 					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
294 					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
295 					    sp, SCTP_SO_NOT_LOCKED);
296 					if (sp->data) {
297 						sctp_m_freem(sp->data);
298 						sp->data = NULL;
299 					}
300 					sctp_free_remote_addr(sp->net);
301 					sp->net = NULL;
302 					/* Free the chunk */
303 					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
304 					    sp, stcb);
305 
306 					sctp_free_a_strmoq(stcb, sp);
307 					/* sa_ignore FREED_MEMORY */
308 					sp = TAILQ_FIRST(&outs->outqueue);
309 				}
310 			}
311 		}
312 		/* cut back the count and abandon the upper streams */
313 		asoc->pre_open_streams = newcnt;
314 	}
315 	SCTP_TCB_SEND_UNLOCK(stcb);
316 	asoc->streamoutcnt = asoc->pre_open_streams;
317 	/* init tsn's */
	/* Highest-seen TSN starts one below the peer's initial TSN. */
318 	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
319 	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
320 	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
321 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
322 		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
323 	}
324 	/* This is the next one we expect */
325 	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
326 
327 	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
328 	/*
329 	 * EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
330 	 * like above
331 	 */
332 	asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
333 	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
334 	asoc->last_echo_tsn = asoc->asconf_seq_in;
335 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
336 	/* open the requested streams */
337 
338 	if (asoc->strmin != NULL) {
339 		/* Free the old ones */
340 		struct sctp_queued_to_read *ctl;
341 
342 		for (i = 0; i < asoc->streamincnt; i++) {
343 			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
344 			while (ctl) {
345 				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
346 				sctp_free_remote_addr(ctl->whoFrom);
347 				ctl->whoFrom = NULL;
348 				sctp_m_freem(ctl->data);
349 				ctl->data = NULL;
350 				sctp_free_a_readq(stcb, ctl);
351 				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
352 			}
353 		}
354 		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
355 	}
	/* Inbound stream count is what the peer will send on, clamped to our max. */
356 	asoc->streamincnt = ntohs(init->num_outbound_streams);
357 	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
358 		asoc->streamincnt = MAX_SCTP_STREAMS;
359 	}
360 	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
361 	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
362 	if (asoc->strmin == NULL) {
363 		/* we didn't get memory for the streams! */
364 		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
365 		return (-1);
366 	}
367 	for (i = 0; i < asoc->streamincnt; i++) {
368 		asoc->strmin[i].stream_no = i;
369 		asoc->strmin[i].last_sequence_delivered = 0xffff;
370 		/*
371 		 * U-stream ranges will be set when the cookie is unpacked.
372 		 * Or for the INIT sender they are un set (if pr-sctp not
373 		 * supported) when the INIT-ACK arrives.
374 		 */
375 		TAILQ_INIT(&asoc->strmin[i].inqueue);
376 		asoc->strmin[i].delivery_started = 0;
377 	}
378 	/*
379 	 * load_address_from_init will put the addresses into the
380 	 * association when the COOKIE is processed or the INIT-ACK is
381 	 * processed. Both types of COOKIE's existing and new call this
382 	 * routine. It will remove addresses that are no longer in the
383 	 * association (for the restarting case where addresses are
384 	 * removed). Up front when the INIT arrives we will discard it if it
385 	 * is a restart and new addresses have been added.
386 	 */
387 	/* sa_ignore MEMLEAK */
388 	return (0);
389 }
390 
391 /*
392  * INIT-ACK message processing/consumption returns value < 0 on error
393  */
/*
 * Consume a peer INIT-ACK: reject unrecognized/illegal parameters
 * (aborting the association), absorb the peer's INIT parameters via
 * sctp_process_init(), load the peer's addresses, negotiate the HMAC,
 * reset error counters, stop the INIT timer, seed the RTO, and queue a
 * COOKIE-ECHO.  Returns < 0 on error; *abort_no_unlock is set when the
 * association was aborted and the caller must not unlock the TCB.
 */
394 static int
395 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
396     struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
397     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
398 {
399 	struct sctp_association *asoc;
400 	struct mbuf *op_err;
401 	int retval, abort_flag;
402 	uint32_t initack_limit;
403 	int nat_friendly = 0;
404 
405 	/* First verify that we have no illegal param's */
406 	abort_flag = 0;
407 	op_err = NULL;
408 
409 	op_err = sctp_arethere_unrecognized_parameters(m,
410 	    (offset + sizeof(struct sctp_init_chunk)),
411 	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
412 	if (abort_flag) {
413 		/* Send an abort and notify peer */
414 		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
415 		*abort_no_unlock = 1;
416 		return (-1);
417 	}
418 	asoc = &stcb->asoc;
419 	asoc->peer_supports_nat = (uint8_t) nat_friendly;
420 	/* process the peer's parameters in the INIT-ACK */
421 	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
422 	if (retval < 0) {
423 		return (retval);
424 	}
425 	initack_limit = offset + ntohs(cp->ch.chunk_length);
426 	/* load all addresses */
427 	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
428 	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
429 	    NULL))) {
430 		/* Huh, we should abort */
431 		SCTPDBG(SCTP_DEBUG_INPUT1,
432 		    "Load addresses from INIT causes an abort %d\n",
433 		    retval);
434 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
435 		    NULL, 0, net->port);
436 		*abort_no_unlock = 1;
437 		return (-1);
438 	}
439 	/* if the peer doesn't support asconf, flush the asconf queue */
440 	if (asoc->peer_supports_asconf == 0) {
441 		struct sctp_asconf_addr *aparam;
442 
443 		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
444 			/* sa_ignore FREED_MEMORY */
445 			aparam = TAILQ_FIRST(&asoc->asconf_queue);
446 			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
447 			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
448 		}
449 	}
	/* Pick an HMAC both ends support for later AUTH chunks. */
450 	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
451 	    stcb->asoc.local_hmacs);
452 	if (op_err) {
453 		sctp_queue_op_err(stcb, op_err);
454 		/* queuing will steal away the mbuf chain to the out queue */
455 		op_err = NULL;
456 	}
457 	/* extract the cookie and queue it to "echo" it back... */
458 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
459 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
460 		    stcb->asoc.overall_error_count,
461 		    0,
462 		    SCTP_FROM_SCTP_INPUT,
463 		    __LINE__);
464 	}
	/* A valid INIT-ACK clears the error counters. */
465 	stcb->asoc.overall_error_count = 0;
466 	net->error_count = 0;
467 
468 	/*
469 	 * Cancel the INIT timer, We do this first before queueing the
470 	 * cookie. We always cancel at the primary to assue that we are
471 	 * canceling the timer started by the INIT which always goes to the
472 	 * primary.
473 	 */
474 	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
475 	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
476 
477 	/* calculate the RTO */
478 	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
479 
480 	retval = sctp_send_cookie_echo(m, offset, stcb, net);
481 	if (retval < 0) {
482 		/*
483 		 * No cookie, we probably should send a op error. But in any
484 		 * case if there is no cookie in the INIT-ACK, we can
485 		 * abandon the peer, its broke.
486 		 */
487 		if (retval == -3) {
488 			/* We abort with an error of missing mandatory param */
489 			op_err =
490 			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
491 			if (op_err) {
492 				/*
493 				 * Expand beyond to include the mandatory
494 				 * param cookie
495 				 */
496 				struct sctp_inv_mandatory_param *mp;
497 
498 				SCTP_BUF_LEN(op_err) =
499 				    sizeof(struct sctp_inv_mandatory_param);
500 				mp = mtod(op_err,
501 				    struct sctp_inv_mandatory_param *);
502 				/* Subtract the reserved param */
503 				mp->length =
504 				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
505 				mp->num_param = htonl(1);
506 				mp->param = htons(SCTP_STATE_COOKIE);
507 				mp->resv = 0;
508 			}
509 			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
510 			    sh, op_err, 0, net->port);
511 			*abort_no_unlock = 1;
512 		}
513 		return (retval);
514 	}
515 	return (0);
516 }
517 
/*
 * Handle a HEARTBEAT-ACK: locate the destination it was sent to,
 * confirm an unconfirmed address when the echoed random values match,
 * possibly promote it to primary, clear error/unreachable state, clear
 * CMT-PF state, and update the path RTO from the echoed timestamp.
 */
518 static void
519 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
520     struct sctp_tcb *stcb, struct sctp_nets *net)
521 {
522 	struct sockaddr_storage store;
523 	struct sockaddr_in *sin;
524 	struct sockaddr_in6 *sin6;
525 	struct sctp_nets *r_net;
526 	struct timeval tv;
527 	int req_prim = 0;
528 
529 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
530 		/* Invalid length */
531 		return;
532 	}
	/* sin and sin6 alias the same storage; which is valid depends on addr_family. */
533 	sin = (struct sockaddr_in *)&store;
534 	sin6 = (struct sockaddr_in6 *)&store;
535 
536 	memset(&store, 0, sizeof(store));
537 	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
538 	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
539 		sin->sin_family = cp->heartbeat.hb_info.addr_family;
540 		sin->sin_len = cp->heartbeat.hb_info.addr_len;
541 		sin->sin_port = stcb->rport;
542 		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
543 		    sizeof(sin->sin_addr));
544 	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
545 	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
546 		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
547 		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
548 		sin6->sin6_port = stcb->rport;
549 		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
550 		    sizeof(sin6->sin6_addr));
551 	} else {
		/* Unknown family or bad length: ignore the chunk. */
552 		return;
553 	}
554 	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
555 	if (r_net == NULL) {
556 		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
557 		return;
558 	}
559 	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
560 	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
561 	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
562 		/*
563 		 * If the its a HB and it's random value is correct when can
564 		 * confirm the destination.
565 		 */
566 		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
567 		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			/* User asked for this address as primary: install it now. */
567 			stcb->asoc.primary_destination = r_net;
569 			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
570 			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
571 			r_net = TAILQ_FIRST(&stcb->asoc.nets);
572 			if (r_net != stcb->asoc.primary_destination) {
573 				/*
574 				 * first one on the list is NOT the primary
575 				 * sctp_cmpaddr() is much more efficent if
576 				 * the primary is the first on the list,
577 				 * make it so.
578 				 */
579 				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
580 				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
581 			}
582 			req_prim = 1;
583 		}
584 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
585 		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
586 	}
587 	r_net->error_count = 0;
588 	r_net->hb_responded = 1;
	/* Echoed send-time of the heartbeat, used below for the RTO sample. */
589 	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
590 	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
591 	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
592 		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
593 		r_net->dest_state |= SCTP_ADDR_REACHABLE;
594 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
595 		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
596 		/* now was it the primary? if so restore */
597 		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
598 			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
599 		}
600 	}
601 	/*
602 	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
603 	 * set the destination to active state and set the cwnd to one or
604 	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
605 	 * timer is running, for the destination, stop the timer because a
606 	 * PF-heartbeat was received.
607 	 */
	/*
	 * NOTE(review): this section operates on `net` (the net the chunk
	 * arrived on) while the rest of the function operates on `r_net`
	 * (the net the heartbeat was sent to) — confirm whether `net` is
	 * intended here or whether it should be `r_net`.
	 */
608 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
609 	    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
610 	    (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
611 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
612 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
613 			    stcb, net,
614 			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
615 		}
616 		net->dest_state &= ~SCTP_ADDR_PF;
617 		net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
618 		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
619 		    net, net->cwnd);
620 	}
621 	/* Now lets do a RTO with this */
622 	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
623 	/* Mobility adaptation */
624 	if (req_prim) {
625 		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
626 		    SCTP_MOBILITY_BASE) ||
627 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
628 		    SCTP_MOBILITY_FASTHANDOFF)) &&
629 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
630 		    SCTP_MOBILITY_PRIM_DELETED)) {
631 
632 			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
633 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
634 			    SCTP_MOBILITY_FASTHANDOFF)) {
635 				sctp_assoc_immediate_retrans(stcb,
636 				    stcb->asoc.primary_destination);
637 			}
638 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
639 			    SCTP_MOBILITY_BASE)) {
640 				sctp_move_chunks_from_deleted_prim(stcb,
641 				    stcb->asoc.primary_destination);
642 			}
643 			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
644 			    stcb->asoc.deleted_primary);
645 		}
646 	}
647 }
648 
/*
 * Handle the NAT "colliding state" abort cause.  In COOKIE-WAIT or
 * COOKIE-ECHOED we pick a fresh verification tag, re-hash the TCB into
 * the vtag hash table, and resend the INIT.  Returns non-zero to tell
 * the caller to suppress normal abort processing, 0 to proceed with it.
 */
649 static int
650 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
651 {
652 	/*
653 	 * return 0 means we want you to proceed with the abort non-zero
654 	 * means no abort processing
655 	 */
656 	struct sctpasochead *head;
657 
658 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
659 		/* generate a new vtag and send init */
		/* Remove from the vtag hash before the tag changes, re-insert after. */
660 		LIST_REMOVE(stcb, sctp_asocs);
661 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
662 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
663 		/*
664 		 * put it in the bucket in the vtag hash of assoc's for the
665 		 * system
666 		 */
667 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
668 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
669 		return (1);
670 	}
671 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
672 		/*
673 		 * treat like a case where the cookie expired i.e.: - dump
674 		 * current cookie. - generate a new vtag. - resend init.
675 		 */
676 		/* generate a new vtag and send init */
677 		LIST_REMOVE(stcb, sctp_asocs);
		/* Fall back from COOKIE-ECHOED to COOKIE-WAIT before re-INITing. */
678 		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
679 		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
680 		sctp_stop_all_cookie_timers(stcb);
681 		sctp_toss_old_cookies(stcb, &stcb->asoc);
682 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
683 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
684 		/*
685 		 * put it in the bucket in the vtag hash of assoc's for the
686 		 * system
687 		 */
688 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
689 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
690 		return (1);
691 	}
692 	return (0);
693 }
694 
695 static int
696 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
697     struct sctp_nets *net)
698 {
699 	/*
700 	 * return 0 means we want you to proceed with the abort non-zero
701 	 * means no abort processing
702 	 */
703 	if (stcb->asoc.peer_supports_auth == 0) {
704 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
705 		return (0);
706 	}
707 	sctp_asconf_send_nat_state_update(stcb, net);
708 	return (1);
709 }
710 
711 
/*
 * Handle an incoming ABORT chunk.  The two magic NAT error causes may
 * rescue the association instead of killing it; otherwise the receive
 * timer is stopped, the ULP is notified, and the TCB is freed.
 */
712 static void
713 sctp_handle_abort(struct sctp_abort_chunk *cp,
714     struct sctp_tcb *stcb, struct sctp_nets *net)
715 {
716 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
717 	struct socket *so;
718 
719 #endif
720 	uint16_t len;
721 
722 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
723 	if (stcb == NULL)
724 		return;
725 
726 	len = ntohs(cp->ch.chunk_length);
	/*
	 * NOTE(review): this only checks len > sizeof(chunkhdr) before
	 * reading a full sctp_missing_nat_state cause — confirm the mbuf
	 * pullup done by the caller guarantees that much contiguous data.
	 */
727 	if (len > sizeof(struct sctp_chunkhdr)) {
728 		/*
729 		 * Need to check the cause codes for our two magic nat
730 		 * aborts which don't kill the assoc necessarily.
731 		 */
732 		struct sctp_abort_chunk *cpnext;
733 		struct sctp_missing_nat_state *natc;
734 		uint16_t cause;
735 
		/* First error cause sits immediately after the abort chunk header. */
736 		cpnext = cp;
737 		cpnext++;
738 		natc = (struct sctp_missing_nat_state *)cpnext;
739 		cause = ntohs(natc->cause);
740 		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
741 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
742 			    cp->ch.chunk_flags);
743 			if (sctp_handle_nat_colliding_state(stcb)) {
				/* Association was rescued; do not tear it down. */
744 				return;
745 			}
746 		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
747 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
748 			    cp->ch.chunk_flags);
749 			if (sctp_handle_nat_missing_state(stcb, net)) {
750 				return;
751 			}
752 		}
753 	}
754 	/* stop any receive timers */
755 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
756 	/* notify user of the abort and clean up... */
757 	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
758 	/* free the tcb */
759 #if defined(SCTP_PANIC_ON_ABORT)
760 	printf("stcb:%p state:%d rport:%d net:%p\n",
761 	    stcb, stcb->asoc.state, stcb->rport, net);
762 	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
763 		panic("Received an ABORT");
764 	} else {
765 		printf("No panic its in state %x closed\n", stcb->asoc.state);
766 	}
767 #endif
768 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
769 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
770 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
771 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
772 	}
773 #ifdef SCTP_ASOCLOG_OF_TSNS
774 	sctp_print_out_track_log(stcb);
775 #endif
776 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* Lock ordering: drop TCB lock, take socket lock, retake TCB lock. */
777 	so = SCTP_INP_SO(stcb->sctp_ep);
778 	atomic_add_int(&stcb->asoc.refcnt, 1);
779 	SCTP_TCB_UNLOCK(stcb);
780 	SCTP_SOCKET_LOCK(so, 1);
781 	SCTP_TCB_LOCK(stcb);
782 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
783 #endif
784 	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/* NOTE(review): SCTP_LOC_6 is reused here and at the timer stop above —
	 * a distinct location code would make debug traces unambiguous. */
785 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
786 	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
787 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
788 	SCTP_SOCKET_UNLOCK(so, 1);
789 #endif
790 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
791 }
792 
/*
 * Handle an incoming SHUTDOWN chunk: ack outstanding data via
 * sctp_update_acked(), terminate any partial-delivery in progress,
 * move to SHUTDOWN-RECEIVED, and — once nothing is left to send —
 * reply with SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.  *abort_flag is
 * set by sctp_update_acked() on a protocol violation.
 */
793 static void
794 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
795     struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
796 {
797 	struct sctp_association *asoc;
798 	int some_on_streamwheel;
799 
800 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
801 	struct socket *so;
802 
803 #endif
804 
805 	SCTPDBG(SCTP_DEBUG_INPUT2,
806 	    "sctp_handle_shutdown: handling SHUTDOWN\n");
807 	if (stcb == NULL)
808 		return;
809 	asoc = &stcb->asoc;
	/* A SHUTDOWN is meaningless before the association is established. */
810 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
811 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
812 		return;
813 	}
814 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
815 		/* Shutdown NOT the expected size */
816 		return;
817 	} else {
		/* Process the cumulative TSN ack carried in the SHUTDOWN. */
818 		sctp_update_acked(stcb, cp, net, abort_flag);
819 	}
820 	if (asoc->control_pdapi) {
821 		/*
822 		 * With a normal shutdown we assume the end of last record.
823 		 */
824 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
825 		asoc->control_pdapi->end_added = 1;
826 		asoc->control_pdapi->pdapi_aborted = 1;
827 		asoc->control_pdapi = NULL;
828 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
829 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Lock ordering dance; the assoc may be freed while unlocked. */
830 		so = SCTP_INP_SO(stcb->sctp_ep);
831 		atomic_add_int(&stcb->asoc.refcnt, 1);
832 		SCTP_TCB_UNLOCK(stcb);
833 		SCTP_SOCKET_LOCK(so, 1);
834 		SCTP_TCB_LOCK(stcb);
835 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
836 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
837 			/* assoc was freed while we were unlocked */
838 			SCTP_SOCKET_UNLOCK(so, 1);
839 			return;
840 		}
841 #endif
842 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
843 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
844 		SCTP_SOCKET_UNLOCK(so, 1);
845 #endif
846 	}
847 	/* goto SHUTDOWN_RECEIVED state to block new requests */
848 	if (stcb->sctp_socket) {
849 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
850 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
851 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
852 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
853 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
854 			/*
855 			 * notify upper layer that peer has initiated a
856 			 * shutdown
857 			 */
858 			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
859 
860 			/* reset time */
861 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
862 		}
863 	}
864 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
865 		/*
866 		 * stop the shutdown timer, since we WILL move to
867 		 * SHUTDOWN-ACK-SENT.
868 		 */
869 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
870 	}
871 	/* Now is there unsent data on a stream somewhere? */
872 	some_on_streamwheel = sctp_is_there_unsent_data(stcb);
873 
874 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
875 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
876 	    some_on_streamwheel) {
877 		/* By returning we will push more data out */
878 		return;
879 	} else {
880 		/* no outstanding data to send, so move on... */
881 		/* send SHUTDOWN-ACK */
882 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
883 		/* move to SHUTDOWN-ACK-SENT state */
884 		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
885 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
886 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
887 		}
888 		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
889 		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
890 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
891 		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
892 		/* start SHUTDOWN timer */
893 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
894 		    stcb, net);
895 	}
896 }
897 
/*
 * Process an inbound SHUTDOWN-ACK chunk for an existing association.
 *
 * Valid only in SHUTDOWN-SENT or SHUTDOWN-ACK-SENT; in any other state the
 * chunk is ignored (the TCB lock is released on that path).  On success we
 * terminate any partial-delivery in progress, flush outbound queues, stop
 * the shutdown timer, send SHUTDOWN-COMPLETE, notify the ULP, and free the
 * association.  Caller holds the TCB lock on entry; the TCB is gone on
 * return from the normal path.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	/*
	 * NOTE(review): stcb is dereferenced here before the NULL check
	 * below - confirm callers never pass a NULL stcb on this build.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 * Close out the in-progress partial delivery under the
		 * inp read lock, then wake the reader.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so drop the TCB lock while holding a
		 * refcount to keep the association alive, then re-check
		 * that it was not closed while unlocked.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* are the queues empty?  If not, report everything as failed. */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/*
	 * free the TCB but first save off the ep.  Same lock-order dance
	 * as above: socket lock before TCB lock, held across the free.
	 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}
982 
983 /*
984  * Skip past the param header and then we will find the chunk that caused the
985  * problem. There are two possiblities ASCONF or FWD-TSN other than that and
986  * our peer must be broken.
987  */
988 static void
989 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
990     struct sctp_nets *net)
991 {
992 	struct sctp_chunkhdr *chk;
993 
994 	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
995 	switch (chk->chunk_type) {
996 	case SCTP_ASCONF_ACK:
997 	case SCTP_ASCONF:
998 		sctp_asconf_cleanup(stcb, net);
999 		break;
1000 	case SCTP_FORWARD_CUM_TSN:
1001 		stcb->asoc.peer_supports_prsctp = 0;
1002 		break;
1003 	default:
1004 		SCTPDBG(SCTP_DEBUG_INPUT2,
1005 		    "Peer does not support chunk type %d(%x)??\n",
1006 		    chk->chunk_type, (uint32_t) chk->chunk_type);
1007 		break;
1008 	}
1009 }
1010 
1011 /*
1012  * Skip past the param header and then we will find the param that caused the
1013  * problem.  There are a number of param's in a ASCONF OR the prsctp param
1014  * these will turn of specific features.
1015  */
1016 static void
1017 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1018 {
1019 	struct sctp_paramhdr *pbad;
1020 
1021 	pbad = phdr + 1;
1022 	switch (ntohs(pbad->param_type)) {
1023 		/* pr-sctp draft */
1024 	case SCTP_PRSCTP_SUPPORTED:
1025 		stcb->asoc.peer_supports_prsctp = 0;
1026 		break;
1027 	case SCTP_SUPPORTED_CHUNK_EXT:
1028 		break;
1029 		/* draft-ietf-tsvwg-addip-sctp */
1030 	case SCTP_HAS_NAT_SUPPORT:
1031 		stcb->asoc.peer_supports_nat = 0;
1032 		break;
1033 	case SCTP_ECN_NONCE_SUPPORTED:
1034 		stcb->asoc.peer_supports_ecn_nonce = 0;
1035 		stcb->asoc.ecn_nonce_allowed = 0;
1036 		stcb->asoc.ecn_allowed = 0;
1037 		break;
1038 	case SCTP_ADD_IP_ADDRESS:
1039 	case SCTP_DEL_IP_ADDRESS:
1040 	case SCTP_SET_PRIM_ADDR:
1041 		stcb->asoc.peer_supports_asconf = 0;
1042 		break;
1043 	case SCTP_SUCCESS_REPORT:
1044 	case SCTP_ERROR_CAUSE_IND:
1045 		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1046 		SCTPDBG(SCTP_DEBUG_INPUT2,
1047 		    "Turning off ASCONF to this strange peer\n");
1048 		stcb->asoc.peer_supports_asconf = 0;
1049 		break;
1050 	default:
1051 		SCTPDBG(SCTP_DEBUG_INPUT2,
1052 		    "Peer does not support param type %d(%x)??\n",
1053 		    pbad->param_type, (uint32_t) pbad->param_type);
1054 		break;
1055 	}
1056 }
1057 
/*
 * Walk the error causes carried in an OPERATION-ERROR chunk and react to
 * each one.  Returns 0 normally; returns -1 only when the stale-cookie
 * handling exhausts max_init_times and frees the association (the TCB is
 * gone in that case).  Caller holds the TCB lock.
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			/* causes we generated something wrong for - log only */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    ch->chunk_flags);
			/* non-zero return means the handler consumed the event */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				/*
				 * NOTE(review): this reads 4 bytes past the
				 * param header without checking that
				 * error_len covers them - confirm upstream
				 * validation guarantees the measure field
				 * is present.
				 */
				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					/*
					 * socket lock must be held before
					 * the TCB lock for the free; keep a
					 * refcount while unlocked
					 */
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					/* association is gone: tell caller */
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't recieve a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
			    error_type);
			break;
		}
		/* advance to the next cause, rounded up to a 4-byte boundary */
		adjust = SCTP_SIZE32(error_len);
		chklen -= adjust;
		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
	}
	return (0);
}
1207 
1208 static int
1209 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1210     struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1211     struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1212 {
1213 	struct sctp_init_ack *init_ack;
1214 	struct mbuf *op_err;
1215 
1216 	SCTPDBG(SCTP_DEBUG_INPUT2,
1217 	    "sctp_handle_init_ack: handling INIT-ACK\n");
1218 
1219 	if (stcb == NULL) {
1220 		SCTPDBG(SCTP_DEBUG_INPUT2,
1221 		    "sctp_handle_init_ack: TCB is null\n");
1222 		return (-1);
1223 	}
1224 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1225 		/* Invalid length */
1226 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1227 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1228 		    op_err, 0, net->port);
1229 		*abort_no_unlock = 1;
1230 		return (-1);
1231 	}
1232 	init_ack = &cp->init;
1233 	/* validate parameters */
1234 	if (init_ack->initiate_tag == 0) {
1235 		/* protocol error... send an abort */
1236 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1237 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1238 		    op_err, 0, net->port);
1239 		*abort_no_unlock = 1;
1240 		return (-1);
1241 	}
1242 	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1243 		/* protocol error... send an abort */
1244 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1245 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1246 		    op_err, 0, net->port);
1247 		*abort_no_unlock = 1;
1248 		return (-1);
1249 	}
1250 	if (init_ack->num_inbound_streams == 0) {
1251 		/* protocol error... send an abort */
1252 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1253 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1254 		    op_err, 0, net->port);
1255 		*abort_no_unlock = 1;
1256 		return (-1);
1257 	}
1258 	if (init_ack->num_outbound_streams == 0) {
1259 		/* protocol error... send an abort */
1260 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1261 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1262 		    op_err, 0, net->port);
1263 		*abort_no_unlock = 1;
1264 		return (-1);
1265 	}
1266 	/* process according to association state... */
1267 	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1268 	case SCTP_STATE_COOKIE_WAIT:
1269 		/* this is the expected state for this chunk */
1270 		/* process the INIT-ACK parameters */
1271 		if (stcb->asoc.primary_destination->dest_state &
1272 		    SCTP_ADDR_UNCONFIRMED) {
1273 			/*
1274 			 * The primary is where we sent the INIT, we can
1275 			 * always consider it confirmed when the INIT-ACK is
1276 			 * returned. Do this before we load addresses
1277 			 * though.
1278 			 */
1279 			stcb->asoc.primary_destination->dest_state &=
1280 			    ~SCTP_ADDR_UNCONFIRMED;
1281 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1282 			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1283 		}
1284 		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1285 		    net, abort_no_unlock, vrf_id) < 0) {
1286 			/* error in parsing parameters */
1287 			return (-1);
1288 		}
1289 		/* update our state */
1290 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1291 		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1292 
1293 		/* reset the RTO calc */
1294 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1295 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1296 			    stcb->asoc.overall_error_count,
1297 			    0,
1298 			    SCTP_FROM_SCTP_INPUT,
1299 			    __LINE__);
1300 		}
1301 		stcb->asoc.overall_error_count = 0;
1302 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1303 		/*
1304 		 * collapse the init timer back in case of a exponential
1305 		 * backoff
1306 		 */
1307 		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1308 		    stcb, net);
1309 		/*
1310 		 * the send at the end of the inbound data processing will
1311 		 * cause the cookie to be sent
1312 		 */
1313 		break;
1314 	case SCTP_STATE_SHUTDOWN_SENT:
1315 		/* incorrect state... discard */
1316 		break;
1317 	case SCTP_STATE_COOKIE_ECHOED:
1318 		/* incorrect state... discard */
1319 		break;
1320 	case SCTP_STATE_OPEN:
1321 		/* incorrect state... discard */
1322 		break;
1323 	case SCTP_STATE_EMPTY:
1324 	case SCTP_STATE_INUSE:
1325 	default:
1326 		/* incorrect state... discard */
1327 		return (-1);
1328 		break;
1329 	}
1330 	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1331 	return (0);
1332 }
1333 
1334 static struct sctp_tcb *
1335 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1336     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1337     struct sctp_inpcb *inp, struct sctp_nets **netp,
1338     struct sockaddr *init_src, int *notification,
1339     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1340     uint32_t vrf_id, uint16_t port);
1341 
1342 
1343 /*
1344  * handle a state cookie for an existing association m: input packet mbuf
1345  * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1346  * "split" mbuf and the cookie signature does not exist offset: offset into
1347  * mbuf to the cookie-echo chunk
1348  */
1349 static struct sctp_tcb *
1350 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1351     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1352     struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1353     struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1354     uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1355 {
1356 	struct sctp_association *asoc;
1357 	struct sctp_init_chunk *init_cp, init_buf;
1358 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1359 	struct sctp_nets *net;
1360 	struct mbuf *op_err;
1361 	struct sctp_paramhdr *ph;
1362 	int chk_length;
1363 	int init_offset, initack_offset, i;
1364 	int retval;
1365 	int spec_flag = 0;
1366 	uint32_t how_indx;
1367 
1368 	net = *netp;
1369 	/* I know that the TCB is non-NULL from the caller */
1370 	asoc = &stcb->asoc;
1371 	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1372 		if (asoc->cookie_how[how_indx] == 0)
1373 			break;
1374 	}
1375 	if (how_indx < sizeof(asoc->cookie_how)) {
1376 		asoc->cookie_how[how_indx] = 1;
1377 	}
1378 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1379 		/* SHUTDOWN came in after sending INIT-ACK */
1380 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1381 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1382 		    0, M_DONTWAIT, 1, MT_DATA);
1383 		if (op_err == NULL) {
1384 			/* FOOBAR */
1385 			return (NULL);
1386 		}
1387 		/* pre-reserve some space */
1388 #ifdef INET6
1389 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1390 #else
1391 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1392 #endif
1393 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1394 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1395 		/* Set the len */
1396 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1397 		ph = mtod(op_err, struct sctp_paramhdr *);
1398 		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1399 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1400 		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1401 		    vrf_id, net->port);
1402 		if (how_indx < sizeof(asoc->cookie_how))
1403 			asoc->cookie_how[how_indx] = 2;
1404 		return (NULL);
1405 	}
1406 	/*
1407 	 * find and validate the INIT chunk in the cookie (peer's info) the
1408 	 * INIT should start after the cookie-echo header struct (chunk
1409 	 * header, state cookie header struct)
1410 	 */
1411 	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1412 
1413 	init_cp = (struct sctp_init_chunk *)
1414 	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1415 	    (uint8_t *) & init_buf);
1416 	if (init_cp == NULL) {
1417 		/* could not pull a INIT chunk in cookie */
1418 		return (NULL);
1419 	}
1420 	chk_length = ntohs(init_cp->ch.chunk_length);
1421 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1422 		return (NULL);
1423 	}
1424 	/*
1425 	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1426 	 * INIT-ACK follows the INIT chunk
1427 	 */
1428 	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1429 	initack_cp = (struct sctp_init_ack_chunk *)
1430 	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1431 	    (uint8_t *) & initack_buf);
1432 	if (initack_cp == NULL) {
1433 		/* could not pull INIT-ACK chunk in cookie */
1434 		return (NULL);
1435 	}
1436 	chk_length = ntohs(initack_cp->ch.chunk_length);
1437 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1438 		return (NULL);
1439 	}
1440 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1441 	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1442 		/*
1443 		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1444 		 * to get into the OPEN state
1445 		 */
1446 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1447 			/*-
1448 			 * Opps, this means that we somehow generated two vtag's
1449 			 * the same. I.e. we did:
1450 			 *  Us               Peer
1451 			 *   <---INIT(tag=a)------
1452 			 *   ----INIT-ACK(tag=t)-->
1453 			 *   ----INIT(tag=t)------> *1
1454 			 *   <---INIT-ACK(tag=a)---
1455                          *   <----CE(tag=t)------------- *2
1456 			 *
1457 			 * At point *1 we should be generating a different
1458 			 * tag t'. Which means we would throw away the CE and send
1459 			 * ours instead. Basically this is case C (throw away side).
1460 			 */
1461 			if (how_indx < sizeof(asoc->cookie_how))
1462 				asoc->cookie_how[how_indx] = 17;
1463 			return (NULL);
1464 
1465 		}
1466 		switch SCTP_GET_STATE
1467 			(asoc) {
1468 		case SCTP_STATE_COOKIE_WAIT:
1469 		case SCTP_STATE_COOKIE_ECHOED:
1470 			/*
1471 			 * INIT was sent but got a COOKIE_ECHO with the
1472 			 * correct tags... just accept it...but we must
1473 			 * process the init so that we can make sure we have
1474 			 * the right seq no's.
1475 			 */
1476 			/* First we must process the INIT !! */
1477 			retval = sctp_process_init(init_cp, stcb, net);
1478 			if (retval < 0) {
1479 				if (how_indx < sizeof(asoc->cookie_how))
1480 					asoc->cookie_how[how_indx] = 3;
1481 				return (NULL);
1482 			}
1483 			/* we have already processed the INIT so no problem */
1484 			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1485 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1486 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1487 			/* update current state */
1488 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1489 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1490 			else
1491 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1492 
1493 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1494 			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1495 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1496 				    stcb->sctp_ep, stcb, asoc->primary_destination);
1497 			}
1498 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1499 			sctp_stop_all_cookie_timers(stcb);
1500 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1501 			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1502 			    (inp->sctp_socket->so_qlimit == 0)
1503 			    ) {
1504 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1505 				struct socket *so;
1506 
1507 #endif
1508 				/*
1509 				 * Here is where collision would go if we
1510 				 * did a connect() and instead got a
1511 				 * init/init-ack/cookie done before the
1512 				 * init-ack came back..
1513 				 */
1514 				stcb->sctp_ep->sctp_flags |=
1515 				    SCTP_PCB_FLAGS_CONNECTED;
1516 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1517 				so = SCTP_INP_SO(stcb->sctp_ep);
1518 				atomic_add_int(&stcb->asoc.refcnt, 1);
1519 				SCTP_TCB_UNLOCK(stcb);
1520 				SCTP_SOCKET_LOCK(so, 1);
1521 				SCTP_TCB_LOCK(stcb);
1522 				atomic_add_int(&stcb->asoc.refcnt, -1);
1523 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1524 					SCTP_SOCKET_UNLOCK(so, 1);
1525 					return (NULL);
1526 				}
1527 #endif
1528 				soisconnected(stcb->sctp_socket);
1529 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1530 				SCTP_SOCKET_UNLOCK(so, 1);
1531 #endif
1532 			}
1533 			/* notify upper layer */
1534 			*notification = SCTP_NOTIFY_ASSOC_UP;
1535 			/*
1536 			 * since we did not send a HB make sure we don't
1537 			 * double things
1538 			 */
1539 			net->hb_responded = 1;
1540 			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1541 			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1542 
1543 			if (stcb->asoc.sctp_autoclose_ticks &&
1544 			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1545 				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1546 				    inp, stcb, NULL);
1547 			}
1548 			break;
1549 		default:
1550 			/*
1551 			 * we're in the OPEN state (or beyond), so peer must
1552 			 * have simply lost the COOKIE-ACK
1553 			 */
1554 			break;
1555 			}	/* end switch */
1556 		sctp_stop_all_cookie_timers(stcb);
1557 		/*
1558 		 * We ignore the return code here.. not sure if we should
1559 		 * somehow abort.. but we do have an existing asoc. This
1560 		 * really should not fail.
1561 		 */
1562 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1563 		    init_offset + sizeof(struct sctp_init_chunk),
1564 		    initack_offset, sh, init_src)) {
1565 			if (how_indx < sizeof(asoc->cookie_how))
1566 				asoc->cookie_how[how_indx] = 4;
1567 			return (NULL);
1568 		}
1569 		/* respond with a COOKIE-ACK */
1570 		sctp_toss_old_cookies(stcb, asoc);
1571 		sctp_send_cookie_ack(stcb);
1572 		if (how_indx < sizeof(asoc->cookie_how))
1573 			asoc->cookie_how[how_indx] = 5;
1574 		return (stcb);
1575 	}
1576 	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1577 	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1578 	    cookie->tie_tag_my_vtag == 0 &&
1579 	    cookie->tie_tag_peer_vtag == 0) {
1580 		/*
1581 		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1582 		 */
1583 		if (how_indx < sizeof(asoc->cookie_how))
1584 			asoc->cookie_how[how_indx] = 6;
1585 		return (NULL);
1586 	}
1587 	/*
1588 	 * If nat support, and the below and stcb is established, send back
1589 	 * a ABORT(colliding state) if we are established.
1590 	 */
1591 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1592 	    (asoc->peer_supports_nat) &&
1593 	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1594 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1595 	    (asoc->peer_vtag == 0)))) {
1596 		/*
1597 		 * Special case - Peer's support nat. We may have two init's
1598 		 * that we gave out the same tag on since one was not
1599 		 * established.. i.e. we get INIT from host-1 behind the nat
1600 		 * and we respond tag-a, we get a INIT from host-2 behind
1601 		 * the nat and we get tag-a again. Then we bring up host-1
1602 		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1603 		 * Now we have colliding state. We must send an abort here
1604 		 * with colliding state indication.
1605 		 */
1606 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1607 		    0, M_DONTWAIT, 1, MT_DATA);
1608 		if (op_err == NULL) {
1609 			/* FOOBAR */
1610 			return (NULL);
1611 		}
1612 		/* pre-reserve some space */
1613 #ifdef INET6
1614 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1615 #else
1616 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1617 #endif
1618 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1619 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1620 		/* Set the len */
1621 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1622 		ph = mtod(op_err, struct sctp_paramhdr *);
1623 		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1624 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1625 		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1626 		return (NULL);
1627 	}
1628 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1629 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1630 	    (asoc->peer_vtag == 0))) {
1631 		/*
1632 		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1633 		 * should be ok, re-accept peer info
1634 		 */
1635 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1636 			/*
1637 			 * Extension of case C. If we hit this, then the
1638 			 * random number generator returned the same vtag
1639 			 * when we first sent our INIT-ACK and when we later
1640 			 * sent our INIT. The side with the seq numbers that
1641 			 * are different will be the one that normnally
1642 			 * would have hit case C. This in effect "extends"
1643 			 * our vtags in this collision case to be 64 bits.
1644 			 * The same collision could occur aka you get both
1645 			 * vtag and seq number the same twice in a row.. but
1646 			 * is much less likely. If it did happen then we
1647 			 * would proceed through and bring up the assoc.. we
1648 			 * may end up with the wrong stream setup however..
1649 			 * which would be bad.. but there is no way to
1650 			 * tell.. until we send on a stream that does not
1651 			 * exist :-)
1652 			 */
1653 			if (how_indx < sizeof(asoc->cookie_how))
1654 				asoc->cookie_how[how_indx] = 7;
1655 
1656 			return (NULL);
1657 		}
1658 		if (how_indx < sizeof(asoc->cookie_how))
1659 			asoc->cookie_how[how_indx] = 8;
1660 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1661 		sctp_stop_all_cookie_timers(stcb);
1662 		/*
1663 		 * since we did not send a HB make sure we don't double
1664 		 * things
1665 		 */
1666 		net->hb_responded = 1;
1667 		if (stcb->asoc.sctp_autoclose_ticks &&
1668 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1669 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1670 			    NULL);
1671 		}
1672 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1673 		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1674 
1675 		/* Note last_cwr_tsn? where is this used? */
1676 		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1677 		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1678 			/*
1679 			 * Ok the peer probably discarded our data (if we
1680 			 * echoed a cookie+data). So anything on the
1681 			 * sent_queue should be marked for retransmit, we
1682 			 * may not get something to kick us so it COULD
1683 			 * still take a timeout to move these.. but it can't
1684 			 * hurt to mark them.
1685 			 */
1686 			struct sctp_tmit_chunk *chk;
1687 
1688 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1689 				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1690 					chk->sent = SCTP_DATAGRAM_RESEND;
1691 					sctp_flight_size_decrease(chk);
1692 					sctp_total_flight_decrease(stcb, chk);
1693 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1694 					spec_flag++;
1695 				}
1696 			}
1697 
1698 		}
1699 		/* process the INIT info (peer's info) */
1700 		retval = sctp_process_init(init_cp, stcb, net);
1701 		if (retval < 0) {
1702 			if (how_indx < sizeof(asoc->cookie_how))
1703 				asoc->cookie_how[how_indx] = 9;
1704 			return (NULL);
1705 		}
1706 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1707 		    init_offset + sizeof(struct sctp_init_chunk),
1708 		    initack_offset, sh, init_src)) {
1709 			if (how_indx < sizeof(asoc->cookie_how))
1710 				asoc->cookie_how[how_indx] = 10;
1711 			return (NULL);
1712 		}
1713 		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1714 		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1715 			*notification = SCTP_NOTIFY_ASSOC_UP;
1716 
1717 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1718 			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1719 			    (inp->sctp_socket->so_qlimit == 0)) {
1720 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1721 				struct socket *so;
1722 
1723 #endif
1724 				stcb->sctp_ep->sctp_flags |=
1725 				    SCTP_PCB_FLAGS_CONNECTED;
1726 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1727 				so = SCTP_INP_SO(stcb->sctp_ep);
1728 				atomic_add_int(&stcb->asoc.refcnt, 1);
1729 				SCTP_TCB_UNLOCK(stcb);
1730 				SCTP_SOCKET_LOCK(so, 1);
1731 				SCTP_TCB_LOCK(stcb);
1732 				atomic_add_int(&stcb->asoc.refcnt, -1);
1733 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1734 					SCTP_SOCKET_UNLOCK(so, 1);
1735 					return (NULL);
1736 				}
1737 #endif
1738 				soisconnected(stcb->sctp_socket);
1739 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1740 				SCTP_SOCKET_UNLOCK(so, 1);
1741 #endif
1742 			}
1743 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1744 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1745 			else
1746 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1747 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1748 		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1749 			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1750 		} else {
1751 			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1752 		}
1753 		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1754 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1755 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1756 			    stcb->sctp_ep, stcb, asoc->primary_destination);
1757 		}
1758 		sctp_stop_all_cookie_timers(stcb);
1759 		sctp_toss_old_cookies(stcb, asoc);
1760 		sctp_send_cookie_ack(stcb);
1761 		if (spec_flag) {
1762 			/*
1763 			 * only if we have retrans set do we do this. What
1764 			 * this call does is get only the COOKIE-ACK out and
1765 			 * then when we return the normal call to
1766 			 * sctp_chunk_output will get the retrans out behind
1767 			 * this.
1768 			 */
1769 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1770 		}
1771 		if (how_indx < sizeof(asoc->cookie_how))
1772 			asoc->cookie_how[how_indx] = 11;
1773 
1774 		return (stcb);
1775 	}
1776 	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1777 	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1778 	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1779 	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1780 	    cookie->tie_tag_peer_vtag != 0) {
1781 		struct sctpasochead *head;
1782 
1783 		if (asoc->peer_supports_nat) {
1784 			/*
1785 			 * This is a gross gross hack. just call the
1786 			 * cookie_new code since we are allowing a duplicate
1787 			 * association. I hope this works...
1788 			 */
1789 			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1790 			    inp, netp, init_src, notification,
1791 			    auth_skipped, auth_offset, auth_len,
1792 			    vrf_id, port));
1793 		}
1794 		/*
1795 		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1796 		 */
1797 		/* temp code */
1798 		if (how_indx < sizeof(asoc->cookie_how))
1799 			asoc->cookie_how[how_indx] = 12;
1800 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1801 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1802 
1803 		*sac_assoc_id = sctp_get_associd(stcb);
1804 		/* notify upper layer */
1805 		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1806 		atomic_add_int(&stcb->asoc.refcnt, 1);
1807 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1808 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1809 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1810 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1811 		}
1812 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1813 			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1814 		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1815 			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1816 		}
1817 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1818 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1819 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1820 			    stcb->sctp_ep, stcb, asoc->primary_destination);
1821 
1822 		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1823 			/* move to OPEN state, if not in SHUTDOWN_SENT */
1824 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1825 		}
1826 		asoc->pre_open_streams =
1827 		    ntohs(initack_cp->init.num_outbound_streams);
1828 		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1829 		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1830 		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1831 
1832 		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1833 		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1834 
1835 		asoc->str_reset_seq_in = asoc->init_seq_number;
1836 
1837 		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1838 		if (asoc->mapping_array) {
1839 			memset(asoc->mapping_array, 0,
1840 			    asoc->mapping_array_size);
1841 		}
1842 		/* EY 05/13/08 - nr_sack version of the above if statement */
1843 		if (asoc->nr_mapping_array && SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)
1844 		    && asoc->peer_supports_nr_sack) {
1845 			memset(asoc->nr_mapping_array, 0,
1846 			    asoc->nr_mapping_array_size);
1847 		}
1848 		SCTP_TCB_UNLOCK(stcb);
1849 		SCTP_INP_INFO_WLOCK();
1850 		SCTP_INP_WLOCK(stcb->sctp_ep);
1851 		SCTP_TCB_LOCK(stcb);
1852 		atomic_add_int(&stcb->asoc.refcnt, -1);
1853 		/* send up all the data */
1854 		SCTP_TCB_SEND_LOCK(stcb);
1855 
1856 		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1857 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1858 			stcb->asoc.strmout[i].stream_no = i;
1859 			stcb->asoc.strmout[i].next_sequence_sent = 0;
1860 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1861 		}
1862 		/* process the INIT-ACK info (my info) */
1863 		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1864 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1865 
1866 		/* pull from vtag hash */
1867 		LIST_REMOVE(stcb, sctp_asocs);
1868 		/* re-insert to new vtag position */
1869 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1870 		    SCTP_BASE_INFO(hashasocmark))];
1871 		/*
1872 		 * put it in the bucket in the vtag hash of assoc's for the
1873 		 * system
1874 		 */
1875 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1876 
1877 		/* process the INIT info (peer's info) */
1878 		SCTP_TCB_SEND_UNLOCK(stcb);
1879 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1880 		SCTP_INP_INFO_WUNLOCK();
1881 
1882 		retval = sctp_process_init(init_cp, stcb, net);
1883 		if (retval < 0) {
1884 			if (how_indx < sizeof(asoc->cookie_how))
1885 				asoc->cookie_how[how_indx] = 13;
1886 
1887 			return (NULL);
1888 		}
1889 		/*
1890 		 * since we did not send a HB make sure we don't double
1891 		 * things
1892 		 */
1893 		net->hb_responded = 1;
1894 
1895 		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1896 		    init_offset + sizeof(struct sctp_init_chunk),
1897 		    initack_offset, sh, init_src)) {
1898 			if (how_indx < sizeof(asoc->cookie_how))
1899 				asoc->cookie_how[how_indx] = 14;
1900 
1901 			return (NULL);
1902 		}
1903 		/* respond with a COOKIE-ACK */
1904 		sctp_stop_all_cookie_timers(stcb);
1905 		sctp_toss_old_cookies(stcb, asoc);
1906 		sctp_send_cookie_ack(stcb);
1907 		if (how_indx < sizeof(asoc->cookie_how))
1908 			asoc->cookie_how[how_indx] = 15;
1909 
1910 		return (stcb);
1911 	}
1912 	if (how_indx < sizeof(asoc->cookie_how))
1913 		asoc->cookie_how[how_indx] = 16;
1914 	/* all other cases... */
1915 	return (NULL);
1916 }
1917 
1918 
/*
 * Handle a state cookie for a new association.
 *
 * m:      input packet mbuf chain -- assumes a pullup on the
 *         IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *         and the cookie signature does not exist.
 * offset: offset into the mbuf to the cookie-echo chunk.
 * length: length of the cookie chunk.
 * to:     where the init was from.
 *
 * Returns a new TCB.
 */
struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	/* INIT-ACK follows the (32-bit padded) INIT inside the cookie */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and popluate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	/*
	 * NOTE(review): 'error' filled in by sctp_aloc_assoc() is never
	 * examined below; failure is detected via the NULL return only.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		/*
		 * NOTE(review): the refcnt bump + TCB unlock/relock around
		 * taking the socket lock appears to be a lock-ordering
		 * workaround (socket lock before TCB lock) while keeping
		 * the stcb pinned -- confirm against SCTP locking rules.
		 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	/* NOTE(review): old_tag is saved here but never used below. */
	old_tag = asoc->my_vtag;
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	/* all outbound sequence spaces start from our initial TSN */
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		/*
		 * NOTE(review): SCTP_LOC_16 is also used by the
		 * addr-legal-mismatch abort above; a distinct location code
		 * here would make the free-assoc trace unambiguous.
		 */
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown local address type in cookie: dump the new assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		/* socket may have been closed while the TCB lock was down */
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		/* seed the RTO from the cookie's original timestamp */
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2268 
2269 /*
 * Code like this needs to run if the peer supports the NAT extension, i.e.
 * we need to make sure we are not already using the vtag. If so, we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle box bit!).
2273 	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2274 							    SCTP_BASE_INFO(hashasocmark))];
2275 	LIST_FOREACH(stcb, head, sctp_asocs) {
2276 	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2277 		       -- SEND ABORT - TRY AGAIN --
2278 		}
2279 	}
2280 */
2281 
2282 /*
2283  * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2284  * existing (non-NULL) TCB
2285  */
2286 static struct mbuf *
2287 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2288     struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2289     struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2290     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2291     struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2292 {
2293 	struct sctp_state_cookie *cookie;
2294 	struct sockaddr_in6 sin6;
2295 	struct sockaddr_in sin;
2296 	struct sctp_tcb *l_stcb = *stcb;
2297 	struct sctp_inpcb *l_inp;
2298 	struct sockaddr *to;
2299 	sctp_assoc_t sac_restart_id;
2300 	struct sctp_pcb *ep;
2301 	struct mbuf *m_sig;
2302 	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2303 	uint8_t *sig;
2304 	uint8_t cookie_ok = 0;
2305 	unsigned int size_of_pkt, sig_offset, cookie_offset;
2306 	unsigned int cookie_len;
2307 	struct timeval now;
2308 	struct timeval time_expires;
2309 	struct sockaddr_storage dest_store;
2310 	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2311 	struct ip *iph;
2312 	int notification = 0;
2313 	struct sctp_nets *netl;
2314 	int had_a_existing_tcb = 0;
2315 
2316 	SCTPDBG(SCTP_DEBUG_INPUT2,
2317 	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2318 
2319 	if (inp_p == NULL) {
2320 		return (NULL);
2321 	}
2322 	/* First get the destination address setup too. */
2323 	iph = mtod(m, struct ip *);
2324 	switch (iph->ip_v) {
2325 	case IPVERSION:
2326 		{
2327 			/* its IPv4 */
2328 			struct sockaddr_in *lsin;
2329 
2330 			lsin = (struct sockaddr_in *)(localep_sa);
2331 			memset(lsin, 0, sizeof(*lsin));
2332 			lsin->sin_family = AF_INET;
2333 			lsin->sin_len = sizeof(*lsin);
2334 			lsin->sin_port = sh->dest_port;
2335 			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2336 			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2337 			break;
2338 		}
2339 #ifdef INET6
2340 	case IPV6_VERSION >> 4:
2341 		{
2342 			/* its IPv6 */
2343 			struct ip6_hdr *ip6;
2344 			struct sockaddr_in6 *lsin6;
2345 
2346 			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2347 			memset(lsin6, 0, sizeof(*lsin6));
2348 			lsin6->sin6_family = AF_INET6;
2349 			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2350 			ip6 = mtod(m, struct ip6_hdr *);
2351 			lsin6->sin6_port = sh->dest_port;
2352 			lsin6->sin6_addr = ip6->ip6_dst;
2353 			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2354 			break;
2355 		}
2356 #endif
2357 	default:
2358 		return (NULL);
2359 	}
2360 
2361 	cookie = &cp->cookie;
2362 	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2363 	cookie_len = ntohs(cp->ch.chunk_length);
2364 
2365 	if ((cookie->peerport != sh->src_port) &&
2366 	    (cookie->myport != sh->dest_port) &&
2367 	    (cookie->my_vtag != sh->v_tag)) {
2368 		/*
2369 		 * invalid ports or bad tag.  Note that we always leave the
2370 		 * v_tag in the header in network order and when we stored
2371 		 * it in the my_vtag slot we also left it in network order.
2372 		 * This maintains the match even though it may be in the
2373 		 * opposite byte order of the machine :->
2374 		 */
2375 		return (NULL);
2376 	}
2377 	if (cookie_len > size_of_pkt ||
2378 	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2379 	    sizeof(struct sctp_init_chunk) +
2380 	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2381 		/* cookie too long!  or too small */
2382 		return (NULL);
2383 	}
2384 	/*
2385 	 * split off the signature into its own mbuf (since it should not be
2386 	 * calculated in the sctp_hmac_m() call).
2387 	 */
2388 	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2389 	if (sig_offset > size_of_pkt) {
2390 		/* packet not correct size! */
2391 		/* XXX this may already be accounted for earlier... */
2392 		return (NULL);
2393 	}
2394 	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2395 	if (m_sig == NULL) {
2396 		/* out of memory or ?? */
2397 		return (NULL);
2398 	}
2399 #ifdef SCTP_MBUF_LOGGING
2400 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2401 		struct mbuf *mat;
2402 
2403 		mat = m_sig;
2404 		while (mat) {
2405 			if (SCTP_BUF_IS_EXTENDED(mat)) {
2406 				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2407 			}
2408 			mat = SCTP_BUF_NEXT(mat);
2409 		}
2410 	}
2411 #endif
2412 
2413 	/*
2414 	 * compute the signature/digest for the cookie
2415 	 */
2416 	ep = &(*inp_p)->sctp_ep;
2417 	l_inp = *inp_p;
2418 	if (l_stcb) {
2419 		SCTP_TCB_UNLOCK(l_stcb);
2420 	}
2421 	SCTP_INP_RLOCK(l_inp);
2422 	if (l_stcb) {
2423 		SCTP_TCB_LOCK(l_stcb);
2424 	}
2425 	/* which cookie is it? */
2426 	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2427 	    (ep->current_secret_number != ep->last_secret_number)) {
2428 		/* it's the old cookie */
2429 		(void)sctp_hmac_m(SCTP_HMAC,
2430 		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2431 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2432 	} else {
2433 		/* it's the current cookie */
2434 		(void)sctp_hmac_m(SCTP_HMAC,
2435 		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2436 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2437 	}
2438 	/* get the signature */
2439 	SCTP_INP_RUNLOCK(l_inp);
2440 	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2441 	if (sig == NULL) {
2442 		/* couldn't find signature */
2443 		sctp_m_freem(m_sig);
2444 		return (NULL);
2445 	}
2446 	/* compare the received digest with the computed digest */
2447 	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2448 		/* try the old cookie? */
2449 		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2450 		    (ep->current_secret_number != ep->last_secret_number)) {
2451 			/* compute digest with old */
2452 			(void)sctp_hmac_m(SCTP_HMAC,
2453 			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2454 			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2455 			/* compare */
2456 			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2457 				cookie_ok = 1;
2458 		}
2459 	} else {
2460 		cookie_ok = 1;
2461 	}
2462 
2463 	/*
2464 	 * Now before we continue we must reconstruct our mbuf so that
2465 	 * normal processing of any other chunks will work.
2466 	 */
2467 	{
2468 		struct mbuf *m_at;
2469 
2470 		m_at = m;
2471 		while (SCTP_BUF_NEXT(m_at) != NULL) {
2472 			m_at = SCTP_BUF_NEXT(m_at);
2473 		}
2474 		SCTP_BUF_NEXT(m_at) = m_sig;
2475 	}
2476 
2477 	if (cookie_ok == 0) {
2478 		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2479 		SCTPDBG(SCTP_DEBUG_INPUT2,
2480 		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2481 		    (uint32_t) offset, cookie_offset, sig_offset);
2482 		return (NULL);
2483 	}
2484 	/*
2485 	 * check the cookie timestamps to be sure it's not stale
2486 	 */
2487 	(void)SCTP_GETTIME_TIMEVAL(&now);
2488 	/* Expire time is in Ticks, so we convert to seconds */
2489 	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2490 	time_expires.tv_usec = cookie->time_entered.tv_usec;
2491 	/*
2492 	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2493 	 * is undefined.
2494 	 */
2495 	if (timevalcmp(&now, &time_expires, >)) {
2496 		/* cookie is stale! */
2497 		struct mbuf *op_err;
2498 		struct sctp_stale_cookie_msg *scm;
2499 		uint32_t tim;
2500 
2501 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2502 		    0, M_DONTWAIT, 1, MT_DATA);
2503 		if (op_err == NULL) {
2504 			/* FOOBAR */
2505 			return (NULL);
2506 		}
2507 		/* pre-reserve some space */
2508 #ifdef INET6
2509 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
2510 #else
2511 		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
2512 #endif
2513 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
2514 		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
2515 
2516 		/* Set the len */
2517 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2518 		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2519 		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2520 		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2521 		    (sizeof(uint32_t))));
2522 		/* seconds to usec */
2523 		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2524 		/* add in usec */
2525 		if (tim == 0)
2526 			tim = now.tv_usec - cookie->time_entered.tv_usec;
2527 		scm->time_usec = htonl(tim);
2528 		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2529 		    vrf_id, port);
2530 		return (NULL);
2531 	}
2532 	/*
2533 	 * Now we must see with the lookup address if we have an existing
2534 	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2535 	 * and a INIT collided with us and somewhere the peer sent the
2536 	 * cookie on another address besides the single address our assoc
2537 	 * had for him. In this case we will have one of the tie-tags set at
2538 	 * least AND the address field in the cookie can be used to look it
2539 	 * up.
2540 	 */
2541 	to = NULL;
2542 	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2543 		memset(&sin6, 0, sizeof(sin6));
2544 		sin6.sin6_family = AF_INET6;
2545 		sin6.sin6_len = sizeof(sin6);
2546 		sin6.sin6_port = sh->src_port;
2547 		sin6.sin6_scope_id = cookie->scope_id;
2548 		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2549 		    sizeof(sin6.sin6_addr.s6_addr));
2550 		to = (struct sockaddr *)&sin6;
2551 	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2552 		memset(&sin, 0, sizeof(sin));
2553 		sin.sin_family = AF_INET;
2554 		sin.sin_len = sizeof(sin);
2555 		sin.sin_port = sh->src_port;
2556 		sin.sin_addr.s_addr = cookie->address[0];
2557 		to = (struct sockaddr *)&sin;
2558 	} else {
2559 		/* This should not happen */
2560 		return (NULL);
2561 	}
2562 	if ((*stcb == NULL) && to) {
2563 		/* Yep, lets check */
2564 		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2565 		if (*stcb == NULL) {
2566 			/*
2567 			 * We should have only got back the same inp. If we
2568 			 * got back a different ep we have a problem. The
2569 			 * original findep got back l_inp and now
2570 			 */
2571 			if (l_inp != *inp_p) {
2572 				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2573 			}
2574 		} else {
2575 			if (*locked_tcb == NULL) {
2576 				/*
2577 				 * In this case we found the assoc only
2578 				 * after we locked the create lock. This
2579 				 * means we are in a colliding case and we
2580 				 * must make sure that we unlock the tcb if
2581 				 * its one of the cases where we throw away
2582 				 * the incoming packets.
2583 				 */
2584 				*locked_tcb = *stcb;
2585 
2586 				/*
2587 				 * We must also increment the inp ref count
2588 				 * since the ref_count flags was set when we
2589 				 * did not find the TCB, now we found it
2590 				 * which reduces the refcount.. we must
2591 				 * raise it back out to balance it all :-)
2592 				 */
2593 				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2594 				if ((*stcb)->sctp_ep != l_inp) {
2595 					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2596 					    (*stcb)->sctp_ep, l_inp);
2597 				}
2598 			}
2599 		}
2600 	}
2601 	if (to == NULL)
2602 		return (NULL);
2603 
2604 	cookie_len -= SCTP_SIGNATURE_SIZE;
2605 	if (*stcb == NULL) {
2606 		/* this is the "normal" case... get a new TCB */
2607 		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2608 		    cookie_len, *inp_p, netp, to, &notification,
2609 		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2610 	} else {
2611 		/* this is abnormal... cookie-echo on existing TCB */
2612 		had_a_existing_tcb = 1;
2613 		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2614 		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2615 		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2616 	}
2617 
2618 	if (*stcb == NULL) {
2619 		/* still no TCB... must be bad cookie-echo */
2620 		return (NULL);
2621 	}
2622 	/*
2623 	 * Ok, we built an association so confirm the address we sent the
2624 	 * INIT-ACK to.
2625 	 */
2626 	netl = sctp_findnet(*stcb, to);
2627 	/*
2628 	 * This code should in theory NOT run but
2629 	 */
2630 	if (netl == NULL) {
2631 		/* TSNH! Huh, why do I need to add this address here? */
2632 		int ret;
2633 
2634 		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2635 		    SCTP_IN_COOKIE_PROC);
2636 		netl = sctp_findnet(*stcb, to);
2637 	}
2638 	if (netl) {
2639 		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2640 			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2641 			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2642 			    netl);
2643 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2644 			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2645 		}
2646 	}
2647 	if (*stcb) {
2648 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2649 		    *stcb, NULL);
2650 	}
2651 	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2652 		if (!had_a_existing_tcb ||
2653 		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2654 			/*
2655 			 * If we have a NEW cookie or the connect never
2656 			 * reached the connected state during collision we
2657 			 * must do the TCP accept thing.
2658 			 */
2659 			struct socket *so, *oso;
2660 			struct sctp_inpcb *inp;
2661 
2662 			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2663 				/*
2664 				 * For a restart we will keep the same
2665 				 * socket, no need to do anything. I THINK!!
2666 				 */
2667 				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2668 				return (m);
2669 			}
2670 			oso = (*inp_p)->sctp_socket;
2671 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2672 			SCTP_TCB_UNLOCK((*stcb));
2673 			so = sonewconn(oso, 0
2674 			    );
2675 			SCTP_TCB_LOCK((*stcb));
2676 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2677 
2678 			if (so == NULL) {
2679 				struct mbuf *op_err;
2680 
2681 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2682 				struct socket *pcb_so;
2683 
2684 #endif
2685 				/* Too many sockets */
2686 				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2687 				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2688 				sctp_abort_association(*inp_p, NULL, m, iphlen,
2689 				    sh, op_err, vrf_id, port);
2690 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691 				pcb_so = SCTP_INP_SO(*inp_p);
2692 				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2693 				SCTP_TCB_UNLOCK((*stcb));
2694 				SCTP_SOCKET_LOCK(pcb_so, 1);
2695 				SCTP_TCB_LOCK((*stcb));
2696 				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2697 #endif
2698 				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2699 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2700 				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2701 #endif
2702 				return (NULL);
2703 			}
2704 			inp = (struct sctp_inpcb *)so->so_pcb;
2705 			SCTP_INP_INCR_REF(inp);
2706 			/*
2707 			 * We add the unbound flag here so that if we get an
2708 			 * soabort() before we get the move_pcb done, we
2709 			 * will properly cleanup.
2710 			 */
2711 			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2712 			    SCTP_PCB_FLAGS_CONNECTED |
2713 			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2714 			    SCTP_PCB_FLAGS_UNBOUND |
2715 			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2716 			    SCTP_PCB_FLAGS_DONT_WAKE);
2717 			inp->sctp_features = (*inp_p)->sctp_features;
2718 			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2719 			inp->sctp_socket = so;
2720 			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2721 			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2722 			inp->sctp_context = (*inp_p)->sctp_context;
2723 			inp->inp_starting_point_for_iterator = NULL;
2724 			/*
2725 			 * copy in the authentication parameters from the
2726 			 * original endpoint
2727 			 */
2728 			if (inp->sctp_ep.local_hmacs)
2729 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2730 			inp->sctp_ep.local_hmacs =
2731 			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2732 			if (inp->sctp_ep.local_auth_chunks)
2733 				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2734 			inp->sctp_ep.local_auth_chunks =
2735 			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2736 
2737 			/*
2738 			 * Now we must move it from one hash table to
2739 			 * another and get the tcb in the right place.
2740 			 */
2741 			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2742 
2743 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2744 			SCTP_TCB_UNLOCK((*stcb));
2745 
2746 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2747 			    0);
2748 			SCTP_TCB_LOCK((*stcb));
2749 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2750 
2751 
2752 			/*
2753 			 * now we must check to see if we were aborted while
2754 			 * the move was going on and the lock/unlock
2755 			 * happened.
2756 			 */
2757 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2758 				/*
2759 				 * yep it was, we leave the assoc attached
2760 				 * to the socket since the sctp_inpcb_free()
2761 				 * call will send an abort for us.
2762 				 */
2763 				SCTP_INP_DECR_REF(inp);
2764 				return (NULL);
2765 			}
2766 			SCTP_INP_DECR_REF(inp);
2767 			/* Switch over to the new guy */
2768 			*inp_p = inp;
2769 			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2770 
2771 			/*
2772 			 * Pull it from the incomplete queue and wake the
2773 			 * guy
2774 			 */
2775 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2776 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2777 			SCTP_TCB_UNLOCK((*stcb));
2778 			SCTP_SOCKET_LOCK(so, 1);
2779 #endif
2780 			soisconnected(so);
2781 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2782 			SCTP_TCB_LOCK((*stcb));
2783 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2784 			SCTP_SOCKET_UNLOCK(so, 1);
2785 #endif
2786 			return (m);
2787 		}
2788 	}
2789 	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2790 		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2791 	}
2792 	return (m);
2793 }
2794 
/*
 * Handle an incoming COOKIE-ACK chunk: move the association from
 * COOKIE-ECHOED to OPEN, notify the ULP, start the periodic timers
 * (heartbeat, optional autoclose), flush any queued ASCONFs, toss the
 * now-acknowledged cookie and restart the data timer if data is pending.
 * 'cp' itself is never dereferenced (callers may pass a synthetic ack).
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/* a shutdown was requested while still handshaking */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/* only sample the RTT if no retransmissions clouded it */
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock-order dance: the socket lock must be taken
			 * without holding the TCB lock, so drop/reacquire
			 * with a refcount hold to keep the TCB alive.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}
2897 
2898 static void
2899 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2900     struct sctp_tcb *stcb)
2901 {
2902 	struct sctp_nets *net;
2903 	struct sctp_tmit_chunk *lchk;
2904 	uint32_t tsn;
2905 
2906 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2907 		return;
2908 	}
2909 	SCTP_STAT_INCR(sctps_recvecne);
2910 	tsn = ntohl(cp->tsn);
2911 	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2912 	/* Also we make sure we disable the nonce_wait */
2913 	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2914 	if (lchk == NULL) {
2915 		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2916 	} else {
2917 		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2918 	}
2919 	stcb->asoc.nonce_wait_for_ecne = 0;
2920 	stcb->asoc.nonce_sum_check = 0;
2921 
2922 	/* Find where it was sent, if possible */
2923 	net = NULL;
2924 	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2925 	while (lchk) {
2926 		if (lchk->rec.data.TSN_seq == tsn) {
2927 			net = lchk->whoTo;
2928 			break;
2929 		}
2930 		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2931 			break;
2932 		lchk = TAILQ_NEXT(lchk, sctp_next);
2933 	}
2934 	if (net == NULL)
2935 		/* default is we use the primary */
2936 		net = stcb->asoc.primary_destination;
2937 
2938 	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2939 		/*
2940 		 * JRS - Use the congestion control given in the pluggable
2941 		 * CC module
2942 		 */
2943 		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
2944 		/*
2945 		 * we reduce once every RTT. So we will only lower cwnd at
2946 		 * the next sending seq i.e. the resync_tsn.
2947 		 */
2948 		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2949 	}
2950 	/*
2951 	 * We always send a CWR this way if our previous one was lost our
2952 	 * peer will get an update, or if it is not time again to reduce we
2953 	 * still get the cwr to the peer.
2954 	 */
2955 	sctp_send_cwr(stcb, net, tsn);
2956 }
2957 
2958 static void
2959 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2960 {
2961 	/*
2962 	 * Here we get a CWR from the peer. We must look in the outqueue and
2963 	 * make sure that we have a covered ECNE in teh control chunk part.
2964 	 * If so remove it.
2965 	 */
2966 	struct sctp_tmit_chunk *chk;
2967 	struct sctp_ecne_chunk *ecne;
2968 
2969 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2970 		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2971 			continue;
2972 		}
2973 		/*
2974 		 * Look for and remove if it is the right TSN. Since there
2975 		 * is only ONE ECNE on the control queue at any one time we
2976 		 * don't need to worry about more than one!
2977 		 */
2978 		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2979 		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2980 		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2981 			/* this covers this ECNE, we can remove it */
2982 			stcb->asoc.ecn_echo_cnt_onq--;
2983 			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2984 			    sctp_next);
2985 			if (chk->data) {
2986 				sctp_m_freem(chk->data);
2987 				chk->data = NULL;
2988 			}
2989 			stcb->asoc.ctrl_queue_cnt--;
2990 			sctp_free_a_chunk(stcb, chk);
2991 			break;
2992 		}
2993 	}
2994 }
2995 
/*
 * Handle an incoming SHUTDOWN-COMPLETE chunk: if we are in
 * SHUTDOWN-ACK-SENT this finishes the shutdown handshake, so notify the
 * ULP, report any (unexpected) outbound data, stop the shutdown-ack
 * timer and free the association.  In any other state the chunk is
 * ignored.  The TCB lock is held on entry and released on every path
 * (sctp_free_assoc consumes it).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: acquire the socket lock before freeing the
	 * assoc; hold a refcount so the TCB survives the unlock window.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3051 
/*
 * Handle one chunk descriptor from a PACKET-DROPPED report: depending on
 * the dropped chunk's type, mark the corresponding queued chunk for
 * retransmission or resend the control chunk outright.  'flg' carries the
 * PKTDROP flags (e.g. SCTP_FROM_MIDDLE_BOX).  Returns 0 on success, -1
 * when the reflected DATA bytes do not match what we sent (corrupt or
 * forged report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
		case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* first pass: walk in TSN order, stop once past it */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
				while (tp1) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
					tp1 = TAILQ_NEXT(tp1, sctp_next);
				}
			}
			if (tp1 == NULL) {
				/* reported TSN is not on our sent queue at all */
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * With a closed peer rwnd, retransmitting
				 * now would only be dropped again; bail.
				 */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reflected payload bytes match
				 * what we actually sent (guards against a
				 * bogus report).
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}
				/*
				 * We zero out the nonce so resync not
				 * needed
				 */
				tp1->rec.data.ect_nonce = 0;

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				tp1->sent = SCTP_DATAGRAM_RESEND;
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			} {
				/*
				 * NOTE: this bare compound statement runs
				 * unconditionally (it is NOT an else of
				 * the if above): audit code that recounts
				 * RESEND-marked chunks and repairs the
				 * cached retran count if they disagree.
				 */
				/* audit code */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* mark the queued ASCONF (if any) for retransmission */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
		/* EY for nr_sacks */
	case SCTP_NR_SELECTIVE_ACK:
		sctp_send_nr_sack(stcb);	/* EY resend the nr-sack */
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* mark the queued COOKIE-ECHO for retransmission */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3293 
3294 void
3295 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3296 {
3297 	int i;
3298 	uint16_t temp;
3299 
3300 	/*
3301 	 * We set things to 0xffff since this is the last delivered sequence
3302 	 * and we will be sending in 0 after the reset.
3303 	 */
3304 
3305 	if (number_entries) {
3306 		for (i = 0; i < number_entries; i++) {
3307 			temp = ntohs(list[i]);
3308 			if (temp >= stcb->asoc.streamincnt) {
3309 				continue;
3310 			}
3311 			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3312 		}
3313 	} else {
3314 		list = NULL;
3315 		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3316 			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3317 		}
3318 	}
3319 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3320 }
3321 
3322 static void
3323 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3324 {
3325 	int i;
3326 
3327 	if (number_entries == 0) {
3328 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3329 			stcb->asoc.strmout[i].next_sequence_sent = 0;
3330 		}
3331 	} else if (number_entries) {
3332 		for (i = 0; i < number_entries; i++) {
3333 			uint16_t temp;
3334 
3335 			temp = ntohs(list[i]);
3336 			if (temp >= stcb->asoc.streamoutcnt) {
3337 				/* no such stream */
3338 				continue;
3339 			}
3340 			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3341 		}
3342 	}
3343 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3344 }
3345 
3346 
3347 struct sctp_stream_reset_out_request *
3348 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3349 {
3350 	struct sctp_association *asoc;
3351 	struct sctp_stream_reset_out_req *req;
3352 	struct sctp_stream_reset_out_request *r;
3353 	struct sctp_tmit_chunk *chk;
3354 	int len, clen;
3355 
3356 	asoc = &stcb->asoc;
3357 	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3358 		asoc->stream_reset_outstanding = 0;
3359 		return (NULL);
3360 	}
3361 	if (stcb->asoc.str_reset == NULL) {
3362 		asoc->stream_reset_outstanding = 0;
3363 		return (NULL);
3364 	}
3365 	chk = stcb->asoc.str_reset;
3366 	if (chk->data == NULL) {
3367 		return (NULL);
3368 	}
3369 	if (bchk) {
3370 		/* he wants a copy of the chk pointer */
3371 		*bchk = chk;
3372 	}
3373 	clen = chk->send_size;
3374 	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3375 	r = &req->sr_req;
3376 	if (ntohl(r->request_seq) == seq) {
3377 		/* found it */
3378 		return (r);
3379 	}
3380 	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3381 	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3382 		/* move to the next one, there can only be a max of two */
3383 		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3384 		if (ntohl(r->request_seq) == seq) {
3385 			return (r);
3386 		}
3387 	}
3388 	/* that seq is not here */
3389 	return (NULL);
3390 }
3391 
3392 static void
3393 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3394 {
3395 	struct sctp_association *asoc;
3396 	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3397 
3398 	if (stcb->asoc.str_reset == NULL) {
3399 		return;
3400 	}
3401 	asoc = &stcb->asoc;
3402 
3403 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3404 	TAILQ_REMOVE(&asoc->control_send_queue,
3405 	    chk,
3406 	    sctp_next);
3407 	if (chk->data) {
3408 		sctp_m_freem(chk->data);
3409 		chk->data = NULL;
3410 	}
3411 	asoc->ctrl_queue_cnt--;
3412 	sctp_free_a_chunk(stcb, chk);
3413 	/* sa_ignore NO_NULL_CHK */
3414 	stcb->asoc.str_reset = NULL;
3415 }
3416 
3417 
/*
 * Handle the response to a stream-reset request we sent earlier.  'seq'
 * is the request sequence being answered, 'action' the peer's verdict,
 * 'respin' the raw response parameter (needed only for TSN resets).
 * Depending on the original request type this resets outbound streams,
 * acknowledges an inbound reset, or performs a full TSN resynchronization
 * (adopt new in-TSN, clear the mapping array, adopt new out-TSN).
 * Returns 1 if the association was aborted during forward-TSN handling,
 * otherwise 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/* stream count follows the fixed header in the param */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Fabricate a FORWARD-TSN up to one
					 * before the peer's next TSN so the
					 * reassembly state is flushed.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* rebase the inbound TSN tracking state */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					/*
					 * EY 05/13/08 - nr_sack: to keep
					 * nr_mapping array be consistent
					 * with mapping_array
					 */
					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
						stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
						stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
						memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
					}
					/* adopt the new outbound TSN from the peer */
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3517 
/*
 * Handle a peer's stream-reset IN request: the peer is asking us to reset
 * (some of) our outgoing streams.  If the request sequence number is the
 * next expected one, we answer by queueing an OUT request of our own on
 * the response chunk 'chk'; otherwise we re-echo a cached result or
 * report a bad sequence number.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size  */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/* convert the listed stream ids to host byte order in place */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			/* queue our own OUT request covering the listed streams */
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			/* arm the retransmission timer for the stream-reset chunk */
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission of the previous request: echo its cached result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* retransmission from two requests back: echo that result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* outside the replay window: bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}
3573 
/*
 * Handle a peer's stream-reset TSN request: reset all incoming and
 * outgoing streams and re-base the TSN mapping state.  Returns 1 when
 * processing the synthetic fwd-tsn aborted the association (caller must
 * stop), 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * Run a locally-built fwd-tsn up to the top of the current
		 * mapping array so any pending reassembly state is flushed
		 * before the map is re-based.
		 */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			/* the fwd-tsn processing tore down the association */
			return (1);
		}
		/* jump the receive window forward and clear the mapping array */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		/*
		 * EY 05/13/08 -nr_sack: to keep nr_mapping array consistent
		 * with mapping array
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
			stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
			stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
		}
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		/* report our new sending seq and map base back to the peer */
		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission of the previous request: replay saved state */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* retransmission from two requests back: replay older saved state */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		/* outside the replay window */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}
3649 
3650 static void
3651 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3652     struct sctp_tmit_chunk *chk,
3653     struct sctp_stream_reset_out_request *req, int trunc)
3654 {
3655 	uint32_t seq, tsn;
3656 	int number_entries, len;
3657 	struct sctp_association *asoc = &stcb->asoc;
3658 
3659 	seq = ntohl(req->request_seq);
3660 
3661 	/* now if its not a duplicate we process it */
3662 	if (asoc->str_reset_seq_in == seq) {
3663 		len = ntohs(req->ph.param_length);
3664 		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3665 		/*
3666 		 * the sender is resetting, handle the list issue.. we must
3667 		 * a) verify if we can do the reset, if so no problem b) If
3668 		 * we can't do the reset we must copy the request. c) queue
3669 		 * it, and setup the data in processor to trigger it off
3670 		 * when needed and dequeue all the queued data.
3671 		 */
3672 		tsn = ntohl(req->send_reset_at_tsn);
3673 
3674 		/* move the reset action back one */
3675 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3676 		if (trunc) {
3677 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3678 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3679 		} else if ((tsn == asoc->cumulative_tsn) ||
3680 		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3681 			/* we can do it now */
3682 			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3683 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3684 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3685 		} else {
3686 			/*
3687 			 * we must queue it up and thus wait for the TSN's
3688 			 * to arrive that are at or before tsn
3689 			 */
3690 			struct sctp_stream_reset_list *liste;
3691 			int siz;
3692 
3693 			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3694 			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3695 			    siz, SCTP_M_STRESET);
3696 			if (liste == NULL) {
3697 				/* gak out of memory */
3698 				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3699 				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3700 				return;
3701 			}
3702 			liste->tsn = tsn;
3703 			liste->number_entries = number_entries;
3704 			memcpy(&liste->req, req,
3705 			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3706 			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3707 			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3708 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3709 		}
3710 		asoc->str_reset_seq_in++;
3711 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3712 		/*
3713 		 * one seq back, just echo back last action since my
3714 		 * response was lost.
3715 		 */
3716 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3717 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3718 		/*
3719 		 * two seq back, just echo back last action since my
3720 		 * response was lost.
3721 		 */
3722 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3723 	} else {
3724 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3725 	}
3726 }
3727 
3728 #ifdef __GNUC__
3729 __attribute__((noinline))
3730 #endif
3731 	static int
3732 	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3733         struct sctp_stream_reset_out_req *sr_req)
3734 {
3735 	int chk_length, param_len, ptype;
3736 	struct sctp_paramhdr pstore;
3737 	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3738 
3739 	uint32_t seq;
3740 	int num_req = 0;
3741 	int trunc = 0;
3742 	struct sctp_tmit_chunk *chk;
3743 	struct sctp_chunkhdr *ch;
3744 	struct sctp_paramhdr *ph;
3745 	int ret_code = 0;
3746 	int num_param = 0;
3747 
3748 	/* now it may be a reset or a reset-response */
3749 	chk_length = ntohs(sr_req->ch.chunk_length);
3750 
3751 	/* setup for adding the response */
3752 	sctp_alloc_a_chunk(stcb, chk);
3753 	if (chk == NULL) {
3754 		return (ret_code);
3755 	}
3756 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3757 	chk->rec.chunk_id.can_take_data = 0;
3758 	chk->asoc = &stcb->asoc;
3759 	chk->no_fr_allowed = 0;
3760 	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3761 	chk->book_size_scale = 0;
3762 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3763 	if (chk->data == NULL) {
3764 strres_nochunk:
3765 		if (chk->data) {
3766 			sctp_m_freem(chk->data);
3767 			chk->data = NULL;
3768 		}
3769 		sctp_free_a_chunk(stcb, chk);
3770 		return (ret_code);
3771 	}
3772 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3773 
3774 	/* setup chunk parameters */
3775 	chk->sent = SCTP_DATAGRAM_UNSENT;
3776 	chk->snd_count = 0;
3777 	chk->whoTo = stcb->asoc.primary_destination;
3778 	atomic_add_int(&chk->whoTo->ref_count, 1);
3779 
3780 	ch = mtod(chk->data, struct sctp_chunkhdr *);
3781 	ch->chunk_type = SCTP_STREAM_RESET;
3782 	ch->chunk_flags = 0;
3783 	ch->chunk_length = htons(chk->send_size);
3784 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3785 	offset += sizeof(struct sctp_chunkhdr);
3786 	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3787 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3788 		if (ph == NULL)
3789 			break;
3790 		param_len = ntohs(ph->param_length);
3791 		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3792 			/* bad param */
3793 			break;
3794 		}
3795 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3796 		    (uint8_t *) & cstore);
3797 		ptype = ntohs(ph->param_type);
3798 		num_param++;
3799 		if (param_len > (int)sizeof(cstore)) {
3800 			trunc = 1;
3801 		} else {
3802 			trunc = 0;
3803 		}
3804 
3805 		if (num_param > SCTP_MAX_RESET_PARAMS) {
3806 			/* hit the max of parameters already sorry.. */
3807 			break;
3808 		}
3809 		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3810 			struct sctp_stream_reset_out_request *req_out;
3811 
3812 			req_out = (struct sctp_stream_reset_out_request *)ph;
3813 			num_req++;
3814 			if (stcb->asoc.stream_reset_outstanding) {
3815 				seq = ntohl(req_out->response_seq);
3816 				if (seq == stcb->asoc.str_reset_seq_out) {
3817 					/* implicit ack */
3818 					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3819 				}
3820 			}
3821 			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3822 		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3823 			struct sctp_stream_reset_in_request *req_in;
3824 
3825 			num_req++;
3826 
3827 			req_in = (struct sctp_stream_reset_in_request *)ph;
3828 
3829 			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3830 		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3831 			struct sctp_stream_reset_tsn_request *req_tsn;
3832 
3833 			num_req++;
3834 			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3835 
3836 			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3837 				ret_code = 1;
3838 				goto strres_nochunk;
3839 			}
3840 			/* no more */
3841 			break;
3842 		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3843 			struct sctp_stream_reset_response *resp;
3844 			uint32_t result;
3845 
3846 			resp = (struct sctp_stream_reset_response *)ph;
3847 			seq = ntohl(resp->response_seq);
3848 			result = ntohl(resp->result);
3849 			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3850 				ret_code = 1;
3851 				goto strres_nochunk;
3852 			}
3853 		} else {
3854 			break;
3855 		}
3856 		offset += SCTP_SIZE32(param_len);
3857 		chk_length -= SCTP_SIZE32(param_len);
3858 	}
3859 	if (num_req == 0) {
3860 		/* we have no response free the stuff */
3861 		goto strres_nochunk;
3862 	}
3863 	/* ok we have a chunk to link in */
3864 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3865 	    chk,
3866 	    sctp_next);
3867 	stcb->asoc.ctrl_queue_cnt++;
3868 	return (ret_code);
3869 }
3870 
3871 /*
3872  * Handle a router or endpoints report of a packet loss, there are two ways
3873  * to handle this, either we get the whole packet and must disect it
3874  * ourselves (possibly with truncation and or corruption) or it is a summary
3875  * from a middle box that did the disectting for us.
3876  */
3877 static void
3878 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3879     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3880 {
3881 	uint32_t bottle_bw, on_queue;
3882 	uint16_t trunc_len;
3883 	unsigned int chlen;
3884 	unsigned int at;
3885 	struct sctp_chunk_desc desc;
3886 	struct sctp_chunkhdr *ch;
3887 
3888 	chlen = ntohs(cp->ch.chunk_length);
3889 	chlen -= sizeof(struct sctp_pktdrop_chunk);
3890 	/* XXX possible chlen underflow */
3891 	if (chlen == 0) {
3892 		ch = NULL;
3893 		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3894 			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3895 	} else {
3896 		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3897 		chlen -= sizeof(struct sctphdr);
3898 		/* XXX possible chlen underflow */
3899 		memset(&desc, 0, sizeof(desc));
3900 	}
3901 	trunc_len = (uint16_t) ntohs(cp->trunc_len);
3902 	if (trunc_len > limit) {
3903 		trunc_len = limit;
3904 	}
3905 	/* now the chunks themselves */
3906 	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3907 		desc.chunk_type = ch->chunk_type;
3908 		/* get amount we need to move */
3909 		at = ntohs(ch->chunk_length);
3910 		if (at < sizeof(struct sctp_chunkhdr)) {
3911 			/* corrupt chunk, maybe at the end? */
3912 			SCTP_STAT_INCR(sctps_pdrpcrupt);
3913 			break;
3914 		}
3915 		if (trunc_len == 0) {
3916 			/* we are supposed to have all of it */
3917 			if (at > chlen) {
3918 				/* corrupt skip it */
3919 				SCTP_STAT_INCR(sctps_pdrpcrupt);
3920 				break;
3921 			}
3922 		} else {
3923 			/* is there enough of it left ? */
3924 			if (desc.chunk_type == SCTP_DATA) {
3925 				if (chlen < (sizeof(struct sctp_data_chunk) +
3926 				    sizeof(desc.data_bytes))) {
3927 					break;
3928 				}
3929 			} else {
3930 				if (chlen < sizeof(struct sctp_chunkhdr)) {
3931 					break;
3932 				}
3933 			}
3934 		}
3935 		if (desc.chunk_type == SCTP_DATA) {
3936 			/* can we get out the tsn? */
3937 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3938 				SCTP_STAT_INCR(sctps_pdrpmbda);
3939 
3940 			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3941 				/* yep */
3942 				struct sctp_data_chunk *dcp;
3943 				uint8_t *ddp;
3944 				unsigned int iii;
3945 
3946 				dcp = (struct sctp_data_chunk *)ch;
3947 				ddp = (uint8_t *) (dcp + 1);
3948 				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3949 					desc.data_bytes[iii] = ddp[iii];
3950 				}
3951 				desc.tsn_ifany = dcp->dp.tsn;
3952 			} else {
3953 				/* nope we are done. */
3954 				SCTP_STAT_INCR(sctps_pdrpnedat);
3955 				break;
3956 			}
3957 		} else {
3958 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3959 				SCTP_STAT_INCR(sctps_pdrpmbct);
3960 		}
3961 
3962 		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3963 			SCTP_STAT_INCR(sctps_pdrppdbrk);
3964 			break;
3965 		}
3966 		if (SCTP_SIZE32(at) > chlen) {
3967 			break;
3968 		}
3969 		chlen -= SCTP_SIZE32(at);
3970 		if (chlen < sizeof(struct sctp_chunkhdr)) {
3971 			/* done, none left */
3972 			break;
3973 		}
3974 		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3975 	}
3976 	/* Now update any rwnd --- possibly */
3977 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3978 		/* From a peer, we get a rwnd report */
3979 		uint32_t a_rwnd;
3980 
3981 		SCTP_STAT_INCR(sctps_pdrpfehos);
3982 
3983 		bottle_bw = ntohl(cp->bottle_bw);
3984 		on_queue = ntohl(cp->current_onq);
3985 		if (bottle_bw && on_queue) {
3986 			/* a rwnd report is in here */
3987 			if (bottle_bw > on_queue)
3988 				a_rwnd = bottle_bw - on_queue;
3989 			else
3990 				a_rwnd = 0;
3991 
3992 			if (a_rwnd == 0)
3993 				stcb->asoc.peers_rwnd = 0;
3994 			else {
3995 				if (a_rwnd > stcb->asoc.total_flight) {
3996 					stcb->asoc.peers_rwnd =
3997 					    a_rwnd - stcb->asoc.total_flight;
3998 				} else {
3999 					stcb->asoc.peers_rwnd = 0;
4000 				}
4001 				if (stcb->asoc.peers_rwnd <
4002 				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4003 					/* SWS sender side engages */
4004 					stcb->asoc.peers_rwnd = 0;
4005 				}
4006 			}
4007 		}
4008 	} else {
4009 		SCTP_STAT_INCR(sctps_pdrpfmbox);
4010 	}
4011 
4012 	/* now middle boxes in sat networks get a cwnd bump */
4013 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4014 	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4015 	    (stcb->asoc.sat_network)) {
4016 		/*
4017 		 * This is debateable but for sat networks it makes sense
4018 		 * Note if a T3 timer has went off, we will prohibit any
4019 		 * changes to cwnd until we exit the t3 loss recovery.
4020 		 */
4021 		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4022 		    net, cp, &bottle_bw, &on_queue);
4023 	}
4024 }
4025 
4026 /*
4027  * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4028  * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4029  * offset: offset into the mbuf chain to first chunkhdr - length: is the
4030  * length of the complete packet outputs: - length: modified to remaining
4031  * length after control processing - netp: modified to new sctp_nets after
4032  * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4033  * bad packet,...) otherwise return the tcb for this packet
4034  */
4035 #ifdef __GNUC__
4036 __attribute__((noinline))
4037 #endif
4038 	static struct sctp_tcb *
4039 	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4040              struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4041              struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4042              uint32_t vrf_id, uint16_t port)
4043 {
4044 	struct sctp_association *asoc;
4045 	uint32_t vtag_in;
4046 	int num_chunks = 0;	/* number of control chunks processed */
4047 	uint32_t chk_length;
4048 	int ret;
4049 	int abort_no_unlock = 0;
4050 
4051 	/*
4052 	 * How big should this be, and should it be alloc'd? Lets try the
4053 	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4054 	 * until we get into jumbo grams and such..
4055 	 */
4056 	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4057 	struct sctp_tcb *locked_tcb = stcb;
4058 	int got_auth = 0;
4059 	uint32_t auth_offset = 0, auth_len = 0;
4060 	int auth_skipped = 0;
4061 	int asconf_cnt = 0;
4062 
4063 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4064 	struct socket *so;
4065 
4066 #endif
4067 
4068 	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4069 	    iphlen, *offset, length, stcb);
4070 
4071 	/* validate chunk header length... */
4072 	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4073 		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4074 		    ntohs(ch->chunk_length));
4075 		if (locked_tcb) {
4076 			SCTP_TCB_UNLOCK(locked_tcb);
4077 		}
4078 		return (NULL);
4079 	}
4080 	/*
4081 	 * validate the verification tag
4082 	 */
4083 	vtag_in = ntohl(sh->v_tag);
4084 
4085 	if (locked_tcb) {
4086 		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4087 	}
4088 	if (ch->chunk_type == SCTP_INITIATION) {
4089 		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4090 		    ntohs(ch->chunk_length), vtag_in);
4091 		if (vtag_in != 0) {
4092 			/* protocol error- silently discard... */
4093 			SCTP_STAT_INCR(sctps_badvtag);
4094 			if (locked_tcb) {
4095 				SCTP_TCB_UNLOCK(locked_tcb);
4096 			}
4097 			return (NULL);
4098 		}
4099 	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4100 		/*
4101 		 * If there is no stcb, skip the AUTH chunk and process
4102 		 * later after a stcb is found (to validate the lookup was
4103 		 * valid.
4104 		 */
4105 		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4106 		    (stcb == NULL) &&
4107 		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4108 			/* save this chunk for later processing */
4109 			auth_skipped = 1;
4110 			auth_offset = *offset;
4111 			auth_len = ntohs(ch->chunk_length);
4112 
4113 			/* (temporarily) move past this chunk */
4114 			*offset += SCTP_SIZE32(auth_len);
4115 			if (*offset >= length) {
4116 				/* no more data left in the mbuf chain */
4117 				*offset = length;
4118 				if (locked_tcb) {
4119 					SCTP_TCB_UNLOCK(locked_tcb);
4120 				}
4121 				return (NULL);
4122 			}
4123 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4124 			    sizeof(struct sctp_chunkhdr), chunk_buf);
4125 		}
4126 		if (ch == NULL) {
4127 			/* Help */
4128 			*offset = length;
4129 			if (locked_tcb) {
4130 				SCTP_TCB_UNLOCK(locked_tcb);
4131 			}
4132 			return (NULL);
4133 		}
4134 		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4135 			goto process_control_chunks;
4136 		}
4137 		/*
4138 		 * first check if it's an ASCONF with an unknown src addr we
4139 		 * need to look inside to find the association
4140 		 */
4141 		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4142 			struct sctp_chunkhdr *asconf_ch = ch;
4143 			uint32_t asconf_offset = 0, asconf_len = 0;
4144 
4145 			/* inp's refcount may be reduced */
4146 			SCTP_INP_INCR_REF(inp);
4147 
4148 			asconf_offset = *offset;
4149 			do {
4150 				asconf_len = ntohs(asconf_ch->chunk_length);
4151 				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4152 					break;
4153 				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4154 				    *offset, sh, &inp, netp, vrf_id);
4155 				if (stcb != NULL)
4156 					break;
4157 				asconf_offset += SCTP_SIZE32(asconf_len);
4158 				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4159 				    sizeof(struct sctp_chunkhdr), chunk_buf);
4160 			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4161 			if (stcb == NULL) {
4162 				/*
4163 				 * reduce inp's refcount if not reduced in
4164 				 * sctp_findassociation_ep_asconf().
4165 				 */
4166 				SCTP_INP_DECR_REF(inp);
4167 			} else {
4168 				locked_tcb = stcb;
4169 			}
4170 
4171 			/* now go back and verify any auth chunk to be sure */
4172 			if (auth_skipped && (stcb != NULL)) {
4173 				struct sctp_auth_chunk *auth;
4174 
4175 				auth = (struct sctp_auth_chunk *)
4176 				    sctp_m_getptr(m, auth_offset,
4177 				    auth_len, chunk_buf);
4178 				got_auth = 1;
4179 				auth_skipped = 0;
4180 				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4181 				    auth_offset)) {
4182 					/* auth HMAC failed so dump it */
4183 					*offset = length;
4184 					if (locked_tcb) {
4185 						SCTP_TCB_UNLOCK(locked_tcb);
4186 					}
4187 					return (NULL);
4188 				} else {
4189 					/* remaining chunks are HMAC checked */
4190 					stcb->asoc.authenticated = 1;
4191 				}
4192 			}
4193 		}
4194 		if (stcb == NULL) {
4195 			/* no association, so it's out of the blue... */
4196 			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4197 			    vrf_id, port);
4198 			*offset = length;
4199 			if (locked_tcb) {
4200 				SCTP_TCB_UNLOCK(locked_tcb);
4201 			}
4202 			return (NULL);
4203 		}
4204 		asoc = &stcb->asoc;
4205 		/* ABORT and SHUTDOWN can use either v_tag... */
4206 		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4207 		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4208 		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4209 			if ((vtag_in == asoc->my_vtag) ||
4210 			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4211 			    (vtag_in == asoc->peer_vtag))) {
4212 				/* this is valid */
4213 			} else {
4214 				/* drop this packet... */
4215 				SCTP_STAT_INCR(sctps_badvtag);
4216 				if (locked_tcb) {
4217 					SCTP_TCB_UNLOCK(locked_tcb);
4218 				}
4219 				return (NULL);
4220 			}
4221 		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4222 			if (vtag_in != asoc->my_vtag) {
4223 				/*
4224 				 * this could be a stale SHUTDOWN-ACK or the
4225 				 * peer never got the SHUTDOWN-COMPLETE and
4226 				 * is still hung; we have started a new asoc
4227 				 * but it won't complete until the shutdown
4228 				 * is completed
4229 				 */
4230 				if (locked_tcb) {
4231 					SCTP_TCB_UNLOCK(locked_tcb);
4232 				}
4233 				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4234 				    NULL, vrf_id, port);
4235 				return (NULL);
4236 			}
4237 		} else {
4238 			/* for all other chunks, vtag must match */
4239 			if (vtag_in != asoc->my_vtag) {
4240 				/* invalid vtag... */
4241 				SCTPDBG(SCTP_DEBUG_INPUT3,
4242 				    "invalid vtag: %xh, expect %xh\n",
4243 				    vtag_in, asoc->my_vtag);
4244 				SCTP_STAT_INCR(sctps_badvtag);
4245 				if (locked_tcb) {
4246 					SCTP_TCB_UNLOCK(locked_tcb);
4247 				}
4248 				*offset = length;
4249 				return (NULL);
4250 			}
4251 		}
4252 	}			/* end if !SCTP_COOKIE_ECHO */
4253 	/*
4254 	 * process all control chunks...
4255 	 */
4256 	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4257 	/* EY */
4258 	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4259 	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4260 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4261 		/* implied cookie-ack.. we must have lost the ack */
4262 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4263 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4264 			    stcb->asoc.overall_error_count,
4265 			    0,
4266 			    SCTP_FROM_SCTP_INPUT,
4267 			    __LINE__);
4268 		}
4269 		stcb->asoc.overall_error_count = 0;
4270 		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4271 		    *netp);
4272 	}
4273 process_control_chunks:
4274 	while (IS_SCTP_CONTROL(ch)) {
4275 		/* validate chunk length */
4276 		chk_length = ntohs(ch->chunk_length);
4277 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4278 		    ch->chunk_type, chk_length);
4279 		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4280 		if (chk_length < sizeof(*ch) ||
4281 		    (*offset + (int)chk_length) > length) {
4282 			*offset = length;
4283 			if (locked_tcb) {
4284 				SCTP_TCB_UNLOCK(locked_tcb);
4285 			}
4286 			return (NULL);
4287 		}
4288 		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4289 		/*
4290 		 * INIT-ACK only gets the init ack "header" portion only
4291 		 * because we don't have to process the peer's COOKIE. All
4292 		 * others get a complete chunk.
4293 		 */
4294 		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4295 		    (ch->chunk_type == SCTP_INITIATION)) {
4296 			/* get an init-ack chunk */
4297 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4298 			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4299 			if (ch == NULL) {
4300 				*offset = length;
4301 				if (locked_tcb) {
4302 					SCTP_TCB_UNLOCK(locked_tcb);
4303 				}
4304 				return (NULL);
4305 			}
4306 		} else {
4307 			/* For cookies and all other chunks. */
4308 			if (chk_length > sizeof(chunk_buf)) {
4309 				/*
4310 				 * use just the size of the chunk buffer so
4311 				 * the front part of our chunks fit in
4312 				 * contiguous space up to the chunk buffer
4313 				 * size (508 bytes). For chunks that need to
4314 				 * get more than that they must use the
4315 				 * sctp_m_getptr() function or other means
4316 				 * (e.g. know how to parse mbuf chains).
4317 				 * Cookies do this already.
4318 				 */
4319 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4320 				    (sizeof(chunk_buf) - 4),
4321 				    chunk_buf);
4322 				if (ch == NULL) {
4323 					*offset = length;
4324 					if (locked_tcb) {
4325 						SCTP_TCB_UNLOCK(locked_tcb);
4326 					}
4327 					return (NULL);
4328 				}
4329 			} else {
4330 				/* We can fit it all */
4331 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4332 				    chk_length, chunk_buf);
4333 				if (ch == NULL) {
4334 					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4335 					*offset = length;
4336 					if (locked_tcb) {
4337 						SCTP_TCB_UNLOCK(locked_tcb);
4338 					}
4339 					return (NULL);
4340 				}
4341 			}
4342 		}
4343 		num_chunks++;
4344 		/* Save off the last place we got a control from */
4345 		if (stcb != NULL) {
4346 			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4347 				/*
4348 				 * allow last_control to be NULL if
4349 				 * ASCONF... ASCONF processing will find the
4350 				 * right net later
4351 				 */
4352 				if ((netp != NULL) && (*netp != NULL))
4353 					stcb->asoc.last_control_chunk_from = *netp;
4354 			}
4355 		}
4356 #ifdef SCTP_AUDITING_ENABLED
4357 		sctp_audit_log(0xB0, ch->chunk_type);
4358 #endif
4359 
4360 		/* check to see if this chunk required auth, but isn't */
4361 		if ((stcb != NULL) &&
4362 		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4363 		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4364 		    !stcb->asoc.authenticated) {
4365 			/* "silently" ignore */
4366 			SCTP_STAT_INCR(sctps_recvauthmissing);
4367 			goto next_chunk;
4368 		}
4369 		switch (ch->chunk_type) {
4370 		case SCTP_INITIATION:
4371 			/* must be first and only chunk */
4372 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4373 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4374 				/* We are not interested anymore? */
4375 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4376 					/*
4377 					 * collision case where we are
4378 					 * sending to them too
4379 					 */
4380 					;
4381 				} else {
4382 					if (locked_tcb) {
4383 						SCTP_TCB_UNLOCK(locked_tcb);
4384 					}
4385 					*offset = length;
4386 					return (NULL);
4387 				}
4388 			}
4389 			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4390 			    (num_chunks > 1) ||
4391 			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4392 				*offset = length;
4393 				if (locked_tcb) {
4394 					SCTP_TCB_UNLOCK(locked_tcb);
4395 				}
4396 				return (NULL);
4397 			}
4398 			if ((stcb != NULL) &&
4399 			    (SCTP_GET_STATE(&stcb->asoc) ==
4400 			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4401 				sctp_send_shutdown_ack(stcb,
4402 				    stcb->asoc.primary_destination);
4403 				*offset = length;
4404 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4405 				if (locked_tcb) {
4406 					SCTP_TCB_UNLOCK(locked_tcb);
4407 				}
4408 				return (NULL);
4409 			}
4410 			if (netp) {
4411 				sctp_handle_init(m, iphlen, *offset, sh,
4412 				    (struct sctp_init_chunk *)ch, inp,
4413 				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4414 			}
4415 			if (abort_no_unlock)
4416 				return (NULL);
4417 
4418 			*offset = length;
4419 			if (locked_tcb) {
4420 				SCTP_TCB_UNLOCK(locked_tcb);
4421 			}
4422 			return (NULL);
4423 			break;
4424 		case SCTP_PAD_CHUNK:
4425 			break;
4426 		case SCTP_INITIATION_ACK:
4427 			/* must be first and only chunk */
4428 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4429 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4430 				/* We are not interested anymore */
4431 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4432 					;
4433 				} else {
4434 					if (locked_tcb) {
4435 						SCTP_TCB_UNLOCK(locked_tcb);
4436 					}
4437 					*offset = length;
4438 					if (stcb) {
4439 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4440 						so = SCTP_INP_SO(inp);
4441 						atomic_add_int(&stcb->asoc.refcnt, 1);
4442 						SCTP_TCB_UNLOCK(stcb);
4443 						SCTP_SOCKET_LOCK(so, 1);
4444 						SCTP_TCB_LOCK(stcb);
4445 						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4446 #endif
4447 						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4448 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4449 						SCTP_SOCKET_UNLOCK(so, 1);
4450 #endif
4451 					}
4452 					return (NULL);
4453 				}
4454 			}
4455 			if ((num_chunks > 1) ||
4456 			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4457 				*offset = length;
4458 				if (locked_tcb) {
4459 					SCTP_TCB_UNLOCK(locked_tcb);
4460 				}
4461 				return (NULL);
4462 			}
4463 			if ((netp) && (*netp)) {
4464 				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4465 				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4466 			} else {
4467 				ret = -1;
4468 			}
4469 			/*
4470 			 * Special case, I must call the output routine to
4471 			 * get the cookie echoed
4472 			 */
4473 			if (abort_no_unlock)
4474 				return (NULL);
4475 
4476 			if ((stcb) && ret == 0)
4477 				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4478 			*offset = length;
4479 			if (locked_tcb) {
4480 				SCTP_TCB_UNLOCK(locked_tcb);
4481 			}
4482 			return (NULL);
4483 			break;
4484 		case SCTP_SELECTIVE_ACK:
4485 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4486 			SCTP_STAT_INCR(sctps_recvsacks);
4487 			{
4488 				struct sctp_sack_chunk *sack;
4489 				int abort_now = 0;
4490 				uint32_t a_rwnd, cum_ack;
4491 				uint16_t num_seg;
4492 				int nonce_sum_flag;
4493 
4494 				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
4495 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
4496 					*offset = length;
4497 					if (locked_tcb) {
4498 						SCTP_TCB_UNLOCK(locked_tcb);
4499 					}
4500 					return (NULL);
4501 				}
4502 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4503 					/*-
4504 					 * If we have sent a shutdown-ack, we will pay no
4505 					 * attention to a sack sent in to us since
4506 					 * we don't care anymore.
4507 					 */
4508 					break;
4509 				}
4510 				sack = (struct sctp_sack_chunk *)ch;
4511 				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4512 				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4513 				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4514 				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4515 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4516 				    cum_ack,
4517 				    num_seg,
4518 				    a_rwnd
4519 				    );
4520 				stcb->asoc.seen_a_sack_this_pkt = 1;
4521 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4522 				    (num_seg == 0) &&
4523 				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4524 				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4525 				    (stcb->asoc.saw_sack_with_frags == 0) &&
4526 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4527 				    ) {
4528 					/*
4529 					 * We have a SIMPLE sack having no
4530 					 * prior segments and data on sent
4531 					 * queue to be acked.. Use the
4532 					 * faster path sack processing. We
4533 					 * also allow window update sacks
4534 					 * with no missing segments to go
4535 					 * this way too.
4536 					 */
4537 					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4538 					    &abort_now);
4539 				} else {
4540 					if (netp && *netp)
4541 						sctp_handle_sack(m, *offset,
4542 						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4543 				}
4544 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4545 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4546 				    (stcb->asoc.stream_queue_cnt == 0)) {
4547 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4548 				}
4549 				if (abort_now) {
4550 					/* ABORT signal from sack processing */
4551 					*offset = length;
4552 					return (NULL);
4553 				}
4554 			}
4555 			break;
4556 			/*
4557 			 * EY - nr_sack:  If the received chunk is an
4558 			 * nr_sack chunk
4559 			 */
4560 		case SCTP_NR_SELECTIVE_ACK:
4561 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4562 			SCTP_STAT_INCR(sctps_recvsacks);
4563 			{
4564 				struct sctp_nr_sack_chunk *nr_sack;
4565 				int abort_now = 0;
4566 				uint32_t a_rwnd, cum_ack;
4567 				uint16_t num_seg, num_nr_seg;
4568 				int nonce_sum_flag, all_bit;
4569 
4570 				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
4571 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
4572 			ignore_nr_sack:
4573 					*offset = length;
4574 					if (locked_tcb) {
4575 						SCTP_TCB_UNLOCK(locked_tcb);
4576 					}
4577 					return (NULL);
4578 				}
4579 				/*
4580 				 * EY nr_sacks have not been negotiated but
4581 				 * the peer end sent an nr_sack, silently
4582 				 * discard the chunk
4583 				 */
4584 				if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
4585 					goto unknown_chunk;
4586 				}
4587 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4588 					/*-
4589 					 * If we have sent a shutdown-ack, we will pay no
4590 					 * attention to a sack sent in to us since
4591 					 * we don't care anymore.
4592 					 */
4593 					goto ignore_nr_sack;
4594 				}
4595 				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4596 				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4597 				all_bit = ch->chunk_flags & SCTP_NR_SACK_ALL_BIT;
4598 
4599 				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4600 				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4601 				/*
4602 				 * EY -if All bit  is set, then there are as
4603 				 * many gaps as nr_gaps
4604 				 */
4605 				if (all_bit) {
4606 					num_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4607 				}
4608 				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4609 				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4610 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4611 				    cum_ack,
4612 				    num_seg,
4613 				    a_rwnd
4614 				    );
4615 				stcb->asoc.seen_a_sack_this_pkt = 1;
4616 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4617 				    (num_seg == 0) &&
4618 				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4619 				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4620 				    (stcb->asoc.saw_sack_with_frags == 0) &&
4621 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4622 				    ) {
4623 					/*
4624 					 * We have a SIMPLE sack having no
4625 					 * prior segments and data on sent
4626 					 * queue to be acked.. Use the
4627 					 * faster path sack processing. We
4628 					 * also allow window update sacks
4629 					 * with no missing segments to go
4630 					 * this way too.
4631 					 */
4632 					sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4633 					    &abort_now);
4634 				} else {
4635 					if (netp && *netp)
4636 						sctp_handle_nr_sack(m, *offset,
4637 						    nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4638 				}
4639 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4640 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4641 				    (stcb->asoc.stream_queue_cnt == 0)) {
4642 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4643 				}
4644 				if (abort_now) {
4645 					/* ABORT signal from sack processing */
4646 					*offset = length;
4647 					return (NULL);
4648 				}
4649 			}
4650 			break;
4651 
4652 		case SCTP_HEARTBEAT_REQUEST:
4653 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4654 			if ((stcb) && netp && *netp) {
4655 				SCTP_STAT_INCR(sctps_recvheartbeat);
4656 				sctp_send_heartbeat_ack(stcb, m, *offset,
4657 				    chk_length, *netp);
4658 
4659 				/* He's alive so give him credit */
4660 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4661 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4662 					    stcb->asoc.overall_error_count,
4663 					    0,
4664 					    SCTP_FROM_SCTP_INPUT,
4665 					    __LINE__);
4666 				}
4667 				stcb->asoc.overall_error_count = 0;
4668 			}
4669 			break;
4670 		case SCTP_HEARTBEAT_ACK:
4671 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4672 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4673 				/* Its not ours */
4674 				*offset = length;
4675 				if (locked_tcb) {
4676 					SCTP_TCB_UNLOCK(locked_tcb);
4677 				}
4678 				return (NULL);
4679 			}
4680 			/* He's alive so give him credit */
4681 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4682 				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4683 				    stcb->asoc.overall_error_count,
4684 				    0,
4685 				    SCTP_FROM_SCTP_INPUT,
4686 				    __LINE__);
4687 			}
4688 			stcb->asoc.overall_error_count = 0;
4689 			SCTP_STAT_INCR(sctps_recvheartbeatack);
4690 			if (netp && *netp)
4691 				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4692 				    stcb, *netp);
4693 			break;
4694 		case SCTP_ABORT_ASSOCIATION:
4695 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4696 			    stcb);
4697 			if ((stcb) && netp && *netp)
4698 				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4699 				    stcb, *netp);
4700 			*offset = length;
4701 			return (NULL);
4702 			break;
4703 		case SCTP_SHUTDOWN:
4704 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4705 			    stcb);
4706 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4707 				*offset = length;
4708 				if (locked_tcb) {
4709 					SCTP_TCB_UNLOCK(locked_tcb);
4710 				}
4711 				return (NULL);
4712 			}
4713 			if (netp && *netp) {
4714 				int abort_flag = 0;
4715 
4716 				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4717 				    stcb, *netp, &abort_flag);
4718 				if (abort_flag) {
4719 					*offset = length;
4720 					return (NULL);
4721 				}
4722 			}
4723 			break;
4724 		case SCTP_SHUTDOWN_ACK:
4725 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4726 			if ((stcb) && (netp) && (*netp))
4727 				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4728 			*offset = length;
4729 			return (NULL);
4730 			break;
4731 
4732 		case SCTP_OPERATION_ERROR:
4733 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4734 			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4735 
4736 				*offset = length;
4737 				return (NULL);
4738 			}
4739 			break;
4740 		case SCTP_COOKIE_ECHO:
4741 			SCTPDBG(SCTP_DEBUG_INPUT3,
4742 			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4743 			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4744 				;
4745 			} else {
4746 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4747 					/* We are not interested anymore */
4748 					*offset = length;
4749 					return (NULL);
4750 				}
4751 			}
4752 			/*
			 * First, are we accepting? We do this check again here
			 * since it is possible that a previous endpoint WAS
			 * listening, responded to an INIT-ACK, and then
			 * closed. We opened and bound... and are now no
			 * longer listening.
4758 			 */
4759 
4760 			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4761 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4762 				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4763 					struct mbuf *oper;
4764 					struct sctp_paramhdr *phdr;
4765 
4766 					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4767 					    0, M_DONTWAIT, 1, MT_DATA);
4768 					if (oper) {
4769 						SCTP_BUF_LEN(oper) =
4770 						    sizeof(struct sctp_paramhdr);
4771 						phdr = mtod(oper,
4772 						    struct sctp_paramhdr *);
4773 						phdr->param_type =
4774 						    htons(SCTP_CAUSE_OUT_OF_RESC);
4775 						phdr->param_length =
4776 						    htons(sizeof(struct sctp_paramhdr));
4777 					}
4778 					sctp_abort_association(inp, stcb, m,
4779 					    iphlen, sh, oper, vrf_id, port);
4780 				}
4781 				*offset = length;
4782 				return (NULL);
4783 			} else {
4784 				struct mbuf *ret_buf;
4785 				struct sctp_inpcb *linp;
4786 
4787 				if (stcb) {
4788 					linp = NULL;
4789 				} else {
4790 					linp = inp;
4791 				}
4792 
4793 				if (linp) {
4794 					SCTP_ASOC_CREATE_LOCK(linp);
4795 				}
4796 				if (netp) {
4797 					ret_buf =
4798 					    sctp_handle_cookie_echo(m, iphlen,
4799 					    *offset, sh,
4800 					    (struct sctp_cookie_echo_chunk *)ch,
4801 					    &inp, &stcb, netp,
4802 					    auth_skipped,
4803 					    auth_offset,
4804 					    auth_len,
4805 					    &locked_tcb,
4806 					    vrf_id,
4807 					    port);
4808 				} else {
4809 					ret_buf = NULL;
4810 				}
4811 				if (linp) {
4812 					SCTP_ASOC_CREATE_UNLOCK(linp);
4813 				}
4814 				if (ret_buf == NULL) {
4815 					if (locked_tcb) {
4816 						SCTP_TCB_UNLOCK(locked_tcb);
4817 					}
4818 					SCTPDBG(SCTP_DEBUG_INPUT3,
4819 					    "GAK, null buffer\n");
4820 					auth_skipped = 0;
4821 					*offset = length;
4822 					return (NULL);
4823 				}
4824 				/* if AUTH skipped, see if it verified... */
4825 				if (auth_skipped) {
4826 					got_auth = 1;
4827 					auth_skipped = 0;
4828 				}
4829 				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4830 					/*
4831 					 * Restart the timer if we have
4832 					 * pending data
4833 					 */
4834 					struct sctp_tmit_chunk *chk;
4835 
4836 					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4837 					if (chk) {
4838 						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4839 						    stcb->sctp_ep, stcb,
4840 						    chk->whoTo);
4841 					}
4842 				}
4843 			}
4844 			break;
4845 		case SCTP_COOKIE_ACK:
4846 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4847 			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4848 				if (locked_tcb) {
4849 					SCTP_TCB_UNLOCK(locked_tcb);
4850 				}
4851 				return (NULL);
4852 			}
4853 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4854 				/* We are not interested anymore */
4855 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4856 					;
4857 				} else if (stcb) {
4858 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4859 					so = SCTP_INP_SO(inp);
4860 					atomic_add_int(&stcb->asoc.refcnt, 1);
4861 					SCTP_TCB_UNLOCK(stcb);
4862 					SCTP_SOCKET_LOCK(so, 1);
4863 					SCTP_TCB_LOCK(stcb);
4864 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4865 #endif
4866 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4867 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4868 					SCTP_SOCKET_UNLOCK(so, 1);
4869 #endif
4870 					*offset = length;
4871 					return (NULL);
4872 				}
4873 			}
4874 			/* He's alive so give him credit */
4875 			if ((stcb) && netp && *netp) {
4876 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4877 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4878 					    stcb->asoc.overall_error_count,
4879 					    0,
4880 					    SCTP_FROM_SCTP_INPUT,
4881 					    __LINE__);
4882 				}
4883 				stcb->asoc.overall_error_count = 0;
4884 				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4885 			}
4886 			break;
4887 		case SCTP_ECN_ECHO:
4888 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4889 			/* He's alive so give him credit */
4890 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4891 				/* Its not ours */
4892 				if (locked_tcb) {
4893 					SCTP_TCB_UNLOCK(locked_tcb);
4894 				}
4895 				*offset = length;
4896 				return (NULL);
4897 			}
4898 			if (stcb) {
4899 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4900 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4901 					    stcb->asoc.overall_error_count,
4902 					    0,
4903 					    SCTP_FROM_SCTP_INPUT,
4904 					    __LINE__);
4905 				}
4906 				stcb->asoc.overall_error_count = 0;
4907 				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4908 				    stcb);
4909 			}
4910 			break;
4911 		case SCTP_ECN_CWR:
4912 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4913 			/* He's alive so give him credit */
4914 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4915 				/* Its not ours */
4916 				if (locked_tcb) {
4917 					SCTP_TCB_UNLOCK(locked_tcb);
4918 				}
4919 				*offset = length;
4920 				return (NULL);
4921 			}
4922 			if (stcb) {
4923 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4924 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4925 					    stcb->asoc.overall_error_count,
4926 					    0,
4927 					    SCTP_FROM_SCTP_INPUT,
4928 					    __LINE__);
4929 				}
4930 				stcb->asoc.overall_error_count = 0;
4931 				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4932 			}
4933 			break;
4934 		case SCTP_SHUTDOWN_COMPLETE:
4935 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4936 			/* must be first and only chunk */
4937 			if ((num_chunks > 1) ||
4938 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4939 				*offset = length;
4940 				if (locked_tcb) {
4941 					SCTP_TCB_UNLOCK(locked_tcb);
4942 				}
4943 				return (NULL);
4944 			}
4945 			if ((stcb) && netp && *netp) {
4946 				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4947 				    stcb, *netp);
4948 			}
4949 			*offset = length;
4950 			return (NULL);
4951 			break;
4952 		case SCTP_ASCONF:
4953 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4954 			/* He's alive so give him credit */
4955 			if (stcb) {
4956 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4957 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4958 					    stcb->asoc.overall_error_count,
4959 					    0,
4960 					    SCTP_FROM_SCTP_INPUT,
4961 					    __LINE__);
4962 				}
4963 				stcb->asoc.overall_error_count = 0;
4964 				sctp_handle_asconf(m, *offset,
4965 				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
4966 				asconf_cnt++;
4967 			}
4968 			break;
4969 		case SCTP_ASCONF_ACK:
4970 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4971 			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4972 				/* Its not ours */
4973 				if (locked_tcb) {
4974 					SCTP_TCB_UNLOCK(locked_tcb);
4975 				}
4976 				*offset = length;
4977 				return (NULL);
4978 			}
4979 			if ((stcb) && netp && *netp) {
4980 				/* He's alive so give him credit */
4981 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4982 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4983 					    stcb->asoc.overall_error_count,
4984 					    0,
4985 					    SCTP_FROM_SCTP_INPUT,
4986 					    __LINE__);
4987 				}
4988 				stcb->asoc.overall_error_count = 0;
4989 				sctp_handle_asconf_ack(m, *offset,
4990 				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
4991 				if (abort_no_unlock)
4992 					return (NULL);
4993 			}
4994 			break;
4995 		case SCTP_FORWARD_CUM_TSN:
4996 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4997 			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4998 				/* Its not ours */
4999 				if (locked_tcb) {
5000 					SCTP_TCB_UNLOCK(locked_tcb);
5001 				}
5002 				*offset = length;
5003 				return (NULL);
5004 			}
5005 			/* He's alive so give him credit */
5006 			if (stcb) {
5007 				int abort_flag = 0;
5008 
5009 				stcb->asoc.overall_error_count = 0;
5010 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5011 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5012 					    stcb->asoc.overall_error_count,
5013 					    0,
5014 					    SCTP_FROM_SCTP_INPUT,
5015 					    __LINE__);
5016 				}
5017 				*fwd_tsn_seen = 1;
5018 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5019 					/* We are not interested anymore */
5020 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5021 					so = SCTP_INP_SO(inp);
5022 					atomic_add_int(&stcb->asoc.refcnt, 1);
5023 					SCTP_TCB_UNLOCK(stcb);
5024 					SCTP_SOCKET_LOCK(so, 1);
5025 					SCTP_TCB_LOCK(stcb);
5026 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5027 #endif
5028 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5029 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5030 					SCTP_SOCKET_UNLOCK(so, 1);
5031 #endif
5032 					*offset = length;
5033 					return (NULL);
5034 				}
5035 				sctp_handle_forward_tsn(stcb,
5036 				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5037 				if (abort_flag) {
5038 					*offset = length;
5039 					return (NULL);
5040 				} else {
5041 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5042 						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5043 						    stcb->asoc.overall_error_count,
5044 						    0,
5045 						    SCTP_FROM_SCTP_INPUT,
5046 						    __LINE__);
5047 					}
5048 					stcb->asoc.overall_error_count = 0;
5049 				}
5050 
5051 			}
5052 			break;
5053 		case SCTP_STREAM_RESET:
5054 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5055 			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5056 				/* Its not ours */
5057 				if (locked_tcb) {
5058 					SCTP_TCB_UNLOCK(locked_tcb);
5059 				}
5060 				*offset = length;
5061 				return (NULL);
5062 			}
5063 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5064 				/* We are not interested anymore */
5065 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5066 				so = SCTP_INP_SO(inp);
5067 				atomic_add_int(&stcb->asoc.refcnt, 1);
5068 				SCTP_TCB_UNLOCK(stcb);
5069 				SCTP_SOCKET_LOCK(so, 1);
5070 				SCTP_TCB_LOCK(stcb);
5071 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5072 #endif
5073 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5074 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5075 				SCTP_SOCKET_UNLOCK(so, 1);
5076 #endif
5077 				*offset = length;
5078 				return (NULL);
5079 			}
5080 			if (stcb->asoc.peer_supports_strreset == 0) {
5081 				/*
5082 				 * hmm, peer should have announced this, but
5083 				 * we will turn it on since he is sending us
5084 				 * a stream reset.
5085 				 */
5086 				stcb->asoc.peer_supports_strreset = 1;
5087 			}
5088 			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5089 				/* stop processing */
5090 				*offset = length;
5091 				return (NULL);
5092 			}
5093 			break;
5094 		case SCTP_PACKET_DROPPED:
5095 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5096 			/* re-get it all please */
5097 			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5098 				/* Its not ours */
5099 				if (locked_tcb) {
5100 					SCTP_TCB_UNLOCK(locked_tcb);
5101 				}
5102 				*offset = length;
5103 				return (NULL);
5104 			}
5105 			if (ch && (stcb) && netp && (*netp)) {
5106 				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5107 				    stcb, *netp,
5108 				    min(chk_length, (sizeof(chunk_buf) - 4)));
5109 
5110 			}
5111 			break;
5112 
5113 		case SCTP_AUTHENTICATION:
5114 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5115 			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5116 				goto unknown_chunk;
5117 
5118 			if (stcb == NULL) {
5119 				/* save the first AUTH for later processing */
5120 				if (auth_skipped == 0) {
5121 					auth_offset = *offset;
5122 					auth_len = chk_length;
5123 					auth_skipped = 1;
5124 				}
5125 				/* skip this chunk (temporarily) */
5126 				goto next_chunk;
5127 			}
5128 			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5129 			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5130 			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5131 				/* Its not ours */
5132 				if (locked_tcb) {
5133 					SCTP_TCB_UNLOCK(locked_tcb);
5134 				}
5135 				*offset = length;
5136 				return (NULL);
5137 			}
5138 			if (got_auth == 1) {
5139 				/* skip this chunk... it's already auth'd */
5140 				goto next_chunk;
5141 			}
5142 			got_auth = 1;
5143 			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5144 			    m, *offset)) {
5145 				/* auth HMAC failed so dump the packet */
5146 				*offset = length;
5147 				return (stcb);
5148 			} else {
5149 				/* remaining chunks are HMAC checked */
5150 				stcb->asoc.authenticated = 1;
5151 			}
5152 			break;
5153 
5154 		default:
5155 	unknown_chunk:
5156 			/* it's an unknown chunk! */
5157 			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5158 				struct mbuf *mm;
5159 				struct sctp_paramhdr *phd;
5160 
5161 				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5162 				    0, M_DONTWAIT, 1, MT_DATA);
5163 				if (mm) {
5164 					phd = mtod(mm, struct sctp_paramhdr *);
5165 					/*
5166 					 * We cheat and use param type since
5167 					 * we did not bother to define a
5168 					 * error cause struct. They are the
5169 					 * same basic format with different
5170 					 * names.
5171 					 */
5172 					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5173 					phd->param_length = htons(chk_length + sizeof(*phd));
5174 					SCTP_BUF_LEN(mm) = sizeof(*phd);
5175 					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5176 					    M_DONTWAIT);
5177 					if (SCTP_BUF_NEXT(mm)) {
5178 #ifdef SCTP_MBUF_LOGGING
5179 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5180 							struct mbuf *mat;
5181 
5182 							mat = SCTP_BUF_NEXT(mm);
5183 							while (mat) {
5184 								if (SCTP_BUF_IS_EXTENDED(mat)) {
5185 									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5186 								}
5187 								mat = SCTP_BUF_NEXT(mat);
5188 							}
5189 						}
5190 #endif
5191 						sctp_queue_op_err(stcb, mm);
5192 					} else {
5193 						sctp_m_freem(mm);
5194 					}
5195 				}
5196 			}
5197 			if ((ch->chunk_type & 0x80) == 0) {
5198 				/* discard this packet */
5199 				*offset = length;
5200 				return (stcb);
5201 			}	/* else skip this bad chunk and continue... */
5202 			break;
5203 		}		/* switch (ch->chunk_type) */
5204 
5205 
5206 next_chunk:
5207 		/* get the next chunk */
5208 		*offset += SCTP_SIZE32(chk_length);
5209 		if (*offset >= length) {
5210 			/* no more data left in the mbuf chain */
5211 			break;
5212 		}
5213 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5214 		    sizeof(struct sctp_chunkhdr), chunk_buf);
5215 		if (ch == NULL) {
5216 			if (locked_tcb) {
5217 				SCTP_TCB_UNLOCK(locked_tcb);
5218 			}
5219 			*offset = length;
5220 			return (NULL);
5221 		}
5222 	}			/* while */
5223 
5224 	if (asconf_cnt > 0 && stcb != NULL) {
5225 		sctp_send_asconf_ack(stcb);
5226 	}
5227 	return (stcb);
5228 }
5229 
5230 
/*
 * Process the ECN bits. Something is set, so we must look to see whether it
 * is ECN(0), ECN(1), or CE.
 */
5235 static void
5236 sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
5237     uint8_t ecn_bits)
5238 {
5239 	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5240 		;
5241 	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
5242 		/*
5243 		 * we only add to the nonce sum for ECT1, ECT0 does not
5244 		 * change the NS bit (that we have yet to find a way to send
5245 		 * it yet).
5246 		 */
5247 
5248 		/* ECN Nonce stuff */
5249 		stcb->asoc.receiver_nonce_sum++;
5250 		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
5251 
5252 		/*
5253 		 * Drag up the last_echo point if cumack is larger since we
5254 		 * don't want the point falling way behind by more than
5255 		 * 2^^31 and then having it be incorrect.
5256 		 */
5257 		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5258 		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5259 			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5260 		}
5261 	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
5262 		/*
5263 		 * Drag up the last_echo point if cumack is larger since we
5264 		 * don't want the point falling way behind by more than
5265 		 * 2^^31 and then having it be incorrect.
5266 		 */
5267 		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5268 		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5269 			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5270 		}
5271 	}
5272 }
5273 
5274 static void
5275 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
5276     uint32_t high_tsn, uint8_t ecn_bits)
5277 {
5278 	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5279 		/*
5280 		 * we possibly must notify the sender that a congestion
5281 		 * window reduction is in order. We do this by adding a ECNE
5282 		 * chunk to the output chunk queue. The incoming CWR will
5283 		 * remove this chunk.
5284 		 */
5285 		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
5286 		    MAX_TSN)) {
5287 			/* Yep, we need to add a ECNE */
5288 			sctp_send_ecn_echo(stcb, net, high_tsn);
5289 			stcb->asoc.last_echo_tsn = high_tsn;
5290 		}
5291 	}
5292 }
5293 
5294 #ifdef INVARIANTS
5295 static void
5296 sctp_validate_no_locks(struct sctp_inpcb *inp)
5297 {
5298 	struct sctp_tcb *stcb;
5299 
5300 	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
5301 		if (mtx_owned(&stcb->tcb_mtx)) {
5302 			panic("Own lock on stcb at return from input");
5303 		}
5304 	}
5305 }
5306 
5307 #endif
5308 
5309 /*
5310  * common input chunk processing (v4 and v6)
5311  */
/*
 * Process one received SCTP packet: control chunks first (which may
 * create, locate, or destroy the stcb), then DATA chunks, then a SACK
 * check for any FWD-TSN seen, and finally trigger pending output.
 *
 * NOTE(review): if stcb is non-NULL it appears to be locked on entry
 * (every exit path either unlocks it or the association was freed by a
 * callee) -- confirm against callers.  inp's reference count is NOT
 * dropped here; the caller owns it.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    m, iphlen, offset, length, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
			if ((net) && (port)) {
				/*
				 * UDP encapsulation: the first time we
				 * learn a remote encapsulation port, shrink
				 * the path MTU by one UDP header.
				 */
				if (net->port == 0) {
					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
				}
				net->port = port;
			}
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only.  Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	}
	/*
	 * All paths reaching trigger_send have a non-NULL, locked stcb;
	 * the NULL case branched to out_now above.
	 */
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Send if there is queued control, or unsent data and either the
	 * peer has window or a zero-window probe is allowed (no flight).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}
5558 
5559 #if 0
/*
 * Debug helper (currently compiled out): dump each mbuf in a chain
 * with its length and, for external/cluster storage, the extended
 * buffer size.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		/*
		 * Cast explicitly: %p requires a void * argument and %ld
		 * requires a long; SCTP_BUF_LEN() yields m_len (an int),
		 * so passing it uncast to %ld is a format/argument type
		 * mismatch (undefined behavior on LP64 platforms).
		 */
		printf("%p: m_len = %ld\n", (void *)m, (long)SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			printf("%p: extend_size = %d\n", (void *)m,
			    (int)SCTP_BUF_EXTEND_SIZE(m));
	}
}
5569 
5570 #endif
5571 
/*
 * IPv4 input entry point; port is the remote UDP encapsulation port,
 * or 0 for plain SCTP.  Validates the packet (header pullup,
 * broadcast/multicast rejection, checksum, length vs. IP payload),
 * locates the inp/stcb/net, handles no-listener packets per the OOTB
 * rules, then hands off to sctp_common_input_processing().
 *
 * NOTE(review): sctp_findassociation_addr() appears to return with
 * the inp's ref-count raised and the stcb locked (see the comments at
 * its call sites); refcount_up records whether this function still
 * owes an SCTP_INP_DECR_REF on exit.
 */
void
sctp_input_with_port(i_pak, off, port)
	struct mbuf *i_pak;
	int off;
	uint16_t port;
{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	int refcount_up = 0;	/* 1 when we must drop inp's ref on exit */
	int length, mlen, offset;


	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		/* Cannot determine the VRF: drop the packet. */
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef  SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == 0) {
			/* m_pullup freed the chain on failure */
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		/* m may have been replaced; re-derive the IP header */
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	check = sh->checksum;	/* save incoming checksum */
	/*
	 * Zero checksum may be skipped for loopback/self-addressed traffic
	 * when the sctp_no_csum_on_loopback sysctl allows it.
	 */
	if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
	    (SCTP_IS_IT_LOOPBACK(m)))
	    ) {
		goto sctp_skip_csum_4;
	}
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_sum(m, &mlen, iphlen);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Look the association up anyway so we can report the
		 * dropped packet to the peer (PKTDROP) before bailing.
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((net) && (port)) {
			if (net->port == 0) {
				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
			}
			net->port = port;
		}
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
	sh->checksum = calc_check;
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	if ((net) && (port)) {
		/*
		 * UDP encapsulation: on first sight of the remote port,
		 * shrink the path MTU by one UDP header.
		 */
		if (net->port == 0) {
			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
		}
		net->port = port;
	}
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* No endpoint listening: OOTB handling per RFC 4960 8.4. */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			/* Never respond to a SHUTDOWN-COMPLETE */
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
		goto bad;
	} else if (stcb == NULL) {
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(MOD_IPSEC, ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* Error exit: release whatever lock/ref/mbuf we still hold. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
/*
 * Standard inet input entry point: hand the packet to the port-aware
 * handler with no UDP encapsulation port (port 0 == plain SCTP).
 *
 * Converted from the obsolete K&R identifier-list definition to an
 * ANSI prototype-style definition; parameter types are unchanged, so
 * the external interface is identical.
 */
void
sctp_input(struct mbuf *i_pak, int off)
{
	sctp_input_with_port(i_pak, off, 0);
}
5812