xref: /titanic_51/usr/src/uts/common/inet/sctp/sctp_common.c (revision d3a612ca42c17c3baa6c96ded00f98db349cc881)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/stream.h>
32 #include <sys/strsubr.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/kmem.h>
36 #include <sys/socket.h>
37 #include <sys/random.h>
38 #include <sys/tsol/tndb.h>
39 #include <sys/tsol/tnet.h>
40 
41 #include <netinet/in.h>
42 #include <netinet/ip6.h>
43 #include <netinet/sctp.h>
44 
45 #include <inet/common.h>
46 #include <inet/ip.h>
47 #include <inet/ip6.h>
48 #include <inet/ip_ire.h>
49 #include <inet/mib2.h>
50 #include <inet/nd.h>
51 #include <inet/optcom.h>
52 #include <inet/sctp_ip.h>
53 #include <inet/ipclassifier.h>
54 
55 #include "sctp_impl.h"
56 #include "sctp_addr.h"
57 #include "sctp_asconf.h"
58 
59 static struct kmem_cache *sctp_kmem_faddr_cache;
60 static void sctp_init_faddr(sctp_t *, sctp_faddr_t *, in6_addr_t *, mblk_t *);
61 
62 /* Set the source address.  Refer to comments in sctp_get_ire(). */
63 void
64 sctp_set_saddr(sctp_t *sctp, sctp_faddr_t *fp)
65 {
66 	boolean_t v6 = !fp->isv4;
67 	boolean_t addr_set;
68 
69 	fp->saddr = sctp_get_valid_addr(sctp, v6, &addr_set);
70 	/*
71 	 * If there is no source address avaialble, mark this peer address
72 	 * as unreachable for now.  When the heartbeat timer fires, it will
73 	 * call sctp_get_ire() to re-check if there is any source address
74 	 * available.
75 	 */
76 	if (!addr_set)
77 		fp->state = SCTP_FADDRS_UNREACH;
78 }
79 
80 /*
81  * Call this function to update the cached IRE of a peer addr fp.
82  */
83 void
84 sctp_get_ire(sctp_t *sctp, sctp_faddr_t *fp)
85 {
86 	ire_t		*ire;
87 	ipaddr_t	addr4;
88 	in6_addr_t	laddr;
89 	sctp_saddr_ipif_t *sp;
90 	int		hdrlen;
91 	ts_label_t	*tsl;
92 	sctp_stack_t	*sctps = sctp->sctp_sctps;
93 	ip_stack_t	*ipst = sctps->sctps_netstack->netstack_ip;
94 
95 	/* Remove the previous cache IRE */
96 	if ((ire = fp->ire) != NULL) {
97 		IRE_REFRELE_NOTR(ire);
98 		fp->ire = NULL;
99 	}
100 
101 	/*
102 	 * If this addr is not reachable, mark it as unconfirmed for now, the
103 	 * state will be changed back to unreachable later in this function
104 	 * if it is still the case.
105 	 */
106 	if (fp->state == SCTP_FADDRS_UNREACH) {
107 		fp->state = SCTP_FADDRS_UNCONFIRMED;
108 	}
109 
110 	tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
111 
112 	if (fp->isv4) {
113 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
114 		ire = ire_cache_lookup(addr4, sctp->sctp_zoneid, tsl, ipst);
115 		if (ire != NULL)
116 			IN6_IPADDR_TO_V4MAPPED(ire->ire_src_addr, &laddr);
117 	} else {
118 		ire = ire_cache_lookup_v6(&fp->faddr, sctp->sctp_zoneid, tsl,
119 		    ipst);
120 		if (ire != NULL)
121 			laddr = ire->ire_src_addr_v6;
122 	}
123 
124 	if (ire == NULL) {
125 		dprint(3, ("ire2faddr: no ire for %x:%x:%x:%x\n",
126 		    SCTP_PRINTADDR(fp->faddr)));
127 		/*
128 		 * It is tempting to just leave the src addr
129 		 * unspecified and let IP figure it out, but we
130 		 * *cannot* do this, since IP may choose a src addr
131 		 * that is not part of this association... unless
132 		 * this sctp has bound to all addrs.  So if the ire
133 		 * lookup fails, try to find one in our src addr
134 		 * list, unless the sctp has bound to all addrs, in
135 		 * which case we change the src addr to unspec.
136 		 *
137 		 * Note that if this is a v6 endpoint but it does
138 		 * not have any v4 address at this point (e.g. may
139 		 * have been  deleted), sctp_get_valid_addr() will
140 		 * return mapped INADDR_ANY.  In this case, this
141 		 * address should be marked not reachable so that
142 		 * it won't be used to send data.
143 		 */
144 		sctp_set_saddr(sctp, fp);
145 		if (fp->state == SCTP_FADDRS_UNREACH)
146 			return;
147 		goto check_current;
148 	}
149 
150 	/* Make sure the laddr is part of this association */
151 	if ((sp = sctp_saddr_lookup(sctp, &ire->ire_ipif->ipif_v6lcl_addr,
152 	    0)) != NULL && !sp->saddr_ipif_dontsrc) {
153 		if (sp->saddr_ipif_unconfirmed == 1)
154 			sp->saddr_ipif_unconfirmed = 0;
155 		fp->saddr = laddr;
156 	} else {
157 		dprint(2, ("ire2faddr: src addr is not part of assc\n"));
158 
159 		/*
160 		 * Set the src to the first saddr and hope for the best.
161 		 * Note that we will still do the ire caching below.
162 		 * Otherwise, whenever we send a packet, we need to do
163 		 * the ire lookup again and still may not get the correct
164 		 * source address.  Note that this case should very seldomly
165 		 * happen.  One scenario this can happen is an app
166 		 * explicitly bind() to an address.  But that address is
167 		 * not the preferred source address to send to the peer.
168 		 */
169 		sctp_set_saddr(sctp, fp);
170 		if (fp->state == SCTP_FADDRS_UNREACH) {
171 			IRE_REFRELE(ire);
172 			return;
173 		}
174 	}
175 
176 	/*
177 	 * Note that ire_cache_lookup_*() returns an ire with the tracing
178 	 * bits enabled.  This requires the thread holding the ire also
179 	 * do the IRE_REFRELE().  Thus we need to do IRE_REFHOLD_NOTR()
180 	 * and then IRE_REFRELE() the ire here to make the tracing bits
181 	 * work.
182 	 */
183 	IRE_REFHOLD_NOTR(ire);
184 	IRE_REFRELE(ire);
185 
186 	/* Cache the IRE */
187 	fp->ire = ire;
188 	if (fp->ire->ire_type == IRE_LOOPBACK && !sctp->sctp_loopback)
189 		sctp->sctp_loopback = 1;
190 
191 	/*
192 	 * Pull out RTO information for this faddr and use it if we don't
193 	 * have any yet.
194 	 */
195 	if (fp->srtt == -1 && ire->ire_uinfo.iulp_rtt != 0) {
196 		/* The cached value is in ms. */
197 		fp->srtt = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt);
198 		fp->rttvar = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt_sd);
199 		fp->rto = 3 * fp->srtt;
200 
201 		/* Bound the RTO by configured min and max values */
202 		if (fp->rto < sctp->sctp_rto_min) {
203 			fp->rto = sctp->sctp_rto_min;
204 		}
205 		if (fp->rto > sctp->sctp_rto_max) {
206 			fp->rto = sctp->sctp_rto_max;
207 		}
208 	}
209 
210 	/*
211 	 * Record the MTU for this faddr. If the MTU for this faddr has
212 	 * changed, check if the assc MTU will also change.
213 	 */
214 	if (fp->isv4) {
215 		hdrlen = sctp->sctp_hdr_len;
216 	} else {
217 		hdrlen = sctp->sctp_hdr6_len;
218 	}
219 	if ((fp->sfa_pmss + hdrlen) != ire->ire_max_frag) {
220 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
221 		fp->sfa_pmss = (ire->ire_max_frag - hdrlen) & ~(SCTP_ALIGN - 1);
222 		if (fp->cwnd < (fp->sfa_pmss * 2)) {
223 			SET_CWND(fp, fp->sfa_pmss,
224 			    sctps->sctps_slow_start_initial);
225 		}
226 	}
227 
228 check_current:
229 	if (fp == sctp->sctp_current)
230 		sctp_set_faddr_current(sctp, fp);
231 }
232 
233 void
234 sctp_update_ire(sctp_t *sctp)
235 {
236 	ire_t		*ire;
237 	sctp_faddr_t	*fp;
238 	sctp_stack_t	*sctps = sctp->sctp_sctps;
239 
240 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
241 		if ((ire = fp->ire) == NULL)
242 			continue;
243 		mutex_enter(&ire->ire_lock);
244 
245 		/*
246 		 * If the cached IRE is going away, there is no point to
247 		 * update it.
248 		 */
249 		if (ire->ire_marks & IRE_MARK_CONDEMNED) {
250 			mutex_exit(&ire->ire_lock);
251 			IRE_REFRELE_NOTR(ire);
252 			fp->ire = NULL;
253 			continue;
254 		}
255 
256 		/*
257 		 * Only record the PMTU for this faddr if we actually have
258 		 * done discovery. This prevents initialized default from
259 		 * clobbering any real info that IP may have.
260 		 */
261 		if (fp->pmtu_discovered) {
262 			if (fp->isv4) {
263 				ire->ire_max_frag = fp->sfa_pmss +
264 				    sctp->sctp_hdr_len;
265 			} else {
266 				ire->ire_max_frag = fp->sfa_pmss +
267 				    sctp->sctp_hdr6_len;
268 			}
269 		}
270 
271 		if (sctps->sctps_rtt_updates != 0 &&
272 		    fp->rtt_updates >= sctps->sctps_rtt_updates) {
273 			/*
274 			 * If there is no old cached values, initialize them
275 			 * conservatively.  Set them to be (1.5 * new value).
276 			 * This code copied from ip_ire_advise().  The cached
277 			 * value is in ms.
278 			 */
279 			if (ire->ire_uinfo.iulp_rtt != 0) {
280 				ire->ire_uinfo.iulp_rtt =
281 				    (ire->ire_uinfo.iulp_rtt +
282 				    TICK_TO_MSEC(fp->srtt)) >> 1;
283 			} else {
284 				ire->ire_uinfo.iulp_rtt =
285 				    TICK_TO_MSEC(fp->srtt + (fp->srtt >> 1));
286 			}
287 			if (ire->ire_uinfo.iulp_rtt_sd != 0) {
288 				ire->ire_uinfo.iulp_rtt_sd =
289 				    (ire->ire_uinfo.iulp_rtt_sd +
290 				    TICK_TO_MSEC(fp->rttvar)) >> 1;
291 			} else {
292 				ire->ire_uinfo.iulp_rtt_sd =
293 				    TICK_TO_MSEC(fp->rttvar +
294 				    (fp->rttvar >> 1));
295 			}
296 			fp->rtt_updates = 0;
297 		}
298 		mutex_exit(&ire->ire_lock);
299 	}
300 }
301 
302 /*
303  * The sender must set the total length in the IP header.
304  * If sendto == NULL, the current will be used.
305  */
306 mblk_t *
307 sctp_make_mp(sctp_t *sctp, sctp_faddr_t *sendto, int trailer)
308 {
309 	mblk_t *mp;
310 	size_t ipsctplen;
311 	int isv4;
312 	sctp_faddr_t *fp;
313 	sctp_stack_t *sctps = sctp->sctp_sctps;
314 	boolean_t src_changed = B_FALSE;
315 
316 	ASSERT(sctp->sctp_current != NULL || sendto != NULL);
317 	if (sendto == NULL) {
318 		fp = sctp->sctp_current;
319 	} else {
320 		fp = sendto;
321 	}
322 	isv4 = fp->isv4;
323 
324 	/* Try to look for another IRE again. */
325 	if (fp->ire == NULL) {
326 		sctp_get_ire(sctp, fp);
327 		/*
328 		 * Although we still may not get an IRE, the source address
329 		 * may be changed in sctp_get_ire().  Set src_changed to
330 		 * true so that the source address is copied again.
331 		 */
332 		src_changed = B_TRUE;
333 	}
334 
335 	/* There is no suitable source address to use, return. */
336 	if (fp->state == SCTP_FADDRS_UNREACH)
337 		return (NULL);
338 	ASSERT(!IN6_IS_ADDR_V4MAPPED_ANY(&fp->saddr));
339 
340 	if (isv4) {
341 		ipsctplen = sctp->sctp_hdr_len;
342 	} else {
343 		ipsctplen = sctp->sctp_hdr6_len;
344 	}
345 
346 	mp = allocb_cred(ipsctplen + sctps->sctps_wroff_xtra + trailer,
347 	    CONN_CRED(sctp->sctp_connp));
348 	if (mp == NULL) {
349 		ip1dbg(("sctp_make_mp: error making mp..\n"));
350 		return (NULL);
351 	}
352 	mp->b_rptr += sctps->sctps_wroff_xtra;
353 	mp->b_wptr = mp->b_rptr + ipsctplen;
354 
355 	ASSERT(OK_32PTR(mp->b_wptr));
356 
357 	if (isv4) {
358 		ipha_t *iph = (ipha_t *)mp->b_rptr;
359 
360 		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
361 		if (fp != sctp->sctp_current || src_changed) {
362 			/* Fix the source and destination addresses. */
363 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst);
364 			IN6_V4MAPPED_TO_IPADDR(&fp->saddr, iph->ipha_src);
365 		}
366 		/* set or clear the don't fragment bit */
367 		if (fp->df) {
368 			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
369 		} else {
370 			iph->ipha_fragment_offset_and_flags = 0;
371 		}
372 	} else {
373 		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
374 		if (fp != sctp->sctp_current || src_changed) {
375 			/* Fix the source and destination addresses. */
376 			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr;
377 			((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr;
378 		}
379 	}
380 	ASSERT(sctp->sctp_connp != NULL);
381 
382 	/*
383 	 * IP will not free this IRE if it is condemned.  SCTP needs to
384 	 * free it.
385 	 */
386 	if ((fp->ire != NULL) && (fp->ire->ire_marks & IRE_MARK_CONDEMNED)) {
387 		IRE_REFRELE_NOTR(fp->ire);
388 		fp->ire = NULL;
389 	}
390 	/* Stash the conn and ire ptr info. for IP */
391 	SCTP_STASH_IPINFO(mp, fp->ire);
392 
393 	return (mp);
394 }
395 
396 /*
397  * Notify upper layers about preferred write offset, write size.
398  */
399 void
400 sctp_set_ulp_prop(sctp_t *sctp)
401 {
402 	int hdrlen;
403 	sctp_stack_t *sctps = sctp->sctp_sctps;
404 
405 	if (sctp->sctp_current->isv4) {
406 		hdrlen = sctp->sctp_hdr_len;
407 	} else {
408 		hdrlen = sctp->sctp_hdr6_len;
409 	}
410 	ASSERT(sctp->sctp_ulpd);
411 
412 	ASSERT(sctp->sctp_current->sfa_pmss == sctp->sctp_mss);
413 	sctp->sctp_ulp_prop(sctp->sctp_ulpd,
414 	    sctps->sctps_wroff_xtra + hdrlen + sizeof (sctp_data_hdr_t),
415 	    sctp->sctp_mss - sizeof (sctp_data_hdr_t));
416 }
417 
418 void
419 sctp_set_iplen(sctp_t *sctp, mblk_t *mp)
420 {
421 	uint16_t	sum = 0;
422 	ipha_t		*iph;
423 	ip6_t		*ip6h;
424 	mblk_t		*pmp = mp;
425 	boolean_t	isv4;
426 
427 	isv4 = (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
428 	for (; pmp; pmp = pmp->b_cont)
429 		sum += pmp->b_wptr - pmp->b_rptr;
430 
431 	if (isv4) {
432 		iph = (ipha_t *)mp->b_rptr;
433 		iph->ipha_length = htons(sum);
434 	} else {
435 		ip6h = (ip6_t *)mp->b_rptr;
436 		/*
437 		 * If an ip6i_t is present, the real IPv6 header
438 		 * immediately follows.
439 		 */
440 		if (ip6h->ip6_nxt == IPPROTO_RAW)
441 			ip6h = (ip6_t *)&ip6h[1];
442 		ip6h->ip6_plen = htons(sum - ((char *)&sctp->sctp_ip6h[1] -
443 		    sctp->sctp_iphc6));
444 	}
445 }
446 
447 int
448 sctp_compare_faddrsets(sctp_faddr_t *a1, sctp_faddr_t *a2)
449 {
450 	int na1 = 0;
451 	int overlap = 0;
452 	int equal = 1;
453 	int onematch;
454 	sctp_faddr_t *fp1, *fp2;
455 
456 	for (fp1 = a1; fp1; fp1 = fp1->next) {
457 		onematch = 0;
458 		for (fp2 = a2; fp2; fp2 = fp2->next) {
459 			if (IN6_ARE_ADDR_EQUAL(&fp1->faddr, &fp2->faddr)) {
460 				overlap++;
461 				onematch = 1;
462 				break;
463 			}
464 			if (!onematch) {
465 				equal = 0;
466 			}
467 		}
468 		na1++;
469 	}
470 
471 	if (equal) {
472 		return (SCTP_ADDR_EQUAL);
473 	}
474 	if (overlap == na1) {
475 		return (SCTP_ADDR_SUBSET);
476 	}
477 	if (overlap) {
478 		return (SCTP_ADDR_OVERLAP);
479 	}
480 	return (SCTP_ADDR_DISJOINT);
481 }
482 
483 /*
484  * Returns 0 on success, -1 on memory allocation failure. If sleep
485  * is true, this function should never fail.  The boolean parameter
486  * first decides whether the newly created faddr structure should be
487  * added at the beginning of the list or at the end.
488  *
489  * Note: caller must hold conn fanout lock.
490  */
491 int
492 sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
493 {
494 	sctp_faddr_t	*faddr;
495 	mblk_t		*timer_mp;
496 
497 	if (is_system_labeled()) {
498 		ts_label_t *tsl;
499 		tsol_tpc_t *rhtp;
500 		int retv;
501 
502 		tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
503 		ASSERT(tsl != NULL);
504 
505 		/* find_tpc automatically does the right thing with IPv4 */
506 		rhtp = find_tpc(addr, IPV6_VERSION, B_FALSE);
507 		if (rhtp == NULL)
508 			return (EACCES);
509 
510 		retv = EACCES;
511 		if (tsl->tsl_doi == rhtp->tpc_tp.tp_doi) {
512 			switch (rhtp->tpc_tp.host_type) {
513 			case UNLABELED:
514 				/*
515 				 * Can talk to unlabeled hosts if any of the
516 				 * following are true:
517 				 *   1. zone's label matches the remote host's
518 				 *	default label,
519 				 *   2. mac_exempt is on and the zone dominates
520 				 *	the remote host's label, or
521 				 *   3. mac_exempt is on and the socket is from
522 				 *	the global zone.
523 				 */
524 				if (blequal(&rhtp->tpc_tp.tp_def_label,
525 				    &tsl->tsl_label) ||
526 				    (sctp->sctp_mac_exempt &&
527 				    (sctp->sctp_zoneid == GLOBAL_ZONEID ||
528 				    bldominates(&tsl->tsl_label,
529 				    &rhtp->tpc_tp.tp_def_label))))
530 					retv = 0;
531 				break;
532 			case SUN_CIPSO:
533 				if (_blinrange(&tsl->tsl_label,
534 				    &rhtp->tpc_tp.tp_sl_range_cipso) ||
535 				    blinlset(&tsl->tsl_label,
536 				    rhtp->tpc_tp.tp_sl_set_cipso))
537 					retv = 0;
538 				break;
539 			}
540 		}
541 		TPC_RELE(rhtp);
542 		if (retv != 0)
543 			return (retv);
544 	}
545 
546 	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
547 		return (ENOMEM);
548 	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep);
549 	if (timer_mp == NULL) {
550 		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
551 		return (ENOMEM);
552 	}
553 	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;
554 
555 	sctp_init_faddr(sctp, faddr, addr, timer_mp);
556 
557 	/* Check for subnet broadcast. */
558 	if (faddr->ire != NULL && faddr->ire->ire_type & IRE_BROADCAST) {
559 		IRE_REFRELE_NOTR(faddr->ire);
560 		sctp_timer_free(timer_mp);
561 		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
562 		return (EADDRNOTAVAIL);
563 	}
564 	ASSERT(faddr->next == NULL);
565 
566 	if (sctp->sctp_faddrs == NULL) {
567 		ASSERT(sctp->sctp_lastfaddr == NULL);
568 		/* only element on list; first and last are same */
569 		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
570 	} else if (first) {
571 		ASSERT(sctp->sctp_lastfaddr != NULL);
572 		faddr->next = sctp->sctp_faddrs;
573 		sctp->sctp_faddrs = faddr;
574 	} else {
575 		sctp->sctp_lastfaddr->next = faddr;
576 		sctp->sctp_lastfaddr = faddr;
577 	}
578 	sctp->sctp_nfaddrs++;
579 
580 	return (0);
581 }
582 
583 sctp_faddr_t *
584 sctp_lookup_faddr(sctp_t *sctp, in6_addr_t *addr)
585 {
586 	sctp_faddr_t *fp;
587 
588 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
589 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr))
590 			break;
591 	}
592 
593 	return (fp);
594 }
595 
596 sctp_faddr_t *
597 sctp_lookup_faddr_nosctp(sctp_faddr_t *fp, in6_addr_t *addr)
598 {
599 	for (; fp; fp = fp->next) {
600 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr)) {
601 			break;
602 		}
603 	}
604 
605 	return (fp);
606 }
607 
608 /*
609  * To change the currently used peer address to the specified one.
610  */
611 void
612 sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
613 {
614 	/* Now setup the composite header. */
615 	if (fp->isv4) {
616 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
617 		    sctp->sctp_ipha->ipha_dst);
618 		IN6_V4MAPPED_TO_IPADDR(&fp->saddr, sctp->sctp_ipha->ipha_src);
619 		/* update don't fragment bit */
620 		if (fp->df) {
621 			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
622 			    htons(IPH_DF);
623 		} else {
624 			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
625 		}
626 	} else {
627 		sctp->sctp_ip6h->ip6_dst = fp->faddr;
628 		sctp->sctp_ip6h->ip6_src = fp->saddr;
629 	}
630 
631 	sctp->sctp_current = fp;
632 	sctp->sctp_mss = fp->sfa_pmss;
633 
634 	/* Update the uppper layer for the change. */
635 	if (!SCTP_IS_DETACHED(sctp))
636 		sctp_set_ulp_prop(sctp);
637 }
638 
639 void
640 sctp_redo_faddr_srcs(sctp_t *sctp)
641 {
642 	sctp_faddr_t *fp;
643 
644 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
645 		sctp_get_ire(sctp, fp);
646 	}
647 }
648 
649 void
650 sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
651 {
652 	int64_t now = lbolt64;
653 
654 	fp->strikes = 0;
655 	sctp->sctp_strikes = 0;
656 	fp->lastactive = now;
657 	fp->hb_expiry = now + SET_HB_INTVL(fp);
658 	fp->hb_pending = B_FALSE;
659 	if (fp->state != SCTP_FADDRS_ALIVE) {
660 		fp->state = SCTP_FADDRS_ALIVE;
661 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_AVAILABLE, 0);
662 		/* Should have a full IRE now */
663 		sctp_get_ire(sctp, fp);
664 
665 		/*
666 		 * If this is the primary, switch back to it now.  And
667 		 * we probably want to reset the source addr used to reach
668 		 * it.
669 		 */
670 		if (fp == sctp->sctp_primary) {
671 			ASSERT(fp->state != SCTP_FADDRS_UNREACH);
672 			sctp_set_faddr_current(sctp, fp);
673 			return;
674 		}
675 	}
676 }
677 
678 int
679 sctp_is_a_faddr_clean(sctp_t *sctp)
680 {
681 	sctp_faddr_t *fp;
682 
683 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
684 		if (fp->state == SCTP_FADDRS_ALIVE && fp->strikes == 0) {
685 			return (1);
686 		}
687 	}
688 
689 	return (0);
690 }
691 
692 /*
693  * Returns 0 if there is at leave one other active faddr, -1 if there
694  * are none. If there are none left, faddr_dead() will start killing the
695  * association.
696  * If the downed faddr was the current faddr, a new current faddr
697  * will be chosen.
698  */
699 int
700 sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate)
701 {
702 	sctp_faddr_t *ofp;
703 	sctp_stack_t *sctps = sctp->sctp_sctps;
704 
705 	if (fp->state == SCTP_FADDRS_ALIVE) {
706 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_UNREACHABLE, 0);
707 	}
708 	fp->state = newstate;
709 
710 	dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n",
711 	    SCTP_PRINTADDR(fp->faddr), newstate));
712 
713 	if (fp == sctp->sctp_current) {
714 		/* Current faddr down; need to switch it */
715 		sctp->sctp_current = NULL;
716 	}
717 
718 	/* Find next alive faddr */
719 	ofp = fp;
720 	for (fp = fp->next; fp != NULL; fp = fp->next) {
721 		if (fp->state == SCTP_FADDRS_ALIVE) {
722 			break;
723 		}
724 	}
725 
726 	if (fp == NULL) {
727 		/* Continue from beginning of list */
728 		for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->next) {
729 			if (fp->state == SCTP_FADDRS_ALIVE) {
730 				break;
731 			}
732 		}
733 	}
734 
735 	/*
736 	 * Find a new fp, so if the current faddr is dead, use the new fp
737 	 * as the current one.
738 	 */
739 	if (fp != ofp) {
740 		if (sctp->sctp_current == NULL) {
741 			dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n",
742 			    SCTP_PRINTADDR(fp->faddr)));
743 			/*
744 			 * Note that we don't need to reset the source addr
745 			 * of the new fp.
746 			 */
747 			sctp_set_faddr_current(sctp, fp);
748 		}
749 		return (0);
750 	}
751 
752 
753 	/* All faddrs are down; kill the association */
754 	dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n"));
755 	BUMP_MIB(&sctps->sctps_mib, sctpAborted);
756 	sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ?
757 	    SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL);
758 	sctp_clean_death(sctp, sctp->sctp_client_errno ?
759 	    sctp->sctp_client_errno : ETIMEDOUT);
760 
761 	return (-1);
762 }
763 
764 sctp_faddr_t *
765 sctp_rotate_faddr(sctp_t *sctp, sctp_faddr_t *ofp)
766 {
767 	sctp_faddr_t *nfp = NULL;
768 
769 	if (ofp == NULL) {
770 		ofp = sctp->sctp_current;
771 	}
772 
773 	/* Find the next live one */
774 	for (nfp = ofp->next; nfp != NULL; nfp = nfp->next) {
775 		if (nfp->state == SCTP_FADDRS_ALIVE) {
776 			break;
777 		}
778 	}
779 
780 	if (nfp == NULL) {
781 		/* Continue from beginning of list */
782 		for (nfp = sctp->sctp_faddrs; nfp != ofp; nfp = nfp->next) {
783 			if (nfp->state == SCTP_FADDRS_ALIVE) {
784 				break;
785 			}
786 		}
787 	}
788 
789 	/*
790 	 * nfp could only be NULL if all faddrs are down, and when
791 	 * this happens, faddr_dead() should have killed the
792 	 * association. Hence this assertion...
793 	 */
794 	ASSERT(nfp != NULL);
795 	return (nfp);
796 }
797 
798 void
799 sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp)
800 {
801 	sctp_faddr_t *fpp;
802 
803 	if (!sctp->sctp_faddrs) {
804 		return;
805 	}
806 
807 	if (fp->timer_mp != NULL) {
808 		sctp_timer_free(fp->timer_mp);
809 		fp->timer_mp = NULL;
810 		fp->timer_running = 0;
811 	}
812 	if (fp->rc_timer_mp != NULL) {
813 		sctp_timer_free(fp->rc_timer_mp);
814 		fp->rc_timer_mp = NULL;
815 		fp->rc_timer_running = 0;
816 	}
817 	if (fp->ire != NULL) {
818 		IRE_REFRELE_NOTR(fp->ire);
819 		fp->ire = NULL;
820 	}
821 
822 	if (fp == sctp->sctp_faddrs) {
823 		goto gotit;
824 	}
825 
826 	for (fpp = sctp->sctp_faddrs; fpp->next != fp; fpp = fpp->next)
827 		;
828 
829 gotit:
830 	ASSERT(sctp->sctp_conn_tfp != NULL);
831 	mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
832 	if (fp == sctp->sctp_faddrs) {
833 		sctp->sctp_faddrs = fp->next;
834 	} else {
835 		fpp->next = fp->next;
836 	}
837 	mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
838 	/* XXX faddr2ire? */
839 	kmem_cache_free(sctp_kmem_faddr_cache, fp);
840 	sctp->sctp_nfaddrs--;
841 }
842 
843 void
844 sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock)
845 {
846 	sctp_faddr_t *fp, *fpn;
847 
848 	if (sctp->sctp_faddrs == NULL) {
849 		ASSERT(sctp->sctp_lastfaddr == NULL);
850 		return;
851 	}
852 
853 	ASSERT(sctp->sctp_lastfaddr != NULL);
854 	sctp->sctp_lastfaddr = NULL;
855 	sctp->sctp_current = NULL;
856 	sctp->sctp_primary = NULL;
857 
858 	sctp_free_faddr_timers(sctp);
859 
860 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
861 		/* in conn fanout; need to hold lock */
862 		mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
863 	}
864 
865 	for (fp = sctp->sctp_faddrs; fp; fp = fpn) {
866 		fpn = fp->next;
867 		if (fp->ire != NULL)
868 			IRE_REFRELE_NOTR(fp->ire);
869 		kmem_cache_free(sctp_kmem_faddr_cache, fp);
870 		sctp->sctp_nfaddrs--;
871 	}
872 
873 	sctp->sctp_faddrs = NULL;
874 	ASSERT(sctp->sctp_nfaddrs == 0);
875 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
876 		mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
877 	}
878 
879 }
880 
881 void
882 sctp_zap_addrs(sctp_t *sctp)
883 {
884 	sctp_zap_faddrs(sctp, 0);
885 	sctp_free_saddrs(sctp);
886 }
887 
888 /*
889  * Initialize the IPv4 header. Loses any record of any IP options.
890  */
891 int
892 sctp_header_init_ipv4(sctp_t *sctp, int sleep)
893 {
894 	sctp_hdr_t	*sctph;
895 	sctp_stack_t	*sctps = sctp->sctp_sctps;
896 
897 	/*
898 	 * This is a simple initialization. If there's
899 	 * already a template, it should never be too small,
900 	 * so reuse it.  Otherwise, allocate space for the new one.
901 	 */
902 	if (sctp->sctp_iphc != NULL) {
903 		ASSERT(sctp->sctp_iphc_len >= SCTP_MAX_COMBINED_HEADER_LENGTH);
904 		bzero(sctp->sctp_iphc, sctp->sctp_iphc_len);
905 	} else {
906 		sctp->sctp_iphc_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
907 		sctp->sctp_iphc = kmem_zalloc(sctp->sctp_iphc_len, sleep);
908 		if (sctp->sctp_iphc == NULL) {
909 			sctp->sctp_iphc_len = 0;
910 			return (ENOMEM);
911 		}
912 	}
913 
914 	sctp->sctp_ipha = (ipha_t *)sctp->sctp_iphc;
915 
916 	sctp->sctp_hdr_len = sizeof (ipha_t) + sizeof (sctp_hdr_t);
917 	sctp->sctp_ip_hdr_len = sizeof (ipha_t);
918 	sctp->sctp_ipha->ipha_length = htons(sizeof (ipha_t) +
919 	    sizeof (sctp_hdr_t));
920 	sctp->sctp_ipha->ipha_version_and_hdr_length =
921 	    (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS;
922 
923 	/*
924 	 * These two fields should be zero, and are already set above.
925 	 *
926 	 * sctp->sctp_ipha->ipha_ident,
927 	 * sctp->sctp_ipha->ipha_fragment_offset_and_flags.
928 	 */
929 
930 	sctp->sctp_ipha->ipha_ttl = sctps->sctps_ipv4_ttl;
931 	sctp->sctp_ipha->ipha_protocol = IPPROTO_SCTP;
932 
933 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc + sizeof (ipha_t));
934 	sctp->sctp_sctph = sctph;
935 
936 	return (0);
937 }
938 
939 /*
940  * Update sctp_sticky_hdrs based on sctp_sticky_ipp.
941  * The headers include ip6i_t (if needed), ip6_t, any sticky extension
942  * headers, and the maximum size sctp header (to avoid reallocation
943  * on the fly for additional sctp options).
944  * Returns failure if can't allocate memory.
945  */
946 int
947 sctp_build_hdrs(sctp_t *sctp)
948 {
949 	char		*hdrs;
950 	uint_t		hdrs_len;
951 	ip6i_t		*ip6i;
952 	char		buf[SCTP_MAX_HDR_LENGTH];
953 	ip6_pkt_t	*ipp = &sctp->sctp_sticky_ipp;
954 	in6_addr_t	src;
955 	in6_addr_t	dst;
956 	sctp_stack_t	*sctps = sctp->sctp_sctps;
957 
958 	/*
959 	 * save the existing sctp header and source/dest IP addresses
960 	 */
961 	bcopy(sctp->sctp_sctph6, buf, sizeof (sctp_hdr_t));
962 	src = sctp->sctp_ip6h->ip6_src;
963 	dst = sctp->sctp_ip6h->ip6_dst;
964 	hdrs_len = ip_total_hdrs_len_v6(ipp) + SCTP_MAX_HDR_LENGTH;
965 	ASSERT(hdrs_len != 0);
966 	if (hdrs_len > sctp->sctp_iphc6_len) {
967 		/* Need to reallocate */
968 		hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP);
969 		if (hdrs == NULL)
970 			return (ENOMEM);
971 
972 		if (sctp->sctp_iphc6_len != 0)
973 			kmem_free(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
974 		sctp->sctp_iphc6 = hdrs;
975 		sctp->sctp_iphc6_len = hdrs_len;
976 	}
977 	ip_build_hdrs_v6((uchar_t *)sctp->sctp_iphc6,
978 	    hdrs_len - SCTP_MAX_HDR_LENGTH, ipp, IPPROTO_SCTP);
979 
980 	/* Set header fields not in ipp */
981 	if (ipp->ipp_fields & IPPF_HAS_IP6I) {
982 		ip6i = (ip6i_t *)sctp->sctp_iphc6;
983 		sctp->sctp_ip6h = (ip6_t *)&ip6i[1];
984 	} else {
985 		sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
986 	}
987 	/*
988 	 * sctp->sctp_ip_hdr_len will include ip6i_t if there is one.
989 	 */
990 	sctp->sctp_ip_hdr6_len = hdrs_len - SCTP_MAX_HDR_LENGTH;
991 	sctp->sctp_sctph6 = (sctp_hdr_t *)(sctp->sctp_iphc6 +
992 	    sctp->sctp_ip_hdr6_len);
993 	sctp->sctp_hdr6_len = sctp->sctp_ip_hdr6_len + sizeof (sctp_hdr_t);
994 
995 	bcopy(buf, sctp->sctp_sctph6, sizeof (sctp_hdr_t));
996 
997 	sctp->sctp_ip6h->ip6_src = src;
998 	sctp->sctp_ip6h->ip6_dst = dst;
999 	/*
1000 	 * If the hoplimit was not set by ip_build_hdrs_v6(), we need to
1001 	 * set it to the default value for SCTP.
1002 	 */
1003 	if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS))
1004 		sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit;
1005 	/*
1006 	 * If we're setting extension headers after a connection
1007 	 * has been established, and if we have a routing header
1008 	 * among the extension headers, call ip_massage_options_v6 to
1009 	 * manipulate the routing header/ip6_dst set the checksum
1010 	 * difference in the sctp header template.
1011 	 * (This happens in sctp_connect_ipv6 if the routing header
1012 	 * is set prior to the connect.)
1013 	 */
1014 
1015 	if ((sctp->sctp_state >= SCTPS_COOKIE_WAIT) &&
1016 	    (sctp->sctp_sticky_ipp.ipp_fields & IPPF_RTHDR)) {
1017 		ip6_rthdr_t *rth;
1018 
1019 		rth = ip_find_rthdr_v6(sctp->sctp_ip6h,
1020 		    (uint8_t *)sctp->sctp_sctph6);
1021 		if (rth != NULL) {
1022 			(void) ip_massage_options_v6(sctp->sctp_ip6h, rth,
1023 			    sctps->sctps_netstack);
1024 		}
1025 	}
1026 	return (0);
1027 }
1028 
1029 /*
1030  * Initialize the IPv6 header. Loses any record of any IPv6 extension headers.
1031  */
1032 int
1033 sctp_header_init_ipv6(sctp_t *sctp, int sleep)
1034 {
1035 	sctp_hdr_t	*sctph;
1036 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1037 
1038 	/*
1039 	 * This is a simple initialization. If there's
1040 	 * already a template, it should never be too small,
1041 	 * so reuse it. Otherwise, allocate space for the new one.
1042 	 * Ensure that there is enough space to "downgrade" the sctp_t
1043 	 * to an IPv4 sctp_t. This requires having space for a full load
1044 	 * of IPv4 options
1045 	 */
1046 	if (sctp->sctp_iphc6 != NULL) {
1047 		ASSERT(sctp->sctp_iphc6_len >=
1048 		    SCTP_MAX_COMBINED_HEADER_LENGTH);
1049 		bzero(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
1050 	} else {
1051 		sctp->sctp_iphc6_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
1052 		sctp->sctp_iphc6 = kmem_zalloc(sctp->sctp_iphc_len, sleep);
1053 		if (sctp->sctp_iphc6 == NULL) {
1054 			sctp->sctp_iphc6_len = 0;
1055 			return (ENOMEM);
1056 		}
1057 	}
1058 	sctp->sctp_hdr6_len = IPV6_HDR_LEN + sizeof (sctp_hdr_t);
1059 	sctp->sctp_ip_hdr6_len = IPV6_HDR_LEN;
1060 	sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
1061 
1062 	/* Initialize the header template */
1063 
1064 	sctp->sctp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
1065 	sctp->sctp_ip6h->ip6_plen = ntohs(sizeof (sctp_hdr_t));
1066 	sctp->sctp_ip6h->ip6_nxt = IPPROTO_SCTP;
1067 	sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit;
1068 
1069 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc6 + IPV6_HDR_LEN);
1070 	sctp->sctp_sctph6 = sctph;
1071 
1072 	return (0);
1073 }
1074 
1075 static int
1076 sctp_v4_label(sctp_t *sctp)
1077 {
1078 	uchar_t optbuf[IP_MAX_OPT_LENGTH];
1079 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1080 	int added;
1081 
1082 	if (tsol_compute_label(cr, sctp->sctp_ipha->ipha_dst, optbuf,
1083 	    sctp->sctp_mac_exempt,
1084 	    sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0)
1085 		return (EACCES);
1086 
1087 	added = tsol_remove_secopt(sctp->sctp_ipha, sctp->sctp_hdr_len);
1088 	if (added == -1)
1089 		return (EACCES);
1090 	sctp->sctp_hdr_len += added;
1091 	sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph + added);
1092 	sctp->sctp_ip_hdr_len += added;
1093 	if ((sctp->sctp_v4label_len = optbuf[IPOPT_OLEN]) != 0) {
1094 		sctp->sctp_v4label_len = (sctp->sctp_v4label_len + 3) & ~3;
1095 		added = tsol_prepend_option(optbuf, sctp->sctp_ipha,
1096 		    sctp->sctp_hdr_len);
1097 		if (added == -1)
1098 			return (EACCES);
1099 		sctp->sctp_hdr_len += added;
1100 		sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph +
1101 		    added);
1102 		sctp->sctp_ip_hdr_len += added;
1103 	}
1104 	return (0);
1105 }
1106 
1107 static int
1108 sctp_v6_label(sctp_t *sctp)
1109 {
1110 	uchar_t optbuf[TSOL_MAX_IPV6_OPTION];
1111 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1112 
1113 	if (tsol_compute_label_v6(cr, &sctp->sctp_ip6h->ip6_dst, optbuf,
1114 	    sctp->sctp_mac_exempt,
1115 	    sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0)
1116 		return (EACCES);
1117 	if (tsol_update_sticky(&sctp->sctp_sticky_ipp, &sctp->sctp_v6label_len,
1118 	    optbuf) != 0)
1119 		return (EACCES);
1120 	if (sctp_build_hdrs(sctp) != 0)
1121 		return (EACCES);
1122 	return (0);
1123 }
1124 
1125 /*
1126  * XXX implement more sophisticated logic
1127  */
1128 int
1129 sctp_set_hdraddrs(sctp_t *sctp)
1130 {
1131 	sctp_faddr_t *fp;
1132 	int gotv4 = 0;
1133 	int gotv6 = 0;
1134 
1135 	ASSERT(sctp->sctp_faddrs != NULL);
1136 	ASSERT(sctp->sctp_nsaddrs > 0);
1137 
1138 	/* Set up using the primary first */
1139 	if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->faddr)) {
1140 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->faddr,
1141 		    sctp->sctp_ipha->ipha_dst);
1142 		/* saddr may be unspec; make_mp() will handle this */
1143 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->saddr,
1144 		    sctp->sctp_ipha->ipha_src);
1145 		if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1146 			gotv4 = 1;
1147 			if (sctp->sctp_ipversion == IPV4_VERSION) {
1148 				goto copyports;
1149 			}
1150 		}
1151 	} else {
1152 		sctp->sctp_ip6h->ip6_dst = sctp->sctp_primary->faddr;
1153 		/* saddr may be unspec; make_mp() will handle this */
1154 		sctp->sctp_ip6h->ip6_src = sctp->sctp_primary->saddr;
1155 		if (!is_system_labeled() || sctp_v6_label(sctp) == 0)
1156 			gotv6 = 1;
1157 	}
1158 
1159 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
1160 		if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1161 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
1162 			    sctp->sctp_ipha->ipha_dst);
1163 			/* copy in the faddr_t's saddr */
1164 			IN6_V4MAPPED_TO_IPADDR(&fp->saddr,
1165 			    sctp->sctp_ipha->ipha_src);
1166 			if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1167 				gotv4 = 1;
1168 				if (sctp->sctp_ipversion == IPV4_VERSION ||
1169 				    gotv6) {
1170 					break;
1171 				}
1172 			}
1173 		} else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1174 			sctp->sctp_ip6h->ip6_dst = fp->faddr;
1175 			/* copy in the faddr_t's saddr */
1176 			sctp->sctp_ip6h->ip6_src = fp->saddr;
1177 			if (!is_system_labeled() || sctp_v6_label(sctp) == 0) {
1178 				gotv6 = 1;
1179 				if (gotv4)
1180 					break;
1181 			}
1182 		}
1183 	}
1184 
1185 copyports:
1186 	if (!gotv4 && !gotv6)
1187 		return (EACCES);
1188 
1189 	/* copy in the ports for good measure */
1190 	sctp->sctp_sctph->sh_sport = sctp->sctp_lport;
1191 	sctp->sctp_sctph->sh_dport = sctp->sctp_fport;
1192 
1193 	sctp->sctp_sctph6->sh_sport = sctp->sctp_lport;
1194 	sctp->sctp_sctph6->sh_dport = sctp->sctp_fport;
1195 	return (0);
1196 }
1197 
1198 void
1199 sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp)
1200 {
1201 	mblk_t *mp;
1202 	sctp_parm_hdr_t *ph;
1203 	size_t len;
1204 	int pad;
1205 
1206 	len = sizeof (*ph) + ntohs(uph->sph_len);
1207 	if ((pad = len % 4) != 0) {
1208 		pad = 4 - pad;
1209 		len += pad;
1210 	}
1211 	mp = allocb(len, BPRI_MED);
1212 	if (mp == NULL) {
1213 		return;
1214 	}
1215 
1216 	ph = (sctp_parm_hdr_t *)(mp->b_rptr);
1217 	ph->sph_type = htons(PARM_UNRECOGNIZED);
1218 	ph->sph_len = htons(len - pad);
1219 
1220 	/* copy in the unrecognized parameter */
1221 	bcopy(uph, ph + 1, ntohs(uph->sph_len));
1222 
1223 	mp->b_wptr = mp->b_rptr + len;
1224 	if (*errmp != NULL) {
1225 		linkb(*errmp, mp);
1226 	} else {
1227 		*errmp = mp;
1228 	}
1229 }
1230 
1231 /*
1232  * o Bounds checking
1233  * o Updates remaining
1234  * o Checks alignment
1235  */
1236 sctp_parm_hdr_t *
1237 sctp_next_parm(sctp_parm_hdr_t *current, ssize_t *remaining)
1238 {
1239 	int pad;
1240 	uint16_t len;
1241 
1242 	len = ntohs(current->sph_len);
1243 	*remaining -= len;
1244 	if (*remaining < sizeof (*current) || len < sizeof (*current)) {
1245 		return (NULL);
1246 	}
1247 	if ((pad = len & (SCTP_ALIGN - 1)) != 0) {
1248 		pad = SCTP_ALIGN - pad;
1249 		*remaining -= pad;
1250 	}
1251 	/*LINTED pointer cast may result in improper alignment*/
1252 	current = (sctp_parm_hdr_t *)((char *)current + len + pad);
1253 	return (current);
1254 }
1255 
1256 /*
1257  * Sets the address parameters given in the INIT chunk into sctp's
1258  * faddrs; if psctp is non-NULL, copies psctp's saddrs. If there are
1259  * no address parameters in the INIT chunk, a single faddr is created
1260  * from the ip hdr at the beginning of pkt.
1261  * If there already are existing addresses hanging from sctp, merge
1262  * them in, if the old info contains addresses which are not present
1263  * in this new info, get rid of them, and clean the pointers if there's
1264  * messages which have this as their target address.
1265  *
1266  * We also re-adjust the source address list here since the list may
1267  * contain more than what is actually part of the association. If
1268  * we get here from sctp_send_cookie_echo(), we are on the active
1269  * side and psctp will be NULL and ich will be the INIT-ACK chunk.
1270  * If we get here from sctp_accept_comm(), ich will be the INIT chunk
1271  * and psctp will the listening endpoint.
1272  *
1273  * INIT processing: When processing the INIT we inherit the src address
1274  * list from the listener. For a loopback or linklocal association, we
1275  * delete the list and just take the address from the IP header (since
1276  * that's how we created the INIT-ACK). Additionally, for loopback we
1277  * ignore the address params in the INIT. For determining which address
1278  * types were sent in the INIT-ACK we follow the same logic as in
1279  * creating the INIT-ACK. We delete addresses of the type that are not
1280  * supported by the peer.
1281  *
1282  * INIT-ACK processing: When processing the INIT-ACK since we had not
1283  * included addr params for loopback or linklocal addresses when creating
1284  * the INIT, we just use the address from the IP header. Further, for
1285  * loopback we ignore the addr param list. We mark addresses of the
1286  * type not supported by the peer as unconfirmed.
1287  *
1288  * In case of INIT processing we look for supported address types in the
1289  * supported address param, if present. In both cases the address type in
1290  * the IP header is supported as well as types for addresses in the param
1291  * list, if any.
1292  *
1293  * Once we have the supported address types sctp_check_saddr() runs through
1294  * the source address list and deletes or marks as unconfirmed address of
1295  * types not supported by the peer.
1296  *
1297  * Returns 0 on success, sys errno on failure
1298  */
1299 int
1300 sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
1301     sctp_chunk_hdr_t *ich, uint_t *sctp_options)
1302 {
1303 	sctp_init_chunk_t	*init;
1304 	ipha_t			*iph;
1305 	ip6_t			*ip6h;
1306 	in6_addr_t		hdrsaddr[1];
1307 	in6_addr_t		hdrdaddr[1];
1308 	sctp_parm_hdr_t		*ph;
1309 	ssize_t			remaining;
1310 	int			isv4;
1311 	int			err;
1312 	sctp_faddr_t		*fp;
1313 	int			supp_af = 0;
1314 	boolean_t		check_saddr = B_TRUE;
1315 	in6_addr_t		curaddr;
1316 	sctp_stack_t		*sctps = sctp->sctp_sctps;
1317 
1318 	if (sctp_options != NULL)
1319 		*sctp_options = 0;
1320 
1321 	/* extract the address from the IP header */
1322 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1323 	if (isv4) {
1324 		iph = (ipha_t *)pkt->b_rptr;
1325 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
1326 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
1327 		supp_af |= PARM_SUPP_V4;
1328 	} else {
1329 		ip6h = (ip6_t *)pkt->b_rptr;
1330 		hdrsaddr[0] = ip6h->ip6_src;
1331 		hdrdaddr[0] = ip6h->ip6_dst;
1332 		supp_af |= PARM_SUPP_V6;
1333 	}
1334 
1335 	/*
1336 	 * Unfortunately, we can't delay this because adding an faddr
1337 	 * looks for the presence of the source address (from the ire
1338 	 * for the faddr) in the source address list. We could have
1339 	 * delayed this if, say, this was a loopback/linklocal connection.
1340 	 * Now, we just end up nuking this list and taking the addr from
1341 	 * the IP header for loopback/linklocal.
1342 	 */
1343 	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
1344 		ASSERT(sctp->sctp_nsaddrs == 0);
1345 
1346 		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
1347 		if (err != 0)
1348 			return (err);
1349 	}
1350 	/*
1351 	 * We will add the faddr before parsing the address list as this
1352 	 * might be a loopback connection and we would not have to
1353 	 * go through the list.
1354 	 *
1355 	 * Make sure the header's addr is in the list
1356 	 */
1357 	fp = sctp_lookup_faddr(sctp, hdrsaddr);
1358 	if (fp == NULL) {
1359 		/* not included; add it now */
1360 		err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE);
1361 		if (err != 0)
1362 			return (err);
1363 
1364 		/* sctp_faddrs will be the hdr addr */
1365 		fp = sctp->sctp_faddrs;
1366 	}
1367 	/* make the header addr the primary */
1368 
1369 	if (cl_sctp_assoc_change != NULL && psctp == NULL)
1370 		curaddr = sctp->sctp_current->faddr;
1371 
1372 	sctp->sctp_primary = fp;
1373 	sctp->sctp_current = fp;
1374 	sctp->sctp_mss = fp->sfa_pmss;
1375 
1376 	/* For loopback connections & linklocal get address from the header */
1377 	if (sctp->sctp_loopback || sctp->sctp_linklocal) {
1378 		if (sctp->sctp_nsaddrs != 0)
1379 			sctp_free_saddrs(sctp);
1380 		if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0)
1381 			return (err);
1382 		/* For loopback ignore address list */
1383 		if (sctp->sctp_loopback)
1384 			return (0);
1385 		check_saddr = B_FALSE;
1386 	}
1387 
1388 	/* Walk the params in the INIT [ACK], pulling out addr params */
1389 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1390 	    sizeof (sctp_init_chunk_t);
1391 	if (remaining < sizeof (*ph)) {
1392 		if (check_saddr) {
1393 			sctp_check_saddr(sctp, supp_af, psctp == NULL ?
1394 			    B_FALSE : B_TRUE, hdrdaddr);
1395 		}
1396 		ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1397 		return (0);
1398 	}
1399 
1400 	init = (sctp_init_chunk_t *)(ich + 1);
1401 	ph = (sctp_parm_hdr_t *)(init + 1);
1402 
1403 	/* params will have already been byteordered when validating */
1404 	while (ph != NULL) {
1405 		if (ph->sph_type == htons(PARM_SUPP_ADDRS)) {
1406 			int		plen;
1407 			uint16_t	*p;
1408 			uint16_t	addrtype;
1409 
1410 			ASSERT(psctp != NULL);
1411 			plen = ntohs(ph->sph_len);
1412 			p = (uint16_t *)(ph + 1);
1413 			while (plen > 0) {
1414 				addrtype = ntohs(*p);
1415 				switch (addrtype) {
1416 					case PARM_ADDR6:
1417 						supp_af |= PARM_SUPP_V6;
1418 						break;
1419 					case PARM_ADDR4:
1420 						supp_af |= PARM_SUPP_V4;
1421 						break;
1422 					default:
1423 						break;
1424 				}
1425 				p++;
1426 				plen -= sizeof (*p);
1427 			}
1428 		} else if (ph->sph_type == htons(PARM_ADDR4)) {
1429 			if (remaining >= PARM_ADDR4_LEN) {
1430 				in6_addr_t addr;
1431 				ipaddr_t ta;
1432 
1433 				supp_af |= PARM_SUPP_V4;
1434 				/*
1435 				 * Screen out broad/multicasts & loopback.
1436 				 * If the endpoint only accepts v6 address,
1437 				 * go to the next one.
1438 				 *
1439 				 * Subnet broadcast check is done in
1440 				 * sctp_add_faddr().  If the address is
1441 				 * a broadcast address, it won't be added.
1442 				 */
1443 				bcopy(ph + 1, &ta, sizeof (ta));
1444 				if (ta == 0 ||
1445 				    ta == INADDR_BROADCAST ||
1446 				    ta == htonl(INADDR_LOOPBACK) ||
1447 				    CLASSD(ta) ||
1448 				    sctp->sctp_connp->conn_ipv6_v6only) {
1449 					goto next;
1450 				}
1451 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1452 				    (ph + 1), &addr);
1453 
1454 				/* Check for duplicate. */
1455 				if (sctp_lookup_faddr(sctp, &addr) != NULL)
1456 					goto next;
1457 
1458 				/* OK, add it to the faddr set */
1459 				err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP,
1460 				    B_FALSE);
1461 				/* Something is wrong...  Try the next one. */
1462 				if (err != 0)
1463 					goto next;
1464 			}
1465 		} else if (ph->sph_type == htons(PARM_ADDR6) &&
1466 		    sctp->sctp_family == AF_INET6) {
1467 			/* An v4 socket should not take v6 addresses. */
1468 			if (remaining >= PARM_ADDR6_LEN) {
1469 				in6_addr_t *addr6;
1470 
1471 				supp_af |= PARM_SUPP_V6;
1472 				addr6 = (in6_addr_t *)(ph + 1);
1473 				/*
1474 				 * Screen out link locals, mcast, loopback
1475 				 * and bogus v6 address.
1476 				 */
1477 				if (IN6_IS_ADDR_LINKLOCAL(addr6) ||
1478 				    IN6_IS_ADDR_MULTICAST(addr6) ||
1479 				    IN6_IS_ADDR_LOOPBACK(addr6) ||
1480 				    IN6_IS_ADDR_V4MAPPED(addr6)) {
1481 					goto next;
1482 				}
1483 				/* Check for duplicate. */
1484 				if (sctp_lookup_faddr(sctp, addr6) != NULL)
1485 					goto next;
1486 
1487 				err = sctp_add_faddr(sctp,
1488 				    (in6_addr_t *)(ph + 1), KM_NOSLEEP,
1489 				    B_FALSE);
1490 				/* Something is wrong...  Try the next one. */
1491 				if (err != 0)
1492 					goto next;
1493 			}
1494 		} else if (ph->sph_type == htons(PARM_FORWARD_TSN)) {
1495 			if (sctp_options != NULL)
1496 				*sctp_options |= SCTP_PRSCTP_OPTION;
1497 		} /* else; skip */
1498 
1499 next:
1500 		ph = sctp_next_parm(ph, &remaining);
1501 	}
1502 	if (check_saddr) {
1503 		sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE :
1504 		    B_TRUE, hdrdaddr);
1505 	}
1506 	ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1507 	/*
1508 	 * We have the right address list now, update clustering's
1509 	 * knowledge because when we sent the INIT we had just added
1510 	 * the address the INIT was sent to.
1511 	 */
1512 	if (psctp == NULL && cl_sctp_assoc_change != NULL) {
1513 		uchar_t	*alist;
1514 		size_t	asize;
1515 		uchar_t	*dlist;
1516 		size_t	dsize;
1517 
1518 		asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
1519 		alist = kmem_alloc(asize, KM_NOSLEEP);
1520 		if (alist == NULL) {
1521 			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
1522 			return (ENOMEM);
1523 		}
1524 		/*
1525 		 * Just include the address the INIT was sent to in the
1526 		 * delete list and send the entire faddr list. We could
1527 		 * do it differently (i.e include all the addresses in the
1528 		 * add list even if it contains the original address OR
1529 		 * remove the original address from the add list etc.), but
1530 		 * this seems reasonable enough.
1531 		 */
1532 		dsize = sizeof (in6_addr_t);
1533 		dlist = kmem_alloc(dsize, KM_NOSLEEP);
1534 		if (dlist == NULL) {
1535 			kmem_free(alist, asize);
1536 			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
1537 			return (ENOMEM);
1538 		}
1539 		bcopy(&curaddr, dlist, sizeof (curaddr));
1540 		sctp_get_faddr_list(sctp, alist, asize);
1541 		(*cl_sctp_assoc_change)(sctp->sctp_family, alist, asize,
1542 		    sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR,
1543 		    (cl_sctp_handle_t)sctp);
1544 		/* alist and dlist will be freed by the clustering module */
1545 	}
1546 	return (0);
1547 }
1548 
1549 /*
1550  * Returns 0 if the check failed and the restart should be refused,
1551  * 1 if the check succeeded.
1552  */
1553 int
1554 sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports,
1555     int sleep, sctp_stack_t *sctps)
1556 {
1557 	sctp_faddr_t *fp, *fphead = NULL;
1558 	sctp_parm_hdr_t *ph;
1559 	ssize_t remaining;
1560 	int isv4;
1561 	ipha_t *iph;
1562 	ip6_t *ip6h;
1563 	in6_addr_t hdraddr[1];
1564 	int retval = 0;
1565 	sctp_tf_t *tf;
1566 	sctp_t *sctp;
1567 	int compres;
1568 	sctp_init_chunk_t *init;
1569 	int nadded = 0;
1570 
1571 	/* extract the address from the IP header */
1572 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1573 	if (isv4) {
1574 		iph = (ipha_t *)pkt->b_rptr;
1575 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
1576 	} else {
1577 		ip6h = (ip6_t *)pkt->b_rptr;
1578 		hdraddr[0] = ip6h->ip6_src;
1579 	}
1580 
1581 	/* Walk the params in the INIT [ACK], pulling out addr params */
1582 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1583 	    sizeof (sctp_init_chunk_t);
1584 	if (remaining < sizeof (*ph)) {
1585 		/* no parameters; restart OK */
1586 		return (1);
1587 	}
1588 	init = (sctp_init_chunk_t *)(ich + 1);
1589 	ph = (sctp_parm_hdr_t *)(init + 1);
1590 
1591 	while (ph != NULL) {
1592 		sctp_faddr_t *fpa = NULL;
1593 
1594 		/* params will have already been byteordered when validating */
1595 		if (ph->sph_type == htons(PARM_ADDR4)) {
1596 			if (remaining >= PARM_ADDR4_LEN) {
1597 				in6_addr_t addr;
1598 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1599 				    (ph + 1), &addr);
1600 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1601 				    sleep);
1602 				if (fpa == NULL) {
1603 					goto done;
1604 				}
1605 				bzero(fpa, sizeof (*fpa));
1606 				fpa->faddr = addr;
1607 				fpa->next = NULL;
1608 			}
1609 		} else if (ph->sph_type == htons(PARM_ADDR6)) {
1610 			if (remaining >= PARM_ADDR6_LEN) {
1611 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1612 				    sleep);
1613 				if (fpa == NULL) {
1614 					goto done;
1615 				}
1616 				bzero(fpa, sizeof (*fpa));
1617 				bcopy(ph + 1, &fpa->faddr,
1618 				    sizeof (fpa->faddr));
1619 				fpa->next = NULL;
1620 			}
1621 		}
1622 		/* link in the new addr, if it was an addr param */
1623 		if (fpa != NULL) {
1624 			if (fphead == NULL) {
1625 				fphead = fpa;
1626 			} else {
1627 				fpa->next = fphead;
1628 				fphead = fpa;
1629 			}
1630 		}
1631 
1632 		ph = sctp_next_parm(ph, &remaining);
1633 	}
1634 
1635 	if (fphead == NULL) {
1636 		/* no addr parameters; restart OK */
1637 		return (1);
1638 	}
1639 
1640 	/*
1641 	 * got at least one; make sure the header's addr is
1642 	 * in the list
1643 	 */
1644 	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
1645 	if (fp == NULL) {
1646 		/* not included; add it now */
1647 		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
1648 		if (fp == NULL) {
1649 			goto done;
1650 		}
1651 		bzero(fp, sizeof (*fp));
1652 		fp->faddr = *hdraddr;
1653 		fp->next = fphead;
1654 		fphead = fp;
1655 	}
1656 
1657 	/*
1658 	 * Now, we can finally do the check: For each sctp instance
1659 	 * on the hash line for ports, compare its faddr set against
1660 	 * the new one. If the new one is a strict subset of any
1661 	 * existing sctp's faddrs, the restart is OK. However, if there
1662 	 * is an overlap, this could be an attack, so return failure.
1663 	 * If all sctp's faddrs are disjoint, this is a legitimate new
1664 	 * association.
1665 	 */
1666 	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
1667 	mutex_enter(&tf->tf_lock);
1668 
1669 	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
1670 		if (ports != sctp->sctp_ports) {
1671 			continue;
1672 		}
1673 		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
1674 		if (compres <= SCTP_ADDR_SUBSET) {
1675 			retval = 1;
1676 			mutex_exit(&tf->tf_lock);
1677 			goto done;
1678 		}
1679 		if (compres == SCTP_ADDR_OVERLAP) {
1680 			dprint(1,
1681 			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
1682 			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
1683 			/*
1684 			 * While we still hold the lock, we need to
1685 			 * figure out which addresses have been
1686 			 * added so we can include them in the abort
1687 			 * we will send back. Since these faddrs will
1688 			 * never be used, we overload the rto field
1689 			 * here, setting it to 0 if the address was
1690 			 * not added, 1 if it was added.
1691 			 */
1692 			for (fp = fphead; fp; fp = fp->next) {
1693 				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
1694 					fp->rto = 0;
1695 				} else {
1696 					fp->rto = 1;
1697 					nadded++;
1698 				}
1699 			}
1700 			mutex_exit(&tf->tf_lock);
1701 			goto done;
1702 		}
1703 	}
1704 	mutex_exit(&tf->tf_lock);
1705 
1706 	/* All faddrs are disjoint; legit new association */
1707 	retval = 1;
1708 
1709 done:
1710 	/* If are attempted adds, send back an abort listing the addrs */
1711 	if (nadded > 0) {
1712 		void *dtail;
1713 		size_t dlen;
1714 
1715 		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
1716 		if (dtail == NULL) {
1717 			goto cleanup;
1718 		}
1719 
1720 		ph = dtail;
1721 		dlen = 0;
1722 		for (fp = fphead; fp; fp = fp->next) {
1723 			if (fp->rto == 0) {
1724 				continue;
1725 			}
1726 			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1727 				ipaddr_t addr4;
1728 
1729 				ph->sph_type = htons(PARM_ADDR4);
1730 				ph->sph_len = htons(PARM_ADDR4_LEN);
1731 				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
1732 				ph++;
1733 				bcopy(&addr4, ph, sizeof (addr4));
1734 				ph = (sctp_parm_hdr_t *)
1735 				    ((char *)ph + sizeof (addr4));
1736 				dlen += PARM_ADDR4_LEN;
1737 			} else {
1738 				ph->sph_type = htons(PARM_ADDR6);
1739 				ph->sph_len = htons(PARM_ADDR6_LEN);
1740 				ph++;
1741 				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
1742 				ph = (sctp_parm_hdr_t *)
1743 				    ((char *)ph + sizeof (fp->faddr));
1744 				dlen += PARM_ADDR6_LEN;
1745 			}
1746 		}
1747 
1748 		/* Send off the abort */
1749 		sctp_send_abort(sctp, sctp_init2vtag(ich),
1750 		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE);
1751 
1752 		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
1753 	}
1754 
1755 cleanup:
1756 	/* Clean up */
1757 	if (fphead) {
1758 		sctp_faddr_t *fpn;
1759 		for (fp = fphead; fp; fp = fpn) {
1760 			fpn = fp->next;
1761 			kmem_cache_free(sctp_kmem_faddr_cache, fp);
1762 		}
1763 	}
1764 
1765 	return (retval);
1766 }
1767 
1768 /*
1769  * Reset any state related to transmitted chunks.
1770  */
1771 void
1772 sctp_congest_reset(sctp_t *sctp)
1773 {
1774 	sctp_faddr_t	*fp;
1775 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1776 	mblk_t		*mp;
1777 
1778 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
1779 		fp->ssthresh = sctps->sctps_initial_mtu;
1780 		SET_CWND(fp, fp->sfa_pmss, sctps->sctps_slow_start_initial);
1781 		fp->suna = 0;
1782 		fp->pba = 0;
1783 	}
1784 	/*
1785 	 * Clean up the transmit list as well since we have reset accounting
1786 	 * on all the fps. Send event upstream, if required.
1787 	 */
1788 	while ((mp = sctp->sctp_xmit_head) != NULL) {
1789 		sctp->sctp_xmit_head = mp->b_next;
1790 		mp->b_next = NULL;
1791 		if (sctp->sctp_xmit_head != NULL)
1792 			sctp->sctp_xmit_head->b_prev = NULL;
1793 		sctp_sendfail_event(sctp, mp, 0, B_TRUE);
1794 	}
1795 	sctp->sctp_xmit_head = NULL;
1796 	sctp->sctp_xmit_tail = NULL;
1797 	sctp->sctp_xmit_unacked = NULL;
1798 
1799 	sctp->sctp_unacked = 0;
1800 	/*
1801 	 * Any control message as well. We will clean-up this list as well.
1802 	 * This contains any pending ASCONF request that we have queued/sent.
1803 	 * If we do get an ACK we will just drop it. However, given that
1804 	 * we are restarting chances are we aren't going to get any.
1805 	 */
1806 	if (sctp->sctp_cxmit_list != NULL)
1807 		sctp_asconf_free_cxmit(sctp, NULL);
1808 	sctp->sctp_cxmit_list = NULL;
1809 	sctp->sctp_cchunk_pend = 0;
1810 
1811 	sctp->sctp_rexmitting = B_FALSE;
1812 	sctp->sctp_rxt_nxttsn = 0;
1813 	sctp->sctp_rxt_maxtsn = 0;
1814 
1815 	sctp->sctp_zero_win_probe = B_FALSE;
1816 }
1817 
1818 static void
1819 sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
1820     mblk_t *timer_mp)
1821 {
1822 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1823 
1824 	bcopy(addr, &fp->faddr, sizeof (*addr));
1825 	if (IN6_IS_ADDR_V4MAPPED(addr)) {
1826 		fp->isv4 = 1;
1827 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
1828 		fp->sfa_pmss =
1829 		    (sctps->sctps_initial_mtu - sctp->sctp_hdr_len) &
1830 		    ~(SCTP_ALIGN - 1);
1831 	} else {
1832 		fp->isv4 = 0;
1833 		fp->sfa_pmss =
1834 		    (sctps->sctps_initial_mtu - sctp->sctp_hdr6_len) &
1835 		    ~(SCTP_ALIGN - 1);
1836 	}
1837 	fp->cwnd = sctps->sctps_slow_start_initial * fp->sfa_pmss;
1838 	fp->rto = MIN(sctp->sctp_rto_initial, sctp->sctp_init_rto_max);
1839 	fp->srtt = -1;
1840 	fp->rtt_updates = 0;
1841 	fp->strikes = 0;
1842 	fp->max_retr = sctp->sctp_pp_max_rxt;
1843 	/* Mark it as not confirmed. */
1844 	fp->state = SCTP_FADDRS_UNCONFIRMED;
1845 	fp->hb_interval = sctp->sctp_hb_interval;
1846 	fp->ssthresh = sctps->sctps_initial_ssthresh;
1847 	fp->suna = 0;
1848 	fp->pba = 0;
1849 	fp->acked = 0;
1850 	fp->lastactive = lbolt64;
1851 	fp->timer_mp = timer_mp;
1852 	fp->hb_pending = B_FALSE;
1853 	fp->hb_enabled = B_TRUE;
1854 	fp->timer_running = 0;
1855 	fp->df = 1;
1856 	fp->pmtu_discovered = 0;
1857 	fp->rc_timer_mp = NULL;
1858 	fp->rc_timer_running = 0;
1859 	fp->next = NULL;
1860 	fp->ire = NULL;
1861 	fp->T3expire = 0;
1862 	(void) random_get_pseudo_bytes((uint8_t *)&fp->hb_secret,
1863 	    sizeof (fp->hb_secret));
1864 	fp->hb_expiry = lbolt64;
1865 	fp->rxt_unacked = 0;
1866 
1867 	sctp_get_ire(sctp, fp);
1868 }
1869 
1870 /*ARGSUSED*/
1871 static void
1872 faddr_destructor(void *buf, void *cdrarg)
1873 {
1874 	sctp_faddr_t *fp = buf;
1875 
1876 	ASSERT(fp->timer_mp == NULL);
1877 	ASSERT(fp->timer_running == 0);
1878 
1879 	ASSERT(fp->rc_timer_mp == NULL);
1880 	ASSERT(fp->rc_timer_running == 0);
1881 }
1882 
1883 void
1884 sctp_faddr_init(void)
1885 {
1886 	sctp_kmem_faddr_cache = kmem_cache_create("sctp_faddr_cache",
1887 	    sizeof (sctp_faddr_t), 0, NULL, faddr_destructor,
1888 	    NULL, NULL, NULL, 0);
1889 }
1890 
1891 void
1892 sctp_faddr_fini(void)
1893 {
1894 	kmem_cache_destroy(sctp_kmem_faddr_cache);
1895 }
1896