xref: /titanic_51/usr/src/uts/common/inet/sctp/sctp_common.c (revision 6b5764c36d253d178caa447fa2a6d7e0c7dfd6e6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/systm.h>
29 #include <sys/stream.h>
30 #include <sys/strsubr.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/kmem.h>
34 #include <sys/socket.h>
35 #include <sys/random.h>
36 #include <sys/tsol/tndb.h>
37 #include <sys/tsol/tnet.h>
38 
39 #include <netinet/in.h>
40 #include <netinet/ip6.h>
41 #include <netinet/sctp.h>
42 
43 #include <inet/common.h>
44 #include <inet/ip.h>
45 #include <inet/ip6.h>
46 #include <inet/ip_ire.h>
47 #include <inet/mib2.h>
48 #include <inet/nd.h>
49 #include <inet/optcom.h>
50 #include <inet/sctp_ip.h>
51 #include <inet/ipclassifier.h>
52 
53 #include "sctp_impl.h"
54 #include "sctp_addr.h"
55 #include "sctp_asconf.h"
56 
57 static struct kmem_cache *sctp_kmem_faddr_cache;
58 static void sctp_init_faddr(sctp_t *, sctp_faddr_t *, in6_addr_t *, mblk_t *);
59 
60 /* Set the source address.  Refer to comments in sctp_get_ire(). */
61 void
62 sctp_set_saddr(sctp_t *sctp, sctp_faddr_t *fp)
63 {
64 	boolean_t v6 = !fp->isv4;
65 	boolean_t addr_set;
66 
67 	fp->saddr = sctp_get_valid_addr(sctp, v6, &addr_set);
68 	/*
69 	 * If there is no source address available, mark this peer address
70 	 * as unreachable for now.  When the heartbeat timer fires, it will
71 	 * call sctp_get_ire() to re-check if there is any source address
72 	 * available.
73 	 */
74 	if (!addr_set)
75 		fp->state = SCTP_FADDRS_UNREACH;
76 }
77 
78 /*
79  * Call this function to update the cached IRE of a peer addr fp.
80  */
81 void
82 sctp_get_ire(sctp_t *sctp, sctp_faddr_t *fp)
83 {
84 	ire_t		*ire;
85 	ipaddr_t	addr4;
86 	in6_addr_t	laddr;
87 	sctp_saddr_ipif_t *sp;
88 	int		hdrlen;
89 	ts_label_t	*tsl;
90 	sctp_stack_t	*sctps = sctp->sctp_sctps;
91 	ip_stack_t	*ipst = sctps->sctps_netstack->netstack_ip;
92 
93 	/* Remove the previous cache IRE */
94 	if ((ire = fp->ire) != NULL) {
95 		IRE_REFRELE_NOTR(ire);
96 		fp->ire = NULL;
97 	}
98 
99 	/*
100 	 * If this addr is not reachable, mark it as unconfirmed for now; the
101 	 * state will be changed back to unreachable later in this function
102 	 * if that is still the case.
103 	 */
104 	if (fp->state == SCTP_FADDRS_UNREACH) {
105 		fp->state = SCTP_FADDRS_UNCONFIRMED;
106 	}
107 
108 	tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
109 
110 	if (fp->isv4) {
111 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
112 		ire = ire_cache_lookup(addr4, sctp->sctp_zoneid, tsl, ipst);
113 		if (ire != NULL)
114 			IN6_IPADDR_TO_V4MAPPED(ire->ire_src_addr, &laddr);
115 	} else {
116 		ire = ire_cache_lookup_v6(&fp->faddr, sctp->sctp_zoneid, tsl,
117 		    ipst);
118 		if (ire != NULL)
119 			laddr = ire->ire_src_addr_v6;
120 	}
121 
122 	if (ire == NULL) {
123 		dprint(3, ("ire2faddr: no ire for %x:%x:%x:%x\n",
124 		    SCTP_PRINTADDR(fp->faddr)));
125 		/*
126 		 * It is tempting to just leave the src addr
127 		 * unspecified and let IP figure it out, but we
128 		 * *cannot* do this, since IP may choose a src addr
129 		 * that is not part of this association... unless
130 		 * this sctp has bound to all addrs.  So if the ire
131 		 * lookup fails, try to find one in our src addr
132 		 * list, unless the sctp has bound to all addrs, in
133 		 * which case we change the src addr to unspec.
134 		 *
135 		 * Note that if this is a v6 endpoint but it does
136 		 * not have any v4 address at this point (e.g. may
137 		 * have been deleted), sctp_get_valid_addr() will
138 		 * return mapped INADDR_ANY.  In this case, this
139 		 * address should be marked not reachable so that
140 		 * it won't be used to send data.
141 		 */
142 		sctp_set_saddr(sctp, fp);
143 		if (fp->state == SCTP_FADDRS_UNREACH)
144 			return;
145 		goto check_current;
146 	}
147 
148 	/* Make sure the laddr is part of this association */
149 	if ((sp = sctp_saddr_lookup(sctp, &ire->ire_ipif->ipif_v6lcl_addr,
150 	    0)) != NULL && !sp->saddr_ipif_dontsrc) {
151 		if (sp->saddr_ipif_unconfirmed == 1)
152 			sp->saddr_ipif_unconfirmed = 0;
153 		fp->saddr = laddr;
154 	} else {
155 		dprint(2, ("ire2faddr: src addr is not part of assc\n"));
156 
157 		/*
158 		 * Set the src to the first saddr and hope for the best.
159 		 * Note that we will still do the ire caching below.
160 		 * Otherwise, whenever we send a packet, we need to do
161 		 * the ire lookup again and still may not get the correct
162 		 * source address.  Note that this case should very seldom
163 		 * happen.  One scenario where this can happen is when an app
164 		 * explicitly binds to an address which is not the preferred
165 		 * source address for reaching the peer.
166 		 */
167 		sctp_set_saddr(sctp, fp);
168 		if (fp->state == SCTP_FADDRS_UNREACH) {
169 			IRE_REFRELE(ire);
170 			return;
171 		}
172 	}
173 
174 	/*
175 	 * Note that ire_cache_lookup_*() returns an ire with the tracing
176 	 * bits enabled.  This requires the thread holding the ire also
177 	 * do the IRE_REFRELE().  Thus we need to do IRE_REFHOLD_NOTR()
178 	 * and then IRE_REFRELE() the ire here to make the tracing bits
179 	 * work.
180 	 */
181 	IRE_REFHOLD_NOTR(ire);
182 	IRE_REFRELE(ire);
183 
184 	/* Cache the IRE */
185 	fp->ire = ire;
186 	if (fp->ire->ire_type == IRE_LOOPBACK && !sctp->sctp_loopback)
187 		sctp->sctp_loopback = 1;
188 
189 	/*
190 	 * Pull out RTO information for this faddr and use it if we don't
191 	 * have any yet.
192 	 */
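	/*
	 * Illustrative example (added for clarity; assumes hz = 100, so one
	 * tick is 10 ms): a cached iulp_rtt of 120 ms seeds fp->srtt with
	 * 12 ticks, giving an initial rto of 3 * 12 = 36 ticks (360 ms),
	 * which is then clamped to [sctp_rto_min, sctp_rto_max] below.
	 */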
193 	if (fp->srtt == -1 && ire->ire_uinfo.iulp_rtt != 0) {
194 		/* The cached value is in ms. */
195 		fp->srtt = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt);
196 		fp->rttvar = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt_sd);
197 		fp->rto = 3 * fp->srtt;
198 
199 		/* Bound the RTO by configured min and max values */
200 		if (fp->rto < sctp->sctp_rto_min) {
201 			fp->rto = sctp->sctp_rto_min;
202 		}
203 		if (fp->rto > sctp->sctp_rto_max) {
204 			fp->rto = sctp->sctp_rto_max;
205 		}
206 	}
207 
208 	/*
209 	 * Record the MTU for this faddr. If the MTU for this faddr has
210 	 * changed, check if the assc MTU will also change.
211 	 */
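	/*
	 * Worked example (added for clarity; assumes SCTP_ALIGN is 4): for
	 * an IPv4 peer with ire_max_frag = 1006 and a 32-byte combined
	 * IP + SCTP header, sfa_pmss becomes (1006 - 32) & ~3 = 972.
	 */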
212 	if (fp->isv4) {
213 		hdrlen = sctp->sctp_hdr_len;
214 	} else {
215 		hdrlen = sctp->sctp_hdr6_len;
216 	}
217 	if ((fp->sfa_pmss + hdrlen) != ire->ire_max_frag) {
218 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
219 		fp->sfa_pmss = (ire->ire_max_frag - hdrlen) & ~(SCTP_ALIGN - 1);
220 		if (fp->cwnd < (fp->sfa_pmss * 2)) {
221 			SET_CWND(fp, fp->sfa_pmss,
222 			    sctps->sctps_slow_start_initial);
223 		}
224 	}
225 
226 check_current:
227 	if (fp == sctp->sctp_current)
228 		sctp_set_faddr_current(sctp, fp);
229 }
230 
231 void
232 sctp_update_ire(sctp_t *sctp)
233 {
234 	ire_t		*ire;
235 	sctp_faddr_t	*fp;
236 	sctp_stack_t	*sctps = sctp->sctp_sctps;
237 
238 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
239 		if ((ire = fp->ire) == NULL)
240 			continue;
241 		mutex_enter(&ire->ire_lock);
242 
243 		/*
244 		 * If the cached IRE is going away, there is no point in
245 		 * updating it.
246 		 */
247 		if (ire->ire_marks & IRE_MARK_CONDEMNED) {
248 			mutex_exit(&ire->ire_lock);
249 			IRE_REFRELE_NOTR(ire);
250 			fp->ire = NULL;
251 			continue;
252 		}
253 
254 		/*
255 		 * Only record the PMTU for this faddr if we actually have
256 		 * done discovery.  This prevents the initialized default from
257 		 * clobbering any real info that IP may have.
258 		 */
259 		if (fp->pmtu_discovered) {
260 			if (fp->isv4) {
261 				ire->ire_max_frag = fp->sfa_pmss +
262 				    sctp->sctp_hdr_len;
263 			} else {
264 				ire->ire_max_frag = fp->sfa_pmss +
265 				    sctp->sctp_hdr6_len;
266 			}
267 		}
268 
269 		if (sctps->sctps_rtt_updates != 0 &&
270 		    fp->rtt_updates >= sctps->sctps_rtt_updates) {
271 			/*
272 			 * If there are no old cached values, initialize them
273 			 * conservatively.  Set them to be (1.5 * new value).
274 			 * This code copied from ip_ire_advise().  The cached
275 			 * value is in ms.
276 			 */
277 			if (ire->ire_uinfo.iulp_rtt != 0) {
278 				ire->ire_uinfo.iulp_rtt =
279 				    (ire->ire_uinfo.iulp_rtt +
280 				    TICK_TO_MSEC(fp->srtt)) >> 1;
281 			} else {
282 				ire->ire_uinfo.iulp_rtt =
283 				    TICK_TO_MSEC(fp->srtt + (fp->srtt >> 1));
284 			}
285 			if (ire->ire_uinfo.iulp_rtt_sd != 0) {
286 				ire->ire_uinfo.iulp_rtt_sd =
287 				    (ire->ire_uinfo.iulp_rtt_sd +
288 				    TICK_TO_MSEC(fp->rttvar)) >> 1;
289 			} else {
290 				ire->ire_uinfo.iulp_rtt_sd =
291 				    TICK_TO_MSEC(fp->rttvar +
292 				    (fp->rttvar >> 1));
293 			}
294 			fp->rtt_updates = 0;
295 		}
296 		mutex_exit(&ire->ire_lock);
297 	}
298 }
299 
300 /*
301  * The sender must set the total length in the IP header.
302  * If sendto == NULL, the current will be used.
303  */
304 mblk_t *
305 sctp_make_mp(sctp_t *sctp, sctp_faddr_t *sendto, int trailer)
306 {
307 	mblk_t *mp;
308 	size_t ipsctplen;
309 	int isv4;
310 	sctp_faddr_t *fp;
311 	sctp_stack_t *sctps = sctp->sctp_sctps;
312 	boolean_t src_changed = B_FALSE;
313 
314 	ASSERT(sctp->sctp_current != NULL || sendto != NULL);
315 	if (sendto == NULL) {
316 		fp = sctp->sctp_current;
317 	} else {
318 		fp = sendto;
319 	}
320 	isv4 = fp->isv4;
321 
322 	/* Try to look up an IRE again. */
323 	if (fp->ire == NULL) {
324 		sctp_get_ire(sctp, fp);
325 		/*
326 		 * Although we still may not get an IRE, the source address
327 		 * may be changed in sctp_get_ire().  Set src_changed to
328 		 * true so that the source address is copied again.
329 		 */
330 		src_changed = B_TRUE;
331 	}
332 
333 	/* There is no suitable source address to use, return. */
334 	if (fp->state == SCTP_FADDRS_UNREACH)
335 		return (NULL);
336 	ASSERT(!SCTP_IS_ADDR_UNSPEC(fp->isv4, fp->saddr));
337 
338 	if (isv4) {
339 		ipsctplen = sctp->sctp_hdr_len;
340 	} else {
341 		ipsctplen = sctp->sctp_hdr6_len;
342 	}
343 
344 	mp = allocb_cred(ipsctplen + sctps->sctps_wroff_xtra + trailer,
345 	    CONN_CRED(sctp->sctp_connp), sctp->sctp_cpid);
346 	if (mp == NULL) {
347 		ip1dbg(("sctp_make_mp: error making mp..\n"));
348 		return (NULL);
349 	}
350 	mp->b_rptr += sctps->sctps_wroff_xtra;
351 	mp->b_wptr = mp->b_rptr + ipsctplen;
352 
353 	ASSERT(OK_32PTR(mp->b_wptr));
354 
355 	if (isv4) {
356 		ipha_t *iph = (ipha_t *)mp->b_rptr;
357 
358 		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
359 		if (fp != sctp->sctp_current || src_changed) {
360 			/* Fix the source and destination addresses. */
361 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst);
362 			IN6_V4MAPPED_TO_IPADDR(&fp->saddr, iph->ipha_src);
363 		}
364 		/* set or clear the don't fragment bit */
365 		if (fp->df) {
366 			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
367 		} else {
368 			iph->ipha_fragment_offset_and_flags = 0;
369 		}
370 	} else {
371 		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
372 		if (fp != sctp->sctp_current || src_changed) {
373 			/* Fix the source and destination addresses. */
374 			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr;
375 			((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr;
376 		}
377 	}
378 	ASSERT(sctp->sctp_connp != NULL);
379 
380 	/*
381 	 * IP will not free this IRE if it is condemned.  SCTP needs to
382 	 * free it.
383 	 */
384 	if ((fp->ire != NULL) && (fp->ire->ire_marks & IRE_MARK_CONDEMNED)) {
385 		IRE_REFRELE_NOTR(fp->ire);
386 		fp->ire = NULL;
387 	}
388 	/* Stash the conn and ire ptr info. for IP */
389 	SCTP_STASH_IPINFO(mp, fp->ire);
390 
391 	return (mp);
392 }
393 
394 /*
395  * Notify upper layers about preferred write offset, write size.
396  */
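/*
 * Illustrative numbers (added for clarity; assume a 20-byte IPv4 header,
 * a 12-byte SCTP common header and a 16-byte sctp_data_hdr_t): for an IPv4
 * association, sopp_wroff below works out to sctps_wroff_xtra + 32 + 16
 * bytes, and sopp_maxblk to sctp_mss - 16 bytes.
 */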
397 void
398 sctp_set_ulp_prop(sctp_t *sctp)
399 {
400 	int hdrlen;
401 	struct sock_proto_props sopp;
402 
403 	sctp_stack_t *sctps = sctp->sctp_sctps;
404 
405 	if (sctp->sctp_current->isv4) {
406 		hdrlen = sctp->sctp_hdr_len;
407 	} else {
408 		hdrlen = sctp->sctp_hdr6_len;
409 	}
410 	ASSERT(sctp->sctp_ulpd);
411 
412 	ASSERT(sctp->sctp_current->sfa_pmss == sctp->sctp_mss);
413 	bzero(&sopp, sizeof (sopp));
414 	sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF;
415 	sopp.sopp_wroff = sctps->sctps_wroff_xtra + hdrlen +
416 	    sizeof (sctp_data_hdr_t);
417 	sopp.sopp_maxblk = sctp->sctp_mss - sizeof (sctp_data_hdr_t);
418 	sctp->sctp_ulp_prop(sctp->sctp_ulpd, &sopp);
419 }
420 
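/*
 * Fill in the IP length field of the packet in mp: the IPv4 total length,
 * or for IPv6 the payload length (the byte count excluding the IPv6 header
 * itself, skipping over any leading ip6i_t).  The length is computed by
 * summing the data in every mblk of the chain.  (Summary comment added for
 * clarity.)
 */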
421 void
422 sctp_set_iplen(sctp_t *sctp, mblk_t *mp)
423 {
424 	uint16_t	sum = 0;
425 	ipha_t		*iph;
426 	ip6_t		*ip6h;
427 	mblk_t		*pmp = mp;
428 	boolean_t	isv4;
429 
430 	isv4 = (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
431 	for (; pmp; pmp = pmp->b_cont)
432 		sum += pmp->b_wptr - pmp->b_rptr;
433 
434 	if (isv4) {
435 		iph = (ipha_t *)mp->b_rptr;
436 		iph->ipha_length = htons(sum);
437 	} else {
438 		ip6h = (ip6_t *)mp->b_rptr;
439 		/*
440 		 * If an ip6i_t is present, the real IPv6 header
441 		 * immediately follows.
442 		 */
443 		if (ip6h->ip6_nxt == IPPROTO_RAW)
444 			ip6h = (ip6_t *)&ip6h[1];
445 		ip6h->ip6_plen = htons(sum - ((char *)&sctp->sctp_ip6h[1] -
446 		    sctp->sctp_iphc6));
447 	}
448 }
449 
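/*
 * Compare the peer address list a1 against a2.  (Summary comment added for
 * clarity.)  Counting how many addresses of a1 also appear in a2, the
 * function returns SCTP_ADDR_EQUAL when the sets are considered identical,
 * otherwise SCTP_ADDR_SUBSET if every address in a1 appears in a2,
 * SCTP_ADDR_OVERLAP if only some do, and SCTP_ADDR_DISJOINT if none do.
 */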
450 int
451 sctp_compare_faddrsets(sctp_faddr_t *a1, sctp_faddr_t *a2)
452 {
453 	int na1 = 0;
454 	int overlap = 0;
455 	int equal = 1;
456 	int onematch;
457 	sctp_faddr_t *fp1, *fp2;
458 
459 	for (fp1 = a1; fp1; fp1 = fp1->next) {
460 		onematch = 0;
461 		for (fp2 = a2; fp2; fp2 = fp2->next) {
462 			if (IN6_ARE_ADDR_EQUAL(&fp1->faddr, &fp2->faddr)) {
463 				overlap++;
464 				onematch = 1;
465 				break;
466 			}
467 			if (!onematch) {
468 				equal = 0;
469 			}
470 		}
471 		na1++;
472 	}
473 
474 	if (equal) {
475 		return (SCTP_ADDR_EQUAL);
476 	}
477 	if (overlap == na1) {
478 		return (SCTP_ADDR_SUBSET);
479 	}
480 	if (overlap) {
481 		return (SCTP_ADDR_OVERLAP);
482 	}
483 	return (SCTP_ADDR_DISJOINT);
484 }
485 
486 /*
487  * Returns 0 on success, or a system errno on failure (e.g. ENOMEM).  If
488  * sleep is KM_SLEEP, allocation should never fail.  The boolean parameter
489  * first decides whether the newly created faddr structure should be
490  * added at the beginning of the list or at the end.
491  *
492  * Note: caller must hold conn fanout lock.
493  */
494 int
495 sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
496 {
497 	sctp_faddr_t	*faddr;
498 	mblk_t		*timer_mp;
499 
500 	if (is_system_labeled()) {
501 		ts_label_t *tsl;
502 		tsol_tpc_t *rhtp;
503 		int retv;
504 
505 		tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
506 		ASSERT(tsl != NULL);
507 
508 		/* find_tpc automatically does the right thing with IPv4 */
509 		rhtp = find_tpc(addr, IPV6_VERSION, B_FALSE);
510 		if (rhtp == NULL)
511 			return (EACCES);
512 
513 		retv = EACCES;
514 		if (tsl->tsl_doi == rhtp->tpc_tp.tp_doi) {
515 			switch (rhtp->tpc_tp.host_type) {
516 			case UNLABELED:
517 				/*
518 				 * Can talk to unlabeled hosts if any of the
519 				 * following are true:
520 				 *   1. zone's label matches the remote host's
521 				 *	default label,
522 				 *   2. mac_exempt is on and the zone dominates
523 				 *	the remote host's label, or
524 				 *   3. mac_exempt is on and the socket is from
525 				 *	the global zone.
526 				 */
527 				if (blequal(&rhtp->tpc_tp.tp_def_label,
528 				    &tsl->tsl_label) ||
529 				    (sctp->sctp_mac_exempt &&
530 				    (sctp->sctp_zoneid == GLOBAL_ZONEID ||
531 				    bldominates(&tsl->tsl_label,
532 				    &rhtp->tpc_tp.tp_def_label))))
533 					retv = 0;
534 				break;
535 			case SUN_CIPSO:
536 				if (_blinrange(&tsl->tsl_label,
537 				    &rhtp->tpc_tp.tp_sl_range_cipso) ||
538 				    blinlset(&tsl->tsl_label,
539 				    rhtp->tpc_tp.tp_sl_set_cipso))
540 					retv = 0;
541 				break;
542 			}
543 		}
544 		TPC_RELE(rhtp);
545 		if (retv != 0)
546 			return (retv);
547 	}
548 
549 	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
550 		return (ENOMEM);
551 	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep);
552 	if (timer_mp == NULL) {
553 		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
554 		return (ENOMEM);
555 	}
556 	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;
557 
558 	sctp_init_faddr(sctp, faddr, addr, timer_mp);
559 
560 	/* Check for subnet broadcast. */
561 	if (faddr->ire != NULL && faddr->ire->ire_type & IRE_BROADCAST) {
562 		IRE_REFRELE_NOTR(faddr->ire);
563 		sctp_timer_free(timer_mp);
564 		faddr->timer_mp = NULL;
565 		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
566 		return (EADDRNOTAVAIL);
567 	}
568 	ASSERT(faddr->next == NULL);
569 
570 	if (sctp->sctp_faddrs == NULL) {
571 		ASSERT(sctp->sctp_lastfaddr == NULL);
572 		/* only element on list; first and last are same */
573 		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
574 	} else if (first) {
575 		ASSERT(sctp->sctp_lastfaddr != NULL);
576 		faddr->next = sctp->sctp_faddrs;
577 		sctp->sctp_faddrs = faddr;
578 	} else {
579 		sctp->sctp_lastfaddr->next = faddr;
580 		sctp->sctp_lastfaddr = faddr;
581 	}
582 	sctp->sctp_nfaddrs++;
583 
584 	return (0);
585 }
586 
587 sctp_faddr_t *
588 sctp_lookup_faddr(sctp_t *sctp, in6_addr_t *addr)
589 {
590 	sctp_faddr_t *fp;
591 
592 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
593 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr))
594 			break;
595 	}
596 
597 	return (fp);
598 }
599 
600 sctp_faddr_t *
601 sctp_lookup_faddr_nosctp(sctp_faddr_t *fp, in6_addr_t *addr)
602 {
603 	for (; fp; fp = fp->next) {
604 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr)) {
605 			break;
606 		}
607 	}
608 
609 	return (fp);
610 }
611 
612 /*
613  * To change the currently used peer address to the specified one.
614  */
615 void
616 sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
617 {
618 	/* Now setup the composite header. */
619 	if (fp->isv4) {
620 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
621 		    sctp->sctp_ipha->ipha_dst);
622 		IN6_V4MAPPED_TO_IPADDR(&fp->saddr, sctp->sctp_ipha->ipha_src);
623 		/* update don't fragment bit */
624 		if (fp->df) {
625 			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
626 			    htons(IPH_DF);
627 		} else {
628 			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
629 		}
630 	} else {
631 		sctp->sctp_ip6h->ip6_dst = fp->faddr;
632 		sctp->sctp_ip6h->ip6_src = fp->saddr;
633 	}
634 
635 	sctp->sctp_current = fp;
636 	sctp->sctp_mss = fp->sfa_pmss;
637 
638 	/* Update the upper layer for the change. */
639 	if (!SCTP_IS_DETACHED(sctp))
640 		sctp_set_ulp_prop(sctp);
641 }
642 
643 void
644 sctp_redo_faddr_srcs(sctp_t *sctp)
645 {
646 	sctp_faddr_t *fp;
647 
648 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
649 		sctp_get_ire(sctp, fp);
650 	}
651 }
652 
653 void
654 sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
655 {
656 	int64_t now = lbolt64;
657 
658 	fp->strikes = 0;
659 	sctp->sctp_strikes = 0;
660 	fp->lastactive = now;
661 	fp->hb_expiry = now + SET_HB_INTVL(fp);
662 	fp->hb_pending = B_FALSE;
663 	if (fp->state != SCTP_FADDRS_ALIVE) {
664 		fp->state = SCTP_FADDRS_ALIVE;
665 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_AVAILABLE, 0);
666 		/* Should have a full IRE now */
667 		sctp_get_ire(sctp, fp);
668 
669 		/*
670 		 * If this is the primary, switch back to it now.  And
671 		 * we probably want to reset the source addr used to reach
672 		 * it.
673 		 */
674 		if (fp == sctp->sctp_primary) {
675 			ASSERT(fp->state != SCTP_FADDRS_UNREACH);
676 			sctp_set_faddr_current(sctp, fp);
677 			return;
678 		}
679 	}
680 }
681 
682 int
683 sctp_is_a_faddr_clean(sctp_t *sctp)
684 {
685 	sctp_faddr_t *fp;
686 
687 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
688 		if (fp->state == SCTP_FADDRS_ALIVE && fp->strikes == 0) {
689 			return (1);
690 		}
691 	}
692 
693 	return (0);
694 }
695 
696 /*
697  * Returns 0 if there is at least one other active faddr, -1 if there
698  * are none. If there are none left, faddr_dead() will start killing the
699  * association.
700  * If the downed faddr was the current faddr, a new current faddr
701  * will be chosen.
702  */
703 int
704 sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate)
705 {
706 	sctp_faddr_t *ofp;
707 	sctp_stack_t *sctps = sctp->sctp_sctps;
708 
709 	if (fp->state == SCTP_FADDRS_ALIVE) {
710 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_UNREACHABLE, 0);
711 	}
712 	fp->state = newstate;
713 
714 	dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n",
715 	    SCTP_PRINTADDR(fp->faddr), newstate));
716 
717 	if (fp == sctp->sctp_current) {
718 		/* Current faddr down; need to switch it */
719 		sctp->sctp_current = NULL;
720 	}
721 
722 	/* Find next alive faddr */
723 	ofp = fp;
724 	for (fp = fp->next; fp != NULL; fp = fp->next) {
725 		if (fp->state == SCTP_FADDRS_ALIVE) {
726 			break;
727 		}
728 	}
729 
730 	if (fp == NULL) {
731 		/* Continue from beginning of list */
732 		for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->next) {
733 			if (fp->state == SCTP_FADDRS_ALIVE) {
734 				break;
735 			}
736 		}
737 	}
738 
739 	/*
740 	 * Find a new fp, so if the current faddr is dead, use the new fp
741 	 * as the current one.
742 	 */
743 	if (fp != ofp) {
744 		if (sctp->sctp_current == NULL) {
745 			dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n",
746 			    SCTP_PRINTADDR(fp->faddr)));
747 			/*
748 			 * Note that we don't need to reset the source addr
749 			 * of the new fp.
750 			 */
751 			sctp_set_faddr_current(sctp, fp);
752 		}
753 		return (0);
754 	}
755 
756 
757 	/* All faddrs are down; kill the association */
758 	dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n"));
759 	BUMP_MIB(&sctps->sctps_mib, sctpAborted);
760 	sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ?
761 	    SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL);
762 	sctp_clean_death(sctp, sctp->sctp_client_errno ?
763 	    sctp->sctp_client_errno : ETIMEDOUT);
764 
765 	return (-1);
766 }
767 
768 sctp_faddr_t *
769 sctp_rotate_faddr(sctp_t *sctp, sctp_faddr_t *ofp)
770 {
771 	sctp_faddr_t *nfp = NULL;
772 
773 	if (ofp == NULL) {
774 		ofp = sctp->sctp_current;
775 	}
776 
777 	/* Find the next live one */
778 	for (nfp = ofp->next; nfp != NULL; nfp = nfp->next) {
779 		if (nfp->state == SCTP_FADDRS_ALIVE) {
780 			break;
781 		}
782 	}
783 
784 	if (nfp == NULL) {
785 		/* Continue from beginning of list */
786 		for (nfp = sctp->sctp_faddrs; nfp != ofp; nfp = nfp->next) {
787 			if (nfp->state == SCTP_FADDRS_ALIVE) {
788 				break;
789 			}
790 		}
791 	}
792 
793 	/*
794 	 * nfp could only be NULL if all faddrs are down, and when
795 	 * this happens, faddr_dead() should have killed the
796 	 * association. Hence this assertion...
797 	 */
798 	ASSERT(nfp != NULL);
799 	return (nfp);
800 }
801 
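/*
 * Remove fp from the association's peer address list, releasing its timers
 * and any cached IRE.  The conn fanout lock is held while the list is
 * relinked so that fanout lookups never see a half-updated list.  (Summary
 * comment added for clarity.)
 */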
802 void
803 sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp)
804 {
805 	sctp_faddr_t *fpp;
806 
807 	if (!sctp->sctp_faddrs) {
808 		return;
809 	}
810 
811 	if (fp->timer_mp != NULL) {
812 		sctp_timer_free(fp->timer_mp);
813 		fp->timer_mp = NULL;
814 		fp->timer_running = 0;
815 	}
816 	if (fp->rc_timer_mp != NULL) {
817 		sctp_timer_free(fp->rc_timer_mp);
818 		fp->rc_timer_mp = NULL;
819 		fp->rc_timer_running = 0;
820 	}
821 	if (fp->ire != NULL) {
822 		IRE_REFRELE_NOTR(fp->ire);
823 		fp->ire = NULL;
824 	}
825 
826 	if (fp == sctp->sctp_faddrs) {
827 		goto gotit;
828 	}
829 
830 	for (fpp = sctp->sctp_faddrs; fpp->next != fp; fpp = fpp->next)
831 		;
832 
833 gotit:
834 	ASSERT(sctp->sctp_conn_tfp != NULL);
835 	mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
836 	if (fp == sctp->sctp_faddrs) {
837 		sctp->sctp_faddrs = fp->next;
838 	} else {
839 		fpp->next = fp->next;
840 	}
841 	mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
842 	/* XXX faddr2ire? */
843 	kmem_cache_free(sctp_kmem_faddr_cache, fp);
844 	sctp->sctp_nfaddrs--;
845 }
846 
847 void
848 sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock)
849 {
850 	sctp_faddr_t *fp, *fpn;
851 
852 	if (sctp->sctp_faddrs == NULL) {
853 		ASSERT(sctp->sctp_lastfaddr == NULL);
854 		return;
855 	}
856 
857 	ASSERT(sctp->sctp_lastfaddr != NULL);
858 	sctp->sctp_lastfaddr = NULL;
859 	sctp->sctp_current = NULL;
860 	sctp->sctp_primary = NULL;
861 
862 	sctp_free_faddr_timers(sctp);
863 
864 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
865 		/* in conn fanout; need to hold lock */
866 		mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
867 	}
868 
869 	for (fp = sctp->sctp_faddrs; fp; fp = fpn) {
870 		fpn = fp->next;
871 		if (fp->ire != NULL)
872 			IRE_REFRELE_NOTR(fp->ire);
873 		kmem_cache_free(sctp_kmem_faddr_cache, fp);
874 		sctp->sctp_nfaddrs--;
875 	}
876 
877 	sctp->sctp_faddrs = NULL;
878 	ASSERT(sctp->sctp_nfaddrs == 0);
879 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
880 		mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
881 	}
882 
883 }
884 
885 void
886 sctp_zap_addrs(sctp_t *sctp)
887 {
888 	sctp_zap_faddrs(sctp, 0);
889 	sctp_free_saddrs(sctp);
890 }
891 
892 /*
893  * Initialize the IPv4 header. Loses any record of any IP options.
894  */
895 int
896 sctp_header_init_ipv4(sctp_t *sctp, int sleep)
897 {
898 	sctp_hdr_t	*sctph;
899 	sctp_stack_t	*sctps = sctp->sctp_sctps;
900 
901 	/*
902 	 * This is a simple initialization. If there's
903 	 * already a template, it should never be too small,
904 	 * so reuse it.  Otherwise, allocate space for the new one.
905 	 */
906 	if (sctp->sctp_iphc != NULL) {
907 		ASSERT(sctp->sctp_iphc_len >= SCTP_MAX_COMBINED_HEADER_LENGTH);
908 		bzero(sctp->sctp_iphc, sctp->sctp_iphc_len);
909 	} else {
910 		sctp->sctp_iphc_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
911 		sctp->sctp_iphc = kmem_zalloc(sctp->sctp_iphc_len, sleep);
912 		if (sctp->sctp_iphc == NULL) {
913 			sctp->sctp_iphc_len = 0;
914 			return (ENOMEM);
915 		}
916 	}
917 
918 	sctp->sctp_ipha = (ipha_t *)sctp->sctp_iphc;
919 
920 	sctp->sctp_hdr_len = sizeof (ipha_t) + sizeof (sctp_hdr_t);
921 	sctp->sctp_ip_hdr_len = sizeof (ipha_t);
922 	sctp->sctp_ipha->ipha_length = htons(sizeof (ipha_t) +
923 	    sizeof (sctp_hdr_t));
924 	sctp->sctp_ipha->ipha_version_and_hdr_length =
925 	    (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS;
926 
927 	/*
928 	 * These two fields should be zero, and are already set above.
929 	 *
930 	 * sctp->sctp_ipha->ipha_ident,
931 	 * sctp->sctp_ipha->ipha_fragment_offset_and_flags.
932 	 */
933 
934 	sctp->sctp_ipha->ipha_ttl = sctps->sctps_ipv4_ttl;
935 	sctp->sctp_ipha->ipha_protocol = IPPROTO_SCTP;
936 
937 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc + sizeof (ipha_t));
938 	sctp->sctp_sctph = sctph;
939 
940 	return (0);
941 }
942 
943 /*
944  * Update sctp_sticky_hdrs based on sctp_sticky_ipp.
945  * The headers include ip6i_t (if needed), ip6_t, any sticky extension
946  * headers, and the maximum size sctp header (to avoid reallocation
947  * on the fly for additional sctp options).
948  * Returns failure if can't allocate memory.
949  */
950 int
951 sctp_build_hdrs(sctp_t *sctp)
952 {
953 	char		*hdrs;
954 	uint_t		hdrs_len;
955 	ip6i_t		*ip6i;
956 	char		buf[SCTP_MAX_HDR_LENGTH];
957 	ip6_pkt_t	*ipp = &sctp->sctp_sticky_ipp;
958 	in6_addr_t	src;
959 	in6_addr_t	dst;
960 	sctp_stack_t	*sctps = sctp->sctp_sctps;
961 
962 	/*
963 	 * save the existing sctp header and source/dest IP addresses
964 	 */
965 	bcopy(sctp->sctp_sctph6, buf, sizeof (sctp_hdr_t));
966 	src = sctp->sctp_ip6h->ip6_src;
967 	dst = sctp->sctp_ip6h->ip6_dst;
968 	hdrs_len = ip_total_hdrs_len_v6(ipp) + SCTP_MAX_HDR_LENGTH;
969 	ASSERT(hdrs_len != 0);
970 	if (hdrs_len > sctp->sctp_iphc6_len) {
971 		/* Need to reallocate */
972 		hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP);
973 		if (hdrs == NULL)
974 			return (ENOMEM);
975 
976 		if (sctp->sctp_iphc6_len != 0)
977 			kmem_free(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
978 		sctp->sctp_iphc6 = hdrs;
979 		sctp->sctp_iphc6_len = hdrs_len;
980 	}
981 	ip_build_hdrs_v6((uchar_t *)sctp->sctp_iphc6,
982 	    hdrs_len - SCTP_MAX_HDR_LENGTH, ipp, IPPROTO_SCTP);
983 
984 	/* Set header fields not in ipp */
985 	if (ipp->ipp_fields & IPPF_HAS_IP6I) {
986 		ip6i = (ip6i_t *)sctp->sctp_iphc6;
987 		sctp->sctp_ip6h = (ip6_t *)&ip6i[1];
988 	} else {
989 		sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
990 	}
991 	/*
992 	 * sctp->sctp_ip_hdr_len will include ip6i_t if there is one.
993 	 */
994 	sctp->sctp_ip_hdr6_len = hdrs_len - SCTP_MAX_HDR_LENGTH;
995 	sctp->sctp_sctph6 = (sctp_hdr_t *)(sctp->sctp_iphc6 +
996 	    sctp->sctp_ip_hdr6_len);
997 	sctp->sctp_hdr6_len = sctp->sctp_ip_hdr6_len + sizeof (sctp_hdr_t);
998 
999 	bcopy(buf, sctp->sctp_sctph6, sizeof (sctp_hdr_t));
1000 
1001 	sctp->sctp_ip6h->ip6_src = src;
1002 	sctp->sctp_ip6h->ip6_dst = dst;
1003 	/*
1004 	 * If the hoplimit was not set by ip_build_hdrs_v6(), we need to
1005 	 * set it to the default value for SCTP.
1006 	 */
1007 	if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS))
1008 		sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit;
1009 	/*
1010 	 * If we're setting extension headers after a connection
1011 	 * has been established, and if we have a routing header
1012 	 * among the extension headers, call ip_massage_options_v6 to
1013 	 * manipulate the routing header/ip6_dst and set the checksum
1014 	 * difference in the sctp header template.
1015 	 * (This happens in sctp_connect_ipv6 if the routing header
1016 	 * is set prior to the connect.)
1017 	 */
1018 
1019 	if ((sctp->sctp_state >= SCTPS_COOKIE_WAIT) &&
1020 	    (sctp->sctp_sticky_ipp.ipp_fields & IPPF_RTHDR)) {
1021 		ip6_rthdr_t *rth;
1022 
1023 		rth = ip_find_rthdr_v6(sctp->sctp_ip6h,
1024 		    (uint8_t *)sctp->sctp_sctph6);
1025 		if (rth != NULL) {
1026 			(void) ip_massage_options_v6(sctp->sctp_ip6h, rth,
1027 			    sctps->sctps_netstack);
1028 		}
1029 	}
1030 	return (0);
1031 }
1032 
1033 /*
1034  * Initialize the IPv6 header. Loses any record of any IPv6 extension headers.
1035  */
1036 int
1037 sctp_header_init_ipv6(sctp_t *sctp, int sleep)
1038 {
1039 	sctp_hdr_t	*sctph;
1040 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1041 
1042 	/*
1043 	 * This is a simple initialization. If there's
1044 	 * already a template, it should never be too small,
1045 	 * so reuse it. Otherwise, allocate space for the new one.
1046 	 * Ensure that there is enough space to "downgrade" the sctp_t
1047 	 * to an IPv4 sctp_t. This requires having space for a full load
1048 	 * of IPv4 options
1049 	 * of IPv4 options.
1050 	if (sctp->sctp_iphc6 != NULL) {
1051 		ASSERT(sctp->sctp_iphc6_len >=
1052 		    SCTP_MAX_COMBINED_HEADER_LENGTH);
1053 		bzero(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
1054 	} else {
1055 		sctp->sctp_iphc6_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
1056 		sctp->sctp_iphc6 = kmem_zalloc(sctp->sctp_iphc6_len, sleep);
1057 		if (sctp->sctp_iphc6 == NULL) {
1058 			sctp->sctp_iphc6_len = 0;
1059 			return (ENOMEM);
1060 		}
1061 	}
1062 	sctp->sctp_hdr6_len = IPV6_HDR_LEN + sizeof (sctp_hdr_t);
1063 	sctp->sctp_ip_hdr6_len = IPV6_HDR_LEN;
1064 	sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
1065 
1066 	/* Initialize the header template */
1067 
1068 	sctp->sctp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
1069 	sctp->sctp_ip6h->ip6_plen = ntohs(sizeof (sctp_hdr_t));
1070 	sctp->sctp_ip6h->ip6_nxt = IPPROTO_SCTP;
1071 	sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit;
1072 
1073 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc6 + IPV6_HDR_LEN);
1074 	sctp->sctp_sctph6 = sctph;
1075 
1076 	return (0);
1077 }
1078 
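/*
 * (Summary comment added for clarity.)  Compute the Trusted Extensions
 * security label option for the current IPv4 destination, strip any old
 * security option from the header template, and prepend the new one,
 * adjusting sctp_hdr_len, sctp_ip_hdr_len and the sctp_sctph pointer for
 * the resulting change in header size.  Returns 0 on success or EACCES on
 * failure.
 */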
1079 static int
1080 sctp_v4_label(sctp_t *sctp)
1081 {
1082 	uchar_t optbuf[IP_MAX_OPT_LENGTH];
1083 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1084 	int added;
1085 
1086 	if (tsol_compute_label(cr, sctp->sctp_ipha->ipha_dst, optbuf,
1087 	    sctp->sctp_mac_exempt,
1088 	    sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0)
1089 		return (EACCES);
1090 
1091 	added = tsol_remove_secopt(sctp->sctp_ipha, sctp->sctp_hdr_len);
1092 	if (added == -1)
1093 		return (EACCES);
1094 	sctp->sctp_hdr_len += added;
1095 	sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph + added);
1096 	sctp->sctp_ip_hdr_len += added;
1097 	if ((sctp->sctp_v4label_len = optbuf[IPOPT_OLEN]) != 0) {
1098 		sctp->sctp_v4label_len = (sctp->sctp_v4label_len + 3) & ~3;
1099 		added = tsol_prepend_option(optbuf, sctp->sctp_ipha,
1100 		    sctp->sctp_hdr_len);
1101 		if (added == -1)
1102 			return (EACCES);
1103 		sctp->sctp_hdr_len += added;
1104 		sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph +
1105 		    added);
1106 		sctp->sctp_ip_hdr_len += added;
1107 	}
1108 	return (0);
1109 }
1110 
1111 static int
1112 sctp_v6_label(sctp_t *sctp)
1113 {
1114 	uchar_t optbuf[TSOL_MAX_IPV6_OPTION];
1115 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1116 
1117 	if (tsol_compute_label_v6(cr, &sctp->sctp_ip6h->ip6_dst, optbuf,
1118 	    sctp->sctp_mac_exempt,
1119 	    sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0)
1120 		return (EACCES);
1121 	if (tsol_update_sticky(&sctp->sctp_sticky_ipp, &sctp->sctp_v6label_len,
1122 	    optbuf) != 0)
1123 		return (EACCES);
1124 	if (sctp_build_hdrs(sctp) != 0)
1125 		return (EACCES);
1126 	return (0);
1127 }
1128 
1129 /*
1130  * XXX implement more sophisticated logic
1131  */
1132 int
1133 sctp_set_hdraddrs(sctp_t *sctp)
1134 {
1135 	sctp_faddr_t *fp;
1136 	int gotv4 = 0;
1137 	int gotv6 = 0;
1138 
1139 	ASSERT(sctp->sctp_faddrs != NULL);
1140 	ASSERT(sctp->sctp_nsaddrs > 0);
1141 
1142 	/* Set up using the primary first */
1143 	if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->faddr)) {
1144 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->faddr,
1145 		    sctp->sctp_ipha->ipha_dst);
1146 		/* saddr may be unspec; make_mp() will handle this */
1147 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->saddr,
1148 		    sctp->sctp_ipha->ipha_src);
1149 		if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1150 			gotv4 = 1;
1151 			if (sctp->sctp_ipversion == IPV4_VERSION) {
1152 				goto copyports;
1153 			}
1154 		}
1155 	} else {
1156 		sctp->sctp_ip6h->ip6_dst = sctp->sctp_primary->faddr;
1157 		/* saddr may be unspec; make_mp() will handle this */
1158 		sctp->sctp_ip6h->ip6_src = sctp->sctp_primary->saddr;
1159 		if (!is_system_labeled() || sctp_v6_label(sctp) == 0)
1160 			gotv6 = 1;
1161 	}
1162 
1163 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
1164 		if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1165 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
1166 			    sctp->sctp_ipha->ipha_dst);
1167 			/* copy in the faddr_t's saddr */
1168 			IN6_V4MAPPED_TO_IPADDR(&fp->saddr,
1169 			    sctp->sctp_ipha->ipha_src);
1170 			if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1171 				gotv4 = 1;
1172 				if (sctp->sctp_ipversion == IPV4_VERSION ||
1173 				    gotv6) {
1174 					break;
1175 				}
1176 			}
1177 		} else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1178 			sctp->sctp_ip6h->ip6_dst = fp->faddr;
1179 			/* copy in the faddr_t's saddr */
1180 			sctp->sctp_ip6h->ip6_src = fp->saddr;
1181 			if (!is_system_labeled() || sctp_v6_label(sctp) == 0) {
1182 				gotv6 = 1;
1183 				if (gotv4)
1184 					break;
1185 			}
1186 		}
1187 	}
1188 
1189 copyports:
1190 	if (!gotv4 && !gotv6)
1191 		return (EACCES);
1192 
1193 	/* copy in the ports for good measure */
1194 	sctp->sctp_sctph->sh_sport = sctp->sctp_lport;
1195 	sctp->sctp_sctph->sh_dport = sctp->sctp_fport;
1196 
1197 	sctp->sctp_sctph6->sh_sport = sctp->sctp_lport;
1198 	sctp->sctp_sctph6->sh_dport = sctp->sctp_fport;
1199 	return (0);
1200 }
1201 
1202 /*
1203  * got_errchunk is set B_TRUE only if called from validate_init_params(), when
1204  * an ERROR chunk has already been prepended, the size of which needs updating
1205  * for additional unrecognized parameters. Other callers either prepend the
1206  * ERROR chunk with the correct size after calling this function, or they are
1207  * calling to add an invalid parameter to an INIT_ACK chunk; in that case no
1208  * ERROR chunk exists and the CAUSE blocks go into the INIT_ACK directly.
1209  *
1210  * *errmp will be non-NULL both when adding an additional CAUSE block to an
1211  * existing prepended COOKIE ERROR chunk (processing params of an INIT_ACK),
1212  * and when adding unrecognized parameters after the first, to an INIT_ACK
1213  * (processing params of an INIT chunk).
1214  */
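/*
 * Illustrative layout (added for clarity): each CAUSE block built below
 * wraps the offending parameter in a PARM_UNRECOGNIZED header and is padded
 * out to a multiple of SCTP_ALIGN:
 *
 *	[ sph_type = PARM_UNRECOGNIZED | sph_len = hdr + param length ]
 *	[ verbatim copy of the unrecognized parameter                 ]
 *	[ zero padding to the next SCTP_ALIGN boundary                ]
 */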
1215 void
1216 sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp,
1217     boolean_t got_errchunk)
1218 {
1219 	mblk_t *mp;
1220 	sctp_parm_hdr_t *ph;
1221 	size_t len;
1222 	int pad;
1223 	sctp_chunk_hdr_t *ecp;
1224 
1225 	len = sizeof (*ph) + ntohs(uph->sph_len);
1226 	if ((pad = len % SCTP_ALIGN) != 0) {
1227 		pad = SCTP_ALIGN - pad;
1228 		len += pad;
1229 	}
1230 	mp = allocb(len, BPRI_MED);
1231 	if (mp == NULL) {
1232 		return;
1233 	}
1234 
1235 	ph = (sctp_parm_hdr_t *)(mp->b_rptr);
1236 	ph->sph_type = htons(PARM_UNRECOGNIZED);
1237 	ph->sph_len = htons(len - pad);
1238 
1239 	/* copy in the unrecognized parameter */
1240 	bcopy(uph, ph + 1, ntohs(uph->sph_len));
1241 
1242 	if (pad != 0)
1243 		bzero((mp->b_rptr + len - pad), pad);
1244 
1245 	mp->b_wptr = mp->b_rptr + len;
1246 	if (*errmp != NULL) {
1247 		/*
1248 		 * Update total length if an ERROR chunk, then link
1249 		 * this CAUSE block to the possible chain of CAUSE
1250 		 * blocks attached to the ERROR chunk or INIT_ACK
1251 		 * being created.
1252 		 */
1253 		if (got_errchunk) {
1254 			/* ERROR chunk already prepended */
1255 			ecp = (sctp_chunk_hdr_t *)((*errmp)->b_rptr);
1256 			ecp->sch_len = htons(ntohs(ecp->sch_len) + len);
1257 		}
1258 		linkb(*errmp, mp);
1259 	} else {
1260 		*errmp = mp;
1261 	}
1262 }
1263 
1264 /*
1265  * o Bounds checking
1266  * o Updates remaining
1267  * o Checks alignment
1268  */
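/*
 * Illustrative usage sketch (added for clarity, mirroring the callers in
 * this file):
 *
 *	while (ph != NULL) {
 *		... inspect ph->sph_type and ph->sph_len ...
 *		ph = sctp_next_parm(ph, &remaining);
 *	}
 *
 * A parameter whose sph_len is 6 advances the cursor by 8 bytes (2 bytes of
 * padding to the next SCTP_ALIGN boundary, assuming SCTP_ALIGN is 4), and
 * NULL is returned once "remaining" can no longer hold a full parameter
 * header.
 */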
1269 sctp_parm_hdr_t *
1270 sctp_next_parm(sctp_parm_hdr_t *current, ssize_t *remaining)
1271 {
1272 	int pad;
1273 	uint16_t len;
1274 
1275 	len = ntohs(current->sph_len);
1276 	*remaining -= len;
1277 	if (*remaining < sizeof (*current) || len < sizeof (*current)) {
1278 		return (NULL);
1279 	}
1280 	if ((pad = len & (SCTP_ALIGN - 1)) != 0) {
1281 		pad = SCTP_ALIGN - pad;
1282 		*remaining -= pad;
1283 	}
1284 	/*LINTED pointer cast may result in improper alignment*/
1285 	current = (sctp_parm_hdr_t *)((char *)current + len + pad);
1286 	return (current);
1287 }
1288 
1289 /*
1290  * Sets the address parameters given in the INIT chunk into sctp's
1291  * faddrs; if psctp is non-NULL, copies psctp's saddrs. If there are
1292  * no address parameters in the INIT chunk, a single faddr is created
1293  * from the ip hdr at the beginning of pkt.
1294  * If there already are existing addresses hanging from sctp, merge
1295  * them in, if the old info contains addresses which are not present
1296  * in this new info, get rid of them, and clean the pointers if there's
1297  * messages which have this as their target address.
1298  *
1299  * We also re-adjust the source address list here since the list may
1300  * contain more than what is actually part of the association. If
1301  * we get here from sctp_send_cookie_echo(), we are on the active
1302  * side and psctp will be NULL and ich will be the INIT-ACK chunk.
1303  * If we get here from sctp_accept_comm(), ich will be the INIT chunk
1304  * and psctp will be the listening endpoint.
1305  *
1306  * INIT processing: When processing the INIT we inherit the src address
1307  * list from the listener. For a loopback or linklocal association, we
1308  * delete the list and just take the address from the IP header (since
1309  * that's how we created the INIT-ACK). Additionally, for loopback we
1310  * ignore the address params in the INIT. For determining which address
1311  * types were sent in the INIT-ACK we follow the same logic as in
1312  * creating the INIT-ACK. We delete addresses of the type that are not
1313  * supported by the peer.
1314  *
1315  * INIT-ACK processing: When processing the INIT-ACK since we had not
1316  * included addr params for loopback or linklocal addresses when creating
1317  * the INIT, we just use the address from the IP header. Further, for
1318  * loopback we ignore the addr param list. We mark addresses of the
1319  * type not supported by the peer as unconfirmed.
1320  *
1321  * In case of INIT processing we look for supported address types in the
1322  * supported address param, if present. In both cases the address type in
1323  * the IP header is supported as well as types for addresses in the param
1324  * list, if any.
1325  *
1326  * Once we have the supported address types sctp_check_saddr() runs through
1327  * the source address list and deletes or marks as unconfirmed address of
1328  * types not supported by the peer.
1329  *
1330  * Returns 0 on success, sys errno on failure
1331  */
1332 int
1333 sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
1334     sctp_chunk_hdr_t *ich, uint_t *sctp_options)
1335 {
1336 	sctp_init_chunk_t	*init;
1337 	ipha_t			*iph;
1338 	ip6_t			*ip6h;
1339 	in6_addr_t		hdrsaddr[1];
1340 	in6_addr_t		hdrdaddr[1];
1341 	sctp_parm_hdr_t		*ph;
1342 	ssize_t			remaining;
1343 	int			isv4;
1344 	int			err;
1345 	sctp_faddr_t		*fp;
1346 	int			supp_af = 0;
1347 	boolean_t		check_saddr = B_TRUE;
1348 	in6_addr_t		curaddr;
1349 	sctp_stack_t		*sctps = sctp->sctp_sctps;
1350 
1351 	if (sctp_options != NULL)
1352 		*sctp_options = 0;
1353 
1354 	/* extract the address from the IP header */
1355 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1356 	if (isv4) {
1357 		iph = (ipha_t *)pkt->b_rptr;
1358 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
1359 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
1360 		supp_af |= PARM_SUPP_V4;
1361 	} else {
1362 		ip6h = (ip6_t *)pkt->b_rptr;
1363 		hdrsaddr[0] = ip6h->ip6_src;
1364 		hdrdaddr[0] = ip6h->ip6_dst;
1365 		supp_af |= PARM_SUPP_V6;
1366 	}
1367 
1368 	/*
1369 	 * Unfortunately, we can't delay this because adding an faddr
1370 	 * looks for the presence of the source address (from the ire
1371 	 * for the faddr) in the source address list. We could have
1372 	 * delayed this if, say, this was a loopback/linklocal connection.
1373 	 * Now, we just end up nuking this list and taking the addr from
1374 	 * the IP header for loopback/linklocal.
1375 	 */
1376 	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
1377 		ASSERT(sctp->sctp_nsaddrs == 0);
1378 
1379 		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
1380 		if (err != 0)
1381 			return (err);
1382 	}
1383 	/*
1384 	 * We will add the faddr before parsing the address list as this
1385 	 * might be a loopback connection and we would not have to
1386 	 * go through the list.
1387 	 *
1388 	 * Make sure the header's addr is in the list
1389 	 */
1390 	fp = sctp_lookup_faddr(sctp, hdrsaddr);
1391 	if (fp == NULL) {
1392 		/* not included; add it now */
1393 		err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE);
1394 		if (err != 0)
1395 			return (err);
1396 
1397 		/* sctp_faddrs will be the hdr addr */
1398 		fp = sctp->sctp_faddrs;
1399 	}
1400 	/* make the header addr the primary */
1401 
1402 	if (cl_sctp_assoc_change != NULL && psctp == NULL)
1403 		curaddr = sctp->sctp_current->faddr;
1404 
1405 	sctp->sctp_primary = fp;
1406 	sctp->sctp_current = fp;
1407 	sctp->sctp_mss = fp->sfa_pmss;
1408 
1409 	/* For loopback connections & linklocal get address from the header */
1410 	if (sctp->sctp_loopback || sctp->sctp_linklocal) {
1411 		if (sctp->sctp_nsaddrs != 0)
1412 			sctp_free_saddrs(sctp);
1413 		if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0)
1414 			return (err);
1415 		/* For loopback ignore address list */
1416 		if (sctp->sctp_loopback)
1417 			return (0);
1418 		check_saddr = B_FALSE;
1419 	}
1420 
1421 	/* Walk the params in the INIT [ACK], pulling out addr params */
1422 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1423 	    sizeof (sctp_init_chunk_t);
1424 	if (remaining < sizeof (*ph)) {
1425 		if (check_saddr) {
1426 			sctp_check_saddr(sctp, supp_af, psctp == NULL ?
1427 			    B_FALSE : B_TRUE, hdrdaddr);
1428 		}
1429 		ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1430 		return (0);
1431 	}
1432 
1433 	init = (sctp_init_chunk_t *)(ich + 1);
1434 	ph = (sctp_parm_hdr_t *)(init + 1);
1435 
1436 	/* params will have already been byteordered when validating */
1437 	while (ph != NULL) {
1438 		if (ph->sph_type == htons(PARM_SUPP_ADDRS)) {
1439 			int		plen;
1440 			uint16_t	*p;
1441 			uint16_t	addrtype;
1442 
1443 			ASSERT(psctp != NULL);
1444 			plen = ntohs(ph->sph_len);
1445 			p = (uint16_t *)(ph + 1);
1446 			while (plen > 0) {
1447 				addrtype = ntohs(*p);
1448 				switch (addrtype) {
1449 					case PARM_ADDR6:
1450 						supp_af |= PARM_SUPP_V6;
1451 						break;
1452 					case PARM_ADDR4:
1453 						supp_af |= PARM_SUPP_V4;
1454 						break;
1455 					default:
1456 						break;
1457 				}
1458 				p++;
1459 				plen -= sizeof (*p);
1460 			}
1461 		} else if (ph->sph_type == htons(PARM_ADDR4)) {
1462 			if (remaining >= PARM_ADDR4_LEN) {
1463 				in6_addr_t addr;
1464 				ipaddr_t ta;
1465 
1466 				supp_af |= PARM_SUPP_V4;
1467 				/*
1468 				 * Screen out broad/multicasts & loopback.
1469 				 * If the endpoint only accepts v6 address,
1470 				 * go to the next one.
1471 				 *
1472 				 * Subnet broadcast check is done in
1473 				 * sctp_add_faddr().  If the address is
1474 				 * a broadcast address, it won't be added.
1475 				 */
1476 				bcopy(ph + 1, &ta, sizeof (ta));
1477 				if (ta == 0 ||
1478 				    ta == INADDR_BROADCAST ||
1479 				    ta == htonl(INADDR_LOOPBACK) ||
1480 				    CLASSD(ta) ||
1481 				    sctp->sctp_connp->conn_ipv6_v6only) {
1482 					goto next;
1483 				}
1484 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1485 				    (ph + 1), &addr);
1486 
1487 				/* Check for duplicate. */
1488 				if (sctp_lookup_faddr(sctp, &addr) != NULL)
1489 					goto next;
1490 
1491 				/* OK, add it to the faddr set */
1492 				err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP,
1493 				    B_FALSE);
1494 				/* Something is wrong...  Try the next one. */
1495 				if (err != 0)
1496 					goto next;
1497 			}
1498 		} else if (ph->sph_type == htons(PARM_ADDR6) &&
1499 		    sctp->sctp_family == AF_INET6) {
1500 			/* A v4 socket should not take v6 addresses. */
1501 			if (remaining >= PARM_ADDR6_LEN) {
1502 				in6_addr_t *addr6;
1503 
1504 				supp_af |= PARM_SUPP_V6;
1505 				addr6 = (in6_addr_t *)(ph + 1);
1506 				/*
1507 				 * Screen out link locals, mcast, loopback
1508 				 * and bogus v6 address.
1509 				 */
1510 				if (IN6_IS_ADDR_LINKLOCAL(addr6) ||
1511 				    IN6_IS_ADDR_MULTICAST(addr6) ||
1512 				    IN6_IS_ADDR_LOOPBACK(addr6) ||
1513 				    IN6_IS_ADDR_V4MAPPED(addr6)) {
1514 					goto next;
1515 				}
1516 				/* Check for duplicate. */
1517 				if (sctp_lookup_faddr(sctp, addr6) != NULL)
1518 					goto next;
1519 
1520 				err = sctp_add_faddr(sctp,
1521 				    (in6_addr_t *)(ph + 1), KM_NOSLEEP,
1522 				    B_FALSE);
1523 				/* Something is wrong...  Try the next one. */
1524 				if (err != 0)
1525 					goto next;
1526 			}
1527 		} else if (ph->sph_type == htons(PARM_FORWARD_TSN)) {
1528 			if (sctp_options != NULL)
1529 				*sctp_options |= SCTP_PRSCTP_OPTION;
1530 		} /* else; skip */
1531 
1532 next:
1533 		ph = sctp_next_parm(ph, &remaining);
1534 	}
1535 	if (check_saddr) {
1536 		sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE :
1537 		    B_TRUE, hdrdaddr);
1538 	}
1539 	ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1540 	/*
1541 	 * We have the right address list now, update clustering's
1542 	 * knowledge because when we sent the INIT we had just added
1543 	 * the address the INIT was sent to.
1544 	 */
1545 	if (psctp == NULL && cl_sctp_assoc_change != NULL) {
1546 		uchar_t	*alist;
1547 		size_t	asize;
1548 		uchar_t	*dlist;
1549 		size_t	dsize;
1550 
1551 		asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
1552 		alist = kmem_alloc(asize, KM_NOSLEEP);
1553 		if (alist == NULL) {
1554 			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
1555 			return (ENOMEM);
1556 		}
1557 		/*
1558 		 * Just include the address the INIT was sent to in the
1559 		 * delete list and send the entire faddr list. We could
1560 		 * do it differently (i.e include all the addresses in the
1561 		 * add list even if it contains the original address OR
1562 		 * remove the original address from the add list etc.), but
1563 		 * this seems reasonable enough.
1564 		 */
1565 		dsize = sizeof (in6_addr_t);
1566 		dlist = kmem_alloc(dsize, KM_NOSLEEP);
1567 		if (dlist == NULL) {
1568 			kmem_free(alist, asize);
1569 			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
1570 			return (ENOMEM);
1571 		}
1572 		bcopy(&curaddr, dlist, sizeof (curaddr));
1573 		sctp_get_faddr_list(sctp, alist, asize);
1574 		(*cl_sctp_assoc_change)(sctp->sctp_family, alist, asize,
1575 		    sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR,
1576 		    (cl_sctp_handle_t)sctp);
1577 		/* alist and dlist will be freed by the clustering module */
1578 	}
1579 	return (0);
1580 }
1581 
1582 /*
1583  * Returns 0 if the check failed and the restart should be refused,
1584  * 1 if the check succeeded.
1585  */
1586 int
1587 sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports,
1588     int sleep, sctp_stack_t *sctps)
1589 {
1590 	sctp_faddr_t *fp, *fphead = NULL;
1591 	sctp_parm_hdr_t *ph;
1592 	ssize_t remaining;
1593 	int isv4;
1594 	ipha_t *iph;
1595 	ip6_t *ip6h;
1596 	in6_addr_t hdraddr[1];
1597 	int retval = 0;
1598 	sctp_tf_t *tf;
1599 	sctp_t *sctp;
1600 	int compres;
1601 	sctp_init_chunk_t *init;
1602 	int nadded = 0;
1603 
1604 	/* extract the address from the IP header */
1605 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1606 	if (isv4) {
1607 		iph = (ipha_t *)pkt->b_rptr;
1608 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
1609 	} else {
1610 		ip6h = (ip6_t *)pkt->b_rptr;
1611 		hdraddr[0] = ip6h->ip6_src;
1612 	}
1613 
1614 	/* Walk the params in the INIT [ACK], pulling out addr params */
1615 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1616 	    sizeof (sctp_init_chunk_t);
1617 	if (remaining < sizeof (*ph)) {
1618 		/* no parameters; restart OK */
1619 		return (1);
1620 	}
1621 	init = (sctp_init_chunk_t *)(ich + 1);
1622 	ph = (sctp_parm_hdr_t *)(init + 1);
1623 
1624 	while (ph != NULL) {
1625 		sctp_faddr_t *fpa = NULL;
1626 
1627 		/* params will have already been byteordered when validating */
1628 		if (ph->sph_type == htons(PARM_ADDR4)) {
1629 			if (remaining >= PARM_ADDR4_LEN) {
1630 				in6_addr_t addr;
1631 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1632 				    (ph + 1), &addr);
1633 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1634 				    sleep);
1635 				if (fpa == NULL) {
1636 					goto done;
1637 				}
1638 				bzero(fpa, sizeof (*fpa));
1639 				fpa->faddr = addr;
1640 				fpa->next = NULL;
1641 			}
1642 		} else if (ph->sph_type == htons(PARM_ADDR6)) {
1643 			if (remaining >= PARM_ADDR6_LEN) {
1644 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1645 				    sleep);
1646 				if (fpa == NULL) {
1647 					goto done;
1648 				}
1649 				bzero(fpa, sizeof (*fpa));
1650 				bcopy(ph + 1, &fpa->faddr,
1651 				    sizeof (fpa->faddr));
1652 				fpa->next = NULL;
1653 			}
1654 		}
1655 		/* link in the new addr, if it was an addr param */
1656 		if (fpa != NULL) {
1657 			if (fphead == NULL) {
1658 				fphead = fpa;
1659 			} else {
1660 				fpa->next = fphead;
1661 				fphead = fpa;
1662 			}
1663 		}
1664 
1665 		ph = sctp_next_parm(ph, &remaining);
1666 	}
1667 
1668 	if (fphead == NULL) {
1669 		/* no addr parameters; restart OK */
1670 		return (1);
1671 	}
1672 
1673 	/*
1674 	 * got at least one; make sure the header's addr is
1675 	 * in the list
1676 	 */
1677 	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
1678 	if (fp == NULL) {
1679 		/* not included; add it now */
1680 		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
1681 		if (fp == NULL) {
1682 			goto done;
1683 		}
1684 		bzero(fp, sizeof (*fp));
1685 		fp->faddr = *hdraddr;
1686 		fp->next = fphead;
1687 		fphead = fp;
1688 	}
1689 
1690 	/*
1691 	 * Now, we can finally do the check: For each sctp instance
1692 	 * on the hash line for ports, compare its faddr set against
1693 	 * the new one. If the new one is a strict subset of any
1694 	 * existing sctp's faddrs, the restart is OK. However, if there
1695 	 * is an overlap, this could be an attack, so return failure.
1696 	 * If all sctp's faddrs are disjoint, this is a legitimate new
1697 	 * association.
1698 	 */
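	/*
	 * Illustrative example (added for clarity): if an existing
	 * association on this hash line has peer addresses {A, B, C} and
	 * the new INIT lists {A, B}, that is a subset and the restart is
	 * allowed.  If the new INIT lists {A, D}, that is an overlap and
	 * an ABORT naming D is sent back instead.
	 */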
1699 	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
1700 	mutex_enter(&tf->tf_lock);
1701 
1702 	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
1703 		if (ports != sctp->sctp_ports) {
1704 			continue;
1705 		}
1706 		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
1707 		if (compres <= SCTP_ADDR_SUBSET) {
1708 			retval = 1;
1709 			mutex_exit(&tf->tf_lock);
1710 			goto done;
1711 		}
1712 		if (compres == SCTP_ADDR_OVERLAP) {
1713 			dprint(1,
1714 			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
1715 			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
1716 			/*
1717 			 * While we still hold the lock, we need to
1718 			 * figure out which addresses have been
1719 			 * added so we can include them in the abort
1720 			 * we will send back. Since these faddrs will
1721 			 * never be used, we overload the rto field
1722 			 * here, setting it to 0 if the address was
1723 			 * not added, 1 if it was added.
1724 			 */
1725 			for (fp = fphead; fp; fp = fp->next) {
1726 				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
1727 					fp->rto = 0;
1728 				} else {
1729 					fp->rto = 1;
1730 					nadded++;
1731 				}
1732 			}
1733 			mutex_exit(&tf->tf_lock);
1734 			goto done;
1735 		}
1736 	}
1737 	mutex_exit(&tf->tf_lock);
1738 
1739 	/* All faddrs are disjoint; legit new association */
1740 	retval = 1;
1741 
1742 done:
1743 	/* If there are attempted adds, send back an abort listing the addrs */
1744 	if (nadded > 0) {
1745 		void *dtail;
1746 		size_t dlen;
1747 
1748 		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
1749 		if (dtail == NULL) {
1750 			goto cleanup;
1751 		}
1752 
1753 		ph = dtail;
1754 		dlen = 0;
1755 		for (fp = fphead; fp; fp = fp->next) {
1756 			if (fp->rto == 0) {
1757 				continue;
1758 			}
1759 			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1760 				ipaddr_t addr4;
1761 
1762 				ph->sph_type = htons(PARM_ADDR4);
1763 				ph->sph_len = htons(PARM_ADDR4_LEN);
1764 				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
1765 				ph++;
1766 				bcopy(&addr4, ph, sizeof (addr4));
1767 				ph = (sctp_parm_hdr_t *)
1768 				    ((char *)ph + sizeof (addr4));
1769 				dlen += PARM_ADDR4_LEN;
1770 			} else {
1771 				ph->sph_type = htons(PARM_ADDR6);
1772 				ph->sph_len = htons(PARM_ADDR6_LEN);
1773 				ph++;
1774 				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
1775 				ph = (sctp_parm_hdr_t *)
1776 				    ((char *)ph + sizeof (fp->faddr));
1777 				dlen += PARM_ADDR6_LEN;
1778 			}
1779 		}
1780 
1781 		/* Send off the abort */
1782 		sctp_send_abort(sctp, sctp_init2vtag(ich),
1783 		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE);
1784 
1785 		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
1786 	}
1787 
1788 cleanup:
1789 	/* Clean up */
1790 	if (fphead) {
1791 		sctp_faddr_t *fpn;
1792 		for (fp = fphead; fp; fp = fpn) {
1793 			fpn = fp->next;
1794 			kmem_cache_free(sctp_kmem_faddr_cache, fp);
1795 		}
1796 	}
1797 
1798 	return (retval);
1799 }
1800 
1801 /*
1802  * Reset any state related to transmitted chunks.
1803  */
1804 void
1805 sctp_congest_reset(sctp_t *sctp)
1806 {
1807 	sctp_faddr_t	*fp;
1808 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1809 	mblk_t		*mp;
1810 
1811 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
1812 		fp->ssthresh = sctps->sctps_initial_mtu;
1813 		SET_CWND(fp, fp->sfa_pmss, sctps->sctps_slow_start_initial);
1814 		fp->suna = 0;
1815 		fp->pba = 0;
1816 	}
1817 	/*
1818 	 * Clean up the transmit list as well since we have reset accounting
1819 	 * on all the fps. Send event upstream, if required.
1820 	 */
1821 	while ((mp = sctp->sctp_xmit_head) != NULL) {
1822 		sctp->sctp_xmit_head = mp->b_next;
1823 		mp->b_next = NULL;
1824 		if (sctp->sctp_xmit_head != NULL)
1825 			sctp->sctp_xmit_head->b_prev = NULL;
1826 		sctp_sendfail_event(sctp, mp, 0, B_TRUE);
1827 	}
1828 	sctp->sctp_xmit_head = NULL;
1829 	sctp->sctp_xmit_tail = NULL;
1830 	sctp->sctp_xmit_unacked = NULL;
1831 
1832 	sctp->sctp_unacked = 0;
1833 	/*
1834 	 * Clean up any control messages as well.  This list contains any
1835 	 * pending ASCONF request that we have queued/sent.
1836 	 * If we do get an ACK we will just drop it.  However, given that
1837 	 * we are restarting, chances are we aren't going to get any.
1838 	 */
1839 	if (sctp->sctp_cxmit_list != NULL)
1840 		sctp_asconf_free_cxmit(sctp, NULL);
1841 	sctp->sctp_cxmit_list = NULL;
1842 	sctp->sctp_cchunk_pend = 0;
1843 
1844 	sctp->sctp_rexmitting = B_FALSE;
1845 	sctp->sctp_rxt_nxttsn = 0;
1846 	sctp->sctp_rxt_maxtsn = 0;
1847 
1848 	sctp->sctp_zero_win_probe = B_FALSE;
1849 }
1850 
1851 static void
1852 sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
1853     mblk_t *timer_mp)
1854 {
1855 	sctp_stack_t	*sctps = sctp->sctp_sctps;
1856 
1857 	bcopy(addr, &fp->faddr, sizeof (*addr));
1858 	if (IN6_IS_ADDR_V4MAPPED(addr)) {
1859 		fp->isv4 = 1;
1860 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
1861 		fp->sfa_pmss =
1862 		    (sctps->sctps_initial_mtu - sctp->sctp_hdr_len) &
1863 		    ~(SCTP_ALIGN - 1);
1864 	} else {
1865 		fp->isv4 = 0;
1866 		fp->sfa_pmss =
1867 		    (sctps->sctps_initial_mtu - sctp->sctp_hdr6_len) &
1868 		    ~(SCTP_ALIGN - 1);
1869 	}
1870 	fp->cwnd = sctps->sctps_slow_start_initial * fp->sfa_pmss;
1871 	fp->rto = MIN(sctp->sctp_rto_initial, sctp->sctp_init_rto_max);
1872 	fp->srtt = -1;
1873 	fp->rtt_updates = 0;
1874 	fp->strikes = 0;
1875 	fp->max_retr = sctp->sctp_pp_max_rxt;
1876 	/* Mark it as not confirmed. */
1877 	fp->state = SCTP_FADDRS_UNCONFIRMED;
1878 	fp->hb_interval = sctp->sctp_hb_interval;
1879 	fp->ssthresh = sctps->sctps_initial_ssthresh;
1880 	fp->suna = 0;
1881 	fp->pba = 0;
1882 	fp->acked = 0;
1883 	fp->lastactive = lbolt64;
1884 	fp->timer_mp = timer_mp;
1885 	fp->hb_pending = B_FALSE;
1886 	fp->hb_enabled = B_TRUE;
1887 	fp->df = 1;
1888 	fp->pmtu_discovered = 0;
1889 	fp->next = NULL;
1890 	fp->ire = NULL;
1891 	fp->T3expire = 0;
1892 	(void) random_get_pseudo_bytes((uint8_t *)&fp->hb_secret,
1893 	    sizeof (fp->hb_secret));
1894 	fp->hb_expiry = lbolt64;
1895 	fp->rxt_unacked = 0;
1896 
1897 	sctp_get_ire(sctp, fp);
1898 }
1899 
1900 /*ARGSUSED*/
1901 static int
1902 faddr_constructor(void *buf, void *arg, int flags)
1903 {
1904 	sctp_faddr_t *fp = buf;
1905 
1906 	fp->timer_mp = NULL;
1907 	fp->timer_running = 0;
1908 
1909 	fp->rc_timer_mp = NULL;
1910 	fp->rc_timer_running = 0;
1911 
1912 	return (0);
1913 }
1914 
1915 /*ARGSUSED*/
1916 static void
1917 faddr_destructor(void *buf, void *arg)
1918 {
1919 	sctp_faddr_t *fp = buf;
1920 
1921 	ASSERT(fp->timer_mp == NULL);
1922 	ASSERT(fp->timer_running == 0);
1923 
1924 	ASSERT(fp->rc_timer_mp == NULL);
1925 	ASSERT(fp->rc_timer_running == 0);
1926 }
1927 
1928 void
1929 sctp_faddr_init(void)
1930 {
1931 	sctp_kmem_faddr_cache = kmem_cache_create("sctp_faddr_cache",
1932 	    sizeof (sctp_faddr_t), 0, faddr_constructor, faddr_destructor,
1933 	    NULL, NULL, NULL, 0);
1934 }
1935 
1936 void
1937 sctp_faddr_fini(void)
1938 {
1939 	kmem_cache_destroy(sctp_kmem_faddr_cache);
1940 }
1941