xref: /freebsd/sys/netinet/sctputil.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #ifdef SCTP_STAT_LOGGING
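/*
 * With SCTP_STAT_LOGGING compiled in, events are recorded in sctp_clog[],
 * a fixed-size circular buffer indexed through global_sctp_cwnd_log_at;
 * global_sctp_cwnd_log_rolled records that the index has wrapped at least
 * once (see sctp_fill_stat_log() below).
 */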
57 int global_sctp_cwnd_log_at = 0;
58 int global_sctp_cwnd_log_rolled = 0;
59 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
60 
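/*
 * Pack the current wall clock into 32 bits for a log entry: the seconds
 * value (modulo 0xfff) occupies the upper 12 bits and the microseconds
 * the lower 20 bits.
 */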
61 static uint32_t
62 sctp_get_time_of_event(void)
63 {
64 	struct timeval now;
65 	uint32_t timeval;
66 
67 	SCTP_GETPTIME_TIMEVAL(&now);
68 	timeval = (now.tv_sec % 0x00000fff);
69 	timeval <<= 20;
70 	timeval |= now.tv_usec & 0xfffff;
71 	return (timeval);
72 }
73 
74 
75 void
76 sctp_clr_stat_log(void)
77 {
78 	global_sctp_cwnd_log_at = 0;
79 	global_sctp_cwnd_log_rolled = 0;
80 }
81 
82 
83 void
84 sctp_sblog(struct sockbuf *sb,
85     struct sctp_tcb *stcb, int from, int incr)
86 {
87 	int sctp_cwnd_log_at;
88 
89 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
90 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
91 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
92 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
93 	sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
94 	sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
95 	if (stcb)
96 		sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
97 	else
98 		sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
99 	sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
100 }
101 
102 void
103 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
104 {
105 	int sctp_cwnd_log_at;
106 
107 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
108 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
109 	sctp_clog[sctp_cwnd_log_at].from = 0;
110 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
111 	sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
112 	sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
113 	if (stcb) {
114 		sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
115 		sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
116 	} else {
117 		sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
118 		sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
119 	}
120 	sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
121 }
122 
123 
124 void
125 rto_logging(struct sctp_nets *net, int from)
126 {
127 	int sctp_cwnd_log_at;
128 
129 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
130 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
131 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
132 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
133 	sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
134 	sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
135 }
136 
137 void
138 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
139 {
140 	int sctp_cwnd_log_at;
141 
142 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
143 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
144 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
145 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
146 	sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb;
147 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
148 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
149 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
150 	sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
151 	sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream;
152 }
153 
154 void
155 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
156 {
157 	int sctp_cwnd_log_at;
158 
159 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
160 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
161 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
162 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
163 	sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
164 	sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
165 	sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
166 	sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
167 	sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	int sctp_cwnd_log_at;
175 
176 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
177 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
178 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
179 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
180 	sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
181 	sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
182 	sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
183 	sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
184 	sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	int sctp_cwnd_log_at;
191 
192 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
193 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
194 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
195 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
196 	sctp_clog[sctp_cwnd_log_at].x.map.base = map;
197 	sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
198 	sctp_clog[sctp_cwnd_log_at].x.map.high = high;
199 }
200 
201 void
202 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
203     int from)
204 {
205 	int sctp_cwnd_log_at;
206 
207 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
208 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
209 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
210 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
211 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
214 }
215 
216 
217 void
218 sctp_log_mb(struct mbuf *m, int from)
219 {
220 	int sctp_cwnd_log_at;
221 
222 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
223 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
224 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
225 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
226 	sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
227 	sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
235 		sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
236 	}
237 }
238 
239 
240 void
241 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
242     int from)
243 {
244 	int sctp_cwnd_log_at;
245 
246 	if (control == NULL) {
247 		SCTP_PRINTF("Gak log of NULL?\n");
248 		return;
249 	}
250 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
251 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
252 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
253 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
254 	sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb;
255 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
263 		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
264 	}
265 }
266 
267 void
268 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
269 {
270 	int sctp_cwnd_log_at;
271 
272 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
273 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
274 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
275 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
276 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
277 	if (stcb->asoc.send_queue_cnt > 255)
278 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
279 	else
280 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
281 	if (stcb->asoc.stream_queue_cnt > 255)
282 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
283 	else
284 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
285 
286 	if (net) {
287 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
288 		sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
289 		sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
290 		sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
291 		sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
292 	}
293 	if (SCTP_CWNDLOG_PRESEND == from) {
294 		sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
295 	}
296 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
297 }
298 
299 void
300 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
301 {
302 	int sctp_cwnd_log_at;
303 
304 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
305 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
306 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
307 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
308 	if (inp) {
309 		sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;
310 
311 	} else {
312 		sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL;
313 	}
314 	sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
315 	if (stcb) {
316 		sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
317 	} else {
318 		sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
319 	}
320 	if (inp) {
321 		sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
322 		sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
323 	} else {
324 		sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
325 		sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
326 	}
327 	sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
328 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
329 		sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
330 		sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
331 		sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
332 	} else {
333 		sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
334 		sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
335 		sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
336 	}
337 }
338 
339 void
340 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
341 {
342 	int sctp_cwnd_log_at;
343 
344 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
345 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
346 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
347 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
348 	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
349 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
350 	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
351 	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
352 	if (stcb->asoc.send_queue_cnt > 255)
353 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
354 	else
355 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
356 	if (stcb->asoc.stream_queue_cnt > 255)
357 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
358 	else
359 		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
360 }
361 
362 void
363 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
364 {
365 	int sctp_cwnd_log_at;
366 
367 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
368 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
369 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
370 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
371 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
372 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
373 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
374 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
375 }
376 
377 void
378 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
379 {
380 	int sctp_cwnd_log_at;
381 
382 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
383 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
384 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
385 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
386 	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
387 	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
388 	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
389 	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
390 }
391 
392 void
393 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
394 {
395 	int sctp_cwnd_log_at;
396 
397 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
398 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
399 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
400 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
401 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
402 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
403 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
404 	sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
405 }
406 
407 void
408 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
409 {
410 	int sctp_cwnd_log_at;
411 
412 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
413 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
414 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
415 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
416 	sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
417 	sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
418 	sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
419 	sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
420 }
421 
422 void
423 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
424 {
425 	int sctp_cwnd_log_at;
426 
427 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
428 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
429 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
430 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
431 	sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
432 	sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
433 	sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
434 	sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
435 	sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;
436 
437 	if (stcb->asoc.stream_queue_cnt < 0xff)
438 		sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
439 	else
440 		sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;
441 
442 	if (stcb->asoc.chunks_on_out_queue < 0xff)
443 		sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
444 	else
445 		sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;
446 
447 	sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
448 	/* set in the deferred mode stuff */
449 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
450 		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
451 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
452 		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
453 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
454 		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
455 	/* what about the sb */
456 	if (stcb->sctp_socket) {
457 		struct socket *so = stcb->sctp_socket;
458 
459 		sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
460 	} else {
461 		sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
462 	}
463 }
464 
465 void
466 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
467 {
468 	int sctp_cwnd_log_at;
469 
470 	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
471 	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
472 	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
473 	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
474 	sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
475 	sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
476 	sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
477 	sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
478 	sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
479 	sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
480 	sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
481 }
482 
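/*
 * Copy log entries out to a user-supplied sctp_cwnd_log_req.  The
 * requested start_at/end_at window is clipped to what is actually in the
 * log (accounting for wrap-around), and *optsize is set to the size of
 * the request header plus the copied entries.
 */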
483 int
484 sctp_fill_stat_log(void *optval, size_t *optsize)
485 {
486 	int sctp_cwnd_log_at;
487 	struct sctp_cwnd_log_req *req;
488 	size_t size_limit;
489 	int num, i, at, cnt_out = 0;
490 
491 	if (*optsize < sizeof(struct sctp_cwnd_log_req)) {
492 		return (EINVAL);
493 	}
494 	size_limit = (*optsize - sizeof(struct sctp_cwnd_log_req));
495 	if (size_limit < sizeof(struct sctp_cwnd_log)) {
496 		return (EINVAL);
497 	}
498 	sctp_cwnd_log_at = global_sctp_cwnd_log_at;
499 	req = (struct sctp_cwnd_log_req *)optval;
500 	num = size_limit / sizeof(struct sctp_cwnd_log);
501 	if (global_sctp_cwnd_log_rolled) {
502 		req->num_in_log = SCTP_STAT_LOG_SIZE;
503 	} else {
504 		req->num_in_log = sctp_cwnd_log_at;
505 		/*
506 		 * if the log has not rolled, we don't let you have old
507 		 * data.
508 		 */
509 		if (req->end_at > sctp_cwnd_log_at) {
510 			req->end_at = sctp_cwnd_log_at;
511 		}
512 	}
513 	if ((num < SCTP_STAT_LOG_SIZE) &&
514 	    ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
515 		/* we can't return all of it */
516 		if (((req->start_at == 0) && (req->end_at == 0)) ||
517 		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
518 		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
519 			/* No user request, or the request is out of range. */
520 			req->num_ret = num;
521 			req->end_at = sctp_cwnd_log_at - 1;
522 			if ((sctp_cwnd_log_at - num) < 0) {
523 				int cc;
524 
525 				cc = num - sctp_cwnd_log_at;
526 				req->start_at = SCTP_STAT_LOG_SIZE - cc;
527 			} else {
528 				req->start_at = sctp_cwnd_log_at - num;
529 			}
530 		} else {
531 			/* a user request */
532 			int cc;
533 
534 			if (req->start_at > req->end_at) {
535 				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
536 				    (req->end_at + 1);
537 			} else {
538 
539 				cc = (req->end_at - req->start_at) + 1;
540 			}
541 			if (cc < num) {
542 				num = cc;
543 			}
544 			req->num_ret = num;
545 		}
546 	} else {
547 		/* We can return all of it */
548 		req->start_at = 0;
549 		req->end_at = sctp_cwnd_log_at - 1;
550 		req->num_ret = sctp_cwnd_log_at;
551 	}
552 #ifdef INVARIANTS
553 	if (req->num_ret > num) {
554 		panic("Bad statlog get?");
555 	}
556 #endif
557 	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
558 		req->log[i] = sctp_clog[at];
559 		cnt_out++;
560 		at++;
561 		if (at >= SCTP_STAT_LOG_SIZE)
562 			at = 0;
563 	}
564 	*optsize = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
565 	return (0);
566 }
567 
568 #endif
569 
570 #ifdef SCTP_AUDITING_ENABLED
571 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
572 static int sctp_audit_indx = 0;
573 
574 static
575 void
576 sctp_print_audit_report(void)
577 {
578 	int i;
579 	int cnt;
580 
581 	cnt = 0;
582 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
583 		if ((sctp_audit_data[i][0] == 0xe0) &&
584 		    (sctp_audit_data[i][1] == 0x01)) {
585 			cnt = 0;
586 			SCTP_PRINTF("\n");
587 		} else if (sctp_audit_data[i][0] == 0xf0) {
588 			cnt = 0;
589 			SCTP_PRINTF("\n");
590 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
591 		    (sctp_audit_data[i][1] == 0x01)) {
592 			SCTP_PRINTF("\n");
593 			cnt = 0;
594 		}
595 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
596 		    (uint32_t) sctp_audit_data[i][1]);
597 		cnt++;
598 		if ((cnt % 14) == 0)
599 			SCTP_PRINTF("\n");
600 	}
601 	for (i = 0; i < sctp_audit_indx; i++) {
602 		if ((sctp_audit_data[i][0] == 0xe0) &&
603 		    (sctp_audit_data[i][1] == 0x01)) {
604 			cnt = 0;
605 			SCTP_PRINTF("\n");
606 		} else if (sctp_audit_data[i][0] == 0xf0) {
607 			cnt = 0;
608 			SCTP_PRINTF("\n");
609 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
610 		    (sctp_audit_data[i][1] == 0x01)) {
611 			SCTP_PRINTF("\n");
612 			cnt = 0;
613 		}
614 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
615 		    (uint32_t) sctp_audit_data[i][1]);
616 		cnt++;
617 		if ((cnt % 14) == 0)
618 			SCTP_PRINTF("\n");
619 	}
620 	SCTP_PRINTF("\n");
621 }
622 
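/*
 * Sanity-check the association's bookkeeping (retransmit count, total
 * flight size and count, per-net flight sizes) against the sent queue,
 * correcting any mismatch and dumping the audit trail when one is found.
 */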
623 void
624 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
625     struct sctp_nets *net)
626 {
627 	int resend_cnt, tot_out, rep, tot_book_cnt;
628 	struct sctp_nets *lnet;
629 	struct sctp_tmit_chunk *chk;
630 
631 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
632 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
633 	sctp_audit_indx++;
634 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
635 		sctp_audit_indx = 0;
636 	}
637 	if (inp == NULL) {
638 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
639 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
640 		sctp_audit_indx++;
641 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
642 			sctp_audit_indx = 0;
643 		}
644 		return;
645 	}
646 	if (stcb == NULL) {
647 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
648 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
649 		sctp_audit_indx++;
650 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
651 			sctp_audit_indx = 0;
652 		}
653 		return;
654 	}
655 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
656 	sctp_audit_data[sctp_audit_indx][1] =
657 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
658 	sctp_audit_indx++;
659 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 		sctp_audit_indx = 0;
661 	}
662 	rep = 0;
663 	tot_book_cnt = 0;
664 	resend_cnt = tot_out = 0;
665 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
666 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
667 			resend_cnt++;
668 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
669 			tot_out += chk->book_size;
670 			tot_book_cnt++;
671 		}
672 	}
673 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
674 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
675 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
676 		sctp_audit_indx++;
677 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
678 			sctp_audit_indx = 0;
679 		}
680 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
681 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
682 		rep = 1;
683 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
684 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
685 		sctp_audit_data[sctp_audit_indx][1] =
686 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
687 		sctp_audit_indx++;
688 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
689 			sctp_audit_indx = 0;
690 		}
691 	}
692 	if (tot_out != stcb->asoc.total_flight) {
693 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
694 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 		rep = 1;
700 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
701 		    (int)stcb->asoc.total_flight);
702 		stcb->asoc.total_flight = tot_out;
703 	}
704 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
705 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
706 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
707 		sctp_audit_indx++;
708 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
709 			sctp_audit_indx = 0;
710 		}
711 		rep = 1;
712 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
713 
714 		stcb->asoc.total_flight_count = tot_book_cnt;
715 	}
716 	tot_out = 0;
717 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
718 		tot_out += lnet->flight_size;
719 	}
720 	if (tot_out != stcb->asoc.total_flight) {
721 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
722 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
723 		sctp_audit_indx++;
724 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
725 			sctp_audit_indx = 0;
726 		}
727 		rep = 1;
728 		SCTP_PRINTF("real flight:%d net total was %d\n",
729 		    stcb->asoc.total_flight, tot_out);
730 		/* now corrective action */
731 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
732 
733 			tot_out = 0;
734 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
735 				if ((chk->whoTo == lnet) &&
736 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
737 					tot_out += chk->book_size;
738 				}
739 			}
740 			if (lnet->flight_size != tot_out) {
741 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
742 				    (void *)lnet, lnet->flight_size,
743 				    tot_out);
744 				lnet->flight_size = tot_out;
745 			}
746 		}
747 	}
748 	if (rep) {
749 		sctp_print_audit_report();
750 	}
751 }
752 
753 void
754 sctp_audit_log(uint8_t ev, uint8_t fd)
755 {
756 
757 	sctp_audit_data[sctp_audit_indx][0] = ev;
758 	sctp_audit_data[sctp_audit_indx][1] = fd;
759 	sctp_audit_indx++;
760 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
761 		sctp_audit_indx = 0;
762 	}
763 }
764 
765 #endif
766 
767 /*
768  * A list of sizes based on typical MTUs, used only if the next hop's size
769  * is not returned.  NUMBER_OF_MTU_SIZES must match the number of entries.
770  */
771 static int sctp_mtu_sizes[] = {
772 	68,
773 	296,
774 	508,
775 	512,
776 	544,
777 	576,
778 	1006,
779 	1492,
780 	1500,
781 	1536,
782 	2002,
783 	2048,
784 	4352,
785 	4464,
786 	8166,
787 	17914,
788 	32000,
789 	65535
790 };
791 
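/*
 * Stop the association-level timers (heartbeat, delayed-ack, stream-reset,
 * ASCONF, autoclose, delayed event) and, per destination, the FR and
 * path-MTU timers once a shutdown is underway.
 */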
792 void
793 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
794 {
795 	struct sctp_association *asoc;
796 	struct sctp_nets *net;
797 
798 	asoc = &stcb->asoc;
799 
800 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
801 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
802 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
803 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
804 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
805 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
806 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
807 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
808 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
809 	}
810 }
811 
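/*
 * Given a datagram size that failed to send, step down to the largest
 * entry in sctp_mtu_sizes[] that does not exceed it (for example, a
 * failed size of 1300 maps back to 1006).
 */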
812 int
813 find_next_best_mtu(int totsz)
814 {
815 	int i, prefer;
816 
817 	/*
818 	 * if we are in here we must find the next best fit based on the
819 	 * size of the datagram that failed to be sent.
820 	 */
821 	prefer = 0;
822 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
823 		if (totsz < sctp_mtu_sizes[i]) {
824 			prefer = i - 1;
825 			if (prefer < 0)
826 				prefer = 0;
827 			break;
828 		}
829 	}
830 	return (sctp_mtu_sizes[prefer]);
831 }
832 
833 void
834 sctp_fill_random_store(struct sctp_pcb *m)
835 {
836 	/*
837 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
838 	 * our counter.  The result becomes our new random store, which we
839 	 * then set up to give out.  Note that we do no locking to protect
840 	 * this; that is fine, since competing callers simply mix more noise
841 	 * into the random store, which is what we want.  There is a danger
842 	 * that two callers will draw the same random numbers, but that's ok
843 	 * too since those values are random as well :->
844 	 */
845 	m->store_at = 0;
846 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
847 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
848 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
849 	m->random_counter++;
850 }
851 
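/*
 * Hand out 32-bit values from the random store four bytes at a time,
 * regenerating the store (above) whenever it has been consumed.  The
 * initial_sequence_debug path exists only to produce predictable,
 * incrementing values for debugging.
 */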
852 uint32_t
853 sctp_select_initial_TSN(struct sctp_pcb *m)
854 {
855 	/*
856 	 * A true implementation should use a random selection process to get
857 	 * the initial TSN, using RFC 1750 as a good
858 	 * guideline
859 	 */
860 	uint32_t x, *xp;
861 	uint8_t *p;
862 
863 	if (m->initial_sequence_debug != 0) {
864 		uint32_t ret;
865 
866 		ret = m->initial_sequence_debug;
867 		m->initial_sequence_debug++;
868 		return (ret);
869 	}
870 	if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
871 		/* Refill the random store */
872 		sctp_fill_random_store(m);
873 	}
874 	p = &m->random_store[(int)m->store_at];
875 	xp = (uint32_t *) p;
876 	x = *xp;
877 	m->store_at += sizeof(uint32_t);
878 	return (x);
879 }
880 
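/*
 * Select a verification tag: keep drawing values from the random store
 * until we get one that is non-zero and that sctp_is_vtag_good()
 * accepts for the current time.
 */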
881 uint32_t
882 sctp_select_a_tag(struct sctp_inpcb *m)
883 {
884 	u_long x, not_done;
885 	struct timeval now;
886 
887 	(void)SCTP_GETTIME_TIMEVAL(&now);
888 	not_done = 1;
889 	while (not_done) {
890 		x = sctp_select_initial_TSN(&m->sctp_ep);
891 		if (x == 0) {
892 			/* we never use 0 */
893 			continue;
894 		}
895 		if (sctp_is_vtag_good(m, x, &now)) {
896 			not_done = 0;
897 		}
898 	}
899 	return (x);
900 }
901 
902 
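/*
 * Initialize a freshly allocated (bzero'd) association from the
 * endpoint's defaults: verification tags, initial TSN, timer and RTO
 * parameters, stream and mapping arrays, and the various queues.
 * Returns ENOMEM if a usable tag or an allocation cannot be obtained.
 */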
903 int
904 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
905     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
906 {
907 	/*
908 	 * Anything set to zero is taken care of by the allocation routine's
909 	 * bzero
910 	 */
911 
912 	/*
913 	 * Up front, select what scoping to apply on the addresses I tell my peer.
914 	 * Not sure what to do with these right now, we will need to come up
915 	 * with a way to set them. We may need to pass them through from the
916 	 * caller in the sctp_aloc_assoc() function.
917 	 */
918 	int i;
919 
920 	/* init all variables to a known value. */
921 	asoc->state = SCTP_STATE_INUSE;
922 	asoc->max_burst = m->sctp_ep.max_burst;
923 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
924 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
925 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
926 #ifdef INET
927 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
928 #else
929 	asoc->default_tos = 0;
930 #endif
931 
932 #ifdef INET6
933 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
934 #else
935 	asoc->default_flowlabel = 0;
936 #endif
937 	if (override_tag) {
938 		struct timeval now;
939 
940 		(void)SCTP_GETTIME_TIMEVAL(&now);
941 		if (sctp_is_vtag_good(m, override_tag, &now)) {
942 			asoc->my_vtag = override_tag;
943 		} else {
944 			return (ENOMEM);
945 		}
946 
947 	} else {
948 		asoc->my_vtag = sctp_select_a_tag(m);
949 	}
950 	/* Get the nonce tags */
951 	asoc->my_vtag_nonce = sctp_select_a_tag(m);
952 	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
953 	asoc->vrf_id = vrf_id;
954 	/* Save the table id as well from the inp */
955 	asoc->table_id = m->def_table_id;
956 
957 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
958 		asoc->hb_is_disabled = 1;
959 	else
960 		asoc->hb_is_disabled = 0;
961 
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->assoc_id = asoc->my_vtag;
965 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
966 	    sctp_select_initial_TSN(&m->sctp_ep);
967 	/* we are optimistic here */
968 	asoc->peer_supports_pktdrop = 1;
969 
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_data_came_from = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
977 	asoc->last_acked_seq = asoc->init_seq_number - 1;
978 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
979 	asoc->asconf_seq_in = asoc->last_acked_seq;
980 
981 	/* here we are different, we hold the next one we expect */
982 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
983 
984 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
985 	asoc->initial_rto = m->sctp_ep.initial_rto;
986 
987 	asoc->max_init_times = m->sctp_ep.max_init_times;
988 	asoc->max_send_times = m->sctp_ep.max_send_times;
989 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 	/* ECN Nonce initialization */
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->ecn_nonce_allowed = 0;
997 	asoc->receiver_nonce_sum = 1;
998 	asoc->nonce_sum_expect_base = 1;
999 	asoc->nonce_sum_check = 1;
1000 	asoc->nonce_resync_tsn = 0;
1001 	asoc->nonce_wait_for_ecne = 0;
1002 	asoc->nonce_wait_tsn = 0;
1003 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1004 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1005 	asoc->pr_sctp_cnt = 0;
1006 	asoc->total_output_queue_size = 0;
1007 
1008 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1009 		struct in6pcb *inp6;
1010 
1011 		/* It's a V6 socket */
1012 		inp6 = (struct in6pcb *)m;
1013 		asoc->ipv6_addr_legal = 1;
1014 		/* Now look at the binding flag to see if V4 will be legal */
1015 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1016 			asoc->ipv4_addr_legal = 1;
1017 		} else {
1018 			/* V4 addresses are NOT legal on the association */
1019 			asoc->ipv4_addr_legal = 0;
1020 		}
1021 	} else {
1022 		/* It's a V4 socket, not V6 */
1023 		asoc->ipv4_addr_legal = 1;
1024 		asoc->ipv6_addr_legal = 0;
1025 	}
1026 
1027 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1028 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1029 
1030 	asoc->smallest_mtu = m->sctp_frag_point;
1031 #ifdef SCTP_PRINT_FOR_B_AND_M
1032 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1033 	    asoc->smallest_mtu);
1034 #endif
1035 	asoc->minrto = m->sctp_ep.sctp_minrto;
1036 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1037 
1038 	asoc->locked_on_sending = NULL;
1039 	asoc->stream_locked_on = 0;
1040 	asoc->ecn_echo_cnt_onq = 0;
1041 	asoc->stream_locked = 0;
1042 
1043 	asoc->send_sack = 1;
1044 
1045 	LIST_INIT(&asoc->sctp_restricted_addrs);
1046 
1047 	TAILQ_INIT(&asoc->nets);
1048 	TAILQ_INIT(&asoc->pending_reply_queue);
1049 	asoc->last_asconf_ack_sent = NULL;
1050 	/* Setup to fill the hb random cache at first HB */
1051 	asoc->hb_random_idx = 4;
1052 
1053 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1054 
1055 	/*
1056 	 * Now the stream parameters, here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    m->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    "StreamsOut");
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		return (ENOMEM);
1067 	}
1068 	for (i = 0; i < asoc->streamoutcnt; i++) {
1069 		/*
1070 		 * The inbound side must be set to 0xffff.  Also NOTE that when
1071 		 * we get the INIT-ACK back (as the INIT sender) we MUST reduce
1072 		 * the count (streamoutcnt), but first check whether we sent on
1073 		 * any of the upper streams that were dropped (if some were).
1074 		 * Those that were dropped must be reported to the upper layer
1075 		 * as failed to send.
1076 		 */
1077 		asoc->strmout[i].next_sequence_sent = 0x0;
1078 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1079 		asoc->strmout[i].stream_no = i;
1080 		asoc->strmout[i].last_msg_incomplete = 0;
1081 		asoc->strmout[i].next_spoke.tqe_next = 0;
1082 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1083 	}
1084 	/* Now the mapping array */
1085 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1086 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1087 	    "MappingArray");
1088 	if (asoc->mapping_array == NULL) {
1089 		SCTP_FREE(asoc->strmout);
1090 		return (ENOMEM);
1091 	}
1092 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1093 	/* Now the init of the other outqueues */
1094 	TAILQ_INIT(&asoc->free_chunks);
1095 	TAILQ_INIT(&asoc->out_wheel);
1096 	TAILQ_INIT(&asoc->control_send_queue);
1097 	TAILQ_INIT(&asoc->send_queue);
1098 	TAILQ_INIT(&asoc->sent_queue);
1099 	TAILQ_INIT(&asoc->reasmqueue);
1100 	TAILQ_INIT(&asoc->resetHead);
1101 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1102 	TAILQ_INIT(&asoc->asconf_queue);
1103 	/* authentication fields */
1104 	asoc->authinfo.random = NULL;
1105 	asoc->authinfo.assoc_key = NULL;
1106 	asoc->authinfo.assoc_keyid = 0;
1107 	asoc->authinfo.recv_key = NULL;
1108 	asoc->authinfo.recv_keyid = 0;
1109 	LIST_INIT(&asoc->shared_keys);
1110 	asoc->marked_retrans = 0;
1111 	asoc->timoinit = 0;
1112 	asoc->timodata = 0;
1113 	asoc->timosack = 0;
1114 	asoc->timoshutdown = 0;
1115 	asoc->timoheartbeat = 0;
1116 	asoc->timocookie = 0;
1117 	asoc->timoshutdownack = 0;
1118 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1119 	asoc->discontinuity_time = asoc->start_time;
1120 	return (0);
1121 }
1122 
1123 int
1124 sctp_expand_mapping_array(struct sctp_association *asoc)
1125 {
1126 	/* mapping array needs to grow */
1127 	uint8_t *new_array;
1128 	uint16_t new_size;
1129 
1130 	new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
1131 	SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray");
1132 	if (new_array == NULL) {
1133 		/* can't get more, forget it */
1134 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1135 		    new_size);
1136 		return (-1);
1137 	}
1138 	memset(new_array, 0, new_size);
1139 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1140 	SCTP_FREE(asoc->mapping_array);
1141 	asoc->mapping_array = new_array;
1142 	asoc->mapping_array_size = new_size;
1143 	return (0);
1144 }
1145 
1146 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
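/*
 * Walk every endpoint, and every association on it, that matches the
 * iterator's pcb flag/feature and association state filters, invoking
 * the caller-supplied per-endpoint and per-association functions.  The
 * locks are briefly released every SCTP_ITERATOR_MAX_AT_ONCE
 * associations so that other threads can make progress.
 */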
1147 static void
1148 sctp_iterator_work(struct sctp_iterator *it)
1149 {
1150 	int iteration_count = 0;
1151 	int inp_skip = 0;
1152 
1153 	SCTP_ITERATOR_LOCK();
1154 	if (it->inp) {
1155 		SCTP_INP_DECR_REF(it->inp);
1156 	}
1157 	if (it->inp == NULL) {
1158 		/* iterator is complete */
1159 done_with_iterator:
1160 		SCTP_ITERATOR_UNLOCK();
1161 		if (it->function_atend != NULL) {
1162 			(*it->function_atend) (it->pointer, it->val);
1163 		}
1164 		SCTP_FREE(it);
1165 		return;
1166 	}
1167 select_a_new_ep:
1168 	SCTP_INP_WLOCK(it->inp);
1169 	while (((it->pcb_flags) &&
1170 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1171 	    ((it->pcb_features) &&
1172 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1173 		/* endpoint flags or features don't match, so keep looking */
1174 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1175 			SCTP_INP_WUNLOCK(it->inp);
1176 			goto done_with_iterator;
1177 		}
1178 		SCTP_INP_WUNLOCK(it->inp);
1179 		it->inp = LIST_NEXT(it->inp, sctp_list);
1180 		if (it->inp == NULL) {
1181 			goto done_with_iterator;
1182 		}
1183 		SCTP_INP_WLOCK(it->inp);
1184 	}
1185 
1186 	/* mark the current iterator on the endpoint */
1187 	it->inp->inp_starting_point_for_iterator = it;
1188 	SCTP_INP_WUNLOCK(it->inp);
1189 	SCTP_INP_RLOCK(it->inp);
1190 
1191 	/* now go through each assoc which is in the desired state */
1192 	if (it->done_current_ep == 0) {
1193 		if (it->function_inp != NULL)
1194 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1195 		it->done_current_ep = 1;
1196 	}
1197 	if (it->stcb == NULL) {
1198 		/* run the per instance function */
1199 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1200 	}
1201 	if ((inp_skip) || it->stcb == NULL) {
1202 		if (it->function_inp_end != NULL) {
1203 			inp_skip = (*it->function_inp_end) (it->inp,
1204 			    it->pointer,
1205 			    it->val);
1206 		}
1207 		SCTP_INP_RUNLOCK(it->inp);
1208 		goto no_stcb;
1209 	}
1210 	if ((it->stcb) &&
1211 	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
1212 		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1213 	}
1214 	while (it->stcb) {
1215 		SCTP_TCB_LOCK(it->stcb);
1216 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1217 			/* not in the right state... keep looking */
1218 			SCTP_TCB_UNLOCK(it->stcb);
1219 			goto next_assoc;
1220 		}
1221 		/* mark the current iterator on the assoc */
1222 		it->stcb->asoc.stcb_starting_point_for_iterator = it;
1223 		/* see if we have limited out the iterator loop */
1224 		iteration_count++;
1225 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1226 			/* Pause to let others grab the lock */
1227 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1228 			SCTP_TCB_UNLOCK(it->stcb);
1229 			SCTP_INP_RUNLOCK(it->inp);
1230 			SCTP_ITERATOR_UNLOCK();
1231 			SCTP_ITERATOR_LOCK();
1232 			SCTP_INP_RLOCK(it->inp);
1233 			SCTP_TCB_LOCK(it->stcb);
1234 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1235 			iteration_count = 0;
1236 		}
1237 		/* run function on this one */
1238 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1239 
1240 		/*
1241 		 * we lie here, it really needs to have its own type but
1242 		 * first I must verify that this won't affect things :-0
1243 		 */
1244 		if (it->no_chunk_output == 0)
1245 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
1246 
1247 		SCTP_TCB_UNLOCK(it->stcb);
1248 next_assoc:
1249 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1250 		if (it->stcb == NULL) {
1251 			/* Run last function */
1252 			if (it->function_inp_end != NULL) {
1253 				inp_skip = (*it->function_inp_end) (it->inp,
1254 				    it->pointer,
1255 				    it->val);
1256 			}
1257 		}
1258 	}
1259 	SCTP_INP_RUNLOCK(it->inp);
1260 no_stcb:
1261 	/* done with all assocs on this endpoint, move on to next endpoint */
1262 	it->done_current_ep = 0;
1263 	SCTP_INP_WLOCK(it->inp);
1264 	it->inp->inp_starting_point_for_iterator = NULL;
1265 	SCTP_INP_WUNLOCK(it->inp);
1266 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1267 		it->inp = NULL;
1268 	} else {
1269 		SCTP_INP_INFO_RLOCK();
1270 		it->inp = LIST_NEXT(it->inp, sctp_list);
1271 		SCTP_INP_INFO_RUNLOCK();
1272 	}
1273 	if (it->inp == NULL) {
1274 		goto done_with_iterator;
1275 	}
1276 	goto select_a_new_ep;
1277 }
1278 
1279 void
1280 sctp_iterator_worker(void)
1281 {
1282 	struct sctp_iterator *it = NULL;
1283 
1284 	/* This function is called with the WQ lock in place */
1285 
1286 	sctppcbinfo.iterator_running = 1;
1287 again:
1288 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1289 	while (it) {
1290 		/* now lets work on this one */
1291 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1292 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1293 		sctp_iterator_work(it);
1294 		SCTP_IPI_ITERATOR_WQ_LOCK();
1295 		/* sa_ignore FREED_MEMORY */
1296 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1297 	}
1298 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1299 		goto again;
1300 	}
1301 	sctppcbinfo.iterator_running = 0;
1302 	return;
1303 }
1304 
1305 #endif
1306 
1307 
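/*
 * Move every queued address event off sctppcbinfo.addr_wq and hand the
 * batch to an ASCONF iterator run across all bound-all endpoints; if the
 * iterator structure cannot be allocated, re-arm the ADDR_WQ timer and
 * try again later.
 */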
1308 static void
1309 sctp_handle_addr_wq(void)
1310 {
1311 	/* deal with the ADDR wq from the rtsock calls */
1312 	struct sctp_laddr *wi;
1313 	struct sctp_asconf_iterator *asc;
1314 
1315 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1316 	    sizeof(struct sctp_asconf_iterator), "SCTP_ASCONF_ITERATOR");
1317 	if (asc == NULL) {
1318 		/* Try later, no memory */
1319 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1320 		    (struct sctp_inpcb *)NULL,
1321 		    (struct sctp_tcb *)NULL,
1322 		    (struct sctp_nets *)NULL);
1323 		return;
1324 	}
1325 	LIST_INIT(&asc->list_of_work);
1326 	asc->cnt = 0;
1327 	SCTP_IPI_ITERATOR_WQ_LOCK();
1328 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1329 	while (wi != NULL) {
1330 		LIST_REMOVE(wi, sctp_nxt_addr);
1331 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1332 		asc->cnt++;
1333 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1334 	}
1335 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1336 	if (asc->cnt == 0) {
1337 		SCTP_FREE(asc);
1338 	} else {
1339 		(void)sctp_initiate_iterator(sctp_iterator_ep,
1340 		    sctp_iterator_stcb,
1341 		    NULL,	/* No ep end for boundall */
1342 		    SCTP_PCB_FLAGS_BOUNDALL,
1343 		    SCTP_PCB_ANY_FEATURES,
1344 		    SCTP_ASOC_ANY_STATE, (void *)asc, 0,
1345 		    sctp_iterator_end, NULL, 0);
1346 	}
1347 
1348 }
1349 
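/*
 * Common callout handler for all SCTP timer types: validate the timer,
 * take the needed inp/stcb references and locks, dispatch on tmr->type,
 * and drop the references again on the way out.  Handlers that free the
 * stcb jump past the unlock so the freed TCB lock is never touched.
 */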
1350 void
1351 sctp_timeout_handler(void *t)
1352 {
1353 	struct sctp_inpcb *inp;
1354 	struct sctp_tcb *stcb;
1355 	struct sctp_nets *net;
1356 	struct sctp_timer *tmr;
1357 	int did_output;
1358 	struct sctp_iterator *it = NULL;
1359 
1360 
1361 	tmr = (struct sctp_timer *)t;
1362 	inp = (struct sctp_inpcb *)tmr->ep;
1363 	stcb = (struct sctp_tcb *)tmr->tcb;
1364 	net = (struct sctp_nets *)tmr->net;
1365 	did_output = 1;
1366 
1367 #ifdef SCTP_AUDITING_ENABLED
1368 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1369 	sctp_auditing(3, inp, stcb, net);
1370 #endif
1371 
1372 	/* sanity checks... */
1373 	if (tmr->self != (void *)tmr) {
1374 		/*
1375 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1376 		 * tmr);
1377 		 */
1378 		return;
1379 	}
1380 	tmr->stopped_from = 0xa001;
1381 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1382 		/*
1383 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1384 		 * tmr->type);
1385 		 */
1386 		return;
1387 	}
1388 	tmr->stopped_from = 0xa002;
1389 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1390 		return;
1391 	}
1392 	/* if this is an iterator timeout, get the struct and clear inp */
1393 	tmr->stopped_from = 0xa003;
1394 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1395 		it = (struct sctp_iterator *)inp;
1396 		inp = NULL;
1397 	}
1398 	if (inp) {
1399 		SCTP_INP_INCR_REF(inp);
1400 		if ((inp->sctp_socket == 0) &&
1401 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1402 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1403 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1404 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1405 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1406 		    ) {
1407 			SCTP_INP_DECR_REF(inp);
1408 			return;
1409 		}
1410 	}
1411 	tmr->stopped_from = 0xa004;
1412 	if (stcb) {
1413 		atomic_add_int(&stcb->asoc.refcnt, 1);
1414 		if (stcb->asoc.state == 0) {
1415 			atomic_add_int(&stcb->asoc.refcnt, -1);
1416 			if (inp) {
1417 				SCTP_INP_DECR_REF(inp);
1418 			}
1419 			return;
1420 		}
1421 	}
1422 	tmr->stopped_from = 0xa005;
1423 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1424 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1425 		if (inp) {
1426 			SCTP_INP_DECR_REF(inp);
1427 		}
1428 		return;
1429 	}
1430 	tmr->stopped_from = 0xa006;
1431 
1432 	if (stcb) {
1433 		SCTP_TCB_LOCK(stcb);
1434 		atomic_add_int(&stcb->asoc.refcnt, -1);
1435 	}
1436 	/* record in stopped_from which timeout occurred */
1437 	tmr->stopped_from = tmr->type;
1438 
1439 	/* mark as being serviced now */
1440 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1441 		/*
1442 		 * Callout has been rescheduled.
1443 		 */
1444 		goto get_out;
1445 	}
1446 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1447 		/*
1448 		 * Not active, so no action.
1449 		 */
1450 		goto get_out;
1451 	}
1452 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1453 
1454 	/* call the handler for the appropriate timer type */
1455 	switch (tmr->type) {
1456 	case SCTP_TIMER_TYPE_ADDR_WQ:
1457 		sctp_handle_addr_wq();
1458 		break;
1459 	case SCTP_TIMER_TYPE_ITERATOR:
1460 		SCTP_STAT_INCR(sctps_timoiterator);
1461 		sctp_iterator_timer(it);
1462 		break;
1463 	case SCTP_TIMER_TYPE_SEND:
1464 		if ((stcb == NULL) || (inp == NULL)) {
1465 			break;
1466 		}
1467 		SCTP_STAT_INCR(sctps_timodata);
1468 		stcb->asoc.timodata++;
1469 		stcb->asoc.num_send_timers_up--;
1470 		if (stcb->asoc.num_send_timers_up < 0) {
1471 			stcb->asoc.num_send_timers_up = 0;
1472 		}
1473 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1474 			/* no need to unlock on tcb its gone */
1475 
1476 			goto out_decr;
1477 		}
1478 #ifdef SCTP_AUDITING_ENABLED
1479 		sctp_auditing(4, inp, stcb, net);
1480 #endif
1481 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1482 		if ((stcb->asoc.num_send_timers_up == 0) &&
1483 		    (stcb->asoc.sent_queue_cnt > 0)
1484 		    ) {
1485 			struct sctp_tmit_chunk *chk;
1486 
1487 			/*
1488 			 * Safeguard: if there are some chunks on the sent
1489 			 * queue but no send timer is running, something is
1490 			 * wrong... so we start a timer on the first chunk
1491 			 * on the sent queue, on whatever net it is sent to.
1492 			 */
1493 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1494 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1495 			    chk->whoTo);
1496 		}
1497 		break;
1498 	case SCTP_TIMER_TYPE_INIT:
1499 		if ((stcb == NULL) || (inp == NULL)) {
1500 			break;
1501 		}
1502 		SCTP_STAT_INCR(sctps_timoinit);
1503 		stcb->asoc.timoinit++;
1504 		if (sctp_t1init_timer(inp, stcb, net)) {
1505 			/* no need to unlock on tcb its gone */
1506 			goto out_decr;
1507 		}
1508 		/* We do output but not here */
1509 		did_output = 0;
1510 		break;
1511 	case SCTP_TIMER_TYPE_RECV:
1512 		if ((stcb == NULL) || (inp == NULL)) {
1513 			break;
1514 		}
1515 		SCTP_STAT_INCR(sctps_timosack);
1516 		stcb->asoc.timosack++;
1517 		sctp_send_sack(stcb);
1518 #ifdef SCTP_AUDITING_ENABLED
1519 		sctp_auditing(4, inp, stcb, net);
1520 #endif
1521 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR);
1522 		break;
1523 	case SCTP_TIMER_TYPE_SHUTDOWN:
1524 		if ((stcb == NULL) || (inp == NULL)) {
1525 			break;
1526 		}
1527 		if (sctp_shutdown_timer(inp, stcb, net)) {
1528 			/* no need to unlock on tcb its gone */
1529 			goto out_decr;
1530 		}
1531 		SCTP_STAT_INCR(sctps_timoshutdown);
1532 		stcb->asoc.timoshutdown++;
1533 #ifdef SCTP_AUDITING_ENABLED
1534 		sctp_auditing(4, inp, stcb, net);
1535 #endif
1536 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR);
1537 		break;
1538 	case SCTP_TIMER_TYPE_HEARTBEAT:
1539 		{
1540 			struct sctp_nets *net;
1541 			int cnt_of_unconf = 0;
1542 
1543 			if ((stcb == NULL) || (inp == NULL)) {
1544 				break;
1545 			}
1546 			SCTP_STAT_INCR(sctps_timoheartbeat);
1547 			stcb->asoc.timoheartbeat++;
1548 			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1549 				if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1550 				    (net->dest_state & SCTP_ADDR_REACHABLE)) {
1551 					cnt_of_unconf++;
1552 				}
1553 			}
1554 			if (cnt_of_unconf == 0) {
1555 				if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) {
1556 					/* no need to unlock on tcb its gone */
1557 					goto out_decr;
1558 				}
1559 			}
1560 #ifdef SCTP_AUDITING_ENABLED
1561 			sctp_auditing(4, inp, stcb, net);
1562 #endif
1563 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
1564 			    stcb, net);
1565 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR);
1566 		}
1567 		break;
1568 	case SCTP_TIMER_TYPE_COOKIE:
1569 		if ((stcb == NULL) || (inp == NULL)) {
1570 			break;
1571 		}
1572 		if (sctp_cookie_timer(inp, stcb, net)) {
1573 			/* no need to unlock on tcb its gone */
1574 			goto out_decr;
1575 		}
1576 		SCTP_STAT_INCR(sctps_timocookie);
1577 		stcb->asoc.timocookie++;
1578 #ifdef SCTP_AUDITING_ENABLED
1579 		sctp_auditing(4, inp, stcb, net);
1580 #endif
1581 		/*
1582 		 * We consider T3 and Cookie timer pretty much the same with
1583 		 * respect to where from in chunk_output.
1584 		 */
1585 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1586 		break;
1587 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1588 		{
1589 			struct timeval tv;
1590 			int i, secret;
1591 
1592 			if (inp == NULL) {
1593 				break;
1594 			}
1595 			SCTP_STAT_INCR(sctps_timosecret);
1596 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1597 			SCTP_INP_WLOCK(inp);
1598 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1599 			inp->sctp_ep.last_secret_number =
1600 			    inp->sctp_ep.current_secret_number;
1601 			inp->sctp_ep.current_secret_number++;
1602 			if (inp->sctp_ep.current_secret_number >=
1603 			    SCTP_HOW_MANY_SECRETS) {
1604 				inp->sctp_ep.current_secret_number = 0;
1605 			}
1606 			secret = (int)inp->sctp_ep.current_secret_number;
1607 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1608 				inp->sctp_ep.secret_key[secret][i] =
1609 				    sctp_select_initial_TSN(&inp->sctp_ep);
1610 			}
1611 			SCTP_INP_WUNLOCK(inp);
1612 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1613 		}
1614 		did_output = 0;
1615 		break;
1616 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1617 		if ((stcb == NULL) || (inp == NULL)) {
1618 			break;
1619 		}
1620 		SCTP_STAT_INCR(sctps_timopathmtu);
1621 		sctp_pathmtu_timer(inp, stcb, net);
1622 		did_output = 0;
1623 		break;
1624 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1625 		if ((stcb == NULL) || (inp == NULL)) {
1626 			break;
1627 		}
1628 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1629 			/* no need to unlock on tcb its gone */
1630 			goto out_decr;
1631 		}
1632 		SCTP_STAT_INCR(sctps_timoshutdownack);
1633 		stcb->asoc.timoshutdownack++;
1634 #ifdef SCTP_AUDITING_ENABLED
1635 		sctp_auditing(4, inp, stcb, net);
1636 #endif
1637 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR);
1638 		break;
1639 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1640 		if ((stcb == NULL) || (inp == NULL)) {
1641 			break;
1642 		}
1643 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1644 		sctp_abort_an_association(inp, stcb,
1645 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1646 		/* no need to unlock on tcb its gone */
1647 		goto out_decr;
1648 
1649 	case SCTP_TIMER_TYPE_STRRESET:
1650 		if ((stcb == NULL) || (inp == NULL)) {
1651 			break;
1652 		}
1653 		if (sctp_strreset_timer(inp, stcb, net)) {
1654 			/* no need to unlock on tcb its gone */
1655 			goto out_decr;
1656 		}
1657 		SCTP_STAT_INCR(sctps_timostrmrst);
1658 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR);
1659 		break;
1660 	case SCTP_TIMER_TYPE_EARLYFR:
1661 		/* Need to do FR of things for net */
1662 		if ((stcb == NULL) || (inp == NULL)) {
1663 			break;
1664 		}
1665 		SCTP_STAT_INCR(sctps_timoearlyfr);
1666 		sctp_early_fr_timer(inp, stcb, net);
1667 		break;
1668 	case SCTP_TIMER_TYPE_ASCONF:
1669 		if ((stcb == NULL) || (inp == NULL)) {
1670 			break;
1671 		}
1672 		if (sctp_asconf_timer(inp, stcb, net)) {
1673 			/* no need to unlock the tcb, it is gone */
1674 			goto out_decr;
1675 		}
1676 		SCTP_STAT_INCR(sctps_timoasconf);
1677 #ifdef SCTP_AUDITING_ENABLED
1678 		sctp_auditing(4, inp, stcb, net);
1679 #endif
1680 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR);
1681 		break;
1682 
1683 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1684 		if ((stcb == NULL) || (inp == NULL)) {
1685 			break;
1686 		}
1687 		SCTP_STAT_INCR(sctps_timoautoclose);
1688 		sctp_autoclose_timer(inp, stcb, net);
1689 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
1690 		did_output = 0;
1691 		break;
1692 	case SCTP_TIMER_TYPE_ASOCKILL:
1693 		if ((stcb == NULL) || (inp == NULL)) {
1694 			break;
1695 		}
1696 		SCTP_STAT_INCR(sctps_timoassockill);
1697 		/* Can we free it yet? */
1698 		SCTP_INP_DECR_REF(inp);
1699 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1700 		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1701 		/*
1702 		 * free_assoc always unlocks (or destroys) the tcb, so prevent
1703 		 * a duplicate unlock or an unlock of a freed mtx :-0
1704 		 */
1705 		stcb = NULL;
1706 		goto out_no_decr;
1707 	case SCTP_TIMER_TYPE_INPKILL:
1708 		SCTP_STAT_INCR(sctps_timoinpkill);
1709 		if (inp == NULL) {
1710 			break;
1711 		}
1712 		/*
1713 		 * special case, take away our increment since WE are the
1714 		 * killer
1715 		 */
1716 		SCTP_INP_DECR_REF(inp);
1717 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1718 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1719 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1720 		goto out_no_decr;
1721 	default:
1722 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1723 		    tmr->type);
1724 		break;
1725 	};
1726 #ifdef SCTP_AUDITING_ENABLED
1727 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1728 	if (inp)
1729 		sctp_auditing(5, inp, stcb, net);
1730 #endif
1731 	if ((did_output) && stcb) {
1732 		/*
1733 		 * Now we need to clean up the control chunk chain if an
1734 		 * ECNE is on it. It must be marked as UNSENT again so the
1735 		 * next call will continue to send it until we get a CWR
1736 		 * that removes it. It is, however, unlikely that we will
1737 		 * actually find an ECN echo on the chain.
1738 		 */
1739 		sctp_fix_ecn_echo(&stcb->asoc);
1740 	}
1741 get_out:
1742 	if (stcb) {
1743 		SCTP_TCB_UNLOCK(stcb);
1744 	}
1745 out_decr:
1746 	if (inp) {
1747 		SCTP_INP_DECR_REF(inp);
1748 	}
1749 out_no_decr:
1750 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1751 	    tmr->type);
1754 }
1755 
1756 void
1757 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1758     struct sctp_nets *net)
1759 {
1760 	int to_ticks;
1761 	struct sctp_timer *tmr;
1762 
1763 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1764 		return;
1765 
1766 	to_ticks = 0;
1767 
1768 	tmr = NULL;
1769 	if (stcb) {
1770 		SCTP_TCB_LOCK_ASSERT(stcb);
1771 	}
1772 	switch (t_type) {
1773 	case SCTP_TIMER_TYPE_ADDR_WQ:
1774 		/* Only 1 tick away :-) */
1775 		tmr = &sctppcbinfo.addr_wq_timer;
1776 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1777 		break;
1778 	case SCTP_TIMER_TYPE_ITERATOR:
1779 		{
1780 			struct sctp_iterator *it;
1781 
1782 			it = (struct sctp_iterator *)inp;
1783 			tmr = &it->tmr;
1784 			to_ticks = SCTP_ITERATOR_TICKS;
1785 		}
1786 		break;
1787 	case SCTP_TIMER_TYPE_SEND:
1788 		/* Here we use the RTO timer */
1789 		{
1790 			int rto_val;
1791 
1792 			if ((stcb == NULL) || (net == NULL)) {
1793 				return;
1794 			}
1795 			tmr = &net->rxt_timer;
1796 			if (net->RTO == 0) {
1797 				rto_val = stcb->asoc.initial_rto;
1798 			} else {
1799 				rto_val = net->RTO;
1800 			}
1801 			to_ticks = MSEC_TO_TICKS(rto_val);
1802 		}
1803 		break;
1804 	case SCTP_TIMER_TYPE_INIT:
1805 		/*
1806 		 * Here we use the INIT timer default, usually about 1
1807 		 * minute.
1808 		 */
1809 		if ((stcb == NULL) || (net == NULL)) {
1810 			return;
1811 		}
1812 		tmr = &net->rxt_timer;
1813 		if (net->RTO == 0) {
1814 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1815 		} else {
1816 			to_ticks = MSEC_TO_TICKS(net->RTO);
1817 		}
1818 		break;
1819 	case SCTP_TIMER_TYPE_RECV:
1820 		/*
1821 		 * Here we use the Delayed-Ack timer value from the inp,
1822 		 * usually about 200 ms.
1823 		 */
1824 		if (stcb == NULL) {
1825 			return;
1826 		}
1827 		tmr = &stcb->asoc.dack_timer;
1828 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1829 		break;
1830 	case SCTP_TIMER_TYPE_SHUTDOWN:
1831 		/* Here we use the RTO of the destination. */
1832 		if ((stcb == NULL) || (net == NULL)) {
1833 			return;
1834 		}
1835 		if (net->RTO == 0) {
1836 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1837 		} else {
1838 			to_ticks = MSEC_TO_TICKS(net->RTO);
1839 		}
1840 		tmr = &net->rxt_timer;
1841 		break;
1842 	case SCTP_TIMER_TYPE_HEARTBEAT:
1843 		/*
1844 		 * The net is used here so that we can add in the RTO, even
1845 		 * though we use a different timer. We also add the HB delay
1846 		 * PLUS a random jitter.
1847 		 */
1848 		if ((inp == NULL) || (stcb == NULL)) {
1849 			return;
1850 		} else {
1851 			uint32_t rndval;
1852 			uint8_t this_random;
1853 			int cnt_of_unconf = 0;
1854 			struct sctp_nets *lnet;
1855 
1856 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1857 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1858 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1859 					cnt_of_unconf++;
1860 				}
1861 			}
1862 			if (cnt_of_unconf) {
1863 				net = lnet = NULL;
1864 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1865 			}
1866 			if (stcb->asoc.hb_random_idx > 3) {
1867 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1868 				memcpy(stcb->asoc.hb_random_values, &rndval,
1869 				    sizeof(stcb->asoc.hb_random_values));
1870 				stcb->asoc.hb_random_idx = 0;
1871 			}
1872 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1873 			stcb->asoc.hb_random_idx++;
1874 			stcb->asoc.hb_ect_randombit = 0;
1875 			/*
1876 			 * this_random will be 0 - 255 ms; RTO is in ms.
1877 			 */
1878 			if ((stcb->asoc.hb_is_disabled) &&
1879 			    (cnt_of_unconf == 0)) {
1880 				return;
1881 			}
1882 			if (net) {
1883 				struct sctp_nets *lnet;
1884 				int delay;
1885 
1886 				delay = stcb->asoc.heart_beat_delay;
1887 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1888 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1889 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1890 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1891 						delay = 0;
1892 					}
1893 				}
1894 				if (net->RTO == 0) {
1895 					/* Never been checked */
1896 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
1897 				} else {
1898 					/* use the destination's RTO (in ms) */
1899 					to_ticks = delay + net->RTO + this_random;
1900 				}
1901 			} else {
1902 				if (cnt_of_unconf) {
1903 					to_ticks = this_random + stcb->asoc.initial_rto;
1904 				} else {
1905 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1906 				}
1907 			}
1908 			/*
1909 			 * Now we must convert the to_ticks that are now in
1910 			 * ms to ticks.
1911 			 */
1912 			to_ticks = MSEC_TO_TICKS(to_ticks);
1913 			tmr = &stcb->asoc.hb_timer;
1914 		}
1915 		break;
1916 	case SCTP_TIMER_TYPE_COOKIE:
1917 		/*
1918 		 * Here we can use the RTO of the network since one RTT was
1919 		 * complete. If a retransmission happened then we will be
1920 		 * using the initial RTO value.
1921 		 */
1922 		if ((stcb == NULL) || (net == NULL)) {
1923 			return;
1924 		}
1925 		if (net->RTO == 0) {
1926 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1927 		} else {
1928 			to_ticks = MSEC_TO_TICKS(net->RTO);
1929 		}
1930 		tmr = &net->rxt_timer;
1931 		break;
1932 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1933 		/*
1934 		 * Nothing needed but the endpoint here; usually about 60
1935 		 * minutes.
1936 		 */
1937 		if (inp == NULL) {
1938 			return;
1939 		}
1940 		tmr = &inp->sctp_ep.signature_change;
1941 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1942 		break;
1943 	case SCTP_TIMER_TYPE_ASOCKILL:
1944 		if (stcb == NULL) {
1945 			return;
1946 		}
1947 		tmr = &stcb->asoc.strreset_timer;
1948 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1949 		break;
1950 	case SCTP_TIMER_TYPE_INPKILL:
1951 		/*
1952 		 * The inp is set up to die. We re-use the signature_change
1953 		 * timer since that has stopped and we are in the GONE
1954 		 * state.
1955 		 */
1956 		if (inp == NULL) {
1957 			return;
1958 		}
1959 		tmr = &inp->sctp_ep.signature_change;
1960 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
1961 		break;
1962 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1963 		/*
1964 		 * Here we use the value found in the EP for PMTU, usually
1965 		 * about 10 minutes.
1966 		 */
1967 		if ((stcb == NULL) || (inp == NULL)) {
1968 			return;
1969 		}
1970 		if (net == NULL) {
1971 			return;
1972 		}
1973 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1974 		tmr = &net->pmtu_timer;
1975 		break;
1976 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1977 		/* Here we use the RTO of the destination */
1978 		if ((stcb == NULL) || (net == NULL)) {
1979 			return;
1980 		}
1981 		if (net->RTO == 0) {
1982 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1983 		} else {
1984 			to_ticks = MSEC_TO_TICKS(net->RTO);
1985 		}
1986 		tmr = &net->rxt_timer;
1987 		break;
1988 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1989 		/*
1990 		 * Here we use the endpoint's shutdown guard timer, usually
1991 		 * about 3 minutes.
1992 		 */
1993 		if ((inp == NULL) || (stcb == NULL)) {
1994 			return;
1995 		}
1996 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1997 		tmr = &stcb->asoc.shut_guard_timer;
1998 		break;
1999 	case SCTP_TIMER_TYPE_STRRESET:
2000 		/*
2001 		 * Here the timer lives in the asoc, but its value comes
2002 		 * from the RTO of the destination.
2003 		 */
2004 		if ((stcb == NULL) || (net == NULL)) {
2005 			return;
2006 		}
2007 		if (net->RTO == 0) {
2008 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 		} else {
2010 			to_ticks = MSEC_TO_TICKS(net->RTO);
2011 		}
2012 		tmr = &stcb->asoc.strreset_timer;
2013 		break;
2014 
2015 	case SCTP_TIMER_TYPE_EARLYFR:
2016 		{
2017 			unsigned int msec;
2018 
2019 			if ((stcb == NULL) || (net == NULL)) {
2020 				return;
2021 			}
2022 			if (net->flight_size > net->cwnd) {
2023 				/* no need to start */
2024 				return;
2025 			}
2026 			SCTP_STAT_INCR(sctps_earlyfrstart);
2027 			if (net->lastsa == 0) {
2028 				/* Hmm no rtt estimate yet? */
2029 				msec = stcb->asoc.initial_rto >> 2;
2030 			} else {
2031 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2032 			}
2033 			if (msec < sctp_early_fr_msec) {
2034 				msec = sctp_early_fr_msec;
2035 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2036 					msec = SCTP_MINFR_MSEC_FLOOR;
2037 				}
2038 			}
2039 			to_ticks = MSEC_TO_TICKS(msec);
2040 			tmr = &net->fr_timer;
2041 		}
2042 		break;
2043 	case SCTP_TIMER_TYPE_ASCONF:
2044 		/*
2045 		 * Here the timer lives in the asoc, but its value comes
2046 		 * from the RTO of the destination.
2047 		 */
2048 		if ((stcb == NULL) || (net == NULL)) {
2049 			return;
2050 		}
2051 		if (net->RTO == 0) {
2052 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2053 		} else {
2054 			to_ticks = MSEC_TO_TICKS(net->RTO);
2055 		}
2056 		tmr = &stcb->asoc.asconf_timer;
2057 		break;
2058 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2059 		if (stcb == NULL) {
2060 			return;
2061 		}
2062 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2063 			/*
2064 			 * Really an error since stcb is NOT set to
2065 			 * autoclose
2066 			 */
2067 			return;
2068 		}
2069 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2070 		tmr = &stcb->asoc.autoclose_timer;
2071 		break;
2072 	default:
2073 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2074 		    __FUNCTION__, t_type);
2075 		return;
2076 		break;
2077 	};
2078 	if ((to_ticks <= 0) || (tmr == NULL)) {
2079 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2080 		    __FUNCTION__, t_type, to_ticks, tmr);
2081 		return;
2082 	}
2083 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2084 		/*
2085 		 * We do NOT allow a timer to be started while it is already
2086 		 * running; if it is pending we leave the current one unchanged.
2087 		 */
2088 		return;
2089 	}
2090 	/* At this point we can proceed */
2091 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2092 		stcb->asoc.num_send_timers_up++;
2093 	}
2094 	tmr->stopped_from = 0;
2095 	tmr->type = t_type;
2096 	tmr->ep = (void *)inp;
2097 	tmr->tcb = (void *)stcb;
2098 	tmr->net = (void *)net;
2099 	tmr->self = (void *)tmr;
2100 	tmr->ticks = ticks;
2101 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2102 	return;
2103 }
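
The HEARTBEAT case above arrives at its interval by summing the configured HB delay, the destination's RTO (or the initial RTO when none has been measured yet) and one byte of cached randomness, with the delay dropped to zero while an unconfirmed-but-reachable address exists; only then is the sum converted with MSEC_TO_TICKS(). A minimal user-space sketch of that arithmetic, where hb_delay_ms, rto_ms and jitter are placeholder names standing in for stcb->asoc.heart_beat_delay, net->RTO/initial_rto and this_random:

#include <stdint.h>

static unsigned int
example_hb_interval_ms(unsigned int hb_delay_ms, unsigned int rto_ms,
    uint8_t jitter)
{
	/* jitter is one byte of the cached random block, so 0..255 ms */
	return (hb_delay_ms + rto_ms + jitter);
}
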
2104 
2105 void
2106 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2107     struct sctp_nets *net, uint32_t from)
2108 {
2109 	struct sctp_timer *tmr;
2110 
2111 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2112 	    (inp == NULL))
2113 		return;
2114 
2115 	tmr = NULL;
2116 	if (stcb) {
2117 		SCTP_TCB_LOCK_ASSERT(stcb);
2118 	}
2119 	switch (t_type) {
2120 	case SCTP_TIMER_TYPE_ADDR_WQ:
2121 		tmr = &sctppcbinfo.addr_wq_timer;
2122 		break;
2123 	case SCTP_TIMER_TYPE_EARLYFR:
2124 		if ((stcb == NULL) || (net == NULL)) {
2125 			return;
2126 		}
2127 		tmr = &net->fr_timer;
2128 		SCTP_STAT_INCR(sctps_earlyfrstop);
2129 		break;
2130 	case SCTP_TIMER_TYPE_ITERATOR:
2131 		{
2132 			struct sctp_iterator *it;
2133 
2134 			it = (struct sctp_iterator *)inp;
2135 			tmr = &it->tmr;
2136 		}
2137 		break;
2138 	case SCTP_TIMER_TYPE_SEND:
2139 		if ((stcb == NULL) || (net == NULL)) {
2140 			return;
2141 		}
2142 		tmr = &net->rxt_timer;
2143 		break;
2144 	case SCTP_TIMER_TYPE_INIT:
2145 		if ((stcb == NULL) || (net == NULL)) {
2146 			return;
2147 		}
2148 		tmr = &net->rxt_timer;
2149 		break;
2150 	case SCTP_TIMER_TYPE_RECV:
2151 		if (stcb == NULL) {
2152 			return;
2153 		}
2154 		tmr = &stcb->asoc.dack_timer;
2155 		break;
2156 	case SCTP_TIMER_TYPE_SHUTDOWN:
2157 		if ((stcb == NULL) || (net == NULL)) {
2158 			return;
2159 		}
2160 		tmr = &net->rxt_timer;
2161 		break;
2162 	case SCTP_TIMER_TYPE_HEARTBEAT:
2163 		if (stcb == NULL) {
2164 			return;
2165 		}
2166 		tmr = &stcb->asoc.hb_timer;
2167 		break;
2168 	case SCTP_TIMER_TYPE_COOKIE:
2169 		if ((stcb == NULL) || (net == NULL)) {
2170 			return;
2171 		}
2172 		tmr = &net->rxt_timer;
2173 		break;
2174 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2175 		/* nothing needed but the endpoint here */
2176 		tmr = &inp->sctp_ep.signature_change;
2177 		/*
2178 		 * We re-use the newcookie timer for the INP kill timer. We
2179 		 * must ensure that we do not stop it by accident.
2180 		 */
2181 		break;
2182 	case SCTP_TIMER_TYPE_ASOCKILL:
2183 		/*
2184 		 * Stop the asoc kill timer.
2185 		 */
2186 		if (stcb == NULL) {
2187 			return;
2188 		}
2189 		tmr = &stcb->asoc.strreset_timer;
2190 		break;
2191 
2192 	case SCTP_TIMER_TYPE_INPKILL:
2193 		/*
2194 		 * The inp is set up to die. We re-use the signature_change
2195 		 * timer since that has stopped and we are in the GONE
2196 		 * state.
2197 		 */
2198 		tmr = &inp->sctp_ep.signature_change;
2199 		break;
2200 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2201 		if ((stcb == NULL) || (net == NULL)) {
2202 			return;
2203 		}
2204 		tmr = &net->pmtu_timer;
2205 		break;
2206 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2207 		if ((stcb == NULL) || (net == NULL)) {
2208 			return;
2209 		}
2210 		tmr = &net->rxt_timer;
2211 		break;
2212 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2213 		if (stcb == NULL) {
2214 			return;
2215 		}
2216 		tmr = &stcb->asoc.shut_guard_timer;
2217 		break;
2218 	case SCTP_TIMER_TYPE_STRRESET:
2219 		if (stcb == NULL) {
2220 			return;
2221 		}
2222 		tmr = &stcb->asoc.strreset_timer;
2223 		break;
2224 	case SCTP_TIMER_TYPE_ASCONF:
2225 		if (stcb == NULL) {
2226 			return;
2227 		}
2228 		tmr = &stcb->asoc.asconf_timer;
2229 		break;
2230 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2231 		if (stcb == NULL) {
2232 			return;
2233 		}
2234 		tmr = &stcb->asoc.autoclose_timer;
2235 		break;
2236 	default:
2237 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2238 		    __FUNCTION__, t_type);
2239 		break;
2240 	};
2241 	if (tmr == NULL) {
2242 		return;
2243 	}
2244 	if ((tmr->type != t_type) && tmr->type) {
2245 		/*
2246 		 * OK, we have a timer that is under joint use, e.g. the
2247 		 * cookie timer sharing storage with the SEND timer. We are
2248 		 * therefore NOT running the timer that the caller wants
2249 		 * stopped, so just return.
2250 		 */
2251 		return;
2252 	}
2253 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2254 		stcb->asoc.num_send_timers_up--;
2255 		if (stcb->asoc.num_send_timers_up < 0) {
2256 			stcb->asoc.num_send_timers_up = 0;
2257 		}
2258 	}
2259 	tmr->self = NULL;
2260 	tmr->stopped_from = from;
2261 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2262 	return;
2263 }
2264 
2265 #ifdef SCTP_USE_ADLER32
2266 static uint32_t
2267 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2268 {
2269 	uint32_t s1 = adler & 0xffff;
2270 	uint32_t s2 = (adler >> 16) & 0xffff;
2271 	int n;
2272 
2273 	for (n = 0; n < len; n++, buf++) {
2274 		/* s1 = (s1 + buf[n]) % BASE */
2275 		/* first we add */
2276 		s1 = (s1 + *buf);
2277 		/*
2278 		 * Now, if we need to, we do the mod by subtracting. It seems
2279 		 * a bit faster since we will only ever do one subtract at
2280 		 * MOST, because buf[n] is at most 255.
2281 		 */
2282 		if (s1 >= SCTP_ADLER32_BASE) {
2283 			s1 -= SCTP_ADLER32_BASE;
2284 		}
2285 		/* s2 = (s2 + s1) % BASE */
2286 		/* first we add */
2287 		s2 = (s2 + s1);
2288 		/*
2289 		 * Again, it is more efficient (it seems) to subtract, since
2290 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2291 		 * worst case. That is (2 * BASE) - 2, which still needs
2292 		 * only one subtract. On Intel it is much better to do it
2293 		 * this way and avoid the divide. Have not -pg'd on
2294 		 * sparc.
2295 		 */
2296 		if (s2 >= SCTP_ADLER32_BASE) {
2297 			s2 -= SCTP_ADLER32_BASE;
2298 		}
2299 	}
2300 	/* Return the adler32 of the bytes buf[0..len-1] */
2301 	return ((s2 << 16) + s1);
2302 }
2303 
2304 #endif
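
A stand-alone user-space version of the same mod-by-subtraction update, useful for sanity checking; ADLER32_BASE is assumed to carry the usual Adler-32 modulus 65521, which is what SCTP_ADLER32_BASE is expected to be. Starting from the standard seed of 1, the three bytes "abc" yield 0x024d0127, the well-known Adler-32 value for that string.

#include <stdint.h>
#include <stdio.h>

#define ADLER32_BASE 65521U	/* assumed value of SCTP_ADLER32_BASE */

static uint32_t
adler32_update(uint32_t adler, const uint8_t *buf, size_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	size_t n;

	for (n = 0; n < len; n++) {
		s1 += buf[n];
		if (s1 >= ADLER32_BASE)	/* one subtract replaces the modulo */
			s1 -= ADLER32_BASE;
		s2 += s1;
		if (s2 >= ADLER32_BASE)
			s2 -= ADLER32_BASE;
	}
	return ((s2 << 16) + s1);
}

int
main(void)
{
	/* prints 024d0127, the standard Adler-32 of "abc" */
	printf("%08x\n", (unsigned int)adler32_update(1,
	    (const uint8_t *)"abc", 3));
	return (0);
}
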
2305 
2306 
2307 uint32_t
2308 sctp_calculate_len(struct mbuf *m)
2309 {
2310 	uint32_t tlen = 0;
2311 	struct mbuf *at;
2312 
2313 	at = m;
2314 	while (at) {
2315 		tlen += SCTP_BUF_LEN(at);
2316 		at = SCTP_BUF_NEXT(at);
2317 	}
2318 	return (tlen);
2319 }
2320 
2321 #if defined(SCTP_WITH_NO_CSUM)
2322 
2323 uint32_t
2324 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2325 {
2326 	/*
2327 	 * Given an mbuf chain with a packet header offset by 'offset',
2328 	 * pointing at an sctphdr (with csum set to 0), go through the chain
2329 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2330 	 * currently Adler32 but will change to CRC32c soon. As a side bonus
2331 	 * it also calculates the total length of the mbuf chain. Note: if
2332 	 * offset is greater than the total mbuf length, checksum=1,
2333 	 * pktlen=0 is returned (i.e. no real error code).
2334 	 */
2335 	if (pktlen == NULL)
2336 		return (0);
2337 	*pktlen = sctp_calculate_len(m);
2338 	return (0);
2339 }
2340 
2341 #elif defined(SCTP_USE_INCHKSUM)
2342 
2343 #include <machine/in_cksum.h>
2344 
2345 uint32_t
2346 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2347 {
2348 	/*
2349 	 * Given an mbuf chain with a packet header offset by 'offset',
2350 	 * pointing at an sctphdr (with csum set to 0), go through the chain
2351 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2352 	 * currently Adler32 but will change to CRC32c soon. As a side bonus
2353 	 * it also calculates the total length of the mbuf chain. Note: if
2354 	 * offset is greater than the total mbuf length, checksum=1,
2355 	 * pktlen=0 is returned (i.e. no real error code).
2356 	 */
2357 	int32_t tlen = 0;
2358 	struct mbuf *at;
2359 	uint32_t the_sum, retsum;
2360 
2361 	at = m;
2362 	while (at) {
2363 		tlen += SCTP_BUF_LEN(at);
2364 		at = SCTP_BUF_NEXT(at);
2365 	}
2366 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2367 	if (pktlen != NULL)
2368 		*pktlen = (tlen - offset);
2369 	retsum = htons(the_sum);
2370 	return (the_sum);
2371 }
2372 
2373 #else
2374 
2375 uint32_t
2376 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2377 {
2378 	/*
2379 	 * Given an mbuf chain with a packet header offset by 'offset',
2380 	 * pointing at an sctphdr (with csum set to 0), go through the chain
2381 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2382 	 * currently Adler32 but will change to CRC32c soon. As a side bonus
2383 	 * it also calculates the total length of the mbuf chain. Note: if
2384 	 * offset is greater than the total mbuf length, checksum=1,
2385 	 * pktlen=0 is returned (i.e. no real error code).
2386 	 */
2387 	int32_t tlen = 0;
2388 
2389 #ifdef SCTP_USE_ADLER32
2390 	uint32_t base = 1L;
2391 
2392 #else
2393 	uint32_t base = 0xffffffff;
2394 
2395 #endif				/* SCTP_USE_ADLER32 */
2396 	struct mbuf *at;
2397 
2398 	at = m;
2399 	/* find the correct mbuf and offset into mbuf */
2400 	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
2401 		offset -= SCTP_BUF_LEN(at);	/* update the remaining
2402 						 * offset */
2403 		at = SCTP_BUF_NEXT(at);
2404 	}
2405 	while (at != NULL) {
2406 		if ((SCTP_BUF_LEN(at) - offset) > 0) {
2407 #ifdef SCTP_USE_ADLER32
2408 			base = update_adler32(base,
2409 			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
2410 			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
2411 #else
2412 			if ((SCTP_BUF_LEN(at) - offset) < 4) {
2413 				/* Use old method if less than 4 bytes */
2414 				base = old_update_crc32(base,
2415 				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
2416 				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
2417 			} else {
2418 				base = update_crc32(base,
2419 				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
2420 				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
2421 			}
2422 #endif				/* SCTP_USE_ADLER32 */
2423 			tlen += SCTP_BUF_LEN(at) - offset;
2424 			/* we only offset once into the first mbuf */
2425 		}
2426 		if (offset) {
2427 			if (offset < SCTP_BUF_LEN(at))
2428 				offset = 0;
2429 			else
2430 				offset -= SCTP_BUF_LEN(at);
2431 		}
2432 		at = SCTP_BUF_NEXT(at);
2433 	}
2434 	if (pktlen != NULL) {
2435 		*pktlen = tlen;
2436 	}
2437 #ifdef SCTP_USE_ADLER32
2438 	/* Adler32 */
2439 	base = htonl(base);
2440 #else
2441 	/* CRC-32c */
2442 	base = sctp_csum_finalize(base);
2443 #endif
2444 	return (base);
2445 }
2446 
2447 
2448 #endif
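
The offset handling in the variant above follows a common mbuf idiom: skip whole buffers until the offset lands inside one, fold that buffer from the offset onward, then treat every later buffer from its start. A user-space sketch of the same walk over a hypothetical linked-buffer type (example_buf is purely illustrative; it is not an mbuf), summing bytes instead of feeding a CRC:

#include <stdint.h>

struct example_buf {
	struct example_buf *next;	/* plays the role of SCTP_BUF_NEXT() */
	const uint8_t *data;		/* plays the role of SCTP_BUF_AT() */
	int len;			/* plays the role of SCTP_BUF_LEN() */
};

static uint32_t
example_sum_after_offset(const struct example_buf *b, uint32_t offset)
{
	uint32_t sum = 0;

	/* skip whole buffers until 'offset' lands inside one */
	while ((b != NULL) && (offset > (uint32_t)b->len)) {
		offset -= b->len;
		b = b->next;
	}
	while (b != NULL) {
		int i;

		for (i = (int)offset; i < b->len; i++)
			sum += b->data[i];
		offset = 0;	/* only the first buffer is entered mid-way */
		b = b->next;
	}
	return (sum);
}
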
2449 
2450 void
2451 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2452     struct sctp_association *asoc, uint32_t mtu)
2453 {
2454 	/*
2455 	 * Reset the P-MTU size on this association. This involves changing
2456 	 * the asoc MTU and marking ANY chunk whose size plus overhead is
2457 	 * larger than the mtu, so that the DF flag can be cleared on it.
2458 	 */
2459 	struct sctp_tmit_chunk *chk;
2460 	unsigned int eff_mtu, ovh;
2461 
2462 #ifdef SCTP_PRINT_FOR_B_AND_M
2463 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2464 	    inp, asoc, mtu);
2465 #endif
2466 	asoc->smallest_mtu = mtu;
2467 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2468 		ovh = SCTP_MIN_OVERHEAD;
2469 	} else {
2470 		ovh = SCTP_MIN_V4_OVERHEAD;
2471 	}
2472 	eff_mtu = mtu - ovh;
2473 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2474 
2475 		if (chk->send_size > eff_mtu) {
2476 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2477 		}
2478 	}
2479 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2480 		if (chk->send_size > eff_mtu) {
2481 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2482 		}
2483 	}
2484 }
2485 
2486 
2487 /*
2488  * Given an association and the starting time of the current RTT period,
2489  * return the RTO in msecs. net should point to the current network.
2490  */
2491 uint32_t
2492 sctp_calculate_rto(struct sctp_tcb *stcb,
2493     struct sctp_association *asoc,
2494     struct sctp_nets *net,
2495     struct timeval *old)
2496 {
2497 	/*
2498 	 * Given an association and the starting time of the current RTT
2499 	 * period (in *old), return the RTO in msecs.
2500 	 */
2501 	int calc_time = 0;
2502 	int o_calctime;
2503 	uint32_t new_rto = 0;
2504 	int first_measure = 0;
2505 	struct timeval now;
2506 
2507 	/************************/
2508 	/* 1. calculate new RTT */
2509 	/************************/
2510 	/* get the current time */
2511 	(void)SCTP_GETTIME_TIMEVAL(&now);
2512 	/* compute the RTT value */
2513 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2514 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2515 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2516 			calc_time += (((u_long)now.tv_usec -
2517 			    (u_long)old->tv_usec) / 1000);
2518 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2519 			/* Borrow 1,000ms from current calculation */
2520 			calc_time -= 1000;
2521 			/* Add in the slop over */
2522 			calc_time += ((int)now.tv_usec / 1000);
2523 			/* Add in the pre-second ms's */
2524 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2525 		}
2526 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2527 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2528 			calc_time = ((u_long)now.tv_usec -
2529 			    (u_long)old->tv_usec) / 1000;
2530 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2531 			/* impossible .. garbage in nothing out */
2532 			goto calc_rto;
2533 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2534 			/*
2535 			 * We have to have at least 1 usec :-D; this must be
2536 			 * the loopback.
2537 			 */
2538 			calc_time = 1;
2539 		} else {
2540 			/* impossible .. garbage in nothing out */
2541 			goto calc_rto;
2542 		}
2543 	} else {
2544 		/* Clock wrapped? */
2545 		goto calc_rto;
2546 	}
2547 	/***************************/
2548 	/* 2. update RTTVAR & SRTT */
2549 	/***************************/
2550 	o_calctime = calc_time;
2551 	/* this is Van Jacobson's integer version */
2552 	if (net->RTO) {
2553 		calc_time -= (net->lastsa >> 3);
2554 #ifdef SCTP_RTTVAR_LOGGING
2555 		rto_logging(net, SCTP_LOG_RTTVAR);
2556 #endif
2557 		net->prev_rtt = o_calctime;
2558 		net->lastsa += calc_time;
2559 		if (calc_time < 0) {
2560 			calc_time = -calc_time;
2561 		}
2562 		calc_time -= (net->lastsv >> 2);
2563 		net->lastsv += calc_time;
2564 		if (net->lastsv == 0) {
2565 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2566 		}
2567 	} else {
2568 		/* First RTO measurement */
2569 		net->lastsa = calc_time;
2570 		net->lastsv = calc_time >> 1;
2571 		first_measure = 1;
2572 		net->prev_rtt = o_calctime;
2573 #ifdef SCTP_RTTVAR_LOGGING
2574 		rto_logging(net, SCTP_LOG_INITIAL_RTT);
2575 #endif
2576 	}
2577 calc_rto:
2578 	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
2579 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2580 	    (stcb->asoc.sat_network_lockout == 0)) {
2581 		stcb->asoc.sat_network = 1;
2582 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2583 		stcb->asoc.sat_network = 0;
2584 		stcb->asoc.sat_network_lockout = 1;
2585 	}
2586 	/* bound it, per C6/C7 in Section 5.3.1 */
2587 	if (new_rto < stcb->asoc.minrto) {
2588 		new_rto = stcb->asoc.minrto;
2589 	}
2590 	if (new_rto > stcb->asoc.maxrto) {
2591 		new_rto = stcb->asoc.maxrto;
2592 	}
2593 	/* we are now returning the RTO */
2594 	return (new_rto);
2595 }
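
For reference, a worked pass through the update above, using assumed starting values; the numbers only illustrate the shifts and are not measured data.

/*
 * Assume net->lastsa = 800, net->lastsv = 100, net->RTO != 0 and a measured
 * RTT of calc_time = 120 ms:
 *
 *   calc_time = 120 - (800 >> 3) = 20      lastsa = 800 + 20   = 820
 *   calc_time = 20  - (100 >> 2) = -5      lastsv = 100 + (-5) = 95
 *   new_rto   = ((820 >> 2) + 95) >> 1 = (205 + 95) >> 1 = 150 ms
 *
 * which is then clamped to [minrto, maxrto] before being returned.
 */
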
2596 
2597 /*
2598  * Return a pointer to a contiguous piece of data from the given mbuf chain,
2599  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2600  * one mbuf, a copy is made into 'in_ptr'; the caller must ensure that buffer
2601  * is >= 'len' bytes.  Returns NULL if there aren't 'len' bytes in the chain.
2602  */
2603 __inline caddr_t
2604 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2605 {
2606 	uint32_t count;
2607 	uint8_t *ptr;
2608 
2609 	ptr = in_ptr;
2610 	if ((off < 0) || (len <= 0))
2611 		return (NULL);
2612 
2613 	/* find the desired start location */
2614 	while ((m != NULL) && (off > 0)) {
2615 		if (off < SCTP_BUF_LEN(m))
2616 			break;
2617 		off -= SCTP_BUF_LEN(m);
2618 		m = SCTP_BUF_NEXT(m);
2619 	}
2620 	if (m == NULL)
2621 		return (NULL);
2622 
2623 	/* is the current mbuf large enough (eg. contiguous)? */
2624 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2625 		return (mtod(m, caddr_t)+off);
2626 	} else {
2627 		/* else, it spans more than one mbuf, so save a temp copy... */
2628 		while ((m != NULL) && (len > 0)) {
2629 			count = min(SCTP_BUF_LEN(m) - off, len);
2630 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2631 			len -= count;
2632 			ptr += count;
2633 			off = 0;
2634 			m = SCTP_BUF_NEXT(m);
2635 		}
2636 		if ((m == NULL) && (len > 0))
2637 			return (NULL);
2638 		else
2639 			return ((caddr_t)in_ptr);
2640 	}
2641 }
2642 
2643 
2644 
2645 struct sctp_paramhdr *
2646 sctp_get_next_param(struct mbuf *m,
2647     int offset,
2648     struct sctp_paramhdr *pull,
2649     int pull_limit)
2650 {
2651 	/* This just provides a typed signature to Peter's Pull routine */
2652 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2653 	    (uint8_t *) pull));
2654 }
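
A minimal usage sketch of the helper above (illustrative only, not called from this file): walking the parameters of a received chunk, where 'offset' is assumed to point at the first parameter and 'limit' at the end of the chunk. Parameter lengths arrive in network byte order and each parameter is padded to a 4-byte boundary.

static void
example_walk_params(struct mbuf *m, int offset, int limit)
{
	struct sctp_paramhdr *phdr, buf;

	while (offset + (int)sizeof(struct sctp_paramhdr) <= limit) {
		phdr = sctp_get_next_param(m, offset, &buf, sizeof(buf));
		if (phdr == NULL)
			break;
		/* type and length are in network byte order */
		if (ntohs(phdr->param_length) < sizeof(struct sctp_paramhdr))
			break;
		/* advance past the parameter, rounded up to 4 bytes */
		offset += (ntohs(phdr->param_length) + 3) & ~3;
	}
}
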
2655 
2656 
2657 int
2658 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2659 {
2660 	/*
2661 	 * Add padlen bytes of zero-filled padding to the end of the mbuf. If
2662 	 * padlen is > 3 this routine will fail.
2663 	 */
2664 	uint8_t *dp;
2665 	int i;
2666 
2667 	if (padlen > 3) {
2668 		return (ENOBUFS);
2669 	}
2670 	if (M_TRAILINGSPACE(m)) {
2671 		/*
2672 		 * The easy way. We hope the majority of the time we hit
2673 		 * here :)
2674 		 */
2675 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2676 		SCTP_BUF_LEN(m) += padlen;
2677 	} else {
2678 		/* Hard way we must grow the mbuf */
2679 		struct mbuf *tmp;
2680 
2681 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2682 		if (tmp == NULL) {
2683 			/* Out of space GAK! we are in big trouble. */
2684 			return (ENOSPC);
2685 		}
2686 		/* setup and insert in middle */
2687 		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2688 		SCTP_BUF_LEN(tmp) = padlen;
2689 		SCTP_BUF_NEXT(m) = tmp;
2690 		dp = mtod(tmp, uint8_t *);
2691 	}
2692 	/* zero out the pad */
2693 	for (i = 0; i < padlen; i++) {
2694 		*dp = 0;
2695 		dp++;
2696 	}
2697 	return (0);
2698 }
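
A hedged sketch of the typical calling pattern: 'padlen' is normally the number of bytes needed to round a chunk length up to a 4-byte boundary, which by construction is always in the 0..3 range this routine accepts. The helper name below is illustrative only.

static int
example_pad_chunk(struct mbuf *m, int chunk_len)
{
	/* bytes needed to reach the next 4-byte boundary (0..3) */
	int padlen = (4 - (chunk_len & 3)) & 3;

	if (padlen == 0)
		return (0);
	return (sctp_add_pad_tombuf(m, padlen));
}
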
2699 
2700 int
2701 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2702 {
2703 	/* find the last mbuf in chain and pad it */
2704 	struct mbuf *m_at;
2705 
2706 	m_at = m;
2707 	if (last_mbuf) {
2708 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2709 	} else {
2710 		while (m_at) {
2711 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2712 				return (sctp_add_pad_tombuf(m_at, padval));
2713 			}
2714 			m_at = SCTP_BUF_NEXT(m_at);
2715 		}
2716 	}
2717 	return (EFAULT);
2718 }
2719 
2720 int sctp_asoc_change_wake = 0;
2721 
2722 static void
2723 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
2724     uint32_t error, void *data)
2725 {
2726 	struct mbuf *m_notify;
2727 	struct sctp_assoc_change *sac;
2728 	struct sctp_queued_to_read *control;
2729 
2730 	/*
2731 	 * First, if we are going down, dump everything we can to the
2732 	 * socket rcv queue.
2733 	 */
2734 
2735 	if ((stcb == NULL) ||
2736 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2737 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2738 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
2739 	    ) {
2740 		/* If the socket is gone we are out of here */
2741 		return;
2742 	}
2743 	/*
2744 	 * For TCP model AND UDP connected sockets we will send an error up
2745 	 * when an ABORT comes in.
2746 	 */
2747 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2748 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2749 	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
2750 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT)
2751 			stcb->sctp_socket->so_error = ECONNREFUSED;
2752 		else
2753 			stcb->sctp_socket->so_error = ECONNRESET;
2754 		/* Wake ANY sleepers */
2755 		sorwakeup(stcb->sctp_socket);
2756 		sowwakeup(stcb->sctp_socket);
2757 		sctp_asoc_change_wake++;
2758 	}
2759 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2760 		/* event not enabled */
2761 		return;
2762 	}
2763 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
2764 	if (m_notify == NULL)
2765 		/* no space left */
2766 		return;
2767 	SCTP_BUF_LEN(m_notify) = 0;
2768 
2769 	sac = mtod(m_notify, struct sctp_assoc_change *);
2770 	sac->sac_type = SCTP_ASSOC_CHANGE;
2771 	sac->sac_flags = 0;
2772 	sac->sac_length = sizeof(struct sctp_assoc_change);
2773 	sac->sac_state = event;
2774 	sac->sac_error = error;
2775 	/* XXX verify these stream counts */
2776 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2777 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
2778 	sac->sac_assoc_id = sctp_get_associd(stcb);
2779 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
2780 	SCTP_BUF_NEXT(m_notify) = NULL;
2781 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2782 	    0, 0, 0, 0, 0, 0,
2783 	    m_notify);
2784 	if (control == NULL) {
2785 		/* no memory */
2786 		sctp_m_freem(m_notify);
2787 		return;
2788 	}
2789 	control->length = SCTP_BUF_LEN(m_notify);
2790 	/* not that we need this */
2791 	control->tail_mbuf = m_notify;
2792 	control->spec_flags = M_NOTIFICATION;
2793 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2794 	    control,
2795 	    &stcb->sctp_socket->so_rcv, 1);
2796 	if (event == SCTP_COMM_LOST) {
2797 		/* Wake up any sleeper */
2798 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2799 	}
2800 }
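
The SCTP_PCB_FLAGS_RECVASSOCEVNT check above corresponds to the application having subscribed to association-change events. A user-space sketch of how that subscription is typically made with the standard SCTP socket API (error handling omitted):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static int
example_enable_assoc_events(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_association_event = 1;	/* deliver struct sctp_assoc_change */
	return (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
	    &events, sizeof(events)));
}
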
2801 
2802 static void
2803 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2804     struct sockaddr *sa, uint32_t error)
2805 {
2806 	struct mbuf *m_notify;
2807 	struct sctp_paddr_change *spc;
2808 	struct sctp_queued_to_read *control;
2809 
2810 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
2811 		/* event not enabled */
2812 		return;
2813 
2814 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2815 	if (m_notify == NULL)
2816 		return;
2817 	SCTP_BUF_LEN(m_notify) = 0;
2818 	spc = mtod(m_notify, struct sctp_paddr_change *);
2819 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2820 	spc->spc_flags = 0;
2821 	spc->spc_length = sizeof(struct sctp_paddr_change);
2822 	if (sa->sa_family == AF_INET) {
2823 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2824 	} else {
2825 		struct sockaddr_in6 *sin6;
2826 
2827 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2828 
2829 		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2830 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2831 			if (sin6->sin6_scope_id == 0) {
2832 				/* recover scope_id for user */
2833 				(void)sa6_recoverscope(sin6);
2834 			} else {
2835 				/* clear embedded scope_id for user */
2836 				in6_clearscope(&sin6->sin6_addr);
2837 			}
2838 		}
2839 	}
2840 	spc->spc_state = state;
2841 	spc->spc_error = error;
2842 	spc->spc_assoc_id = sctp_get_associd(stcb);
2843 
2844 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2845 	SCTP_BUF_NEXT(m_notify) = NULL;
2846 
2847 	/* append to socket */
2848 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2849 	    0, 0, 0, 0, 0, 0,
2850 	    m_notify);
2851 	if (control == NULL) {
2852 		/* no memory */
2853 		sctp_m_freem(m_notify);
2854 		return;
2855 	}
2856 	control->length = SCTP_BUF_LEN(m_notify);
2857 	control->spec_flags = M_NOTIFICATION;
2858 	/* not that we need this */
2859 	control->tail_mbuf = m_notify;
2860 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2861 	    control,
2862 	    &stcb->sctp_socket->so_rcv, 1);
2863 }
2864 
2865 
2866 static void
2867 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2868     struct sctp_tmit_chunk *chk)
2869 {
2870 	struct mbuf *m_notify;
2871 	struct sctp_send_failed *ssf;
2872 	struct sctp_queued_to_read *control;
2873 	int length;
2874 
2875 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
2876 		/* event not enabled */
2877 		return;
2878 
2879 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2880 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2881 	if (m_notify == NULL)
2882 		/* no space left */
2883 		return;
2884 	SCTP_BUF_LEN(m_notify) = 0;
2885 	ssf = mtod(m_notify, struct sctp_send_failed *);
2886 	ssf->ssf_type = SCTP_SEND_FAILED;
2887 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2888 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2889 	else
2890 		ssf->ssf_flags = SCTP_DATA_SENT;
2891 	ssf->ssf_length = length;
2892 	ssf->ssf_error = error;
2893 	/* not exactly what the user sent in, but should be close :) */
2894 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2895 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2896 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2897 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2898 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
2899 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2900 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
2901 	SCTP_BUF_NEXT(m_notify) = chk->data;
2902 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2903 
2904 	/* Steal off the mbuf */
2905 	chk->data = NULL;
2906 	/*
2907 	 * For this case we check the actual socket buffer: since the assoc
2908 	 * is going away, we don't want to overfill the socket buffer for a
2909 	 * non-reader.
2910 	 */
2911 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2912 		sctp_m_freem(m_notify);
2913 		return;
2914 	}
2915 	/* append to socket */
2916 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2917 	    0, 0, 0, 0, 0, 0,
2918 	    m_notify);
2919 	if (control == NULL) {
2920 		/* no memory */
2921 		sctp_m_freem(m_notify);
2922 		return;
2923 	}
2924 	control->spec_flags = M_NOTIFICATION;
2925 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2926 	    control,
2927 	    &stcb->sctp_socket->so_rcv, 1);
2928 }
2929 
2930 
2931 static void
2932 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2933     struct sctp_stream_queue_pending *sp)
2934 {
2935 	struct mbuf *m_notify;
2936 	struct sctp_send_failed *ssf;
2937 	struct sctp_queued_to_read *control;
2938 	int length;
2939 
2940 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
2941 		/* event not enabled */
2942 		return;
2943 
2944 	length = sizeof(struct sctp_send_failed) + sp->length;
2945 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = 0;
2950 	ssf = mtod(m_notify, struct sctp_send_failed *);
2951 	ssf->ssf_type = SCTP_SEND_FAILED;
2952 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2953 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2954 	else
2955 		ssf->ssf_flags = SCTP_DATA_SENT;
2956 	ssf->ssf_length = length;
2957 	ssf->ssf_error = error;
2958 	/* not exactly what the user sent in, but should be close :) */
2959 	ssf->ssf_info.sinfo_stream = sp->stream;
2960 	ssf->ssf_info.sinfo_ssn = sp->strseq;
2961 	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
2962 	ssf->ssf_info.sinfo_ppid = sp->ppid;
2963 	ssf->ssf_info.sinfo_context = sp->context;
2964 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2965 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
2966 	SCTP_BUF_NEXT(m_notify) = sp->data;
2967 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2968 
2969 	/* Steal off the mbuf */
2970 	sp->data = NULL;
2971 	/*
2972 	 * For this case we check the actual socket buffer: since the assoc
2973 	 * is going away, we don't want to overfill the socket buffer for a
2974 	 * non-reader.
2975 	 */
2976 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2977 		sctp_m_freem(m_notify);
2978 		return;
2979 	}
2980 	/* append to socket */
2981 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2982 	    0, 0, 0, 0, 0, 0,
2983 	    m_notify);
2984 	if (control == NULL) {
2985 		/* no memory */
2986 		sctp_m_freem(m_notify);
2987 		return;
2988 	}
2989 	control->spec_flags = M_NOTIFICATION;
2990 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2991 	    control,
2992 	    &stcb->sctp_socket->so_rcv, 1);
2993 }
2994 
2995 
2996 
2997 static void
2998 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2999     uint32_t error)
3000 {
3001 	struct mbuf *m_notify;
3002 	struct sctp_adaptation_event *sai;
3003 	struct sctp_queued_to_read *control;
3004 
3005 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3006 		/* event not enabled */
3007 		return;
3008 
3009 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_DONTWAIT, 1, MT_DATA);
3010 	if (m_notify == NULL)
3011 		/* no space left */
3012 		return;
3013 	SCTP_BUF_LEN(m_notify) = 0;
3014 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3015 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3016 	sai->sai_flags = 0;
3017 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3018 	sai->sai_adaptation_ind = error;
3019 	sai->sai_assoc_id = sctp_get_associd(stcb);
3020 
3021 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3022 	SCTP_BUF_NEXT(m_notify) = NULL;
3023 
3024 	/* append to socket */
3025 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3026 	    0, 0, 0, 0, 0, 0,
3027 	    m_notify);
3028 	if (control == NULL) {
3029 		/* no memory */
3030 		sctp_m_freem(m_notify);
3031 		return;
3032 	}
3033 	control->length = SCTP_BUF_LEN(m_notify);
3034 	control->spec_flags = M_NOTIFICATION;
3035 	/* not that we need this */
3036 	control->tail_mbuf = m_notify;
3037 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3038 	    control,
3039 	    &stcb->sctp_socket->so_rcv, 1);
3040 }
3041 
3042 /* The caller must hold the INP read-queue lock unless nolock is 0, in which case this routine takes it */
3043 void
3044 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
3045     uint32_t error, int nolock, uint32_t val)
3046 {
3047 	struct mbuf *m_notify;
3048 	struct sctp_pdapi_event *pdapi;
3049 	struct sctp_queued_to_read *control;
3050 	struct sockbuf *sb;
3051 
3052 	if ((stcb == NULL) || sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3053 		/* event not enabled */
3054 		return;
3055 
3056 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3057 	if (m_notify == NULL)
3058 		/* no space left */
3059 		return;
3060 	SCTP_BUF_LEN(m_notify) = 0;
3061 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3062 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3063 	pdapi->pdapi_flags = 0;
3064 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3065 	pdapi->pdapi_indication = error;
3066 	pdapi->pdapi_stream = (val >> 16);
3067 	pdapi->pdapi_seq = (val & 0x0000ffff);
3068 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3069 
3070 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3071 	SCTP_BUF_NEXT(m_notify) = NULL;
3072 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3073 	    0, 0, 0, 0, 0, 0,
3074 	    m_notify);
3075 	if (control == NULL) {
3076 		/* no memory */
3077 		sctp_m_freem(m_notify);
3078 		return;
3079 	}
3080 	control->spec_flags = M_NOTIFICATION;
3081 	control->length = SCTP_BUF_LEN(m_notify);
3082 	/* not that we need this */
3083 	control->tail_mbuf = m_notify;
3084 	control->held_length = 0;
3085 	control->length = 0;
3086 	if (nolock == 0) {
3087 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3088 	}
3089 	sb = &stcb->sctp_socket->so_rcv;
3090 #ifdef SCTP_SB_LOGGING
3091 	sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3092 #endif
3093 	sctp_sballoc(stcb, sb, m_notify);
3094 #ifdef SCTP_SB_LOGGING
3095 	sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3096 #endif
3097 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3098 	control->end_added = 1;
3099 	if (stcb->asoc.control_pdapi)
3100 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3101 	else {
3102 		/* we really should not see this case */
3103 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3104 	}
3105 	if (nolock == 0) {
3106 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3107 	}
3108 	if (stcb->sctp_ep && stcb->sctp_socket) {
3109 		/* This should always be the case */
3110 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3111 	}
3112 }
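
The 'val' argument above packs the stream identifier into the high 16 bits and the stream sequence number into the low 16 bits, as the pdapi_stream/pdapi_seq assignments show. A caller would typically build it as below (the helper name is illustrative only):

#include <stdint.h>

static uint32_t
example_pack_pdapi_val(uint16_t stream, uint16_t seq)
{
	return (((uint32_t)stream << 16) | seq);
}
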
3113 
3114 static void
3115 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3116 {
3117 	struct mbuf *m_notify;
3118 	struct sctp_shutdown_event *sse;
3119 	struct sctp_queued_to_read *control;
3120 
3121 	/*
3122 	 * For TCP model AND UDP connected sockets we will send an error up
3123 	 * when a SHUTDOWN completes.
3124 	 */
3125 	if (stcb == NULL) {
3126 		return;
3127 	}
3128 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3129 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3130 		/* mark socket closed for read/write and wakeup! */
3131 		socantsendmore(stcb->sctp_socket);
3132 	}
3133 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
3134 		/* event not enabled */
3135 		return;
3136 
3137 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
3138 	if (m_notify == NULL)
3139 		/* no space left */
3140 		return;
3141 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3142 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3143 	sse->sse_flags = 0;
3144 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3145 	sse->sse_assoc_id = sctp_get_associd(stcb);
3146 
3147 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3148 	SCTP_BUF_NEXT(m_notify) = NULL;
3149 
3150 	/* append to socket */
3151 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3152 	    0, 0, 0, 0, 0, 0,
3153 	    m_notify);
3154 	if (control == NULL) {
3155 		/* no memory */
3156 		sctp_m_freem(m_notify);
3157 		return;
3158 	}
3159 	control->spec_flags = M_NOTIFICATION;
3160 	control->length = SCTP_BUF_LEN(m_notify);
3161 	/* not that we need this */
3162 	control->tail_mbuf = m_notify;
3163 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3164 	    control,
3165 	    &stcb->sctp_socket->so_rcv, 1);
3166 }
3167 
3168 static void
3169 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3170     int number_entries, uint16_t * list, int flag)
3171 {
3172 	struct mbuf *m_notify;
3173 	struct sctp_queued_to_read *control;
3174 	struct sctp_stream_reset_event *strreset;
3175 	int len;
3176 
3177 	if (stcb == NULL) {
3178 		return;
3179 	}
3180 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3181 		/* event not enabled */
3182 		return;
3183 
3184 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3185 	if (m_notify == NULL)
3186 		/* no space left */
3187 		return;
3188 	SCTP_BUF_LEN(m_notify) = 0;
3189 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3190 	if (len > M_TRAILINGSPACE(m_notify)) {
3191 		/* never enough room */
3192 		sctp_m_freem(m_notify);
3193 		return;
3194 	}
3195 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3196 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3197 	if (number_entries == 0) {
3198 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3199 	} else {
3200 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3201 	}
3202 	strreset->strreset_length = len;
3203 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3204 	if (number_entries) {
3205 		int i;
3206 
3207 		for (i = 0; i < number_entries; i++) {
3208 			strreset->strreset_list[i] = ntohs(list[i]);
3209 		}
3210 	}
3211 	SCTP_BUF_LEN(m_notify) = len;
3212 	SCTP_BUF_NEXT(m_notify) = NULL;
3213 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3214 		/* no space */
3215 		sctp_m_freem(m_notify);
3216 		return;
3217 	}
3218 	/* append to socket */
3219 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3220 	    0, 0, 0, 0, 0, 0,
3221 	    m_notify);
3222 	if (control == NULL) {
3223 		/* no memory */
3224 		sctp_m_freem(m_notify);
3225 		return;
3226 	}
3227 	control->spec_flags = M_NOTIFICATION;
3228 	control->length = SCTP_BUF_LEN(m_notify);
3229 	/* not that we need this */
3230 	control->tail_mbuf = m_notify;
3231 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3232 	    control,
3233 	    &stcb->sctp_socket->so_rcv, 1);
3234 }
3235 
3236 
3237 void
3238 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3239     uint32_t error, void *data)
3240 {
3241 	if (stcb == NULL) {
3242 		/* unlikely, but possible */
3243 		return;
3244 	}
3245 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3246 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3247 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3248 	    ) {
3249 		/* No notifications up when we are in a no socket state */
3250 		return;
3251 	}
3252 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3253 		/* Can't send up to a closed socket any notifications */
3254 		return;
3255 	}
3256 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3257 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3258 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3259 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3260 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3261 			/* Don't report these in front states */
3262 			return;
3263 		}
3264 	}
3265 	if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) {
3266 		if ((notification != SCTP_NOTIFY_ASSOC_DOWN) &&
3267 		    (notification != SCTP_NOTIFY_ASSOC_ABORTED) &&
3268 		    (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) &&
3269 		    (notification != SCTP_NOTIFY_DG_FAIL) &&
3270 		    (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) {
3271 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL);
3272 			stcb->asoc.assoc_up_sent = 1;
3273 		}
3274 	}
3275 	switch (notification) {
3276 	case SCTP_NOTIFY_ASSOC_UP:
3277 		if (stcb->asoc.assoc_up_sent == 0) {
3278 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL);
3279 			stcb->asoc.assoc_up_sent = 1;
3280 		}
3281 		break;
3282 	case SCTP_NOTIFY_ASSOC_DOWN:
3283 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL);
3284 		break;
3285 	case SCTP_NOTIFY_INTERFACE_DOWN:
3286 		{
3287 			struct sctp_nets *net;
3288 
3289 			net = (struct sctp_nets *)data;
3290 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3291 			    (struct sockaddr *)&net->ro._l_addr, error);
3292 			break;
3293 		}
3294 	case SCTP_NOTIFY_INTERFACE_UP:
3295 		{
3296 			struct sctp_nets *net;
3297 
3298 			net = (struct sctp_nets *)data;
3299 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3300 			    (struct sockaddr *)&net->ro._l_addr, error);
3301 			break;
3302 		}
3303 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3304 		{
3305 			struct sctp_nets *net;
3306 
3307 			net = (struct sctp_nets *)data;
3308 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3309 			    (struct sockaddr *)&net->ro._l_addr, error);
3310 			break;
3311 		}
3312 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3313 		sctp_notify_send_failed2(stcb, error,
3314 		    (struct sctp_stream_queue_pending *)data);
3315 		break;
3316 	case SCTP_NOTIFY_DG_FAIL:
3317 		sctp_notify_send_failed(stcb, error,
3318 		    (struct sctp_tmit_chunk *)data);
3319 		break;
3320 	case SCTP_NOTIFY_ADAPTATION_INDICATION:
3321 		/* Here the error is the adaptation indication */
3322 		sctp_notify_adaptation_layer(stcb, error);
3323 		break;
3324 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3325 		{
3326 			uint32_t val;
3327 
3328 			val = *((uint32_t *) data);
3329 
3330 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3331 		}
3332 		break;
3333 	case SCTP_NOTIFY_STRDATA_ERR:
3334 		break;
3335 	case SCTP_NOTIFY_ASSOC_ABORTED:
3336 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3337 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3338 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL);
3339 		} else {
3340 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL);
3341 		}
3342 		break;
3343 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3344 		break;
3345 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3346 		break;
3347 	case SCTP_NOTIFY_ASSOC_RESTART:
3348 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data);
3349 		break;
3350 	case SCTP_NOTIFY_HB_RESP:
3351 		break;
3352 	case SCTP_NOTIFY_STR_RESET_SEND:
3353 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3354 		break;
3355 	case SCTP_NOTIFY_STR_RESET_RECV:
3356 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3357 		break;
3358 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3359 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR));
3360 		break;
3361 
3362 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3363 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR));
3364 		break;
3365 
3366 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3367 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3368 		    error);
3369 		break;
3370 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3371 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3372 		    error);
3373 		break;
3374 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3375 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3376 		    error);
3377 		break;
3378 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3379 		break;
3380 	case SCTP_NOTIFY_ASCONF_FAILED:
3381 		break;
3382 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3383 		sctp_notify_shutdown_event(stcb);
3384 		break;
3385 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3386 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3387 		    (uint16_t) (uintptr_t) data);
3388 		break;
3389 #if 0
3390 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3391 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3392 		    error, (uint16_t) (uintptr_t) data);
3393 		break;
3394 #endif				/* not yet? remove? */
3395 
3396 
3397 	default:
3398 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3399 		    __FUNCTION__, notification, notification);
3400 		break;
3401 	}			/* end switch */
3402 }
3403 
3404 void
3405 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock)
3406 {
3407 	struct sctp_association *asoc;
3408 	struct sctp_stream_out *outs;
3409 	struct sctp_tmit_chunk *chk;
3410 	struct sctp_stream_queue_pending *sp;
3411 	int i;
3412 
3413 	if (stcb == NULL) {
3414 		return;
3415 	}
3416 	asoc = &stcb->asoc;
3417 
3418 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3419 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3420 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3421 		return;
3422 	}
3423 	/* now go through all the gunk, freeing chunks */
3424 	if (holds_lock == 0) {
3425 		SCTP_TCB_SEND_LOCK(stcb);
3426 	}
3427 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3428 		/* For each stream */
3429 		outs = &stcb->asoc.strmout[i];
3430 		/* clean up any sends there */
3431 		stcb->asoc.locked_on_sending = NULL;
3432 		sp = TAILQ_FIRST(&outs->outqueue);
3433 		while (sp) {
3434 			stcb->asoc.stream_queue_cnt--;
3435 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3436 			sctp_free_spbufspace(stcb, asoc, sp);
3437 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3438 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp);
3439 			if (sp->data) {
3440 				sctp_m_freem(sp->data);
3441 				sp->data = NULL;
3442 			}
3443 			if (sp->net)
3444 				sctp_free_remote_addr(sp->net);
3445 			sp->net = NULL;
3446 			/* Free the chunk */
3447 			sctp_free_a_strmoq(stcb, sp);
3448 			/* sa_ignore FREED_MEMORY */
3449 			sp = TAILQ_FIRST(&outs->outqueue);
3450 		}
3451 	}
3452 
3453 	/* pending send queue SHOULD be empty */
3454 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3455 		chk = TAILQ_FIRST(&asoc->send_queue);
3456 		while (chk) {
3457 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3458 			asoc->send_queue_cnt--;
3459 			if (chk->data) {
3460 				/*
3461 				 * trim off the sctp chunk header (it should
3462 				 * be there)
3463 				 */
3464 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3465 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3466 					sctp_mbuf_crush(chk->data);
3467 				}
3468 			}
3469 			sctp_free_bufspace(stcb, asoc, chk, 1);
3470 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
3471 			if (chk->data) {
3472 				sctp_m_freem(chk->data);
3473 				chk->data = NULL;
3474 			}
3475 			if (chk->whoTo)
3476 				sctp_free_remote_addr(chk->whoTo);
3477 			chk->whoTo = NULL;
3478 			sctp_free_a_chunk(stcb, chk);
3479 			/* sa_ignore FREED_MEMORY */
3480 			chk = TAILQ_FIRST(&asoc->send_queue);
3481 		}
3482 	}
3483 	/* sent queue SHOULD be empty */
3484 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3485 		chk = TAILQ_FIRST(&asoc->sent_queue);
3486 		while (chk) {
3487 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3488 			asoc->sent_queue_cnt--;
3489 			if (chk->data) {
3490 				/*
3491 				 * trim off the sctp chunk header (it should
3492 				 * be there)
3493 				 */
3494 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3495 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3496 					sctp_mbuf_crush(chk->data);
3497 				}
3498 			}
3499 			sctp_free_bufspace(stcb, asoc, chk, 1);
3500 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3501 			    SCTP_NOTIFY_DATAGRAM_SENT, chk);
3502 			if (chk->data) {
3503 				sctp_m_freem(chk->data);
3504 				chk->data = NULL;
3505 			}
3506 			if (chk->whoTo)
3507 				sctp_free_remote_addr(chk->whoTo);
3508 			chk->whoTo = NULL;
3509 			sctp_free_a_chunk(stcb, chk);
3510 			/* sa_ignore FREED_MEMORY */
3511 			chk = TAILQ_FIRST(&asoc->sent_queue);
3512 		}
3513 	}
3514 	if (holds_lock == 0) {
3515 		SCTP_TCB_SEND_UNLOCK(stcb);
3516 	}
3517 }
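
/*
 * Illustrative userland sketch (not part of this file): the frees performed
 * above surface to an application as SCTP_SEND_FAILED notifications, provided
 * it has subscribed to them.  The descriptor sd and the notification buffer
 * handling below are assumptions made purely for illustration.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>

static int
subscribe_send_failures(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;		/* deliver sctp_sndrcvinfo */
	events.sctp_send_failure_event = 1;	/* deliver SCTP_SEND_FAILED */
	return (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
	    &events, sizeof(events)));
}

/* Call on a (suitably aligned) buffer read with MSG_NOTIFICATION set. */
static void
handle_notification(const void *buf)
{
	const union sctp_notification *sn = buf;

	if (sn->sn_header.sn_type == SCTP_SEND_FAILED) {
		const struct sctp_send_failed *ssf = &sn->sn_send_failed;

		printf("send failed: error %u on stream %u\n",
		    ssf->ssf_error, ssf->ssf_info.sinfo_stream);
	}
}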
3518 
3519 void
3520 sctp_abort_notification(struct sctp_tcb *stcb, int error)
3521 {
3522 
3523 	if (stcb == NULL) {
3524 		return;
3525 	}
3526 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3527 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3528 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3529 		return;
3530 	}
3531 	/* Tell them we lost the asoc */
3532 	sctp_report_all_outbound(stcb, 1);
3533 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3534 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3535 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3536 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3537 	}
3538 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
3539 }
3540 
3541 void
3542 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3543     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
3544     uint32_t vrf_id, uint32_t table_id)
3545 {
3546 	uint32_t vtag;
3547 
3548 	vtag = 0;
3549 	if (stcb != NULL) {
3550 		/* We have a TCB to abort, send notification too */
3551 		vtag = stcb->asoc.peer_vtag;
3552 		sctp_abort_notification(stcb, 0);
3553 		/* get the assoc vrf id and table id */
3554 		vrf_id = stcb->asoc.vrf_id;
3555 		table_id = stcb->asoc.table_id;
3556 	}
3557 	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, table_id);
3558 	if (stcb != NULL) {
3559 		/* Ok, now let's free it */
3560 		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3561 	} else {
3562 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3563 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3564 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3565 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3566 			}
3567 		}
3568 	}
3569 }
3570 
3571 #ifdef SCTP_ASOCLOG_OF_TSNS
3572 void
3573 sctp_print_out_track_log(struct sctp_tcb *stcb)
3574 {
3575 	int i;
3576 
3577 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3578 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3579 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3580 		SCTP_PRINTF("None rcvd\n");
3581 		goto none_in;
3582 	}
3583 	if (stcb->asoc.tsn_in_wrapped) {
3584 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3585 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3586 			    stcb->asoc.in_tsnlog[i].tsn,
3587 			    stcb->asoc.in_tsnlog[i].strm,
3588 			    stcb->asoc.in_tsnlog[i].seq,
3589 			    stcb->asoc.in_tsnlog[i].flgs,
3590 			    stcb->asoc.in_tsnlog[i].sz);
3591 		}
3592 	}
3593 	if (stcb->asoc.tsn_in_at) {
3594 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
3595 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3596 			    stcb->asoc.in_tsnlog[i].tsn,
3597 			    stcb->asoc.in_tsnlog[i].strm,
3598 			    stcb->asoc.in_tsnlog[i].seq,
3599 			    stcb->asoc.in_tsnlog[i].flgs,
3600 			    stcb->asoc.in_tsnlog[i].sz);
3601 		}
3602 	}
3603 none_in:
3604 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
3605 	if ((stcb->asoc.tsn_out_at == 0) &&
3606 	    (stcb->asoc.tsn_out_wrapped == 0)) {
3607 		SCTP_PRINTF("None sent\n");
3608 	}
3609 	if (stcb->asoc.tsn_out_wrapped) {
3610 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
3611 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3612 			    stcb->asoc.out_tsnlog[i].tsn,
3613 			    stcb->asoc.out_tsnlog[i].strm,
3614 			    stcb->asoc.out_tsnlog[i].seq,
3615 			    stcb->asoc.out_tsnlog[i].flgs,
3616 			    stcb->asoc.out_tsnlog[i].sz);
3617 		}
3618 	}
3619 	if (stcb->asoc.tsn_out_at) {
3620 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
3621 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3622 			    stcb->asoc.out_tsnlog[i].tsn,
3623 			    stcb->asoc.out_tsnlog[i].strm,
3624 			    stcb->asoc.out_tsnlog[i].seq,
3625 			    stcb->asoc.out_tsnlog[i].flgs,
3626 			    stcb->asoc.out_tsnlog[i].sz);
3627 		}
3628 	}
3629 }
3630 
3631 #endif
3632 
3633 void
3634 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3635     int error, struct mbuf *op_err)
3636 {
3637 	uint32_t vtag;
3638 
3639 	if (stcb == NULL) {
3640 		/* Got to have a TCB */
3641 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3642 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3643 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3644 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3645 			}
3646 		}
3647 		return;
3648 	}
3649 	vtag = stcb->asoc.peer_vtag;
3650 	/* notify the ulp */
3651 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3652 		sctp_abort_notification(stcb, error);
3653 	/* notify the peer */
3654 	sctp_send_abort_tcb(stcb, op_err);
3655 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3656 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3657 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3658 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3659 	}
3660 	/* now free the asoc */
3661 #ifdef SCTP_ASOCLOG_OF_TSNS
3662 	sctp_print_out_track_log(stcb);
3663 #endif
3664 	sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3665 }
3666 
3667 void
3668 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3669     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id,
3670     uint32_t table_id)
3671 {
3672 	struct sctp_chunkhdr *ch, chunk_buf;
3673 	unsigned int chk_length;
3674 
3675 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3676 	/* Generate a TO address for future reference */
3677 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3678 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3679 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3680 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3681 		}
3682 	}
3683 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3684 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3685 	while (ch != NULL) {
3686 		chk_length = ntohs(ch->chunk_length);
3687 		if (chk_length < sizeof(*ch)) {
3688 			/* break to abort land */
3689 			break;
3690 		}
3691 		switch (ch->chunk_type) {
3692 		case SCTP_PACKET_DROPPED:
3693 			/* we don't respond to pkt-dropped */
3694 			return;
3695 		case SCTP_ABORT_ASSOCIATION:
3696 			/* we don't respond with an ABORT to an ABORT */
3697 			return;
3698 		case SCTP_SHUTDOWN_COMPLETE:
3699 			/*
3700 			 * we ignore it since we are not waiting for it and
3701 			 * peer is gone
3702 			 */
3703 			return;
3704 		case SCTP_SHUTDOWN_ACK:
3705 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id,
3706 			    table_id);
3707 			return;
3708 		default:
3709 			break;
3710 		}
3711 		offset += SCTP_SIZE32(chk_length);
3712 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3713 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3714 	}
3715 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, table_id);
3716 }
3717 
3718 /*
3719  * check the inbound datagram to make sure there is not an abort inside it;
3720  * if there is, return 1, else return 0.
3721  */
3722 int
3723 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3724 {
3725 	struct sctp_chunkhdr *ch;
3726 	struct sctp_init_chunk *init_chk, chunk_buf;
3727 	int offset;
3728 	unsigned int chk_length;
3729 
3730 	offset = iphlen + sizeof(struct sctphdr);
3731 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3732 	    (uint8_t *) & chunk_buf);
3733 	while (ch != NULL) {
3734 		chk_length = ntohs(ch->chunk_length);
3735 		if (chk_length < sizeof(*ch)) {
3736 			/* packet is probably corrupt */
3737 			break;
3738 		}
3739 		/* we seem to be ok, is it an abort? */
3740 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3741 			/* yep, tell them */
3742 			return (1);
3743 		}
3744 		if (ch->chunk_type == SCTP_INITIATION) {
3745 			/* need to update the Vtag */
3746 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3747 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3748 			if (init_chk != NULL) {
3749 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3750 			}
3751 		}
3752 		/* Nope, move to the next chunk */
3753 		offset += SCTP_SIZE32(chk_length);
3754 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3755 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3756 	}
3757 	return (0);
3758 }
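
/*
 * Standalone sketch (not part of this file) of the same chunk walk in plain
 * userland C: scan a buffer of chunks for an ABORT.  The struct layout and
 * the ABORT type value (6) mirror the on-wire format; the names and helper
 * below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct example_chunkhdr {
	uint8_t		chunk_type;
	uint8_t		chunk_flags;
	uint16_t	chunk_length;	/* network order, includes this header */
};

#define	EXAMPLE_ABORT_TYPE	6		/* SCTP ABORT chunk type */
#define	EXAMPLE_PAD4(x)		(((x) + 3) & ~(size_t)3)

static int
example_packet_has_abort(const uint8_t *chunks, size_t len)
{
	struct example_chunkhdr ch;
	size_t offset = 0;

	while (offset + sizeof(ch) <= len) {
		memcpy(&ch, chunks + offset, sizeof(ch));
		if (ntohs(ch.chunk_length) < sizeof(ch))
			break;			/* corrupt length, give up */
		if (ch.chunk_type == EXAMPLE_ABORT_TYPE)
			return (1);
		offset += EXAMPLE_PAD4(ntohs(ch.chunk_length));
	}
	return (0);
}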
3759 
3760 /*
3761  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
3762  * set (i.e. it's 0), so this function is used to compare link-local scopes
3763  */
3764 uint32_t
3765 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3766 {
3767 	struct sockaddr_in6 a, b;
3768 
3769 	/* save copies */
3770 	a = *addr1;
3771 	b = *addr2;
3772 
3773 	if (a.sin6_scope_id == 0)
3774 		if (sa6_recoverscope(&a)) {
3775 			/* can't get scope, so can't match */
3776 			return (0);
3777 		}
3778 	if (b.sin6_scope_id == 0)
3779 		if (sa6_recoverscope(&b)) {
3780 			/* can't get scope, so can't match */
3781 			return (0);
3782 		}
3783 	if (a.sin6_scope_id != b.sin6_scope_id)
3784 		return (0);
3785 
3786 	return (1);
3787 }
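
/*
 * Userland sketch (not part of this file): sa6_recoverscope() has no direct
 * application-level equivalent, but the usual way to give a link-local
 * sockaddr_in6 a scope before comparing or connecting is if_nametoindex().
 * The helper name and the interface name passed in are examples only.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

static int
example_set_linklocal_scope(struct sockaddr_in6 *sin6, const char *ifname)
{
	if (!IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
		return (0);		/* nothing to do */
	sin6->sin6_scope_id = if_nametoindex(ifname);	/* e.g. "em0" */
	return (sin6->sin6_scope_id != 0 ? 0 : -1);
}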
3788 
3789 /*
3790  * returns a sockaddr_in6 with embedded scope recovered and removed
3791  */
3792 struct sockaddr_in6 *
3793 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3794 {
3795 	/* check and strip embedded scope junk */
3796 	if (addr->sin6_family == AF_INET6) {
3797 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3798 			if (addr->sin6_scope_id == 0) {
3799 				*store = *addr;
3800 				if (!sa6_recoverscope(store)) {
3801 					/* use the recovered scope */
3802 					addr = store;
3803 				}
3804 			} else {
3805 				/* else, return the original "to" addr */
3806 				in6_clearscope(&addr->sin6_addr);
3807 			}
3808 		}
3809 	}
3810 	return (addr);
3811 }
3812 
3813 /*
3814  * are the two addresses the same?  currently a "scopeless" check returns: 1
3815  * if same, 0 if not
3816  */
3817 __inline int
3818 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3819 {
3820 
3821 	/* must be valid */
3822 	if (sa1 == NULL || sa2 == NULL)
3823 		return (0);
3824 
3825 	/* must be the same family */
3826 	if (sa1->sa_family != sa2->sa_family)
3827 		return (0);
3828 
3829 	if (sa1->sa_family == AF_INET6) {
3830 		/* IPv6 addresses */
3831 		struct sockaddr_in6 *sin6_1, *sin6_2;
3832 
3833 		sin6_1 = (struct sockaddr_in6 *)sa1;
3834 		sin6_2 = (struct sockaddr_in6 *)sa2;
3835 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3836 		    &sin6_2->sin6_addr));
3837 	} else if (sa1->sa_family == AF_INET) {
3838 		/* IPv4 addresses */
3839 		struct sockaddr_in *sin_1, *sin_2;
3840 
3841 		sin_1 = (struct sockaddr_in *)sa1;
3842 		sin_2 = (struct sockaddr_in *)sa2;
3843 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3844 	} else {
3845 		/* we don't do these... */
3846 		return (0);
3847 	}
3848 }
3849 
3850 void
3851 sctp_print_address(struct sockaddr *sa)
3852 {
3853 	char ip6buf[INET6_ADDRSTRLEN];
3854 
3855 	ip6buf[0] = 0;
3856 	if (sa->sa_family == AF_INET6) {
3857 		struct sockaddr_in6 *sin6;
3858 
3859 		sin6 = (struct sockaddr_in6 *)sa;
3860 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
3861 		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
3862 		    ntohs(sin6->sin6_port),
3863 		    sin6->sin6_scope_id);
3864 	} else if (sa->sa_family == AF_INET) {
3865 		struct sockaddr_in *sin;
3866 		unsigned char *p;
3867 
3868 		sin = (struct sockaddr_in *)sa;
3869 		p = (unsigned char *)&sin->sin_addr;
3870 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
3871 		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
3872 	} else {
3873 		SCTP_PRINTF("?\n");
3874 	}
3875 }
3876 
3877 void
3878 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3879 {
3880 	if (iph->ip_v == IPVERSION) {
3881 		struct sockaddr_in lsa, fsa;
3882 
3883 		bzero(&lsa, sizeof(lsa));
3884 		lsa.sin_len = sizeof(lsa);
3885 		lsa.sin_family = AF_INET;
3886 		lsa.sin_addr = iph->ip_src;
3887 		lsa.sin_port = sh->src_port;
3888 		bzero(&fsa, sizeof(fsa));
3889 		fsa.sin_len = sizeof(fsa);
3890 		fsa.sin_family = AF_INET;
3891 		fsa.sin_addr = iph->ip_dst;
3892 		fsa.sin_port = sh->dest_port;
3893 		SCTP_PRINTF("src: ");
3894 		sctp_print_address((struct sockaddr *)&lsa);
3895 		SCTP_PRINTF("dest: ");
3896 		sctp_print_address((struct sockaddr *)&fsa);
3897 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3898 		struct ip6_hdr *ip6;
3899 		struct sockaddr_in6 lsa6, fsa6;
3900 
3901 		ip6 = (struct ip6_hdr *)iph;
3902 		bzero(&lsa6, sizeof(lsa6));
3903 		lsa6.sin6_len = sizeof(lsa6);
3904 		lsa6.sin6_family = AF_INET6;
3905 		lsa6.sin6_addr = ip6->ip6_src;
3906 		lsa6.sin6_port = sh->src_port;
3907 		bzero(&fsa6, sizeof(fsa6));
3908 		fsa6.sin6_len = sizeof(fsa6);
3909 		fsa6.sin6_family = AF_INET6;
3910 		fsa6.sin6_addr = ip6->ip6_dst;
3911 		fsa6.sin6_port = sh->dest_port;
3912 		SCTP_PRINTF("src: ");
3913 		sctp_print_address((struct sockaddr *)&lsa6);
3914 		SCTP_PRINTF("dest: ");
3915 		sctp_print_address((struct sockaddr *)&fsa6);
3916 	}
3917 }
3918 
3919 void
3920 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
3921     struct sctp_inpcb *new_inp,
3922     struct sctp_tcb *stcb,
3923     int waitflags)
3924 {
3925 	/*
3926 	 * go through our old INP and pull off any control structures that
3927 	 * belong to stcb and move them to the new inp.
3928 	 */
3929 	struct socket *old_so, *new_so;
3930 	struct sctp_queued_to_read *control, *nctl;
3931 	struct sctp_readhead tmp_queue;
3932 	struct mbuf *m;
3933 	int error = 0;
3934 
3935 	old_so = old_inp->sctp_socket;
3936 	new_so = new_inp->sctp_socket;
3937 	TAILQ_INIT(&tmp_queue);
3938 	error = sblock(&old_so->so_rcv, waitflags);
3939 	if (error) {
3940 		/*
3941 		 * Gak, can't get sblock, we have a problem. data will be
3942 		 * left stranded.. and we don't dare look at it since the
3943 		 * other thread may be reading something. Oh well, it's a
3944 		 * screwed up app that does a peeloff OR an accept while
3945 		 * reading from the main socket... actually its only the
3946 		 * peeloff() case, since I think read will fail on a
3947 		 * listening socket..
3948 		 */
3949 		return;
3950 	}
3951 	/* lock the socket buffers */
3952 	SCTP_INP_READ_LOCK(old_inp);
3953 	control = TAILQ_FIRST(&old_inp->read_queue);
3954 	/* Pull off all for our target stcb */
3955 	while (control) {
3956 		nctl = TAILQ_NEXT(control, next);
3957 		if (control->stcb == stcb) {
3958 			/* remove it, we want it */
3959 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
3960 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
3961 			m = control->data;
3962 			while (m) {
3963 #ifdef SCTP_SB_LOGGING
3964 				sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
3965 #endif
3966 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
3967 #ifdef SCTP_SB_LOGGING
3968 				sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3969 #endif
3970 				m = SCTP_BUF_NEXT(m);
3971 			}
3972 		}
3973 		control = nctl;
3974 	}
3975 	SCTP_INP_READ_UNLOCK(old_inp);
3976 	/* Remove the sb-lock on the old socket */
3977 
3978 	sbunlock(&old_so->so_rcv);
3979 	/* Now we move them over to the new socket buffer */
3980 	control = TAILQ_FIRST(&tmp_queue);
3981 	SCTP_INP_READ_LOCK(new_inp);
3982 	while (control) {
3983 		nctl = TAILQ_NEXT(control, next);
3984 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
3985 		m = control->data;
3986 		while (m) {
3987 #ifdef SCTP_SB_LOGGING
3988 			sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
3989 #endif
3990 			sctp_sballoc(stcb, &new_so->so_rcv, m);
3991 #ifdef SCTP_SB_LOGGING
3992 			sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3993 #endif
3994 			m = SCTP_BUF_NEXT(m);
3995 		}
3996 		control = nctl;
3997 	}
3998 	SCTP_INP_READ_UNLOCK(new_inp);
3999 }
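
/*
 * Userland view of the migration above (not part of this file): when an
 * application calls sctp_peeloff(), queued data for the peeled association
 * follows it to the new descriptor.  A minimal sketch, assuming sd is a
 * one-to-many style SCTP socket and assoc_id identifies an association on it.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int
example_peel_off(int sd, sctp_assoc_t assoc_id)
{
	int peeled_sd;

	peeled_sd = sctp_peeloff(sd, assoc_id);
	if (peeled_sd < 0)
		return (-1);
	/* Reads on peeled_sd now only see data from assoc_id. */
	return (peeled_sd);
}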
4000 
4001 
4002 void
4003 sctp_add_to_readq(struct sctp_inpcb *inp,
4004     struct sctp_tcb *stcb,
4005     struct sctp_queued_to_read *control,
4006     struct sockbuf *sb,
4007     int end)
4008 {
4009 	/*
4010 	 * Here we must place the control on the end of the socket read
4011 	 * queue AND increment sb_cc so that select will work properly on
4012 	 * read.
4013 	 */
4014 	struct mbuf *m, *prev = NULL;
4015 
4016 	if (inp == NULL) {
4017 		/* Gak, TSNH!! */
4018 #ifdef INVARIANTS
4019 		panic("Gak, inp NULL on add_to_readq");
4020 #endif
4021 		return;
4022 	}
4023 	SCTP_INP_READ_LOCK(inp);
4024 	if (!(control->spec_flags & M_NOTIFICATION)) {
4025 		atomic_add_int(&inp->total_recvs, 1);
4026 		if (!control->do_not_ref_stcb) {
4027 			atomic_add_int(&stcb->total_recvs, 1);
4028 		}
4029 	}
4030 	m = control->data;
4031 	control->held_length = 0;
4032 	control->length = 0;
4033 	while (m) {
4034 		if (SCTP_BUF_LEN(m) == 0) {
4035 			/* Skip mbufs with NO length */
4036 			if (prev == NULL) {
4037 				/* First one */
4038 				control->data = sctp_m_free(m);
4039 				m = control->data;
4040 			} else {
4041 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4042 				m = SCTP_BUF_NEXT(prev);
4043 			}
4044 			if (m == NULL) {
4045 				control->tail_mbuf = prev;
4046 			}
4047 			continue;
4048 		}
4049 		prev = m;
4050 #ifdef SCTP_SB_LOGGING
4051 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4052 #endif
4053 		sctp_sballoc(stcb, sb, m);
4054 #ifdef SCTP_SB_LOGGING
4055 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4056 #endif
4057 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4058 		m = SCTP_BUF_NEXT(m);
4059 	}
4060 	if (prev != NULL) {
4061 		control->tail_mbuf = prev;
4062 	} else {
4063 		/* Everything got collapsed out?? */
4064 		SCTP_INP_READ_UNLOCK(inp);	/* don't leak the read lock on this early return */
4064 		return;
4065 	}
4066 	if (end) {
4067 		control->end_added = 1;
4068 	}
4069 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4070 	SCTP_INP_READ_UNLOCK(inp);
4071 	if (inp && inp->sctp_socket) {
4072 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4073 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4074 		} else
4075 			sctp_sorwakeup(inp, inp->sctp_socket);
4076 	}
4077 }
4078 
4079 
4080 int
4081 sctp_append_to_readq(struct sctp_inpcb *inp,
4082     struct sctp_tcb *stcb,
4083     struct sctp_queued_to_read *control,
4084     struct mbuf *m,
4085     int end,
4086     int ctls_cumack,
4087     struct sockbuf *sb)
4088 {
4089 	/*
4090 	 * A partial delivery API event is underway. OR we are appending on
4091 	 * the reassembly queue.
4092 	 *
4093 	 * If PDAPI this means we need to add m to the end of the data.
4094 	 * Increase the length in the control AND increment the sb_cc.
4095 	 * Otherwise sb is NULL and all we need to do is put it at the end
4096 	 * of the mbuf chain.
4097 	 */
4098 	int len = 0;
4099 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4100 
4101 	if (inp) {
4102 		SCTP_INP_READ_LOCK(inp);
4103 	}
4104 	if (control == NULL) {
4105 get_out:
4106 		if (inp) {
4107 			SCTP_INP_READ_UNLOCK(inp);
4108 		}
4109 		return (-1);
4110 	}
4111 	if (control->end_added) {
4112 		/* huh this one is complete? */
4113 		goto get_out;
4114 	}
4115 	mm = m;
4116 	if (mm == NULL) {
4117 		goto get_out;
4118 	}
4119 	while (mm) {
4120 		if (SCTP_BUF_LEN(mm) == 0) {
4121 			/* Skip mbufs with NO length */
4122 			if (prev == NULL) {
4123 				/* First one */
4124 				m = sctp_m_free(mm);
4125 				mm = m;
4126 			} else {
4127 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4128 				mm = SCTP_BUF_NEXT(prev);
4129 			}
4130 			continue;
4131 		}
4132 		prev = mm;
4133 		len += SCTP_BUF_LEN(mm);
4134 		if (sb) {
4135 #ifdef SCTP_SB_LOGGING
4136 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4137 #endif
4138 			sctp_sballoc(stcb, sb, mm);
4139 #ifdef SCTP_SB_LOGGING
4140 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4141 #endif
4142 		}
4143 		mm = SCTP_BUF_NEXT(mm);
4144 	}
4145 	if (prev) {
4146 		tail = prev;
4147 	} else {
4148 		/* Really there should always be a prev */
4149 		if (m == NULL) {
4150 			/* Huh nothing left? */
4151 #ifdef INVARIANTS
4152 			panic("Nothing left to add?");
4153 #else
4154 			goto get_out;
4155 #endif
4156 		}
4157 		tail = m;
4158 	}
4159 	if (end) {
4160 		/* message is complete */
4161 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4162 			stcb->asoc.control_pdapi = NULL;
4163 		}
4164 		control->held_length = 0;
4165 		control->end_added = 1;
4166 	}
4167 	atomic_add_int(&control->length, len);
4168 	if (control->tail_mbuf) {
4169 		/* append */
4170 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4171 		control->tail_mbuf = tail;
4172 	} else {
4173 		/* nothing there */
4174 #ifdef INVARIANTS
4175 		if (control->data != NULL) {
4176 			panic("This should NOT happen");
4177 		}
4178 #endif
4179 		control->data = m;
4180 		control->tail_mbuf = tail;
4181 	}
4182 	if (stcb == NULL) {
4183 		control->do_not_ref_stcb = 1;
4184 	}
4185 	/*
4186 	 * When we are appending in partial delivery, the cum-ack is used
4187 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4188 	 * is populated in the outbound sinfo structure from the true cumack
4189 	 * if the association exists...
4190 	 */
4191 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4192 	if (inp) {
4193 		SCTP_INP_READ_UNLOCK(inp);
4194 	}
4195 	if (inp && inp->sctp_socket) {
4196 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4197 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4198 		} else
4199 			sctp_sorwakeup(inp, inp->sctp_socket);
4200 	}
4201 	return (0);
4202 }
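
/*
 * Receiver-side sketch (not part of this file): a message delivered through
 * the partial delivery path above arrives in pieces, and only the last piece
 * carries MSG_EOR (the end_added flag).  A minimal loop, assuming sd is an
 * SCTP socket and the caller sized buf for the whole message.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static ssize_t
example_recv_whole_message(int sd, char *buf, size_t buflen)
{
	struct sctp_sndrcvinfo sinfo;
	struct sockaddr_in from;
	socklen_t fromlen;
	size_t off = 0;
	ssize_t n;
	int flags;

	do {
		flags = 0;
		fromlen = sizeof(from);
		memset(&sinfo, 0, sizeof(sinfo));
		n = sctp_recvmsg(sd, buf + off, buflen - off,
		    (struct sockaddr *)&from, &fromlen, &sinfo, &flags);
		if (n <= 0)
			return (n);
		off += (size_t)n;
	} while ((flags & MSG_EOR) == 0 && off < buflen);
	return ((ssize_t)off);
}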
4203 
4204 
4205 
4206 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4207  *************ALTERNATE ROUTING CODE
4208  */
4209 
4210 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4211  *************ALTERNATE ROUTING CODE
4212  */
4213 
4214 struct mbuf *
4215 sctp_generate_invmanparam(int err)
4216 {
4217 	/* Return an mbuf with an invalid mandatory parameter */
4218 	struct mbuf *m;
4219 
4220 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4221 	if (m) {
4222 		struct sctp_paramhdr *ph;
4223 
4224 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4225 		ph = mtod(m, struct sctp_paramhdr *);
4226 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4227 		ph->param_type = htons(err);
4228 	}
4229 	return (m);
4230 }
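
/*
 * Standalone sketch (not part of this file) of the same TLV construction in
 * plain C: an SCTP parameter is a 16-bit type and a 16-bit length (both in
 * network order, length including the header).  The struct name and helper
 * below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct example_paramhdr {
	uint16_t	param_type;	/* network byte order */
	uint16_t	param_length;	/* network byte order */
};

static size_t
example_build_paramhdr(uint8_t *buf, size_t buflen, uint16_t type)
{
	struct example_paramhdr ph;

	if (buflen < sizeof(ph))
		return (0);
	ph.param_type = htons(type);
	ph.param_length = htons(sizeof(ph));	/* bare header, no value */
	memcpy(buf, &ph, sizeof(ph));
	return (sizeof(ph));
}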
4231 
4232 #ifdef SCTP_MBCNT_LOGGING
4233 void
4234 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4235     struct sctp_tmit_chunk *tp1, int chk_cnt)
4236 {
4237 	if (tp1->data == NULL) {
4238 		return;
4239 	}
4240 	asoc->chunks_on_out_queue -= chk_cnt;
4241 	sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4242 	    asoc->total_output_queue_size,
4243 	    tp1->book_size,
4244 	    0,
4245 	    tp1->mbcnt);
4246 	if (asoc->total_output_queue_size >= tp1->book_size) {
4247 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4248 	} else {
4249 		asoc->total_output_queue_size = 0;
4250 	}
4251 
4252 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4253 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4254 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4255 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4256 		} else {
4257 			stcb->sctp_socket->so_snd.sb_cc = 0;
4258 
4259 		}
4260 	}
4261 }
4262 
4263 #endif
4264 
4265 int
4266 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4267     int reason, struct sctpchunk_listhead *queue)
4268 {
4269 	int ret_sz = 0;
4270 	int notdone;
4271 	uint8_t foundeom = 0;
4272 
4273 	do {
4274 		ret_sz += tp1->book_size;
4275 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4276 		if (tp1->data) {
4277 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4278 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
4279 			sctp_m_freem(tp1->data);
4280 			tp1->data = NULL;
4281 			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4282 		}
4283 		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4284 			stcb->asoc.sent_queue_cnt_removeable--;
4285 		}
4286 		if (queue == &stcb->asoc.send_queue) {
4287 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4288 			/* on to the sent queue */
4289 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4290 			    sctp_next);
4291 			stcb->asoc.sent_queue_cnt++;
4292 		}
4293 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4294 		    SCTP_DATA_NOT_FRAG) {
4295 			/* not frag'ed, we are done */
4296 			notdone = 0;
4297 			foundeom = 1;
4298 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4299 			/* end of frag, we are done */
4300 			notdone = 0;
4301 			foundeom = 1;
4302 		} else {
4303 			/*
4304 			 * It's a begin or middle piece, we must mark all of
4305 			 * it
4306 			 */
4307 			notdone = 1;
4308 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4309 		}
4310 	} while (tp1 && notdone);
4311 	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
4312 		/*
4313 		 * The multi-part message was scattered across the send and
4314 		 * sent queue.
4315 		 */
4316 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4317 		/*
4318 		 * recurse through the send_queue too, starting at the
4319 		 * beginning.
4320 		 */
4321 		if (tp1) {
4322 			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
4323 			    &stcb->asoc.send_queue);
4324 		} else {
4325 			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
4326 		}
4327 	}
4328 	return (ret_sz);
4329 }
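
/*
 * Sender-side sketch (not part of this file): the release path above is what
 * reclaims a PR-SCTP message once its lifetime expires.  From userland, a
 * non-zero timetolive in sctp_sendmsg() requests that behaviour.  The 500 ms
 * lifetime and the helper name are example values only.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static ssize_t
example_send_with_lifetime(int sd, const void *msg, size_t len,
    const struct sockaddr *to, socklen_t tolen, uint16_t stream)
{
	uint32_t ttl_msec = 500;	/* give up after ~500 ms */

	return (sctp_sendmsg(sd, msg, len, to, tolen,
	    0 /* ppid */, 0 /* flags */, stream, ttl_msec, 0 /* context */));
}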
4330 
4331 /*
4332  * checks to see if the given address, sa, is one that is currently known by
4333  * the kernel. Note: can't distinguish the same address on multiple interfaces
4334  * and doesn't handle multiple addresses with different zone/scope id's.
4335  * Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4336  */
4337 struct sctp_ifa *
4338 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int holds_lock)
4339 {
4340 	struct sctp_laddr *laddr;
4341 
4342 	if (holds_lock == 0) {
4343 		SCTP_INP_RLOCK(inp);
4344 	}
4345 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4346 		if (laddr->ifa == NULL)
4347 			continue;
4348 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4349 			continue;
4350 		if (addr->sa_family == AF_INET) {
4351 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4352 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4353 				/* found him. */
4354 				if (holds_lock == 0) {
4355 					SCTP_INP_RUNLOCK(inp);
4356 				}
4357 				return (laddr->ifa);
4358 				break;
4359 			}
4360 		} else if (addr->sa_family == AF_INET6) {
4361 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4362 			    &laddr->ifa->address.sin6.sin6_addr)) {
4363 				/* found him. */
4364 				if (holds_lock == 0) {
4365 					SCTP_INP_RUNLOCK(inp);
4366 				}
4367 				return (laddr->ifa);
4368 				break;
4369 			}
4370 		}
4371 	}
4372 	if (holds_lock == 0) {
4373 		SCTP_INP_RUNLOCK(inp);
4374 	}
4375 	return (NULL);
4376 }
4377 
4378 uint32_t
4379 sctp_get_ifa_hash_val(struct sockaddr *addr)
4380 {
4381 
4382 	if (addr->sa_family == AF_INET) {
4383 		struct sockaddr_in *sin;
4384 
4385 		sin = (struct sockaddr_in *)addr;
4386 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4387 	} else if (addr->sa_family == AF_INET6) {
4388 		struct sockaddr_in6 *sin6;
4389 		uint32_t hash_of_addr;
4390 
4391 		sin6 = (struct sockaddr_in6 *)addr;
4392 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4393 		    sin6->sin6_addr.s6_addr32[1] +
4394 		    sin6->sin6_addr.s6_addr32[2] +
4395 		    sin6->sin6_addr.s6_addr32[3]);
4396 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4397 		return (hash_of_addr);
4398 	}
4399 	return (0);
4400 }
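
/*
 * A standalone restatement (not part of this file) of the IPv4 branch above:
 * the address is folded onto itself so both halves influence the hash bucket.
 * The sample address 192.0.2.1 is from the documentation prefix.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint32_t
example_ifa_hash_v4(uint32_t s_addr)	/* address in network byte order */
{
	return (s_addr ^ (s_addr >> 16));
}

int
main(void)
{
	struct in_addr a;

	if (inet_pton(AF_INET, "192.0.2.1", &a) != 1)
		return (1);
	printf("bucket hash: 0x%08x\n", example_ifa_hash_v4(a.s_addr));
	return (0);
}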
4401 
4402 struct sctp_ifa *
4403 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4404 {
4405 	struct sctp_ifa *sctp_ifap;
4406 	struct sctp_vrf *vrf;
4407 	struct sctp_ifalist *hash_head;
4408 	uint32_t hash_of_addr;
4409 
4410 	if (holds_lock == 0)
4411 		SCTP_IPI_ADDR_LOCK();
4412 
4413 	vrf = sctp_find_vrf(vrf_id);
4414 	if (vrf == NULL) {
4415 		if (holds_lock == 0)
4416 			SCTP_IPI_ADDR_UNLOCK();
4417 		return (NULL);
4418 	}
4419 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4420 
4421 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4422 	if (hash_head == NULL) {
4423 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4424 		    (u_int)hash_of_addr, (u_int)vrf->vrf_addr_hashmark,
4425 		    (u_int)(hash_of_addr & vrf->vrf_addr_hashmark));
4426 		sctp_print_address(addr);
4427 		SCTP_PRINTF("No such bucket for address\n");
4428 		if (holds_lock == 0)
4429 			SCTP_IPI_ADDR_UNLOCK();
4430 
4431 		return (NULL);
4432 	}
4433 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4434 		if (sctp_ifap == NULL) {
4435 			panic("Huh LIST_FOREACH corrupt");
4436 		}
4437 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4438 			continue;
4439 		if (addr->sa_family == AF_INET) {
4440 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4441 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4442 				/* found him. */
4443 				if (holds_lock == 0)
4444 					SCTP_IPI_ADDR_UNLOCK();
4445 				return (sctp_ifap);
4446 				break;
4447 			}
4448 		} else if (addr->sa_family == AF_INET6) {
4449 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4450 			    &sctp_ifap->address.sin6.sin6_addr)) {
4451 				/* found him. */
4452 				if (holds_lock == 0)
4453 					SCTP_IPI_ADDR_UNLOCK();
4454 				return (sctp_ifap);
4455 				break;
4456 			}
4457 		}
4458 	}
4459 	if (holds_lock == 0)
4460 		SCTP_IPI_ADDR_UNLOCK();
4461 	return (NULL);
4462 }
4463 
4464 static void
4465 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
4466     uint32_t rwnd_req)
4467 {
4468 	/* User pulled some data, do we need a rwnd update? */
4469 	int r_unlocked = 0;
4470 	uint32_t dif, rwnd;
4471 	struct socket *so = NULL;
4472 
4473 	if (stcb == NULL)
4474 		return;
4475 
4476 	atomic_add_int(&stcb->asoc.refcnt, 1);
4477 
4478 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
4479 	    SCTP_STATE_SHUTDOWN_RECEIVED |
4480 	    SCTP_STATE_SHUTDOWN_ACK_SENT)
4481 	    ) {
4482 		/* Pre-check: if we are freeing, no update is needed */
4483 		goto no_lock;
4484 	}
4485 	SCTP_INP_INCR_REF(stcb->sctp_ep);
4486 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4487 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4488 		goto out;
4489 	}
4490 	so = stcb->sctp_socket;
4491 	if (so == NULL) {
4492 		goto out;
4493 	}
4494 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
4495 	/* Have you freed enough to warrant a look? */
4496 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4497 	sctp_misc_ints(SCTP_ENTER_USER_RECV,
4498 	    (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd),
4499 	    *freed_so_far,
4500 	    stcb->freed_by_sorcv_sincelast,
4501 	    rwnd_req);
4502 #endif
4503 	*freed_so_far = 0;
4504 	/* Yep, it's worth a look and the lock overhead */
4505 
4506 	/* Figure out what the rwnd would be */
4507 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
4508 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
4509 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
4510 	} else {
4511 		dif = 0;
4512 	}
4513 	if (dif >= rwnd_req) {
4514 		if (hold_rlock) {
4515 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
4516 			r_unlocked = 1;
4517 		}
4518 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4519 			/*
4520 			 * One last check before we possibly let the reader in.
4521 			 * There is a race where the association may be about to
4522 			 * be freed; in that case skip the window update.
4523 			 */
4524 			goto out;
4525 		}
4526 		SCTP_TCB_LOCK(stcb);
4527 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4528 			/* No reports here */
4529 			SCTP_TCB_UNLOCK(stcb);
4530 			goto out;
4531 		}
4532 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4533 		sctp_misc_ints(SCTP_USER_RECV_SACKS,
4534 		    stcb->asoc.my_rwnd,
4535 		    stcb->asoc.my_last_reported_rwnd,
4536 		    stcb->freed_by_sorcv_sincelast,
4537 		    dif);
4538 #endif
4539 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
4540 		sctp_send_sack(stcb);
4541 		sctp_chunk_output(stcb->sctp_ep, stcb,
4542 		    SCTP_OUTPUT_FROM_USR_RCVD);
4543 		/* make sure no timer is running */
4544 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
4545 		SCTP_TCB_UNLOCK(stcb);
4546 	} else {
4547 		/* Update how much we have pending */
4548 		stcb->freed_by_sorcv_sincelast = dif;
4549 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4550 		sctp_misc_ints(SCTP_USER_RECV_SACKS,
4551 		    stcb->asoc.my_rwnd,
4552 		    stcb->asoc.my_last_reported_rwnd,
4553 		    stcb->freed_by_sorcv_sincelast,
4554 		    0);
4555 #endif
4556 	}
4557 out:
4558 	if (so && r_unlocked && hold_rlock) {
4559 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
4560 	}
4561 	SCTP_INP_DECR_REF(stcb->sctp_ep);
4562 no_lock:
4563 	atomic_add_int(&stcb->asoc.refcnt, -1);
4564 	return;
4565 }
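
/*
 * Userland sketch (not part of this file): the sinfo that the receive path
 * below copies out only reaches the application when data I/O events are
 * enabled on the socket.  A minimal setup, assuming sd is an already created
 * SCTP socket; the helper name is an assumption for illustration.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>

static int
example_enable_rcv_info(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;	/* fill sctp_sndrcvinfo on receive */
	return (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
	    &events, sizeof(events)));
}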
4566 
4567 int
4568 sctp_sorecvmsg(struct socket *so,
4569     struct uio *uio,
4570     struct mbuf **mp,
4571     struct sockaddr *from,
4572     int fromlen,
4573     int *msg_flags,
4574     struct sctp_sndrcvinfo *sinfo,
4575     int filling_sinfo)
4576 {
4577 	/*
4578 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
4579 	 * MSG_PEEK - look, don't touch :-D (only valid with OUT mbuf copy,
4580 	 * i.e. mp=NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
4581 	 * On the way out we may set any combination of:
4582 	 * MSG_NOTIFICATION MSG_EOR
4583 	 *
4584 	 */
4585 	struct sctp_inpcb *inp = NULL;
4586 	int my_len = 0;
4587 	int cp_len = 0, error = 0;
4588 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4589 	struct mbuf *m = NULL, *embuf = NULL;
4590 	struct sctp_tcb *stcb = NULL;
4591 	int wakeup_read_socket = 0;
4592 	int freecnt_applied = 0;
4593 	int out_flags = 0, in_flags = 0;
4594 	int block_allowed = 1;
4595 	int freed_so_far = 0;
4596 	int copied_so_far = 0;
4597 	int in_eeor_mode = 0;
4598 	int no_rcv_needed = 0;
4599 	uint32_t rwnd_req = 0;
4600 	int hold_sblock = 0;
4601 	int hold_rlock = 0;
4602 	int alen = 0;
4603 	int slen = 0;
4604 	int held_length = 0;
4605 	int sockbuf_lock = 0;
4606 
4607 	if (uio == NULL) {
4608 		return (EINVAL);
4609 	}
4610 	if (msg_flags) {
4611 		in_flags = *msg_flags;
4612 		if (in_flags & MSG_PEEK)
4613 			SCTP_STAT_INCR(sctps_read_peeks);
4614 	} else {
4615 		in_flags = 0;
4616 	}
4617 	slen = uio->uio_resid;
4618 
4619 	/* Pull in and set up our int flags */
4620 	if (in_flags & MSG_OOB) {
4621 		/* Out of band's NOT supported */
4622 		return (EOPNOTSUPP);
4623 	}
4624 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
4625 		return (EINVAL);
4626 	}
4627 	if ((in_flags & (MSG_DONTWAIT
4628 	    | MSG_NBIO
4629 	    )) ||
4630 	    SCTP_SO_IS_NBIO(so)) {
4631 		block_allowed = 0;
4632 	}
4633 	/* setup the endpoint */
4634 	inp = (struct sctp_inpcb *)so->so_pcb;
4635 	if (inp == NULL) {
4636 		return (EFAULT);
4637 	}
4638 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
4639 	/* Must be at least a MTU's worth */
4640 	if (rwnd_req < SCTP_MIN_RWND)
4641 		rwnd_req = SCTP_MIN_RWND;
4642 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4643 #ifdef SCTP_RECV_RWND_LOGGING
4644 	sctp_misc_ints(SCTP_SORECV_ENTER,
4645 	    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4646 #endif
4647 #ifdef SCTP_RECV_RWND_LOGGING
4648 	sctp_misc_ints(SCTP_SORECV_ENTERPL,
4649 	    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4650 #endif
4651 
4652 
4653 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4654 	sockbuf_lock = 1;
4655 	if (error) {
4656 		goto release_unlocked;
4657 	}
4658 restart:
4659 
4660 
4661 restart_nosblocks:
4662 	if (hold_sblock == 0) {
4663 		SOCKBUF_LOCK(&so->so_rcv);
4664 		hold_sblock = 1;
4665 	}
4666 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4667 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4668 		goto out;
4669 	}
4670 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
4671 		if (so->so_error) {
4672 			error = so->so_error;
4673 			if ((in_flags & MSG_PEEK) == 0)
4674 				so->so_error = 0;
4675 		} else {
4676 			error = ENOTCONN;
4677 		}
4678 		goto out;
4679 	}
4680 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
4681 		/* we need to wait for data */
4682 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4683 		sctp_misc_ints(SCTP_SORECV_BLOCKSA,
4684 		    0, 0, so->so_rcv.sb_cc, uio->uio_resid);
4685 #endif
4686 		if ((so->so_rcv.sb_cc == 0) &&
4687 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4688 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
4689 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4690 				/*
4691 				 * For the active open side, clear the flags
4692 				 * for re-use; the passive open side is
4693 				 * blocked by connect.
4694 				 */
4695 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4696 					/*
4697 					 * You were aborted, passive side
4698 					 * always hits here
4699 					 */
4700 					error = ECONNRESET;
4701 					/*
4702 					 * You get this once if you are
4703 					 * active open side
4704 					 */
4705 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4706 						/*
4707 						 * Remove flag if on the
4708 						 * active open side
4709 						 */
4710 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4711 					}
4712 				}
4713 				so->so_state &= ~(SS_ISCONNECTING |
4714 				    SS_ISDISCONNECTING |
4715 				    SS_ISCONFIRMING |
4716 				    SS_ISCONNECTED);
4717 				if (error == 0) {
4718 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4719 						error = ENOTCONN;
4720 					} else {
4721 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4722 					}
4723 				}
4724 				goto out;
4725 			}
4726 		}
4727 		error = sbwait(&so->so_rcv);
4728 		if (error) {
4729 			goto out;
4730 		}
4731 		held_length = 0;
4732 		goto restart_nosblocks;
4733 	} else if (so->so_rcv.sb_cc == 0) {
4734 		if (so->so_error) {
4735 			error = so->so_error;
4736 			if ((in_flags & MSG_PEEK) == 0)
4737 				so->so_error = 0;
4738 		} else {
4739 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4740 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4741 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4742 					/*
4743 					 * For the active open side, clear the
4744 					 * flags for re-use; the passive open
4745 					 * side is blocked by connect.
4746 					 */
4747 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4748 						/*
4749 						 * You were aborted, passive
4750 						 * side always hits here
4751 						 */
4752 						error = ECONNRESET;
4753 						/*
4754 						 * You get this once if you
4755 						 * are active open side
4756 						 */
4757 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4758 							/*
4759 							 * Remove flag if on
4760 							 * the active open
4761 							 * side
4762 							 */
4763 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4764 						}
4765 					}
4766 					so->so_state &= ~(SS_ISCONNECTING |
4767 					    SS_ISDISCONNECTING |
4768 					    SS_ISCONFIRMING |
4769 					    SS_ISCONNECTED);
4770 					if (error == 0) {
4771 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4772 							error = ENOTCONN;
4773 						} else {
4774 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4775 						}
4776 					}
4777 					goto out;
4778 				}
4779 			}
4780 			error = EWOULDBLOCK;
4781 		}
4782 		goto out;
4783 	}
4784 	if (hold_sblock == 1) {
4785 		SOCKBUF_UNLOCK(&so->so_rcv);
4786 		hold_sblock = 0;
4787 	}
4788 	/* we possibly have data we can read */
4789 	/* sa_ignore FREED_MEMORY */
4790 	control = TAILQ_FIRST(&inp->read_queue);
4791 	if (control == NULL) {
4792 		/*
4793 		 * This could be happening since the appender did the
4794 		 * increment but has not yet done the tailq insert onto the
4795 		 * read_queue
4796 		 */
4797 		if (hold_rlock == 0) {
4798 			SCTP_INP_READ_LOCK(inp);
4799 			hold_rlock = 1;
4800 		}
4801 		control = TAILQ_FIRST(&inp->read_queue);
4802 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
4803 #ifdef INVARIANTS
4804 			panic("Huh, its non zero and nothing on control?");
4805 #endif
4806 			so->so_rcv.sb_cc = 0;
4807 		}
4808 		SCTP_INP_READ_UNLOCK(inp);
4809 		hold_rlock = 0;
4810 		goto restart;
4811 	}
4812 	if ((control->length == 0) &&
4813 	    (control->do_not_ref_stcb)) {
4814 		/*
4815 		 * Clean up code for freeing assoc that left behind a
4816 		 * pdapi.. maybe a peer in EEOR that just closed after
4817 		 * sending and never indicated a EOR.
4818 		 */
4819 		if (hold_rlock == 0) {
4820 			hold_rlock = 1;
4821 			SCTP_INP_READ_LOCK(inp);
4822 		}
4823 		control->held_length = 0;
4824 		if (control->data) {
4825 			/* Hmm there is data here .. fix */
4826 			struct mbuf *m;
4827 			int cnt = 0;
4828 
4829 			m = control->data;
4830 			while (m) {
4831 				cnt += SCTP_BUF_LEN(m);
4832 				if (SCTP_BUF_NEXT(m) == NULL) {
4833 					control->tail_mbuf = m;
4834 					control->end_added = 1;
4835 				}
4836 				m = SCTP_BUF_NEXT(m);
4837 			}
4838 			control->length = cnt;
4839 		} else {
4840 			/* remove it */
4841 			TAILQ_REMOVE(&inp->read_queue, control, next);
4842 			/* Add back any hidden data */
4843 			sctp_free_remote_addr(control->whoFrom);
4844 			sctp_free_a_readq(stcb, control);
4845 		}
4846 		if (hold_rlock) {
4847 			hold_rlock = 0;
4848 			SCTP_INP_READ_UNLOCK(inp);
4849 		}
4850 		goto restart;
4851 	}
4852 	if (control->length == 0) {
4853 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
4854 		    (filling_sinfo)) {
4855 			/* find a more suitable one than this */
4856 			ctl = TAILQ_NEXT(control, next);
4857 			while (ctl) {
4858 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
4859 				    (ctl->some_taken ||
4860 				    (ctl->spec_flags & M_NOTIFICATION) ||
4861 				    ((ctl->do_not_ref_stcb == 0) &&
4862 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
4863 				    ) {
4864 					/*-
4865 					 * If we have a different TCB next, and there is data
4866 					 * present, and we have already taken some (pdapi), OR we can
4867 					 * ref the tcb and no delivery has started on this stream, we
4868 					 * take it. Note we allow a notification on a different
4869 					 * assoc to be delivered..
4870 					 */
4871 					control = ctl;
4872 					goto found_one;
4873 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
4874 					    (ctl->length) &&
4875 					    ((ctl->some_taken) ||
4876 					    ((ctl->do_not_ref_stcb == 0) &&
4877 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
4878 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
4879 				    ) {
4880 					/*-
4881 					 * If we have the same tcb, and there is data present, and we
4882 					 * have the strm interleave feature present. Then if we have
4883 					 * taken some (pdapi) or we can refer to that tcb AND we have
4884 					 * not started a delivery for this stream, we can take it.
4885 					 * Note we do NOT allow a notification on the same assoc to
4886 					 * be delivered.
4887 					 */
4888 					control = ctl;
4889 					goto found_one;
4890 				}
4891 				ctl = TAILQ_NEXT(ctl, next);
4892 			}
4893 		}
4894 		/*
4895 		 * if we reach here, no suitable replacement is available
4896 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
4897 		 * into our held count, and it's time to sleep again.
4898 		 */
4899 		held_length = so->so_rcv.sb_cc;
4900 		control->held_length = so->so_rcv.sb_cc;
4901 		goto restart;
4902 	}
4903 	/* Clear the held length since there is something to read */
4904 	control->held_length = 0;
4905 	if (hold_rlock) {
4906 		SCTP_INP_READ_UNLOCK(inp);
4907 		hold_rlock = 0;
4908 	}
4909 found_one:
4910 	/*
4911 	 * If we reach here, control has some data for us to read off.
4912 	 * Note that stcb COULD be NULL.
4913 	 */
4914 	control->some_taken = 1;
4915 	if (hold_sblock) {
4916 		SOCKBUF_UNLOCK(&so->so_rcv);
4917 		hold_sblock = 0;
4918 	}
4919 	stcb = control->stcb;
4920 	if (stcb) {
4921 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
4922 		    (control->do_not_ref_stcb == 0)) {
4923 			if (freecnt_applied == 0)
4924 				stcb = NULL;
4925 		} else if (control->do_not_ref_stcb == 0) {
4926 			/* you can't free it on me please */
4927 			/*
4928 			 * The lock on the socket buffer protects us so the
4929 			 * free code will stop. But since we used the
4930 			 * socketbuf lock and the sender uses the tcb_lock
4931 			 * to increment, we need to use the atomic add to
4932 			 * the refcnt
4933 			 */
4934 			atomic_add_int(&stcb->asoc.refcnt, 1);
4935 			freecnt_applied = 1;
4936 			/*
4937 			 * Setup to remember how much we have not yet told
4938 			 * the peer our rwnd has opened up. Note we grab the
4939 			 * value from the tcb from last time. Note too that
4940 			 * sack sending clears this when a sack is sent..
4941 			 * which is fine. Once we hit the rwnd_req, we then
4942 			 * will go to the sctp_user_rcvd() that will not
4943 			 * lock until it KNOWs it MUST send a WUP-SACK.
4944 			 *
4945 			 */
4946 			freed_so_far = stcb->freed_by_sorcv_sincelast;
4947 			stcb->freed_by_sorcv_sincelast = 0;
4948 		}
4949 	}
4950 	if (stcb &&
4951 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
4952 	    control->do_not_ref_stcb == 0) {
4953 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
4954 	}
4955 	/* First lets get off the sinfo and sockaddr info */
4956 	if ((sinfo) && filling_sinfo) {
4957 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
4958 		nxt = TAILQ_NEXT(control, next);
4959 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
4960 			struct sctp_extrcvinfo *s_extra;
4961 
4962 			s_extra = (struct sctp_extrcvinfo *)sinfo;
4963 			if ((nxt) &&
4964 			    (nxt->length)) {
4965 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
4966 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
4967 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
4968 				}
4969 				if (nxt->spec_flags & M_NOTIFICATION) {
4970 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
4971 				}
4972 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
4973 				s_extra->sreinfo_next_length = nxt->length;
4974 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
4975 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
4976 				if (nxt->tail_mbuf != NULL) {
4977 					if (nxt->end_added) {
4978 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
4979 					}
4980 				}
4981 			} else {
4982 				/*
4983 				 * we explicitly zero these, since the memcpy
4984 				 * picked up fields beyond the older sinfo_
4985 				 * members that are on the control's structure
4986 				 * :-D
4987 				 */
4988 				nxt = NULL;
4989 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
4990 				s_extra->sreinfo_next_aid = 0;
4991 				s_extra->sreinfo_next_length = 0;
4992 				s_extra->sreinfo_next_ppid = 0;
4993 				s_extra->sreinfo_next_stream = 0;
4994 			}
4995 		}
4996 		/*
4997 		 * update off the real current cum-ack, if we have an stcb.
4998 		 */
4999 		if (stcb)
5000 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5001 		/*
5002 		 * mask off the high bits, we keep the actual chunk bits in
5003 		 * there.
5004 		 */
5005 		sinfo->sinfo_flags &= 0x00ff;
5006 	}
5007 	if (fromlen && from) {
5008 		struct sockaddr *to;
5009 
5010 #ifdef INET
5011 		cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len);
5012 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5013 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5014 #else
5015 		/* No AF_INET use AF_INET6 */
5016 		cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len);
5017 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5018 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5019 #endif
5020 
5021 		to = from;
5022 #if defined(INET) && defined(INET6)
5023 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
5024 		    (to->sa_family == AF_INET) &&
5025 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5026 			struct sockaddr_in *sin;
5027 			struct sockaddr_in6 sin6;
5028 
5029 			sin = (struct sockaddr_in *)to;
5030 			bzero(&sin6, sizeof(sin6));
5031 			sin6.sin6_family = AF_INET6;
5032 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5033 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);	/* ::ffff:a.b.c.d mapped form */
5034 			bcopy(&sin->sin_addr,
5035 			    &sin6.sin6_addr.s6_addr32[3],
5036 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5037 			sin6.sin6_port = sin->sin_port;
5038 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5039 		}
5040 #endif
5041 #if defined(INET6)
5042 		{
5043 			struct sockaddr_in6 lsa6, *to6;
5044 
5045 			to6 = (struct sockaddr_in6 *)to;
5046 			sctp_recover_scope_mac(to6, (&lsa6));
5047 		}
5048 #endif
5049 	}
5050 	/* now copy out what data we can */
5051 	if (mp == NULL) {
5052 		/* copy out each mbuf in the chain up to length */
5053 get_more_data:
5054 		m = control->data;
5055 		while (m) {
5056 			/* Move out all we can */
5057 			cp_len = (int)uio->uio_resid;
5058 			my_len = (int)SCTP_BUF_LEN(m);
5059 			if (cp_len > my_len) {
5060 				/* not enough in this buf */
5061 				cp_len = my_len;
5062 			}
5063 			if (hold_rlock) {
5064 				SCTP_INP_READ_UNLOCK(inp);
5065 				hold_rlock = 0;
5066 			}
5067 			if (cp_len > 0)
5068 				error = uiomove(mtod(m, char *), cp_len, uio);
5069 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5070 			sctp_misc_ints(SCTP_SORCV_DOESCPY,
5071 			    so->so_rcv.sb_cc,
5072 			    cp_len,
5073 			    0,
5074 			    0);
5075 #endif
5076 			/* re-read */
5077 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5078 				goto release;
5079 			}
5080 			if (stcb &&
5081 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5082 				no_rcv_needed = 1;
5083 			}
5084 			if (error) {
5085 				/* error we are out of here */
5086 				goto release;
5087 			}
5088 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5089 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5090 			    ((control->end_added == 0) ||
5091 			    (control->end_added && (TAILQ_NEXT(control, next) == NULL)))
5092 			    ) {
5093 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5094 				sctp_misc_ints(SCTP_SORCV_DOESLCK,
5095 				    so->so_rcv.sb_cc,
5096 				    cp_len,
5097 				    SCTP_BUF_LEN(m),
5098 				    control->length);
5099 #endif
5100 				SCTP_INP_READ_LOCK(inp);
5101 				hold_rlock = 1;
5102 			}
5103 			if (cp_len == SCTP_BUF_LEN(m)) {
5104 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5105 				sctp_misc_ints(SCTP_SORCV_DOESADJ,
5106 				    so->so_rcv.sb_cc,
5107 				    control->length,
5108 				    cp_len,
5109 				    0);
5110 #endif
5111 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5112 				    (control->end_added)) {
5113 					out_flags |= MSG_EOR;
5114 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5115 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5116 				}
5117 				if (control->spec_flags & M_NOTIFICATION) {
5118 					out_flags |= MSG_NOTIFICATION;
5119 				}
5120 				/* we ate up the mbuf */
5121 				if (in_flags & MSG_PEEK) {
5122 					/* just looking */
5123 					m = SCTP_BUF_NEXT(m);
5124 					copied_so_far += cp_len;
5125 				} else {
5126 					/* dispose of the mbuf */
5127 #ifdef SCTP_SB_LOGGING
5128 					sctp_sblog(&so->so_rcv,
5129 					    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5130 #endif
5131 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5132 #ifdef SCTP_SB_LOGGING
5133 					sctp_sblog(&so->so_rcv,
5134 					    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5135 #endif
5136 					embuf = m;
5137 					copied_so_far += cp_len;
5138 					freed_so_far += cp_len;
5139 					alen = atomic_fetchadd_int(&control->length, -(cp_len));
5140 					if (alen < cp_len) {
5141 						panic("Control length goes negative?");
5142 					}
5143 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5144 					sctp_misc_ints(SCTP_SORCV_PASSBF,
5145 					    so->so_rcv.sb_cc,
5146 					    control->length,
5147 					    0,
5148 					    0);
5149 #endif
5150 					control->data = sctp_m_free(m);
5151 					m = control->data;
5152 					/*
5153 					 * been through it all; we must hold the
5154 					 * sb lock, so it's ok to null the tail
5155 					 */
5156 					if (control->data == NULL) {
5157 #ifdef INVARIANTS
5158 						if ((control->end_added == 0) ||
5159 						    (TAILQ_NEXT(control, next) == NULL)) {
5160 							/*
5161 							 * If the end is not
5162 							 * added, OR the
5163 							 * next is NOT null
5164 							 * we MUST have the
5165 							 * lock.
5166 							 */
5167 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5168 								panic("Hmm we don't own the lock?");
5169 							}
5170 						}
5171 #endif
5172 						control->tail_mbuf = NULL;
5173 #ifdef INVARIANTS
5174 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5175 							panic("end_added, nothing left and no MSG_EOR");
5176 						}
5177 #endif
5178 					}
5179 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5180 					sctp_misc_ints(SCTP_SORCV_ADJD,
5181 					    so->so_rcv.sb_cc,
5182 					    control->length,
5183 					    0,
5184 					    0);
5185 #endif
5186 				}
5187 			} else {
5188 				/* Do we need to trim the mbuf? */
5189 				if (control->spec_flags & M_NOTIFICATION) {
5190 					out_flags |= MSG_NOTIFICATION;
5191 				}
5192 				if ((in_flags & MSG_PEEK) == 0) {
5193 					SCTP_BUF_RESV_UF(m, cp_len);
5194 					SCTP_BUF_LEN(m) -= cp_len;
5195 #ifdef SCTP_SB_LOGGING
5196 					sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5197 #endif
5198 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5199 					if (stcb) {
5200 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5201 					}
5202 					copied_so_far += cp_len;
5203 					embuf = m;
5204 					freed_so_far += cp_len;
5205 #ifdef SCTP_SB_LOGGING
5206 					sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5207 					    SCTP_LOG_SBRESULT, 0);
5208 #endif
5209 					alen = atomic_fetchadd_int(&control->length, -(cp_len));
5210 					if (alen < cp_len) {
5211 						panic("Control length goes negative2?");
5212 					}
5213 				} else {
5214 					copied_so_far += cp_len;
5215 				}
5216 			}
5217 			if ((out_flags & MSG_EOR) ||
5218 			    (uio->uio_resid == 0)
5219 			    ) {
5220 				break;
5221 			}
5222 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5223 			    (control->do_not_ref_stcb == 0) &&
5224 			    (freed_so_far >= rwnd_req)) {
5225 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5226 			}
5227 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5228 			sctp_misc_ints(SCTP_SORCV_BOTWHILE,
5229 			    so->so_rcv.sb_cc,
5230 			    control->length,
5231 			    0,
5232 			    0);
5233 #endif
5234 
5235 		}		/* end while(m) */
5236 		/*
5237 		 * At this point we have looked at it all and we either have
5238 		 * a MSG_EOR, or have read all the user wants... <OR>
5239 		 * control->length == 0.
5240 		 */
5241 		if ((out_flags & MSG_EOR) &&
5242 		    ((in_flags & MSG_PEEK) == 0)) {
5243 			/* we are done with this control */
5244 			if (control->length == 0) {
5245 				if (control->data) {
5246 #ifdef INVARIANTS
5247 					panic("control->data not null at read eor?");
5248 #else
5249 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5250 					sctp_m_freem(control->data);
5251 					control->data = NULL;
5252 #endif
5253 				}
5254 		done_with_control:
5255 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5256 				sctp_misc_ints(SCTP_SORCV_FREECTL,
5257 				    so->so_rcv.sb_cc,
5258 				    0,
5259 				    0,
5260 				    0);
5261 #endif
5262 				if (TAILQ_NEXT(control, next) == NULL) {
5263 					/*
5264 					 * If we don't have a next we need a
5265 					 * lock; if there is a next, the interrupt
5266 					 * is filling ahead of us and we
5267 					 * don't need a lock to remove this
5268 					 * guy (which is the head of the
5269 					 * queue).
5270 					 */
5271 					if (hold_rlock == 0) {
5272 						SCTP_INP_READ_LOCK(inp);
5273 						hold_rlock = 1;
5274 					}
5275 				}
5276 				TAILQ_REMOVE(&inp->read_queue, control, next);
5277 				/* Add back any hidden data */
5278 				if (control->held_length) {
5279 					held_length = 0;
5280 					control->held_length = 0;
5281 					wakeup_read_socket = 1;
5282 				}
5283 				if (control->aux_data) {
5284 					sctp_m_free(control->aux_data);
5285 					control->aux_data = NULL;
5286 				}
5287 				no_rcv_needed = control->do_not_ref_stcb;
5288 				sctp_free_remote_addr(control->whoFrom);
5289 				control->data = NULL;
5290 				sctp_free_a_readq(stcb, control);
5291 				control = NULL;
5292 				if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0))
5293 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5294 
5295 			} else {
5296 				/*
5297 				 * The user did not read all of this
5298 				 * message, turn off the returned MSG_EOR
5299 				 * since we are leaving more behind on the
5300 				 * control to read.
5301 				 */
5302 #ifdef INVARIANTS
5303 				if (control->end_added && (control->data == NULL) &&
5304 				    (control->tail_mbuf == NULL)) {
5305 					panic("Gak, control->length is corrupt?");
5306 				}
5307 #endif
5308 				no_rcv_needed = control->do_not_ref_stcb;
5309 				out_flags &= ~MSG_EOR;
5310 			}
5311 		}
5312 		if (out_flags & MSG_EOR) {
5313 			goto release;
5314 		}
5315 		if ((uio->uio_resid == 0) ||
5316 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5317 		    ) {
5318 			goto release;
5319 		}
5320 		/*
5321 		 * If I hit here the receiver wants more and this message is
5322 		 * NOT done (pd-api). So two questions. Can we block? if not
5323 		 * we are done. Did the user NOT set MSG_WAITALL?
5324 		 */
5325 		if (block_allowed == 0) {
5326 			goto release;
5327 		}
5328 		/*
5329 		 * We need to wait for more data. A few things: - We don't
5330 		 * sbunlock() so we don't get someone else reading. - We
5331 		 * must be sure to account for the case where what is added
5332 		 * is NOT for our control when we wake up.
5333 		 */
5334 
5335 		/*
5336 		 * Do we need to tell the transport a rwnd update might be
5337 		 * needed before we go to sleep?
5338 		 */
5339 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5340 		    ((freed_so_far >= rwnd_req) &&
5341 		    (control->do_not_ref_stcb == 0) &&
5342 		    (no_rcv_needed == 0))) {
5343 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5344 		}
5345 wait_some_more:
5346 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5347 			goto release;
5348 		}
5349 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5350 			goto release;
5351 
5352 		if (hold_rlock == 1) {
5353 			SCTP_INP_READ_UNLOCK(inp);
5354 			hold_rlock = 0;
5355 		}
5356 		if (hold_sblock == 0) {
5357 			SOCKBUF_LOCK(&so->so_rcv);
5358 			hold_sblock = 1;
5359 		}
5360 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5361 		if (stcb)
5362 			sctp_misc_ints(SCTP_SORECV_BLOCKSB,
5363 			    freed_so_far,
5364 			    stcb->asoc.my_rwnd,
5365 			    so->so_rcv.sb_cc,
5366 			    uio->uio_resid);
5367 		else
5368 			sctp_misc_ints(SCTP_SORECV_BLOCKSB,
5369 			    freed_so_far,
5370 			    0,
5371 			    so->so_rcv.sb_cc,
5372 			    uio->uio_resid);
5373 #endif
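		/*
		 * Only block if nothing beyond what we have already noted
		 * (control->held_length) has arrived in the receive buffer.
		 */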
5374 		if (so->so_rcv.sb_cc <= control->held_length) {
5375 			error = sbwait(&so->so_rcv);
5376 			if (error) {
5377 				goto release;
5378 			}
5379 			control->held_length = 0;
5380 		}
5381 		if (hold_sblock) {
5382 			SOCKBUF_UNLOCK(&so->so_rcv);
5383 			hold_sblock = 0;
5384 		}
5385 		if (control->length == 0) {
5386 			/* still nothing here */
5387 			if (control->end_added == 1) {
5388 				/* he aborted, or is done, i.e. did a shutdown */
5389 				out_flags |= MSG_EOR;
5390 				if (control->pdapi_aborted) {
5391 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5392 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5393 
5394 					out_flags |= MSG_TRUNC;
5395 				} else {
5396 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5397 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5398 				}
5399 				goto done_with_control;
5400 			}
5401 			if (so->so_rcv.sb_cc > held_length) {
5402 				control->held_length = so->so_rcv.sb_cc;
5403 				held_length = 0;
5404 			}
5405 			goto wait_some_more;
5406 		} else if (control->data == NULL) {
5407 			/*
5408 			 * we must re-sync since data is probably being
5409 			 * added
5410 			 */
5411 			SCTP_INP_READ_LOCK(inp);
5412 			if ((control->length > 0) && (control->data == NULL)) {
5413 				/*
5414 				 * big trouble.. we have the lock and it's
5415 				 * corrupt?
5416 				 */
5417 				panic("Impossible data==NULL length !=0");
5418 			}
5419 			SCTP_INP_READ_UNLOCK(inp);
5420 			/* We will fall around to get more data */
5421 		}
5422 		goto get_more_data;
5423 	} else {
5424 		/*-
5425 		 * Give caller back the mbuf chain,
5426 		 * store in uio_resid the length
5427 		 */
5428 		wakeup_read_socket = 0;
5429 		if ((control->end_added == 0) ||
5430 		    (TAILQ_NEXT(control, next) == NULL)) {
5431 			/* Need to get rlock */
5432 			if (hold_rlock == 0) {
5433 				SCTP_INP_READ_LOCK(inp);
5434 				hold_rlock = 1;
5435 			}
5436 		}
5437 		if (control->end_added) {
5438 			out_flags |= MSG_EOR;
5439 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5440 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5441 		}
5442 		if (control->spec_flags & M_NOTIFICATION) {
5443 			out_flags |= MSG_NOTIFICATION;
5444 		}
5445 		uio->uio_resid = control->length;
5446 		*mp = control->data;
5447 		m = control->data;
5448 		while (m) {
5449 #ifdef SCTP_SB_LOGGING
5450 			sctp_sblog(&so->so_rcv,
5451 			    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5452 #endif
5453 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5454 			freed_so_far += SCTP_BUF_LEN(m);
5455 #ifdef SCTP_SB_LOGGING
5456 			sctp_sblog(&so->so_rcv,
5457 			    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5458 #endif
5459 			m = SCTP_BUF_NEXT(m);
5460 		}
5461 		control->data = control->tail_mbuf = NULL;
5462 		control->length = 0;
5463 		if (out_flags & MSG_EOR) {
5464 			/* Done with this control */
5465 			goto done_with_control;
5466 		}
5467 	}
5468 release:
5469 	if (hold_rlock == 1) {
5470 		SCTP_INP_READ_UNLOCK(inp);
5471 		hold_rlock = 0;
5472 	}
5473 	if (hold_sblock == 1) {
5474 		SOCKBUF_UNLOCK(&so->so_rcv);
5475 		hold_sblock = 0;
5476 	}
5477 	sbunlock(&so->so_rcv);
5478 	sockbuf_lock = 0;
5479 
5480 release_unlocked:
5481 	if (hold_sblock) {
5482 		SOCKBUF_UNLOCK(&so->so_rcv);
5483 		hold_sblock = 0;
5484 	}
5485 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5486 		if ((freed_so_far >= rwnd_req) &&
5487 		    (control && (control->do_not_ref_stcb == 0)) &&
5488 		    (no_rcv_needed == 0))
5489 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5490 	}
5491 	if (msg_flags)
5492 		*msg_flags |= out_flags;
5493 out:
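	/*
	 * Partial (non-EOR) read with extended rcvinfo enabled: flag in the
	 * returned sinfo that no next-message information is available.
	 */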
5494 	if (((out_flags & MSG_EOR) == 0) &&
5495 	    ((in_flags & MSG_PEEK) == 0) &&
5496 	    (sinfo) &&
5497 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5498 		struct sctp_extrcvinfo *s_extra;
5499 
5500 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5501 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5502 	}
5503 	if (hold_rlock == 1) {
5504 		SCTP_INP_READ_UNLOCK(inp);
5505 		hold_rlock = 0;
5506 	}
5507 	if (hold_sblock) {
5508 		SOCKBUF_UNLOCK(&so->so_rcv);
5509 		hold_sblock = 0;
5510 	}
5511 	if (sockbuf_lock) {
5512 		sbunlock(&so->so_rcv);
5513 	}
5514 	if (freecnt_applied) {
5515 		/*
5516 		 * The lock on the socket buffer protects us so the free
5517 		 * code will stop. But since we used the socketbuf lock and
5518 		 * the sender uses the tcb_lock to increment, we need to use
5519 		 * the atomic add to the refcnt.
5520 		 */
5521 		if (stcb == NULL) {
5522 			panic("stcb for refcnt has gone NULL?");
5523 		}
5524 		atomic_add_int(&stcb->asoc.refcnt, -1);
5525 		freecnt_applied = 0;
5526 		/* Save the value back for next time */
5527 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5528 	}
5529 #ifdef SCTP_RECV_RWND_LOGGING
5530 	if (stcb) {
5531 		sctp_misc_ints(SCTP_SORECV_DONE,
5532 		    freed_so_far,
5533 		    ((uio) ? (slen - uio->uio_resid) : slen),
5534 		    stcb->asoc.my_rwnd,
5535 		    so->so_rcv.sb_cc);
5536 	} else {
5537 		sctp_misc_ints(SCTP_SORECV_DONE,
5538 		    freed_so_far,
5539 		    ((uio) ? (slen - uio->uio_resid) : slen),
5540 		    0,
5541 		    so->so_rcv.sb_cc);
5542 	}
5543 #endif
5544 	if (wakeup_read_socket) {
5545 		sctp_sorwakeup(inp, so);
5546 	}
5547 	return (error);
5548 }
5549 
5550 
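/*
 * When SCTP_MBUF_LOGGING is defined, sctp_m_free()/sctp_m_freem() wrap the
 * stock m_free()/m_freem(): any mbuf carrying external storage is recorded
 * in the mbuf log (SCTP_MBUF_IFREE) before it is released.
 */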
5551 #ifdef SCTP_MBUF_LOGGING
5552 struct mbuf *
5553 sctp_m_free(struct mbuf *m)
5554 {
5555 	if (SCTP_BUF_IS_EXTENDED(m)) {
5556 		sctp_log_mb(m, SCTP_MBUF_IFREE);
5557 	}
5558 	return (m_free(m));
5559 }
5560 
5561 void
5562 sctp_m_freem(struct mbuf *mb)
5563 {
5564 	while (mb != NULL)
5565 		mb = sctp_m_free(mb);
5566 }
5567 
5568 #endif
5569 
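/*
 * Note: sctp_dynamic_set_primary() does not walk the associations itself.
 * It queues an SCTP_SET_PRIM_ADDR work item for the matching sctp_ifa on
 * the address workqueue and starts the ADDR_WQ timer, leaving the actual
 * peer-set-primary requests to the iterator.
 */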
5570 int
5571 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5572 {
5573 	/*
5574 	 * Given a local address. For all associations that hold the
5575 	 * address, request a peer-set-primary.
5576 	 */
5577 	struct sctp_ifa *ifa;
5578 	struct sctp_laddr *wi;
5579 
5580 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5581 	if (ifa == NULL) {
5582 		return (EADDRNOTAVAIL);
5583 	}
5584 	/*
5585 	 * Now that we have the ifa we must awaken the iterator with this
5586 	 * message.
5587 	 */
5588 	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
5589 	if (wi == NULL) {
5590 		return (ENOMEM);
5591 	}
5592 	/* Now incr the count and init the wi structure */
5593 	SCTP_INCR_LADDR_COUNT();
5594 	bzero(wi, sizeof(*wi));
5595 	wi->ifa = ifa;
5596 	wi->action = SCTP_SET_PRIM_ADDR;
5597 	atomic_add_int(&ifa->refcount, 1);
5598 
5599 	/* Now add it to the work queue */
5600 	SCTP_IPI_ITERATOR_WQ_LOCK();
5601 	/*
5602 	 * Should this really be a tailq? As it is we will process the
5603 	 * newest first :-0
5604 	 */
5605 	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
5606 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5607 	    (struct sctp_inpcb *)NULL,
5608 	    (struct sctp_tcb *)NULL,
5609 	    (struct sctp_nets *)NULL);
5610 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
5611 	return (0);
5612 }
5613 
5614 
5615 
5616 
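/*
 * soreceive() entry point for SCTP sockets: a wrapper around
 * sctp_sorecvmsg() that optionally returns the peer address through *psa
 * and, when SCTP_PCB_FLAGS_RECVDATAIOEVNT is enabled and controlp is
 * supplied, the sndrcvinfo as a control-message mbuf built by
 * sctp_build_ctl_nchunk().
 */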
5617 int
5618 sctp_soreceive(struct socket *so,
5619     struct sockaddr **psa,
5620     struct uio *uio,
5621     struct mbuf **mp0,
5622     struct mbuf **controlp,
5623     int *flagsp)
5624 {
5625 	int error, fromlen;
5626 	uint8_t sockbuf[256];
5627 	struct sockaddr *from;
5628 	struct sctp_extrcvinfo sinfo;
5629 	int filling_sinfo = 1;
5630 	struct sctp_inpcb *inp;
5631 
5632 	inp = (struct sctp_inpcb *)so->so_pcb;
5633 	/* pick up the assoc we are reading from */
5634 	if (inp == NULL) {
5635 		return (EINVAL);
5636 	}
5637 	if ((sctp_is_feature_off(inp,
5638 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5639 	    (controlp == NULL)) {
5640 		/* user does not want the sndrcv ctl */
5641 		filling_sinfo = 0;
5642 	}
5643 	if (psa) {
5644 		from = (struct sockaddr *)sockbuf;
5645 		fromlen = sizeof(sockbuf);
5646 		from->sa_len = 0;
5647 	} else {
5648 		from = NULL;
5649 		fromlen = 0;
5650 	}
5651 
5652 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5653 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5654 	if ((controlp) && (filling_sinfo)) {
5655 		/* copy back the sinfo in a CMSG format */
5656 		if (filling_sinfo)
5657 			*controlp = sctp_build_ctl_nchunk(inp,
5658 			    (struct sctp_sndrcvinfo *)&sinfo);
5659 		else
5660 			*controlp = NULL;
5661 	}
5662 	if (psa) {
5663 		/* copy back the address info */
5664 		if (from && from->sa_len) {
5665 			*psa = sodupsockaddr(from, M_NOWAIT);
5666 		} else {
5667 			*psa = NULL;
5668 		}
5669 	}
5670 	return (error);
5671 }
5672 
5673 
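/*
 * Variant of sctp_soreceive(): it never hands back a raw mbuf chain, and
 * the sndrcvinfo is returned as a flat buffer built by
 * sctp_build_ctl_cchunk() with its size reported through *controllen;
 * freeing that buffer is the caller's responsibility.
 */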
5674 int
5675 sctp_l_soreceive(struct socket *so,
5676     struct sockaddr **name,
5677     struct uio *uio,
5678     char **controlp,
5679     int *controllen,
5680     int *flag)
5681 {
5682 	int error, fromlen;
5683 	uint8_t sockbuf[256];
5684 	struct sockaddr *from;
5685 	struct sctp_extrcvinfo sinfo;
5686 	int filling_sinfo = 1;
5687 	struct sctp_inpcb *inp;
5688 
5689 	inp = (struct sctp_inpcb *)so->so_pcb;
5690 	/* pick up the assoc we are reading from */
5691 	if (inp == NULL) {
5692 		return (EINVAL);
5693 	}
5694 	if ((sctp_is_feature_off(inp,
5695 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5696 	    (controlp == NULL)) {
5697 		/* user does not want the sndrcv ctl */
5698 		filling_sinfo = 0;
5699 	}
5700 	if (name) {
5701 		from = (struct sockaddr *)sockbuf;
5702 		fromlen = sizeof(sockbuf);
5703 		from->sa_len = 0;
5704 	} else {
5705 		from = NULL;
5706 		fromlen = 0;
5707 	}
5708 
5709 	error = sctp_sorecvmsg(so, uio,
5710 	    (struct mbuf **)NULL,
5711 	    from, fromlen, flag,
5712 	    (struct sctp_sndrcvinfo *)&sinfo,
5713 	    filling_sinfo);
5714 	if ((controlp) && (filling_sinfo)) {
5715 		/*
5716 		 * copy back the sinfo in a CMSG format; note that the caller
5717 		 * has responsibility for freeing the memory.
5718 		 */
5719 		if (filling_sinfo)
5720 			*controlp = sctp_build_ctl_cchunk(inp,
5721 			    controllen,
5722 			    (struct sctp_sndrcvinfo *)&sinfo);
5723 	}
5724 	if (name) {
5725 		/* copy back the address info */
5726 		if (from && from->sa_len) {
5727 			*name = sodupsockaddr(from, M_WAIT);
5728 		} else {
5729 			*name = NULL;
5730 		}
5731 	}
5732 	return (error);
5733 }
5734 
5735 
5736 
5737 
5738 
5739 
5740 
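/*
 * Add each AF_INET/AF_INET6 address in the packed sockaddr list to the
 * association as a confirmed remote address.  If sctp_add_remote_addr()
 * fails the association has already been freed (the caller must not touch
 * stcb afterwards) and *error is set to ENOBUFS.  Returns the number of
 * addresses added.
 */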
5741 int
5742 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, int totaddr, int *error)
5743 {
5744 	int added = 0;
5745 	int i;
5746 	struct sctp_inpcb *inp;
5747 	struct sockaddr *sa;
5748 	size_t incr = 0;
5749 
5750 	sa = addr;
5751 	inp = stcb->sctp_ep;
5752 	*error = 0;
5753 	for (i = 0; i < totaddr; i++) {
5754 		if (sa->sa_family == AF_INET) {
5755 			incr = sizeof(struct sockaddr_in);
5756 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
5757 				/* assoc gone, no unlock needed */
5758 				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
5759 				*error = ENOBUFS;
5760 				goto out_now;
5761 			}
5762 			added++;
5763 		} else if (sa->sa_family == AF_INET6) {
5764 			incr = sizeof(struct sockaddr_in6);
5765 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
5766 				/* assoc gone, no unlock needed */
5767 				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
5768 				*error = ENOBUFS;
5769 				goto out_now;
5770 			}
5771 			added++;
5772 		}
5773 		sa = (struct sockaddr *)((caddr_t)sa + incr);
5774 	}
5775 out_now:
5776 	return (added);
5777 }
5778 
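/*
 * Pre-scan the packed sockaddr list for connectx: count the v4 and v6
 * entries, reject v4-mapped IPv6 addresses, and trim *totaddr at the first
 * unknown address family or when the 'max' byte limit would be exceeded.
 * If any address already maps to an existing association, that stcb is
 * returned; otherwise NULL.
 */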
5779 struct sctp_tcb *
5780 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, int *totaddr,
5781     int *num_v4, int *num_v6, int *error, int max)
5782 {
5783 	struct sockaddr *sa;
5784 	struct sctp_tcb *stcb = NULL;
5785 	size_t incr, at, i;
5786 
5787 	at = incr = 0;
5788 	sa = addr;
5789 	*error = *num_v6 = *num_v4 = 0;
5790 	/* account and validate addresses */
5791 	for (i = 0; i < *totaddr; i++) {
5792 		if (sa->sa_family == AF_INET) {
5793 			(*num_v4) += 1;
5794 			incr = sizeof(struct sockaddr_in);
5795 		} else if (sa->sa_family == AF_INET6) {
5796 			struct sockaddr_in6 *sin6;
5797 
5798 			sin6 = (struct sockaddr_in6 *)sa;
5799 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
5800 				/* Must be non-mapped for connectx */
5801 				*error = EINVAL;
5802 				return (NULL);
5803 			}
5804 			(*num_v6) += 1;
5805 			incr = sizeof(struct sockaddr_in6);
5806 		} else {
5807 			*totaddr = i;
5808 			/* we are done */
5809 			break;
5810 		}
5811 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
5812 		if (stcb != NULL) {
5813 			/* Already have or am bringing up an association */
5814 			return (stcb);
5815 		}
5816 		if ((at + incr) > max) {
5817 			*totaddr = i;
5818 			break;
5819 		}
5820 		sa = (struct sockaddr *)((caddr_t)sa + incr);
5821 	}
5822 	return ((struct sctp_tcb *)NULL);
5823 }
5824