xref: /freebsd/sys/netinet/sctp_timer.c (revision acd3428b7d3e94cef0e1881c868cb4b131d4ff41)
1 /*-
2  * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ipsec.h"
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
39 #include "opt_inet.h"
40 #include "opt_sctp.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/domain.h>
47 #include <sys/protosw.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/sysctl.h>
56 
57 #include <sys/limits.h>
58 
59 #include <net/if.h>
60 #include <net/if_types.h>
61 #include <net/route.h>
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #define _IP_VHL
65 #include <netinet/ip.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip_var.h>
69 
70 #ifdef INET6
71 #include <netinet/ip6.h>
72 #include <netinet6/ip6_var.h>
73 #include <netinet6/scope6_var.h>
74 #endif				/* INET6 */
75 
76 #include <netinet/sctp_pcb.h>
77 
78 #ifdef IPSEC
79 #include <netinet6/ipsec.h>
80 #include <netkey/key.h>
81 #endif				/* IPSEC */
82 #ifdef INET6
83 #include <netinet6/sctp6_var.h>
84 #endif
85 #include <netinet/sctp_os.h>
86 #include <netinet/sctp_var.h>
87 #include <netinet/sctp_timer.h>
88 #include <netinet/sctputil.h>
89 #include <netinet/sctp_output.h>
90 #include <netinet/sctp_header.h>
91 #include <netinet/sctp_indata.h>
92 #include <netinet/sctp_asconf.h>
93 #include <netinet/sctp_input.h>
94 
95 #include <netinet/sctp.h>
96 #include <netinet/sctp_uio.h>
97 
98 
99 #ifdef SCTP_DEBUG
100 extern uint32_t sctp_debug_on;
101 
102 #endif				/* SCTP_DEBUG */
103 
104 
105 extern unsigned int sctp_early_fr_msec;
106 
107 void
108 sctp_early_fr_timer(struct sctp_inpcb *inp,
109     struct sctp_tcb *stcb,
110     struct sctp_nets *net)
111 {
112 	struct sctp_tmit_chunk *chk, *tp2;
113 	struct timeval now, min_wait, tv;
114 	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
115 
116 	/* an early FR (Fast Retransmit) is occurring. */
117 	SCTP_GETTIME_TIMEVAL(&now);
118 	/* get the current RTT estimate in milliseconds (converted to usec below) */
119 	if (net->lastsa == 0) {
120 		/* Hmm, no RTT estimate yet? */
121 		cur_rtt = stcb->asoc.initial_rto >> 2;
122 	} else {
124 		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
125 	}
126 	if (cur_rtt < sctp_early_fr_msec) {
127 		cur_rtt = sctp_early_fr_msec;
128 	}
129 	cur_rtt *= 1000;
130 	tv.tv_sec = cur_rtt / 1000000;
131 	tv.tv_usec = cur_rtt % 1000000;
132 	min_wait = now;
133 	timevalsub(&min_wait, &tv);
134 	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
135 		/*
136 		 * if we hit here, we don't have enough seconds on the clock
137 		 * to account for the RTO. We just let the lower seconds be
138 		 * the bounds and don't worry about it. This may mean we
139 		 * will mark a lot more than we should.
140 		 */
141 		min_wait.tv_sec = min_wait.tv_usec = 0;
142 	}
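	/*
	 * Worked example of the window above (illustrative numbers only):
	 * with no RTT sample yet and a 3 s initial RTO, cur_rtt = 3000 >> 2
	 * = 750 ms; after the sctp_early_fr_msec floor and the * 1000
	 * conversion, tv = {0 s, 750000 us}, so min_wait = now - 750 ms.
	 * Only chunks sent before min_wait are old enough to mark below.
	 */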
143 	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
144 	for (; chk != NULL; chk = tp2) {
145 		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
146 		if (chk->whoTo != net) {
147 			continue;
148 		}
149 		if (chk->sent == SCTP_DATAGRAM_RESEND)
150 			cnt_resend++;
151 		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
152 		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
153 			/* pending, may need retran */
154 			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
155 				/*
156 				 * this chunk was sent within the last
157 				 * min_wait window: too recent to mark, so
158 				 * skip it and keep scanning older chunks.
159 				 */
160 				continue;
161 			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
162 				/*
163 				 * same second as the boundary; the
164 				 * microseconds decide.
165 				 */
166 				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
167 					/*
168 					 * it was sent after our boundary
169 					 * time: skip it.
170 					 */
171 					continue;
172 				}
173 			}
174 #ifdef SCTP_EARLYFR_LOGGING
175 			sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
176 			    4, SCTP_FR_MARKED_EARLY);
177 #endif
178 			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
179 			chk->sent = SCTP_DATAGRAM_RESEND;
180 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
181 			/* double the booked size (bump book_size_scale) since we are doing an early FR */
182 			chk->book_size_scale++;
183 			cnt += chk->send_size;
184 			if ((cnt + net->flight_size) > net->cwnd) {
185 				/* Mark all we could possibly resend */
186 				break;
187 			}
188 		}
189 	}
190 	if (cnt) {
191 #ifdef SCTP_CWND_MONITOR
192 		int old_cwnd;
193 
194 		old_cwnd = net->cwnd;
195 #endif
196 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
197 		/*
198 		 * make a small adjustment to cwnd and force the sender into
199 		 * congestion avoidance (CA).
200 		 */
201 		if (net->cwnd > net->mtu)
202 			/* drop down one MTU after sending */
203 			net->cwnd -= net->mtu;
204 		if (net->cwnd < net->ssthresh)
205 			/* still in slow start: move to CA by lowering ssthresh */
206 			net->ssthresh = net->cwnd - 1;
207 #ifdef SCTP_CWND_MONITOR
208 		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
209 #endif
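		/*
		 * Illustration (assumed numbers): if cwnd was 10 * MTU, the
		 * drop above leaves 9 * MTU and pins ssthresh just below
		 * cwnd, so the very next growth step is additive
		 * (congestion avoidance) rather than slow start.
		 */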
210 	} else if (cnt_resend) {
211 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
212 	}
213 	/* Restart it? */
214 	if (net->flight_size < net->cwnd) {
215 		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
216 		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
217 	}
218 }
219 
220 void
221 sctp_audit_retranmission_queue(struct sctp_association *asoc)
222 {
223 	struct sctp_tmit_chunk *chk;
224 
225 #ifdef SCTP_DEBUG
226 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
227 		printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
228 		    asoc->sent_queue_retran_cnt,
229 		    asoc->sent_queue_cnt);
230 	}
231 #endif				/* SCTP_DEBUG */
232 	asoc->sent_queue_retran_cnt = 0;
233 	asoc->sent_queue_cnt = 0;
234 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
235 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
236 			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
237 		}
238 		asoc->sent_queue_cnt++;
239 	}
240 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
241 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
242 			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
243 		}
244 	}
245 #ifdef SCTP_DEBUG
246 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
247 		printf("Audit completes retran:%d onqueue:%d\n",
248 		    asoc->sent_queue_retran_cnt,
249 		    asoc->sent_queue_cnt);
250 	}
251 #endif				/* SCTP_DEBUG */
252 }
253 
254 int
255 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
256     struct sctp_nets *net, uint16_t threshold)
257 {
258 	if (net) {
259 		net->error_count++;
260 #ifdef SCTP_DEBUG
261 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
262 			printf("Error count for %p now %d thresh:%d\n",
263 			    net, net->error_count,
264 			    net->failure_threshold);
265 		}
266 #endif				/* SCTP_DEBUG */
267 		if (net->error_count > net->failure_threshold) {
268 			/* We had a threshold failure */
269 			if (net->dest_state & SCTP_ADDR_REACHABLE) {
270 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
271 				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
272 				if (net == stcb->asoc.primary_destination) {
273 					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
274 				}
275 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
276 				    stcb,
277 				    SCTP_FAILED_THRESHOLD,
278 				    (void *)net);
279 			}
280 		}
281 		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
282 		 *********ROUTING CODE
283 		 */
284 		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
285 		 *********ROUTING CODE
286 		 */
287 	}
288 	if (stcb == NULL)
289 		return (0);
290 
291 	if (net) {
292 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
293 			stcb->asoc.overall_error_count++;
294 		}
295 	} else {
296 		stcb->asoc.overall_error_count++;
297 	}
298 #ifdef SCTP_DEBUG
299 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
300 		printf("Overall error count for %p now %d thresh:%u state:%x\n",
301 		    &stcb->asoc,
302 		    stcb->asoc.overall_error_count,
303 		    (uint32_t) threshold,
304 		    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
305 	}
306 #endif				/* SCTP_DEBUG */
307 	/*
308 	 * We specifically do not do >= to give the assoc one more chance
309 	 * before we fail it (with threshold 5, counts 1-5 survive; 6 aborts).
310 	 */
311 	if (stcb->asoc.overall_error_count > threshold) {
312 		/* Abort notification sends a ULP notify */
313 		struct mbuf *oper;
314 
315 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
316 		    0, M_DONTWAIT, 1, MT_DATA);
317 		if (oper) {
318 			struct sctp_paramhdr *ph;
319 			uint32_t *ippp;
320 
321 			oper->m_len = sizeof(struct sctp_paramhdr) +
322 			    sizeof(uint32_t);
323 			ph = mtod(oper, struct sctp_paramhdr *);
324 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
325 			ph->param_length = htons(oper->m_len);
326 			ippp = (uint32_t *) (ph + 1);
327 			*ippp = htonl(0x40000001);
328 		}
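		/*
		 * The mbuf above carries an operational error TLV: cause 13
		 * (Protocol Violation, per the SCTP spec), length 8, followed
		 * by the 32-bit marker 0x40000001 that identifies this abort
		 * site in the code (a local debugging convention).
		 */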
329 		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
330 		return (1);
331 	}
332 	return (0);
333 }
334 
335 struct sctp_nets *
336 sctp_find_alternate_net(struct sctp_tcb *stcb,
337     struct sctp_nets *net,
338     int highest_ssthresh)
339 {
340 	/* Find and return an alternate network if possible */
341 	struct sctp_nets *alt, *mnet, *hthresh = NULL;
342 	int once;
343 	uint32_t val = 0;
344 
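	/*
	 * Selection strategy (summary added for clarity): with CMT the
	 * caller may ask for the reachable, confirmed net with the largest
	 * ssthresh (ties broken pseudo-randomly); otherwise we round-robin
	 * to the next reachable, confirmed net that has a route, and if
	 * none exists (dormant state) we simply rotate destinations.
	 */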
345 	if (stcb->asoc.numnets == 1) {
346 		/* No others but net */
347 		return (TAILQ_FIRST(&stcb->asoc.nets));
348 	}
349 	if (highest_ssthresh) {
350 		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
351 			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
352 			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
353 			    ) {
354 				/*
355 				 * will skip ones that are not-reachable or
356 				 * unconfirmed
357 				 */
358 				continue;
359 			}
360 			if (mnet->ssthresh > val) {
361 				hthresh = mnet;
362 				val = mnet->ssthresh;
363 			} else if (val == mnet->ssthresh) {
364 				uint32_t rndval;
365 				uint8_t this_random;
366 
367 				if (stcb->asoc.hb_random_idx > 3) {
368 					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
369 					memcpy(stcb->asoc.hb_random_values, &rndval,
370 					    sizeof(stcb->asoc.hb_random_values));
371 					this_random = stcb->asoc.hb_random_values[0];
372 					stcb->asoc.hb_random_idx = 0;
373 					stcb->asoc.hb_ect_randombit = 0;
374 				} else {
375 					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
376 					stcb->asoc.hb_random_idx++;
377 					stcb->asoc.hb_ect_randombit = 0;
378 				}
379 				if (this_random % 2) {
380 					hthresh = mnet;
381 					val = mnet->ssthresh;
382 				}
383 			}
384 		}
385 		if (hthresh) {
386 			return (hthresh);
387 		}
388 	}
389 	mnet = net;
390 	once = 0;
391 
392 	if (mnet == NULL) {
393 		mnet = TAILQ_FIRST(&stcb->asoc.nets);
394 	}
395 	do {
396 		alt = TAILQ_NEXT(mnet, sctp_next);
397 		if (alt == NULL) {
398 			once++;
399 			if (once > 1) {
400 				break;
401 			}
402 			alt = TAILQ_FIRST(&stcb->asoc.nets);
403 		}
404 		if (alt->ro.ro_rt == NULL) {
405 			struct sockaddr_in6 *sin6;
406 
407 			sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
408 			if (sin6->sin6_family == AF_INET6) {
409 				(void)sa6_embedscope(sin6, ip6_use_defzone);
410 			}
411 			rtalloc_ign((struct route *)&alt->ro, 0UL);
412 			if (sin6->sin6_family == AF_INET6) {
413 				(void)sa6_recoverscope(sin6);
414 			}
415 			alt->src_addr_selected = 0;
416 		}
417 		if (
418 		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
419 		    (alt->ro.ro_rt != NULL) &&
420 		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
421 		    ) {
422 			/* Found a reachable address */
423 			break;
424 		}
425 		mnet = alt;
426 	} while (alt != NULL);
427 
428 	if (alt == NULL) {
429 		/* Case where NO in-service network exists (dormant state) */
430 		/* we rotate destinations */
431 		once = 0;
432 		mnet = net;
433 		do {
434 			alt = TAILQ_NEXT(mnet, sctp_next);
435 			if (alt == NULL) {
436 				once++;
437 				if (once > 1) {
438 					break;
439 				}
440 				alt = TAILQ_FIRST(&stcb->asoc.nets);
441 			}
442 			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
443 			    (alt != net)) {
444 				/* Found an alternate address */
445 				break;
446 			}
447 			mnet = alt;
448 		} while (alt != NULL);
449 	}
450 	if (alt == NULL) {
451 		return (net);
452 	}
453 	return (alt);
454 }
455 
456 static void
457 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
458     struct sctp_nets *net,
459     int win_probe,
460     int num_marked)
461 {
462 	net->RTO <<= 1;
463 	if (net->RTO > stcb->asoc.maxrto) {
464 		net->RTO = stcb->asoc.maxrto;
465 	}
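	/*
	 * Backoff illustration (assuming the common 3 s initial RTO and a
	 * 60 s maxrto): successive timeouts give 3 -> 6 -> 12 -> 24 -> 48
	 * -> 60 -> 60 ... seconds, i.e. exponential growth capped at
	 * asoc.maxrto.
	 */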
466 	if ((win_probe == 0) && num_marked) {
467 		/* We don't apply penalty to window probe scenarios */
468 #ifdef SCTP_CWND_MONITOR
469 		int old_cwnd = net->cwnd;
470 
471 #endif
472 		net->ssthresh = net->cwnd >> 1;
473 		if (net->ssthresh < (net->mtu << 1)) {
474 			net->ssthresh = (net->mtu << 1);
475 		}
476 		net->cwnd = net->mtu;
477 		/* floor of 1 mtu */
478 		if (net->cwnd < net->mtu)
479 			net->cwnd = net->mtu;
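		/*
		 * Multiplicative decrease, by the numbers (illustrative):
		 * cwnd 20000 with a 1500-byte MTU gives ssthresh 10000 and
		 * cwnd 1500; the floors keep ssthresh >= 2 * MTU and
		 * cwnd >= 1 MTU no matter how small cwnd was.
		 */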
480 #ifdef SCTP_CWND_MONITOR
481 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
482 #endif
483 
484 		net->partial_bytes_acked = 0;
485 	}
486 }
487 
488 extern int sctp_peer_chunk_oh;
489 
490 static int
491 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
492     struct sctp_nets *net,
493     struct sctp_nets *alt,
494     int window_probe,
495     int *num_marked)
496 {
497 
498 	/*
499 	 * Mark all chunks (well, not quite all) that were sent to *net for
500 	 * retransmission and move them to alt as their destination as well.
501 	 * We only mark chunks that have been outstanding long enough to
502 	 * have received feedback.
503 	 */
504 	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
505 	struct sctp_nets *lnets;
506 	struct timeval now, min_wait, tv;
507 	int cur_rtt;
508 	int orig_rwnd, audit_tf, num_mk, fir;
509 	unsigned int cnt_mk;
510 	uint32_t orig_flight;
511 	uint32_t tsnlast, tsnfirst;
512 
513 	/*
514 	 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
515 	 * then pick dest with largest ssthresh for any retransmission.
516 	 * (iyengar@cis.udel.edu, 2005/08/12)
517 	 */
518 	if (sctp_cmt_on_off) {
519 		alt = sctp_find_alternate_net(stcb, net, 1);
520 		/*
521 		 * CUCv2: If a different dest is picked for the
522 		 * retransmission, then new (rtx-)pseudo_cumack needs to be
523 		 * tracked for orig dest. Let CUCv2 track new (rtx-)
524 		 * pseudo-cumack always.
525 		 */
526 		net->find_pseudo_cumack = 1;
527 		net->find_rtx_pseudo_cumack = 1;
528 	}
529 	/* none in flight now */
530 	audit_tf = 0;
531 	fir = 0;
532 	/*
533 	 * figure out how long a data chunk must be pending before we can
534 	 * mark it for retransmission.
535 	 */
536 	SCTP_GETTIME_TIMEVAL(&now);
537 	/* get the current RTT estimate in milliseconds (converted to usec below) */
538 	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
539 	cur_rtt *= 1000;
540 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
541 	sctp_log_fr(cur_rtt,
542 	    stcb->asoc.peers_rwnd,
543 	    window_probe,
544 	    SCTP_FR_T3_MARK_TIME);
545 	sctp_log_fr(net->flight_size,
546 	    callout_pending(&net->fr_timer.timer),
547 	    callout_active(&net->fr_timer.timer),
548 	    SCTP_FR_CWND_REPORT);
549 	sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
550 #endif
551 	tv.tv_sec = cur_rtt / 1000000;
552 	tv.tv_usec = cur_rtt % 1000000;
553 	min_wait = now;
554 	timevalsub(&min_wait, &tv);
555 	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
556 		/*
557 		 * if we hit here, we don't have enough seconds on the clock
558 		 * to account for the RTO. We just let the lower seconds be
559 		 * the bounds and don't worry about it. This may mean we
560 		 * will mark a lot more than we should.
561 		 */
562 		min_wait.tv_sec = min_wait.tv_usec = 0;
563 	}
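	/*
	 * As in sctp_early_fr_timer() above: min_wait = now - cur_rtt, so
	 * only chunks outstanding for at least the current smoothed
	 * round-trip estimate (long enough to have drawn feedback) are
	 * eligible for marking below.
	 */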
564 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
565 	sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
566 	sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
567 #endif
568 	/*
569 	 * Our rwnd will be incorrect here since we are not yet adding back
570 	 * the per-chunk overhead, but we fix that up below.
571 	 */
572 	orig_rwnd = stcb->asoc.peers_rwnd;
573 	orig_flight = net->flight_size;
574 	net->rto_pending = 0;
575 	net->fast_retran_ip = 0;
576 	/* Now on to each chunk */
577 	num_mk = cnt_mk = 0;
578 	tsnfirst = tsnlast = 0;
579 	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
580 	for (; chk != NULL; chk = tp2) {
581 		tp2 = TAILQ_NEXT(chk, sctp_next);
582 		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
583 		    chk->rec.data.TSN_seq,
584 		    MAX_TSN)) ||
585 		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
586 			/* Strange case our list got out of order? */
587 			printf("Our list is out of order?\n");
588 			panic("Out of order list");
589 		}
590 		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
591 			/*
592 			 * found one to mark: anything below DATAGRAM_ACKED
593 			 * cannot be a skipped or already-delivered TSN; it
594 			 * is either already set for retransmission or one
595 			 * that needs retransmission.
596 			 */
597 
598 			/* validate it's been outstanding long enough */
600 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
601 			sctp_log_fr(chk->rec.data.TSN_seq,
602 			    chk->sent_rcv_time.tv_sec,
603 			    chk->sent_rcv_time.tv_usec,
604 			    SCTP_FR_T3_MARK_TIME);
605 #endif
606 			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
607 				/*
608 				 * we have reached a chunk that was sent
609 				 * some seconds past our min.. forget it we
610 				 * will find no more to send.
611 				 */
612 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
613 				sctp_log_fr(0,
614 				    chk->sent_rcv_time.tv_sec,
615 				    chk->sent_rcv_time.tv_usec,
616 				    SCTP_FR_T3_STOPPED);
617 #endif
618 				continue;
619 			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
620 			    (window_probe == 0)) {
621 				/*
622 				 * we must look at the micro seconds to
623 				 * know.
624 				 */
625 				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
626 					/*
627 					 * ok it was sent after our boundary
628 					 * time.
629 					 */
630 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
631 					sctp_log_fr(0,
632 					    chk->sent_rcv_time.tv_sec,
633 					    chk->sent_rcv_time.tv_usec,
634 					    SCTP_FR_T3_STOPPED);
635 #endif
636 					continue;
637 				}
638 			}
639 			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
640 				/* Is it expired? */
641 				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
642 				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
643 				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
644 					/* Yes so drop it */
645 					if (chk->data) {
646 						sctp_release_pr_sctp_chunk(stcb,
647 						    chk,
648 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
649 						    &stcb->asoc.sent_queue);
650 					}
651 					continue;
652 				}
653 			}
654 			if (PR_SCTP_RTX_ENABLED(chk->flags)) {
655 				/* Has it been retransmitted tv_sec times? */
656 				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
657 					if (chk->data) {
658 						sctp_release_pr_sctp_chunk(stcb,
659 						    chk,
660 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
661 						    &stcb->asoc.sent_queue);
662 					}
663 					continue;
664 				}
665 			}
666 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
667 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
668 				num_mk++;
669 				if (fir == 0) {
670 					fir = 1;
671 					tsnfirst = chk->rec.data.TSN_seq;
672 				}
673 				tsnlast = chk->rec.data.TSN_seq;
674 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
675 				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
676 				    0, SCTP_FR_T3_MARKED);
677 
678 #endif
679 			}
680 			if (stcb->asoc.total_flight_count > 0)
681 				stcb->asoc.total_flight_count--;
682 			chk->sent = SCTP_DATAGRAM_RESEND;
683 			SCTP_STAT_INCR(sctps_markedretrans);
684 			net->flight_size -= chk->book_size;
685 			stcb->asoc.peers_rwnd += chk->send_size;
686 			stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
687 
688 			/* reset the TSN for striking and other FR stuff */
689 			chk->rec.data.doing_fast_retransmit = 0;
690 			/* Clear any time so NO RTT is being done */
691 			chk->do_rtt = 0;
692 			if (alt != net) {
693 				sctp_free_remote_addr(chk->whoTo);
694 				chk->no_fr_allowed = 1;
695 				chk->whoTo = alt;
696 				atomic_add_int(&alt->ref_count, 1);
697 			} else {
698 				chk->no_fr_allowed = 0;
699 				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
700 					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
701 				} else {
702 					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
703 				}
704 			}
705 			if (sctp_cmt_on_off == 1) {
706 				chk->no_fr_allowed = 1;
707 			}
708 		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
709 			/* remember highest acked one */
710 			could_be_sent = chk;
711 		}
712 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
713 			cnt_mk++;
714 		}
715 	}
716 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
717 	sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
718 #endif
719 
720 	if (stcb->asoc.total_flight >= (orig_flight - net->flight_size)) {
721 		stcb->asoc.total_flight -= (orig_flight - net->flight_size);
722 	} else {
723 		stcb->asoc.total_flight = 0;
724 		stcb->asoc.total_flight_count = 0;
725 		audit_tf = 1;
726 	}
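	/*
	 * audit_tf set above means the per-net accounting went inconsistent
	 * (the subtraction would have underflowed); the audit block further
	 * down zeroes every net's flight_size and rebuilds the totals from
	 * the sent queue.
	 */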
727 
728 #ifdef SCTP_DEBUG
729 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
730 		if (num_mk) {
731 			printf("LAST TSN marked was %x\n", tsnlast);
732 			printf("Num marked for retransmission was %d peer-rwd:%ld\n",
733 			    num_mk, (u_long)stcb->asoc.peers_rwnd);
739 		}
740 	}
741 #endif
742 	*num_marked = num_mk;
743 	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
744 		/* fix it so we retransmit the highest acked anyway */
745 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
746 		cnt_mk++;
747 		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
748 	}
749 	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
750 #ifdef INVARIANTS
751 		printf("Local Audit says there are %d for retran asoc cnt:%d\n",
752 		    cnt_mk, stcb->asoc.sent_queue_retran_cnt);
753 #endif
754 #ifndef SCTP_AUDITING_ENABLED
755 		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
756 #endif
757 	}
758 	/* Now check for an ECN Echo that may be stranded */
759 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
760 		if ((chk->whoTo == net) &&
761 		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
762 			sctp_free_remote_addr(chk->whoTo);
763 			chk->whoTo = alt;
764 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
765 				chk->sent = SCTP_DATAGRAM_RESEND;
766 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
767 			}
768 			atomic_add_int(&alt->ref_count, 1);
769 		}
770 	}
771 	if (audit_tf) {
772 #ifdef SCTP_DEBUG
773 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
774 			printf("Audit total flight due to negative value net:%p\n",
775 			    net);
776 		}
777 #endif				/* SCTP_DEBUG */
778 		stcb->asoc.total_flight = 0;
779 		stcb->asoc.total_flight_count = 0;
780 		/* Clear all networks flight size */
781 		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
782 			lnets->flight_size = 0;
783 #ifdef SCTP_DEBUG
784 			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
785 				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
786 				    lnets, lnets->cwnd, lnets->ssthresh);
787 			}
788 #endif				/* SCTP_DEBUG */
789 		}
790 		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
791 			if (chk->sent < SCTP_DATAGRAM_RESEND) {
792 				stcb->asoc.total_flight += chk->book_size;
793 				chk->whoTo->flight_size += chk->book_size;
794 				stcb->asoc.total_flight_count++;
795 			}
796 		}
797 	}
798 	/*
799 	 * Set up the ECN nonce re-sync point. We do this since
800 	 * retransmissions are NOT set up for ECN. This means that, due to
801 	 * Karn's rule, we don't know the total of the peer's ECN bits.
802 	 */
803 	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
804 	if (chk == NULL) {
805 		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
806 	} else {
807 		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
808 	}
809 	stcb->asoc.nonce_wait_for_ecne = 0;
810 	stcb->asoc.nonce_sum_check = 0;
811 	/* The caller ignores the return value; we always return 0 here. */
812 	return (0);
813 }
814 
815 static void
816 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
817     struct sctp_nets *net,
818     struct sctp_nets *alt)
819 {
820 	struct sctp_association *asoc;
821 	struct sctp_stream_out *outs;
822 	struct sctp_tmit_chunk *chk;
823 	struct sctp_stream_queue_pending *sp;
824 
825 	if (net == alt)
826 		/* nothing to do */
827 		return;
828 
829 	asoc = &stcb->asoc;
830 
831 	/*
832 	 * now through all the streams checking for chunks sent to our bad
833 	 * network.
834 	 */
835 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
836 		/* now clean up any chunks here */
837 		TAILQ_FOREACH(sp, &outs->outqueue, next) {
838 			if (sp->net == net) {
839 				sctp_free_remote_addr(sp->net);
840 				sp->net = alt;
841 				atomic_add_int(&alt->ref_count, 1);
842 			}
843 		}
844 	}
845 	/* Now check the pending queue */
846 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
847 		if (chk->whoTo == net) {
848 			sctp_free_remote_addr(chk->whoTo);
849 			chk->whoTo = alt;
850 			atomic_add_int(&alt->ref_count, 1);
851 		}
852 	}
853 
854 }
855 
856 int
857 sctp_t3rxt_timer(struct sctp_inpcb *inp,
858     struct sctp_tcb *stcb,
859     struct sctp_nets *net)
860 {
861 	struct sctp_nets *alt;
862 	int win_probe, num_mk;
863 
864 #ifdef SCTP_FR_LOGGING
865 	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
866 #ifdef SCTP_CWND_LOGGING
867 	{
868 		struct sctp_nets *lnet;
869 
870 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
871 			if (net == lnet) {
872 				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
873 			} else {
874 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
875 			}
876 		}
877 	}
878 #endif
879 #endif
880 	/* Find an alternate and mark those for retransmission */
881 	if ((stcb->asoc.peers_rwnd == 0) &&
882 	    (stcb->asoc.total_flight < net->mtu)) {
883 		SCTP_STAT_INCR(sctps_timowindowprobe);
884 		win_probe = 1;
885 	} else {
886 		win_probe = 0;
887 	}
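	/*
	 * win_probe == 1 here means the peer advertised a zero rwnd and we
	 * have less than one MTU in flight: this T3 expiry is probing for a
	 * window update rather than recovering from loss, so the penalties
	 * below are applied differently.
	 */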
888 	alt = sctp_find_alternate_net(stcb, net, 0);
889 	sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
890 	/* FR Loss recovery just ended with the T3. */
891 	stcb->asoc.fast_retran_loss_recovery = 0;
892 
893 	/* CMT FR loss recovery ended with the T3 */
894 	net->fast_retran_loss_recovery = 0;
895 
896 	/*
897 	 * setup the sat loss recovery that prevents satellite cwnd advance.
898 	 */
899 	stcb->asoc.sat_t3_loss_recovery = 1;
900 	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
901 
902 	/* Backoff the timer and cwnd */
903 	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
904 	if (win_probe == 0) {
905 		/* We don't do normal threshold management on window probes */
906 		if (sctp_threshold_management(inp, stcb, net,
907 		    stcb->asoc.max_send_times)) {
908 			/* Association was destroyed */
909 			return (1);
910 		} else {
911 			if (net != stcb->asoc.primary_destination) {
912 				/* send an immediate HB if our RTO is stale */
913 				struct timeval now;
914 				unsigned int ms_goneby;
915 
916 				SCTP_GETTIME_TIMEVAL(&now);
917 				if (net->last_sent_time.tv_sec) {
918 					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
919 				} else {
920 					ms_goneby = 0;
921 				}
922 				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
923 					/*
924 					 * no recent feedback in an RTO or
925 					 * more; request an RTT update
926 					 */
927 					sctp_send_hb(stcb, 1, net);
928 				}
929 			}
930 		}
931 	} else {
932 		/*
933 		 * For a window probe we don't penalize the nets but only
934 		 * the association. This may fail it if SACKs are not coming
935 		 * back. If SACKs are coming with rwnd locked at 0, we will
936 		 * continue to hold things waiting for rwnd to rise.
937 		 */
938 		if (sctp_threshold_management(inp, stcb, NULL,
939 		    stcb->asoc.max_send_times)) {
940 			/* Association was destroyed */
941 			return (1);
942 		}
943 	}
944 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
945 		/* Move all pending over too */
946 		sctp_move_all_chunks_to_alt(stcb, net, alt);
947 		/* Was it our primary? */
948 		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
949 			/*
950 			 * Yes, note it as such and find an alternate. Note:
951 			 * the HB code must use this to reset the primary
952 			 * if it goes active AND if someone does a
953 			 * change-primary, then this flag must be cleared
954 			 * from any net structures.
955 			 */
956 			if (sctp_set_primary_addr(stcb,
957 			    (struct sockaddr *)NULL,
958 			    alt) == 0) {
959 				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
960 				net->src_addr_selected = 0;
961 			}
962 		}
963 	}
964 	/*
965 	 * Special case for the cookie-echoed state: we don't do output but
966 	 * must await the COOKIE-ACK before retransmitting
967 	 */
968 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
969 		/*
970 		 * Here we just reset the timer and start again since we
971 		 * have not established the asoc
972 		 */
973 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
974 		return (0);
975 	}
976 	if (stcb->asoc.peer_supports_prsctp) {
977 		struct sctp_tmit_chunk *lchk;
978 
979 		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
980 		/* C3. See if we need to send a Fwd-TSN */
981 		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
982 		    stcb->asoc.last_acked_seq, MAX_TSN)) {
983 			/*
984 			 * ISSUE with ECN, see FWD-TSN processing for notes
985 			 * on issues that will occur when the ECN NONCE
986 			 * stuff is put into SCTP for cross checking.
987 			 */
988 			send_forward_tsn(stcb, &stcb->asoc);
989 			if (lchk) {
990 				/* Assure a timer is up */
991 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
992 			}
993 		}
994 	}
995 #ifdef SCTP_CWND_MONITOR
996 	sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
997 #endif
998 	return (0);
999 }
1000 
1001 int
1002 sctp_t1init_timer(struct sctp_inpcb *inp,
1003     struct sctp_tcb *stcb,
1004     struct sctp_nets *net)
1005 {
1006 	/* bump the thresholds */
1007 	if (stcb->asoc.delayed_connection) {
1008 		/*
1009 		 * special hook for delayed connection. The library did NOT
1010 		 * complete the rest of its sends.
1011 		 */
1012 		stcb->asoc.delayed_connection = 0;
1013 		sctp_send_initiate(inp, stcb);
1014 		return (0);
1015 	}
1016 	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
1017 		return (0);
1018 	}
1019 	if (sctp_threshold_management(inp, stcb, net,
1020 	    stcb->asoc.max_init_times)) {
1021 		/* Association was destroyed */
1022 		return (1);
1023 	}
1024 	stcb->asoc.dropped_special_cnt = 0;
1025 	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
1026 	if (stcb->asoc.initial_init_rto_max < net->RTO) {
1027 		net->RTO = stcb->asoc.initial_init_rto_max;
1028 	}
1029 	if (stcb->asoc.numnets > 1) {
1030 		/* If we have more than one addr use it */
1031 		struct sctp_nets *alt;
1032 
1033 		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
1034 		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
1035 			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
1036 			stcb->asoc.primary_destination = alt;
1037 		}
1038 	}
1039 	/* Send out a new init */
1040 	sctp_send_initiate(inp, stcb);
1041 	return (0);
1042 }
1043 
1044 /*
1045  * For cookie and asconf we actually need to find and mark for resend, then
1046  * increment the resend counter (after all the threshold management stuff of
1047  * course).
1048  */
1049 int
1050 sctp_cookie_timer(struct sctp_inpcb *inp,
1051     struct sctp_tcb *stcb,
1052     struct sctp_nets *net)
1053 {
1054 	struct sctp_nets *alt;
1055 	struct sctp_tmit_chunk *cookie;
1056 
1057 	/* first before all else we must find the cookie */
1058 	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
1059 		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
1060 			break;
1061 		}
1062 	}
1063 	if (cookie == NULL) {
1064 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
1065 			/* FOOBAR! */
1066 			struct mbuf *oper;
1067 
1068 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1069 			    0, M_DONTWAIT, 1, MT_DATA);
1070 			if (oper) {
1071 				struct sctp_paramhdr *ph;
1072 				uint32_t *ippp;
1073 
1074 				oper->m_len = sizeof(struct sctp_paramhdr) +
1075 				    sizeof(uint32_t);
1076 				ph = mtod(oper, struct sctp_paramhdr *);
1077 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1078 				ph->param_length = htons(oper->m_len);
1079 				ippp = (uint32_t *) (ph + 1);
1080 				*ippp = htonl(0x40000002);
1081 			}
1082 			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
1083 			    oper);
1084 		} else {
1085 #ifdef INVARIANTS
1086 			panic("Cookie timer expires in wrong state?");
1087 #else
1088 			printf("Strange: cookie-echo timer expired in state %d, not COOKIE_ECHOED\n", SCTP_GET_STATE(&stcb->asoc));
1089 			return (0);
1090 #endif
1091 		}
1092 		return (0);
1093 	}
1094 	/* Ok we found the cookie, threshold management next */
1095 	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
1096 	    stcb->asoc.max_init_times)) {
1097 		/* Assoc is over */
1098 		return (1);
1099 	}
1100 	/*
1101 	 * threshold management cleared; now let's back off the address and
1102 	 * select an alternate
1103 	 */
1104 	stcb->asoc.dropped_special_cnt = 0;
1105 	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
1106 	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
1107 	if (alt != cookie->whoTo) {
1108 		sctp_free_remote_addr(cookie->whoTo);
1109 		cookie->whoTo = alt;
1110 		atomic_add_int(&alt->ref_count, 1);
1111 	}
1112 	/* Now mark the retran info */
1113 	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
1114 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1115 	}
1116 	cookie->sent = SCTP_DATAGRAM_RESEND;
1117 	/*
1118 	 * The output routine will kick the cookie out again. Note we don't
1119 	 * mark any data chunks for retran here, so FR (or a send timer)
1120 	 * will need to move those.
1121 	 */
1122 	return (0);
1123 }
1124 
1125 int
1126 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1127     struct sctp_nets *net)
1128 {
1129 	struct sctp_nets *alt;
1130 	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
1131 
1132 	if (stcb->asoc.stream_reset_outstanding == 0) {
1133 		return (0);
1134 	}
1135 	/* find the existing STRRESET; we use the seq number we sent out on */
1136 	sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
1137 	if (strrst == NULL) {
1138 		return (0);
1139 	}
1140 	/* do threshold management */
1141 	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
1142 	    stcb->asoc.max_send_times)) {
1143 		/* Assoc is over */
1144 		return (1);
1145 	}
1146 	/*
1147 	 * threshold management cleared; now let's back off the address and
1148 	 * select an alternate
1149 	 */
1150 	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1151 	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
1152 	sctp_free_remote_addr(strrst->whoTo);
1153 	strrst->whoTo = alt;
1154 	atomic_add_int(&alt->ref_count, 1);
1155 
1156 	/* See if an ECN Echo is also stranded */
1157 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1158 		if ((chk->whoTo == net) &&
1159 		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1160 			sctp_free_remote_addr(chk->whoTo);
1161 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
1162 				chk->sent = SCTP_DATAGRAM_RESEND;
1163 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1164 			}
1165 			chk->whoTo = alt;
1166 			atomic_add_int(&alt->ref_count, 1);
1167 		}
1168 	}
1169 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1170 		/*
1171 		 * If the address went unreachable, we need to move to
1172 		 * alternates for ALL chunks in queue
1173 		 */
1174 		sctp_move_all_chunks_to_alt(stcb, net, alt);
1175 	}
1176 	/* mark the retran info */
1177 	if (strrst->sent != SCTP_DATAGRAM_RESEND)
1178 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1179 	strrst->sent = SCTP_DATAGRAM_RESEND;
1180 
1181 	/* restart the timer */
1182 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1183 	return (0);
1184 }
1185 
1186 int
1187 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1188     struct sctp_nets *net)
1189 {
1190 	struct sctp_nets *alt;
1191 	struct sctp_tmit_chunk *asconf, *chk;
1192 
1193 	/* is this the first send, or a retransmission? */
1194 	if (stcb->asoc.asconf_sent == 0) {
1195 		/* compose a new ASCONF chunk and send it */
1196 		sctp_send_asconf(stcb, net);
1197 	} else {
1198 		/* Retransmission of the existing ASCONF needed... */
1199 
1200 		/* find the existing ASCONF */
1201 		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1202 		    sctp_next) {
1203 			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
1204 				break;
1205 			}
1206 		}
1207 		if (asconf == NULL) {
1208 			return (0);
1209 		}
1210 		/* do threshold management */
1211 		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1212 		    stcb->asoc.max_send_times)) {
1213 			/* Assoc is over */
1214 			return (1);
1215 		}
1216 		/*
1217 		 * PETER? FIX? How will the following code ever run? If
1218 		 * max_send_times is hit, threshold management will blow away
1219 		 * the association?
1220 		 */
1221 		if (asconf->snd_count > stcb->asoc.max_send_times) {
1222 			/*
1223 			 * Something is rotten: the peer is not responding to
1224 			 * ASCONFs but maybe is to data, e.g. it is not
1225 			 * properly handling the chunk-type upper bits. Mark
1226 			 * this peer as ASCONF-incapable and clean up.
1227 			 */
1228 #ifdef SCTP_DEBUG
1229 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1230 				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1231 			}
1232 #endif				/* SCTP_DEBUG */
1233 			sctp_asconf_cleanup(stcb, net);
1234 			return (0);
1235 		}
1236 		/*
1237 		 * threshold management cleared; now let's back off the
1238 		 * address and select an alternate
1239 		 */
1240 		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1241 		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
1242 		sctp_free_remote_addr(asconf->whoTo);
1243 		asconf->whoTo = alt;
1244 		atomic_add_int(&alt->ref_count, 1);
1245 
1246 		/* See if an ECN Echo is also stranded */
1247 		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1248 			if ((chk->whoTo == net) &&
1249 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1250 				sctp_free_remote_addr(chk->whoTo);
1251 				chk->whoTo = alt;
1252 				if (chk->sent != SCTP_DATAGRAM_RESEND) {
1253 					chk->sent = SCTP_DATAGRAM_RESEND;
1254 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1255 				}
1256 				atomic_add_int(&alt->ref_count, 1);
1257 			}
1258 		}
1259 		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1260 			/*
1261 			 * If the address went unreachable, we need to move
1262 			 * to alternates for ALL chunks in queue
1263 			 */
1264 			sctp_move_all_chunks_to_alt(stcb, net, alt);
1265 		}
1266 		/* mark the retran info */
1267 		if (asconf->sent != SCTP_DATAGRAM_RESEND)
1268 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1269 		asconf->sent = SCTP_DATAGRAM_RESEND;
1270 	}
1271 	return (0);
1272 }
1273 
1274 /*
1275  * For the shutdown and shutdown-ack, we do not keep one around on the
1276  * control queue. This means we must generate a new one and call the general
1277  * chunk output routine, AFTER having done threshold management.
1278  */
1279 int
1280 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1281     struct sctp_nets *net)
1282 {
1283 	struct sctp_nets *alt;
1284 
1285 	/* first, threshold management */
1286 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1287 		/* Assoc is over */
1288 		return (1);
1289 	}
1290 	/* second select an alternative */
1291 	alt = sctp_find_alternate_net(stcb, net, 0);
1292 
1293 	/* third, queue a SHUTDOWN for the chosen net */
1294 	if (alt) {
1295 		sctp_send_shutdown(stcb, alt);
1296 	} else {
1297 		/*
1298 		 * if alt is NULL, there is no dest to send to??
1299 		 */
1300 		return (0);
1301 	}
1302 	/* fourth restart timer */
1303 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1304 	return (0);
1305 }
1306 
1307 int
1308 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1309     struct sctp_nets *net)
1310 {
1311 	struct sctp_nets *alt;
1312 
1313 	/* first, threshold management */
1314 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1315 		/* Assoc is over */
1316 		return (1);
1317 	}
1318 	/* second select an alternative */
1319 	alt = sctp_find_alternate_net(stcb, net, 0);
1320 
1321 	/* third, queue a SHUTDOWN-ACK for the chosen net */
1322 	sctp_send_shutdown_ack(stcb, alt);
1323 
1324 	/* fourth restart timer */
1325 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1326 	return (0);
1327 }
1328 
1329 static void
1330 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1331     struct sctp_tcb *stcb)
1332 {
1333 	struct sctp_stream_out *outs;
1334 	struct sctp_stream_queue_pending *sp;
1335 	unsigned int chks_in_queue = 0;
1336 	int being_filled = 0;
1337 
1338 	/*
1339 	 * This function is ONLY called when the send/sent queues are empty.
1340 	 */
1341 	if ((stcb == NULL) || (inp == NULL))
1342 		return;
1343 
1344 	if (stcb->asoc.sent_queue_retran_cnt) {
1345 		printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1346 		    stcb->asoc.sent_queue_retran_cnt);
1347 		stcb->asoc.sent_queue_retran_cnt = 0;
1348 	}
1349 	SCTP_TCB_SEND_LOCK(stcb);
1350 	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1351 		int i, cnt = 0;
1352 
1353 		/* Check to see if a spoke fell off the wheel */
1354 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1355 			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
1356 				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
1357 				cnt++;
1358 			}
1359 		}
1360 		if (cnt) {
1361 			/* yep, we lost a spoke or two */
1362 			printf("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
1363 		} else {
1364 			/* no spokes lost, */
1365 			stcb->asoc.total_output_queue_size = 0;
1366 		}
1367 		SCTP_TCB_SEND_UNLOCK(stcb);
1368 		return;
1369 	}
1370 	SCTP_TCB_SEND_UNLOCK(stcb);
1371 	/* Check to see if some data queued, if so report it */
1372 	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1373 		if (!TAILQ_EMPTY(&outs->outqueue)) {
1374 			TAILQ_FOREACH(sp, &outs->outqueue, next) {
1375 				if (sp->msg_is_complete)
1376 					being_filled++;
1377 				chks_in_queue++;
1378 			}
1379 		}
1380 	}
1381 	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1382 		printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1383 		    stcb->asoc.stream_queue_cnt, chks_in_queue);
1384 	}
1385 	if (chks_in_queue) {
1386 		/* call the output queue function */
1387 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1388 		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1389 		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1390 			/*
1391 			 * Probably should go in and make it go back through
1392 			 * and add fragments allowed
1393 			 */
1394 			if (being_filled == 0) {
1395 				printf("Still nothing moved %d chunks are stuck\n",
1396 				    chks_in_queue);
1397 			}
1398 		}
1399 	} else {
1400 		printf("Found no chunks on any queue tot:%lu\n",
1401 		    (u_long)stcb->asoc.total_output_queue_size);
1402 		stcb->asoc.total_output_queue_size = 0;
1403 	}
1404 }
1405 
1406 int
1407 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1408     struct sctp_nets *net, int cnt_of_unconf)
1409 {
1410 	if (net) {
1411 		if (net->hb_responded == 0) {
1412 			sctp_backoff_on_timeout(stcb, net, 1, 0);
1413 		}
1414 		/* Zero PBA, if it needs it */
1415 		if (net->partial_bytes_acked) {
1416 			net->partial_bytes_acked = 0;
1417 		}
1418 	}
1419 	if ((stcb->asoc.total_output_queue_size > 0) &&
1420 	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1421 	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1422 		sctp_audit_stream_queues_for_size(inp, stcb);
1423 	}
1424 	/* Send a new HB; this will do threshold management and pick a new dest */
1425 	if (cnt_of_unconf == 0) {
1426 		if (sctp_send_hb(stcb, 0, NULL) < 0) {
1427 			return (1);
1428 		}
1429 	} else {
1430 		/*
1431 		 * this will send out extra hb's up to maxburst if there are
1432 		 * any unconfirmed addresses.
1433 		 */
1434 		int cnt_sent = 0;
1435 
1436 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1437 			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1438 			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
1439 				cnt_sent++;
1440 				if (sctp_send_hb(stcb, 1, net) == 0) {
1441 					break;
1442 				}
1443 				if (cnt_sent >= stcb->asoc.max_burst)
1444 					break;
1445 			}
1446 		}
1447 	}
1448 	return (0);
1449 }
1450 
1451 int
1452 sctp_is_hb_timer_running(struct sctp_tcb *stcb)
1453 {
1454 	if (callout_pending(&stcb->asoc.hb_timer.timer)) {
1455 		/* it's running */
1456 		return (1);
1457 	} else {
1458 		/* nope */
1459 		return (0);
1460 	}
1461 }
1462 
1463 int
1464 sctp_is_sack_timer_running(struct sctp_tcb *stcb)
1465 {
1466 	if (callout_pending(&stcb->asoc.dack_timer.timer)) {
1467 		/* it's running */
1468 		return (1);
1469 	} else {
1470 		/* nope */
1471 		return (0);
1472 	}
1473 }
1474 
1475 
1476 #define SCTP_NUMBER_OF_MTU_SIZES 18
1477 static uint32_t mtu_sizes[] = {
1478 	68,
1479 	296,
1480 	508,
1481 	512,
1482 	544,
1483 	576,
1484 	1006,
1485 	1492,
1486 	1500,
1487 	1536,
1488 	2002,
1489 	2048,
1490 	4352,
1491 	4464,
1492 	8166,
1493 	17914,
1494 	32000,
1495 	65535
1496 };
1497 
1498 
1499 static uint32_t
1500 sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
1501 {
1502 	/* select another MTU that is just bigger than this one */
1503 	int i;
1504 
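	/*
	 * Example (illustrative): sctp_getnext_mtu(inp, 1500) walks the
	 * plateau table above and returns 1536; at the top of the table
	 * (cur_mtu >= 65535) there is nothing larger, so cur_mtu itself
	 * comes back and the caller leaves net->mtu alone.
	 */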
1505 	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1506 		if (cur_mtu < mtu_sizes[i]) {
1507 			/* found the next MTU plateau above the current one */
1508 			return (mtu_sizes[i]);
1509 		}
1510 	}
1511 	/* no table entry is bigger; return the current (highest allowable) MTU */
1512 	return (cur_mtu);
1513 }
1514 
1515 
1516 void
1517 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1518     struct sctp_tcb *stcb,
1519     struct sctp_nets *net)
1520 {
1521 	uint32_t next_mtu;
1522 
1523 	/* see if the MTU can be raised; if so we also restart the timer below */
1524 	next_mtu = sctp_getnext_mtu(inp, net->mtu);
1525 	if (next_mtu <= net->mtu) {
1526 		/* already at the top of the table; nothing to do */
1527 		return;
1528 	}
1529 	if (net->ro.ro_rt != NULL) {
1530 		/*
1531 		 * only if we have a route and interface do we set anything.
1532 		 * Note we always restart the timer though just in case it
1533 		 * is updated (i.e. the ifp) or route/ifp is populated.
1534 		 */
1535 		if (net->ro.ro_rt->rt_ifp != NULL) {
1536 			if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
1537 				/* ok it will fit out the door */
1538 				net->mtu = next_mtu;
1539 			}
1540 		}
1541 	}
1542 	/* restart the timer */
1543 	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1544 }
1545 
1546 void
1547 sctp_autoclose_timer(struct sctp_inpcb *inp,
1548     struct sctp_tcb *stcb,
1549     struct sctp_nets *net)
1550 {
1551 	struct timeval tn, *tim_touse;
1552 	struct sctp_association *asoc;
1553 	int ticks_gone_by;
1554 
1555 	SCTP_GETTIME_TIMEVAL(&tn);
1556 	if (stcb->asoc.sctp_autoclose_ticks &&
1557 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1558 		/* Auto close is on */
1559 		asoc = &stcb->asoc;
1560 		/* pick the time to use */
1561 		if (asoc->time_last_rcvd.tv_sec >
1562 		    asoc->time_last_sent.tv_sec) {
1563 			tim_touse = &asoc->time_last_rcvd;
1564 		} else {
1565 			tim_touse = &asoc->time_last_sent;
1566 		}
1567 		/* Has enough time transpired to autoclose? */
1568 		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
1569 		if ((ticks_gone_by > 0) &&
1570 		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1571 			/*
1572 			 * autoclose time has hit: call the output routine
1573 			 * (which should have nothing to do) just to be SURE
1574 			 * we don't have hanging data. We can then safely
1575 			 * check the queues and know that we are clear to
1576 			 * send shutdown
1577 			 */
1578 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
1579 			/* Are we clean? */
1580 			if (TAILQ_EMPTY(&asoc->send_queue) &&
1581 			    TAILQ_EMPTY(&asoc->sent_queue)) {
1582 				/*
1583 				 * there is nothing queued to send, so I'm
1584 				 * done...
1585 				 */
1586 				if (SCTP_GET_STATE(asoc) !=
1587 				    SCTP_STATE_SHUTDOWN_SENT) {
1588 					/* only send SHUTDOWN 1st time thru */
1589 					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1590 					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1591 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1592 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1593 					    stcb->sctp_ep, stcb,
1594 					    asoc->primary_destination);
1595 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1596 					    stcb->sctp_ep, stcb,
1597 					    asoc->primary_destination);
1598 				}
1599 			}
1600 		} else {
1601 			/*
1602 			 * Not time to auto-close yet; reset the timeout to
1603 			 * check again later
1604 			 */
1605 			int tmp;
1606 
1607 			/* fool the timer startup to use the time left */
1608 			tmp = asoc->sctp_autoclose_ticks;
1609 			asoc->sctp_autoclose_ticks -= ticks_gone_by;
1610 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1611 			    net);
1612 			/* restore the real tick value */
1613 			asoc->sctp_autoclose_ticks = tmp;
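			/*
			 * Example of the trick above (assumed numbers): with
			 * autoclose at 120 ticks and 45 ticks of idle time
			 * already gone, the timer is started with 75 ticks,
			 * then the stored value is put back to 120.
			 */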
1614 		}
1615 	}
1616 }
1617 
1618 
1619 void
1620 sctp_iterator_timer(struct sctp_iterator *it)
1621 {
1622 	int iteration_count = 0;
1623 
1624 	/*
1625 	 * only one iterator can run at a time. This is the only way we can
1626 	 * cleanly pull ep's from underneath all the running iterators when
1627 	 * an ep is freed.
1628 	 */
1629 	SCTP_ITERATOR_LOCK();
1630 	if (it->inp == NULL) {
1631 		/* iterator is complete */
1632 done_with_iterator:
1633 		SCTP_ITERATOR_UNLOCK();
1634 		SCTP_INP_INFO_WLOCK();
1635 		LIST_REMOVE(it, sctp_nxt_itr);
1636 		/* stopping the callout is not needed, in theory */
1637 		SCTP_INP_INFO_WUNLOCK();
1638 		callout_stop(&it->tmr.timer);
1639 		if (it->function_atend != NULL) {
1640 			(*it->function_atend) (it->pointer, it->val);
1641 		}
1642 		SCTP_FREE(it);
1643 		return;
1644 	}
1645 select_a_new_ep:
1646 	SCTP_INP_WLOCK(it->inp);
1647 	while (((it->pcb_flags) &&
1648 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1649 	    ((it->pcb_features) &&
1650 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1651 		/* endpoint flags or features don't match, so keep looking */
1652 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1653 			SCTP_INP_WUNLOCK(it->inp);
1654 			goto done_with_iterator;
1655 		}
1656 		SCTP_INP_WUNLOCK(it->inp);
1657 		it->inp = LIST_NEXT(it->inp, sctp_list);
1658 		if (it->inp == NULL) {
1659 			goto done_with_iterator;
1660 		}
1661 		SCTP_INP_WLOCK(it->inp);
1662 	}
1663 	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1664 	    (it->inp->inp_starting_point_for_iterator != it)) {
1665 		printf("Iterator collision, waiting for one at %p\n",
1666 		    it->inp);
1667 		SCTP_INP_WUNLOCK(it->inp);
1668 		goto start_timer_return;
1669 	}
1670 	/* mark the current iterator on the endpoint */
1671 	it->inp->inp_starting_point_for_iterator = it;
1672 	SCTP_INP_WUNLOCK(it->inp);
1673 	SCTP_INP_RLOCK(it->inp);
1674 	/* now go through each assoc which is in the desired state */
1675 	if (it->stcb == NULL) {
1676 		/* run the per instance function */
1677 		if (it->function_inp != NULL)
1678 			(*it->function_inp) (it->inp, it->pointer, it->val);
1679 
1680 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1681 	}
1682 	SCTP_INP_RUNLOCK(it->inp);
1683 	if ((it->stcb) &&
1684 	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
1685 		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1686 	}
1687 	while (it->stcb) {
1688 		SCTP_TCB_LOCK(it->stcb);
1689 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1690 			/* not in the right state... keep looking */
1691 			SCTP_TCB_UNLOCK(it->stcb);
1692 			goto next_assoc;
1693 		}
1694 		/* mark the current iterator on the assoc */
1695 		it->stcb->asoc.stcb_starting_point_for_iterator = it;
1696 		/* have we hit the per-invocation limit of the iterator loop? */
1697 		iteration_count++;
1698 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1699 	start_timer_return:
1700 			/* set a timer to continue this later */
1701 			SCTP_TCB_UNLOCK(it->stcb);
1702 			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
1703 			    (struct sctp_inpcb *)it, NULL, NULL);
1704 			SCTP_ITERATOR_UNLOCK();
1705 			return;
1706 		}
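		/*
		 * The bail-out above bounds the work done per timer firing:
		 * after SCTP_ITERATOR_MAX_AT_ONCE associations the iterator
		 * re-arms its timer and resumes from it->stcb on the next
		 * expiry, so a long association list cannot monopolize the
		 * callout context.
		 */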
1707 		/* run function on this one */
1708 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1709 
1710 		/*
1711 		 * we lie here: it really needs to have its own type, but
1712 		 * first I must verify that this won't affect things :-0
1713 		 */
1714 		if (it->no_chunk_output == 0)
1715 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
1716 
1717 		SCTP_TCB_UNLOCK(it->stcb);
1718 next_assoc:
1719 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1720 	}
1721 	/* done with all assocs on this endpoint, move on to next endpoint */
1722 	SCTP_INP_WLOCK(it->inp);
1723 	it->inp->inp_starting_point_for_iterator = NULL;
1724 	SCTP_INP_WUNLOCK(it->inp);
1725 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1726 		it->inp = NULL;
1727 	} else {
1728 		SCTP_INP_INFO_RLOCK();
1729 		it->inp = LIST_NEXT(it->inp, sctp_list);
1730 		SCTP_INP_INFO_RUNLOCK();
1731 	}
1732 	if (it->inp == NULL) {
1733 		goto done_with_iterator;
1734 	}
1735 	goto select_a_new_ep;
1736 }
1737