xref: /freebsd/sys/netinet/sctp_timer.c (revision f856af0466c076beef4ea9b15d088e1119a945b8)
/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipsec.h"
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INET6
#include <sys/domain.h>
#endif

#include <sys/limits.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#define _IP_VHL
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#endif				/* INET6 */

#include <netinet/sctp_pcb.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif				/* IPSEC */
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>

#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>


#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;

#endif				/* SCTP_DEBUG */


extern unsigned int sctp_early_fr_msec;

void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm, no RTT estimate yet? */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {
		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
	if (cur_rtt < sctp_early_fr_msec) {
		cur_rtt = sctp_early_fr_msec;
	}
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * this chunk was sent whole seconds past
				 * our minimum-wait boundary; it is too
				 * recent to mark.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok, it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
#ifdef SCTP_EARLYFR_LOGGING
			sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
			    4, SCTP_FR_MARKED_EARLY);
#endif
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Marked all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
#ifdef SCTP_CWND_MONITOR
		int old_cwnd;

		old_cwnd = net->cwnd;
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
		/*
		 * make a small adjustment to cwnd and force to CA.
		 */
		if (net->cwnd > net->mtu)
			/* drop down one MTU after sending */
			net->cwnd -= net->mtu;
		if (net->cwnd < net->ssthresh)
			/* still in SS, move to CA */
			net->ssthresh = net->cwnd - 1;
#ifdef SCTP_CWND_MONITOR
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
#endif
	} else if (cnt_resend) {
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
	}
	/* Restart it? */
	if (net->flight_size < net->cwnd) {
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}
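
/*
 * Illustrative sketch (not part of the original code; the helper name is
 * hypothetical): how the early-FR marking boundary above is derived.
 * Given an RTT estimate in milliseconds, only chunks sent before
 * (now - max(rtt_ms, floor_ms)) are old enough to be marked.
 */
static __inline void
sctp_example_early_fr_boundary(const struct timeval *now, unsigned int rtt_ms,
    unsigned int floor_ms, struct timeval *min_wait)
{
	struct timeval tv;
	unsigned int wait_us;

	if (rtt_ms < floor_ms)
		rtt_ms = floor_ms;	/* enforce the sctp_early_fr_msec floor */
	wait_us = rtt_ms * 1000;	/* milliseconds -> microseconds */
	tv.tv_sec = wait_us / 1000000;
	tv.tv_usec = wait_us % 1000000;
	*min_wait = *now;
	timevalsub(min_wait, &tv);	/* boundary = now - wait */
	if (min_wait->tv_sec < 0 || min_wait->tv_usec < 0) {
		/* clock too close to boot time; clamp as the timer does */
		min_wait->tv_sec = min_wait->tv_usec = 0;
	}
}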

void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
		    asoc->sent_queue_retran_cnt,
		    asoc->sent_queue_cnt);
	}
#endif				/* SCTP_DEBUG */
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Audit completes retran:%d onqueue:%d\n",
		    asoc->sent_queue_retran_cnt,
		    asoc->sent_queue_cnt);
	}
#endif				/* SCTP_DEBUG */
}

int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Error count for %p now %d thresh:%d\n",
			    net, net->error_count,
			    net->failure_threshold);
		}
#endif				/* SCTP_DEBUG */
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			stcb->asoc.overall_error_count++;
		}
	} else {
		stcb->asoc.overall_error_count++;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Overall error count for %p now %d thresh:%u state:%x\n",
		    &stcb->asoc,
		    stcb->asoc.overall_error_count,
		    (uint32_t) threshold,
		    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	}
#endif				/* SCTP_DEBUG */
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
		return (1);
	}
	return (0);
}
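
/*
 * Sketch (illustration only; the helper name is hypothetical): the layout
 * of the operational-error cause built above -- a struct sctp_paramhdr
 * immediately followed by a 32-bit "location" code that records which
 * timer path raised the abort.
 */
static struct mbuf *
sctp_example_build_abort_cause(uint32_t location)
{
	struct mbuf *oper;
	struct sctp_paramhdr *ph;
	uint32_t *ippp;

	oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr) + sizeof(uint32_t),
	    0, M_DONTWAIT, 1, MT_DATA);
	if (oper == NULL)
		return (NULL);
	SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
	ph = mtod(oper, struct sctp_paramhdr *);
	ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
	ph->param_length = htons(SCTP_BUF_LEN(oper));
	ippp = (uint32_t *) (ph + 1);
	*ippp = htonl(location);	/* e.g. SCTP_FROM_SCTP_TIMER + SCTP_LOC_1 */
	return (oper);
}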

struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int highest_ssthresh)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *hthresh = NULL;
	int once;
	uint32_t val = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	if (highest_ssthresh) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
			    ) {
				/*
				 * skip ones that are not reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (mnet->ssthresh > val) {
				/* new largest ssthresh seen so far */
				hthresh = mnet;
				val = mnet->ssthresh;
			} else if (val == mnet->ssthresh) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					/* break the tie randomly */
					hthresh = mnet;
					val = mnet->ssthresh;
				}
			}
		}
		if (hthresh) {
			return (hthresh);
		}
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		if (alt->ro.ro_rt == NULL) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
			if (sin6->sin6_family == AF_INET6) {
				(void)sa6_embedscope(sin6, ip6_use_defzone);
			}
			rtalloc_ign((struct route *)&alt->ro, 0UL);
			if (sin6->sin6_family == AF_INET6) {
				(void)sa6_recoverscope(sin6);
			}
			alt->src_addr_selected = 0;
		}
		if (
		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
		    ) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/*
		 * Case where NO in-service network exists (dormant state);
		 * we rotate destinations.
		 */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}
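
/*
 * Illustration only (hypothetical helper): the round-robin pattern the
 * fallback passes above rely on. Walk forward from 'cur' (which must be
 * non-NULL), wrap to the list head once, and give up after the second
 * wrap so the loop terminates even when nothing qualifies.
 */
static struct sctp_nets *
sctp_example_next_net(struct sctp_association *asoc, struct sctp_nets *cur)
{
	struct sctp_nets *alt = cur;
	int once = 0;

	do {
		alt = TAILQ_NEXT(alt, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1)
				return (NULL);	/* wrapped twice: give up */
			alt = TAILQ_FIRST(&asoc->nets);
		}
	} while (alt == cur);	/* stop once we reach a different net */
	return (alt);
}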

static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked)
{
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && num_marked) {
		/* We don't apply penalty to window probe scenarios */
#ifdef SCTP_CWND_MONITOR
		int old_cwnd = net->cwnd;

#endif
		net->ssthresh = net->cwnd >> 1;
		if (net->ssthresh < (net->mtu << 1)) {
			net->ssthresh = (net->mtu << 1);
		}
		net->cwnd = net->mtu;
		/* floor of 1 mtu */
		if (net->cwnd < net->mtu)
			net->cwnd = net->mtu;
#ifdef SCTP_CWND_MONITOR
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
#endif

		net->partial_bytes_acked = 0;
	}
}
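
/*
 * Distilled view (illustration only; the helper name is hypothetical) of
 * the timeout penalty applied above, per RFC 2960 section 7.2.3:
 * ssthresh = max(cwnd/2, 2*MTU), cwnd = 1*MTU, and the RTO doubles up to
 * the association maximum.
 */
static __inline void
sctp_example_timeout_penalty(unsigned int *rto, unsigned int max_rto,
    uint32_t *cwnd, uint32_t *ssthresh, uint32_t mtu)
{
	*rto <<= 1;			/* exponential timer backoff */
	if (*rto > max_rto)
		*rto = max_rto;
	*ssthresh = *cwnd >> 1;
	if (*ssthresh < (mtu << 1))
		*ssthresh = mtu << 1;	/* floor of two MTUs */
	*cwnd = mtu;			/* collapse to one MTU */
}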

extern int sctp_peer_chunk_oh;

static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked)
{

	/*
	 * Mark all chunks (well, not quite all) that were sent to *net for
	 * retransmission and move them to alt as their destination as
	 * well. We only mark chunks that have been outstanding long enough
	 * to have received feedback.
	 */
	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int orig_rwnd, audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight;
	uint32_t tsnlast, tsnfirst;

	/*
	 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
	 * then pick dest with largest ssthresh for any retransmission.
	 * (iyengar@cis.udel.edu, 2005/08/12)
	 */
	if (sctp_cmt_on_off) {
		alt = sctp_find_alternate_net(stcb, net, 1);
		/*
		 * CUCv2: If a different dest is picked for the
		 * retransmission, then new (rtx-)pseudo_cumack needs to be
		 * tracked for orig dest. Let CUCv2 track new (rtx-)
		 * pseudo-cumack always.
		 */
		net->find_pseudo_cumack = 1;
		net->find_rtx_pseudo_cumack = 1;
	}
	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(cur_rtt,
	    stcb->asoc.peers_rwnd,
	    window_probe,
	    SCTP_FR_T3_MARK_TIME);
	sctp_log_fr(net->flight_size,
	    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
	    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
	    SCTP_FR_CWND_REPORT);
	sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
#endif
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
	sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
#endif
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf, but we will fix that down below.
	 */
	orig_rwnd = stcb->asoc.peers_rwnd;
	orig_flight = net->flight_size;
	net->rto_pending = 0;
	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* Strange case: our list got out of order? */
			printf("Our list is out of order?\n");
			panic("Out of order list");
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate it's been outstanding long enough */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
			sctp_log_fr(chk->rec.data.TSN_seq,
			    chk->sent_rcv_time.tv_sec,
			    chk->sent_rcv_time.tv_usec,
			    SCTP_FR_T3_MARK_TIME);
#endif
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min; forget it, we
				 * will find no more to send.
				 */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
				sctp_log_fr(0,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_STOPPED);
#endif
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok, it was sent after our boundary
					 * time.
					 */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
#endif
					continue;
				}
			}
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
					/* Yes, so drop it */
					if (chk->data) {
						sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue);
					}
					continue;
				}
			}
			if (PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue);
					}
					continue;
				}
			}
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    0, SCTP_FR_T3_MARKED);

#endif
			}
			if (stcb->asoc.total_flight_count > 0)
				stcb->asoc.total_flight_count--;
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);
#ifdef SCTP_FLIGHT_LOGGING
			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
			    chk->whoTo->flight_size,
			    chk->book_size,
			    (uintptr_t) stcb,
			    chk->rec.data.TSN_seq);
#endif

			if (net->flight_size >= chk->book_size)
				net->flight_size -= chk->book_size;
			else
				net->flight_size = 0;

			stcb->asoc.peers_rwnd += chk->send_size;
			stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time, so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			if (sctp_cmt_on_off == 1) {
				chk->no_fr_allowed = 1;
			}
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
#endif

	if (stcb->asoc.total_flight >= (orig_flight - net->flight_size)) {
		stcb->asoc.total_flight -= (orig_flight - net->flight_size);
	} else {
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		audit_tf = 1;
	}

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
		if (num_mk) {
			printf("LAST TSN marked was %x\n", tsnlast);
			printf("Num marked for retransmission was %d peer-rwd:%ld\n",
			    num_mk, (u_long)stcb->asoc.peers_rwnd);
		}
	}
#endif
	*num_marked = num_mk;
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		printf("Local Audit says there are %d for retran asoc cnt:%d\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	/* Now check for an ECN Echo that may be stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (audit_tf) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Audit total flight due to negative value net:%p\n",
			    net);
		}
#endif				/* SCTP_DEBUG */
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks' flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
				    lnets, lnets->cwnd, lnets->ssthresh);
			}
#endif				/* SCTP_DEBUG */
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
#ifdef SCTP_FLIGHT_LOGGING
				sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) stcb,
				    chk->rec.data.TSN_seq);
#endif
				stcb->asoc.total_flight += chk->book_size;
				chk->whoTo->flight_size += chk->book_size;
				stcb->asoc.total_flight_count++;
			}
		}
	}
	/*
	 * Set up the ECN nonce re-sync point. We do this since
	 * retransmissions are NOT set up for ECN. This means that, due to
	 * Karn's rule, we don't know the total of the peer's ECN bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/* Currently we always return 0; callers get the mark count via *num_marked. */
	return (0);
}
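
/*
 * Illustration only (hypothetical helper): the PR-SCTP time-to-live test
 * used in the marking loop above, i.e. "has 'now' passed the chunk's
 * timetodrop stamp?".
 */
static __inline int
sctp_example_pr_expired(const struct timeval *now,
    const struct timeval *timetodrop)
{
	if (now->tv_sec > timetodrop->tv_sec)
		return (1);
	if ((now->tv_sec == timetodrop->tv_sec) &&
	    (now->tv_usec > timetodrop->tv_usec))
		return (1);
	return (0);
}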

static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;

	if (net == alt)
		/* nothing to do */
		return;

	asoc = &stcb->asoc;

	/*
	 * now run through all the streams checking for chunks sent to our
	 * bad network.
	 */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		TAILQ_FOREACH(sp, &outs->outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
	/* Now check the pending queue */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
}
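
/*
 * Sketch (illustration only; the helper name is hypothetical): the
 * reference-count discipline used whenever a chunk is re-homed above --
 * release the old net's reference before taking one on the alternate, so
 * the per-net refcounts stay balanced.
 */
static __inline void
sctp_example_rehome_chunk(struct sctp_tmit_chunk *chk, struct sctp_nets *alt)
{
	sctp_free_remote_addr(chk->whoTo);	/* drop the old reference */
	chk->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);	/* hold the new one */
}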

int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

#ifdef SCTP_FR_LOGGING
	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
#ifdef SCTP_CWND_LOGGING
	{
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
#endif
#endif
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}
	alt = sctp_find_alternate_net(stcb, net, 0);
	sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
	/* FR loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;

	/*
	 * set up the sat loss recovery that prevents satellite cwnd
	 * advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send an immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/*
					 * no recent feedback in an RTO or
					 * more; request an RTT update
					 */
					sctp_send_hb(stcb, 1, net);
				}
			}
		}
	} else {
		/*
		 * For a window probe we penalize only the association, not
		 * the nets. The association may still fail if SACKs are
		 * not coming back. If SACKs are coming with rwnd locked at
		 * 0, we will continue to hold things waiting for rwnd to
		 * rise.
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate.
			 * Note: this means the HB code must use this flag
			 * to restore the primary if it goes active again,
			 * AND if someone does a change-primary then this
			 * flag must be cleared from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				net->src_addr_selected = 0;
			}
		}
	}
	/*
	 * Special case for the cookie-echoed state: we don't do output but
	 * must await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
#ifdef SCTP_CWND_MONITOR
	sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
#endif
	return (0);
}
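
/*
 * Sketch (illustration only; the helper name is hypothetical): the
 * window-probe test at the top of sctp_t3rxt_timer(). With the peer's
 * window closed and less than one MTU in flight, the timeout is treated
 * as a zero-window probe and the per-destination error counts are spared.
 */
static __inline int
sctp_example_is_window_probe(uint32_t peers_rwnd, uint32_t total_flight,
    uint32_t mtu)
{
	return ((peers_rwnd == 0) && (total_flight < mtu));
}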

int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb);
		return (0);
	}
	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	    stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr, use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			printf("Strange, in state %d, not cookie-echoed yet, c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok, we found the cookie; threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Threshold management cleared; now let's back off the address and
	 * select an alternate.
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again. Note:
	 * we don't mark any chunks for retran, so FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET; we use the seq number we sent out on */
	sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Threshold management cleared; now let's back off the address and
	 * select an alternate.
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if an ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this the first send, or a retransmission? */
	if (stcb->asoc.asconf_sent == 0) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net);
	} else {
		/* Retransmission of the existing ASCONF needed... */

		/* find the existing ASCONF */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		/*
		 * PETER? FIX? How will the following code ever run? If the
		 * max_send_times is hit, threshold management will blow
		 * away the association?
		 */
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: the peer is not responding
			 * to ASCONFs but maybe is to data etc., e.g. it is
			 * not properly handling the chunk type upper bits.
			 * Mark this peer as ASCONF-incapable and clean up.
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			}
#endif				/* SCTP_DEBUG */
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * Threshold management cleared; now let's back off the
		 * address and select an alternate.
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		sctp_free_remote_addr(asconf->whoTo);
		asconf->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);

		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to alternates for ALL chk's in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;
	}
	return (0);
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for our net */
	if (alt) {
		sctp_send_shutdown(stcb, alt);
	} else {
		/*
		 * if alt is NULL, there is no destination to send to
		 */
		return (0);
	}
	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown-ack into the queue for our net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			printf("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data is queued; if so, report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				printf("Still nothing moved, %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		printf("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	if (net) {
		if (net->hb_responded == 0) {
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB; this will do threshold management and pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra HBs, up to maxburst, if there
		 * are any unconfirmed addresses.
		 */
		int cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (sctp_send_hb(stcb, 1, net) == 0) {
					break;
				}
				if (cnt_sent >= stcb->asoc.max_burst)
					break;
			}
		}
	}
	return (0);
}

int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};


static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* first table entry bigger than the current MTU */
			return (mtu_sizes[i]);
		}
	}
	/* table exhausted; return the current (highest allowable) value */
	return (cur_mtu);
}
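
/*
 * Usage sketch (illustration only; the helper name is hypothetical):
 * stepping through the probe table above. Starting from an Ethernet MTU
 * of 1500, successive calls yield 1536, 2002, 2048, ... up to the 65535
 * ceiling, after which the current value is returned unchanged.
 */
static void
sctp_example_walk_mtu_table(struct sctp_inpcb *inp)
{
	uint32_t mtu;

	mtu = sctp_getnext_mtu(inp, 1500);	/* -> 1536 */
	mtu = sctp_getnext_mtu(inp, mtu);	/* -> 2002 */
	mtu = sctp_getnext_mtu(inp, 65535);	/* table exhausted -> 65535 */
	(void)mtu;
}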

void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu;

	/* find the next larger MTU to probe toward */
	next_mtu = sctp_getnext_mtu(inp, net->mtu);
	if (next_mtu <= net->mtu) {
		/* nothing to do */
		return;
	}
	if (net->ro.ro_rt != NULL) {
		/*
		 * only if we have a route and interface do we set anything.
		 * Note we always restart the timer though, just in case it
		 * is updated (i.e. the ifp) or the route/ifp is populated.
		 */
		if (net->ro.ro_rt->rt_ifp != NULL) {
			if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
				/* ok it will fit out the door */
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit. Call the output routine,
			 * which should do nothing, just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No autoclose at this time; reset the timeout to
			 * check again later.
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}


void
sctp_iterator_timer(struct sctp_iterator *it)
{
	int iteration_count = 0;

	/*
	 * only one iterator can run at a time. This is the only way we can
	 * cleanly pull endpoints out from underneath all the running
	 * iterators when an endpoint is freed.
	 */
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		LIST_REMOVE(it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory */
		SCTP_INP_INFO_WUNLOCK();
		SCTP_OS_TIMER_STOP(&it->tmr.timer);
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		printf("Iterator collision, waiting for one at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* now go through each assoc which is in the desired state */
	if (it->stcb == NULL) {
		/* run the per-instance function */
		if (it->function_inp != NULL)
			(*it->function_inp) (it->inp, it->pointer, it->val);

		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	SCTP_INP_RUNLOCK(it->inp);
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
	start_timer_return:
			/* set a timer to continue this later */
			SCTP_TCB_UNLOCK(it->stcb);
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
			    (struct sctp_inpcb *)it, NULL, NULL);
			SCTP_ITERATOR_UNLOCK();
			return;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here: it really needs to have its own type, but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
	}
	/* done with all assocs on this endpoint, move on to next endpoint */
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
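
/*
 * Illustration only (hypothetical cursor type and helper): the
 * resumable-iteration pattern used above. Work is bounded per activation;
 * when the budget is spent, the resume point stays in the cursor and the
 * caller re-arms a timer to continue later, keeping lock hold times short.
 */
struct sctp_example_cursor {
	struct sctp_tcb *stcb;	/* resume point within the endpoint */
	int budget;		/* assocs we may visit this activation */
};

static int
sctp_example_iterate_some(struct sctp_example_cursor *c)
{
	while (c->stcb != NULL) {
		if (c->budget-- <= 0)
			return (1);	/* out of budget: re-arm a timer */
		/* ... visit c->stcb here ... */
		c->stcb = LIST_NEXT(c->stcb, sctp_tcblist);
	}
	return (0);		/* walked the whole list: done */
}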
1757