xref: /freebsd/sys/netinet/sctp_timer.c (revision bfe691b2f75de2224c7ceb304ebcdef2b42d4179)
1 /*-
2  * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #define _IP_VHL
37 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_pcb.h>
39 #ifdef INET6
40 #include <netinet6/sctp6_var.h>
41 #endif
42 #include <netinet/sctp_var.h>
43 #include <netinet/sctp_sysctl.h>
44 #include <netinet/sctp_timer.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_header.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp.h>
52 #include <netinet/sctp_uio.h>
53 
56 void
57 sctp_early_fr_timer(struct sctp_inpcb *inp,
58     struct sctp_tcb *stcb,
59     struct sctp_nets *net)
60 {
61 	struct sctp_tmit_chunk *chk, *tp2;
62 	struct timeval now, min_wait, tv;
63 	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
64 
65 	/* an early FR is occurring. */
66 	SCTP_GETTIME_TIMEVAL(&now);
67 	/* get cur rto in micro-seconds */
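	/*
	 * Note: lastsa/lastsv appear to be kept in scaled (Van Jacobson
	 * style) fixed-point form, so the shifts below recover roughly
	 * SRTT plus a variance term in milliseconds; the result is
	 * clamped and converted to microseconds below.
	 */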
68 	if (net->lastsa == 0) {
69 		/* Hmm no rtt estimate yet? */
70 		cur_rtt = stcb->asoc.initial_rto >> 2;
71 	} else {
73 		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
74 	}
75 	if (cur_rtt < sctp_early_fr_msec) {
76 		cur_rtt = sctp_early_fr_msec;
77 	}
78 	cur_rtt *= 1000;
79 	tv.tv_sec = cur_rtt / 1000000;
80 	tv.tv_usec = cur_rtt % 1000000;
81 	min_wait = now;
82 	timevalsub(&min_wait, &tv);
83 	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
84 		/*
85 		 * if we hit here, we don't have enough seconds on the clock
86 		 * to account for the RTO. We just let the lower seconds be
87 		 * the bounds and don't worry about it. This may mean we
88 		 * will mark a lot more than we should.
89 		 */
90 		min_wait.tv_sec = min_wait.tv_usec = 0;
91 	}
92 	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
93 	for (; chk != NULL; chk = tp2) {
94 		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
95 		if (chk->whoTo != net) {
96 			continue;
97 		}
98 		if (chk->sent == SCTP_DATAGRAM_RESEND)
99 			cnt_resend++;
100 		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
101 		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
102 			/* pending, may need retran */
103 			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
104 				/*
105 				 * we have reached a chunk that was sent
106 				 * after our minimum boundary; forget it, we
107 				 * will find no more to send.
108 				 */
109 				continue;
110 			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
111 				/*
112 				 * we must look at the microseconds to
113 				 * know.
114 				 */
115 				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
116 					/*
117 					 * ok it was sent after our boundary
118 					 * time.
119 					 */
120 					continue;
121 				}
122 			}
123 #ifdef SCTP_EARLYFR_LOGGING
124 			sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
125 			    4, SCTP_FR_MARKED_EARLY);
126 #endif
127 			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
128 			chk->sent = SCTP_DATAGRAM_RESEND;
129 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
130 			/* double book size since we are doing an early FR */
131 			chk->book_size_scale++;
132 			cnt += chk->send_size;
133 			if ((cnt + net->flight_size) > net->cwnd) {
134 				/* Mark all we could possibly resend */
135 				break;
136 			}
137 		}
138 	}
139 	if (cnt) {
140 #ifdef SCTP_CWND_MONITOR
141 		int old_cwnd;
142 
143 		old_cwnd = net->cwnd;
144 #endif
145 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
146 		/*
147 		 * make a small adjustment to cwnd and force to CA.
148 		 */
149 
150 		if (net->cwnd > net->mtu)
151 			/* drop down one MTU after sending */
152 			net->cwnd -= net->mtu;
153 		if (net->cwnd < net->ssthresh)
154 			/* still in SS move to CA */
155 			net->ssthresh = net->cwnd - 1;
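		/*
		 * With ssthresh now just below cwnd, the sender is treated
		 * as being in congestion avoidance rather than slow start
		 * from here on.
		 */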
156 #ifdef SCTP_CWND_MONITOR
157 		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
158 #endif
159 	} else if (cnt_resend) {
160 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
161 	}
162 	/* Restart it? */
163 	if (net->flight_size < net->cwnd) {
164 		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
165 		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
166 	}
167 }
168 
169 void
170 sctp_audit_retranmission_queue(struct sctp_association *asoc)
171 {
172 	struct sctp_tmit_chunk *chk;
173 
174 #ifdef SCTP_DEBUG
175 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
176 		printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
177 		    asoc->sent_queue_retran_cnt,
178 		    asoc->sent_queue_cnt);
179 	}
180 #endif				/* SCTP_DEBUG */
181 	asoc->sent_queue_retran_cnt = 0;
182 	asoc->sent_queue_cnt = 0;
183 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
184 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
185 			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
186 		}
187 		asoc->sent_queue_cnt++;
188 	}
189 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
190 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
191 			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
192 		}
193 	}
194 #ifdef SCTP_DEBUG
195 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
196 		printf("Audit completes retran:%d onqueue:%d\n",
197 		    asoc->sent_queue_retran_cnt,
198 		    asoc->sent_queue_cnt);
199 	}
200 #endif				/* SCTP_DEBUG */
201 }
202 
203 
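/*
 * Bump the error counts for the net and/or the association and act on
 * the supplied threshold: an address is marked unreachable once its
 * failure threshold is exceeded, and the association is aborted once
 * its overall error count passes the threshold. Returns 1 if the
 * association was destroyed, 0 otherwise.
 */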
204 int
205 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
206     struct sctp_nets *net, uint16_t threshold)
207 {
208 	if (net) {
209 		net->error_count++;
210 #ifdef SCTP_DEBUG
211 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
212 			printf("Error count for %p now %d thresh:%d\n",
213 			    net, net->error_count,
214 			    net->failure_threshold);
215 		}
216 #endif				/* SCTP_DEBUG */
217 		if (net->error_count > net->failure_threshold) {
218 			/* We had a threshold failure */
219 			if (net->dest_state & SCTP_ADDR_REACHABLE) {
220 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
221 				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
222 				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
223 				if (net == stcb->asoc.primary_destination) {
224 					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
225 				}
226 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
227 				    stcb,
228 				    SCTP_FAILED_THRESHOLD,
229 				    (void *)net);
230 			}
231 		}
232 		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
233 		 *********ROUTING CODE
234 		 */
235 		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
236 		 *********ROUTING CODE
237 		 */
238 	}
239 	if (stcb == NULL)
240 		return (0);
241 
242 	if (net) {
243 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
244 			stcb->asoc.overall_error_count++;
245 		}
246 	} else {
247 		stcb->asoc.overall_error_count++;
248 	}
249 #ifdef SCTP_DEBUG
250 	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
251 		printf("Overall error count for %p now %d thresh:%u state:%x\n",
252 		    &stcb->asoc,
253 		    stcb->asoc.overall_error_count,
254 		    (uint32_t) threshold,
255 		    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
256 	}
257 #endif				/* SCTP_DEBUG */
258 	/*
259 	 * We specifically do not do >= to give the assoc one more chance
260 	 * before we fail it.
261 	 */
262 	if (stcb->asoc.overall_error_count > threshold) {
263 		/* Abort notification sends a ULP notify */
264 		struct mbuf *oper;
265 
266 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
267 		    0, M_DONTWAIT, 1, MT_DATA);
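		/*
		 * Build a protocol-violation error cause carrying a
		 * location marker so the abort can be traced to this spot.
		 */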
268 		if (oper) {
269 			struct sctp_paramhdr *ph;
270 			uint32_t *ippp;
271 
272 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
273 			    sizeof(uint32_t);
274 			ph = mtod(oper, struct sctp_paramhdr *);
275 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
276 			ph->param_length = htons(SCTP_BUF_LEN(oper));
277 			ippp = (uint32_t *) (ph + 1);
278 			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
279 		}
280 		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
281 		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
282 		return (1);
283 	}
284 	return (0);
285 }
286 
287 struct sctp_nets *
288 sctp_find_alternate_net(struct sctp_tcb *stcb,
289     struct sctp_nets *net,
290     int highest_ssthresh)
291 {
292 	/* Find and return an alternate network if possible */
293 	struct sctp_nets *alt, *mnet, *hthresh = NULL;
294 	int once;
295 	uint32_t val = 0;
296 
297 	if (stcb->asoc.numnets == 1) {
298 		/* No others but net */
299 		return (TAILQ_FIRST(&stcb->asoc.nets));
300 	}
301 	if (highest_ssthresh) {
302 		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
303 			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
304 			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
306 				/*
307 				 * skip ones that are unreachable or
308 				 * unconfirmed
309 				 */
310 				continue;
311 			}
312 			if (mnet->ssthresh > val) {
313 				hthresh = mnet;
314 				val = mnet->ssthresh;
315 			} else if (val == mnet->ssthresh) {
316 				uint32_t rndval;
317 				uint8_t this_random;
318 
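				/*
				 * Tie on ssthresh: break it with a coin
				 * flip taken from the cached heartbeat
				 * random bytes, refilling the 4-byte cache
				 * from the TSN randomizer when it runs out.
				 */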
319 				if (stcb->asoc.hb_random_idx > 3) {
320 					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
321 					memcpy(stcb->asoc.hb_random_values, &rndval,
322 					    sizeof(stcb->asoc.hb_random_values));
323 					this_random = stcb->asoc.hb_random_values[0];
324 					stcb->asoc.hb_random_idx = 0;
325 					stcb->asoc.hb_ect_randombit = 0;
326 				} else {
327 					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
328 					stcb->asoc.hb_random_idx++;
329 					stcb->asoc.hb_ect_randombit = 0;
330 				}
331 				if (this_random % 2) {
332 					hthresh = mnet;
333 					val = mnet->ssthresh;
334 				}
335 			}
336 		}
337 		if (hthresh) {
338 			return (hthresh);
339 		}
340 	}
341 	mnet = net;
342 	once = 0;
343 
344 	if (mnet == NULL) {
345 		mnet = TAILQ_FIRST(&stcb->asoc.nets);
346 	}
347 	do {
348 		alt = TAILQ_NEXT(mnet, sctp_next);
349 		if (alt == NULL) {
350 			once++;
351 			if (once > 1) {
352 				break;
353 			}
354 			alt = TAILQ_FIRST(&stcb->asoc.nets);
355 		}
356 		if (alt->ro.ro_rt == NULL) {
357 			struct sockaddr_in6 *sin6;
358 
359 			sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
360 			if (sin6->sin6_family == AF_INET6) {
361 				(void)sa6_embedscope(sin6, ip6_use_defzone);
362 			}
363 			rtalloc_ign((struct route *)&alt->ro, 0UL);
364 
365 			if (sin6->sin6_family == AF_INET6) {
366 				(void)sa6_recoverscope(sin6);
367 			}
368 			if (alt->ro._s_addr) {
369 				sctp_free_ifa(alt->ro._s_addr);
370 				alt->ro._s_addr = NULL;
371 			}
372 			alt->src_addr_selected = 0;
373 		}
374 		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
376 		    (alt->ro.ro_rt != NULL) &&
377 		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
379 			/* Found a reachable address */
380 			break;
381 		}
382 		mnet = alt;
383 	} while (alt != NULL);
384 
385 	if (alt == NULL) {
386 		/* Case where NO in-service network exists (dormant state) */
387 		/* we rotate destinations */
388 		once = 0;
389 		mnet = net;
390 		do {
391 			alt = TAILQ_NEXT(mnet, sctp_next);
392 			if (alt == NULL) {
393 				once++;
394 				if (once > 1) {
395 					break;
396 				}
397 				alt = TAILQ_FIRST(&stcb->asoc.nets);
398 			}
399 			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
400 			    (alt != net)) {
401 				/* Found an alternate address */
402 				break;
403 			}
404 			mnet = alt;
405 		} while (alt != NULL);
406 	}
407 	if (alt == NULL) {
408 		return (net);
409 	}
410 	return (alt);
411 }
412 
413 static void
414 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
415     struct sctp_nets *net,
416     int win_probe,
417     int num_marked)
418 {
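	/*
	 * Back off the timer (RFC 2960, rule E2): double the RTO, capped
	 * at the association maximum.
	 */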
419 	net->RTO <<= 1;
420 	if (net->RTO > stcb->asoc.maxrto) {
421 		net->RTO = stcb->asoc.maxrto;
422 	}
423 	if ((win_probe == 0) && num_marked) {
424 		/* We don't apply penalty to window probe scenarios */
425 #ifdef SCTP_CWND_MONITOR
426 		int old_cwnd = net->cwnd;
427 
428 #endif
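		/*
		 * Standard T3 congestion response: halve cwnd into ssthresh
		 * (floored at two MTUs) and collapse cwnd to a single MTU.
		 */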
429 		net->ssthresh = net->cwnd >> 1;
430 		if (net->ssthresh < (net->mtu << 1)) {
431 			net->ssthresh = (net->mtu << 1);
432 		}
433 		net->cwnd = net->mtu;
434 		/* floor of 1 mtu */
435 		if (net->cwnd < net->mtu)
436 			net->cwnd = net->mtu;
437 #ifdef SCTP_CWND_MONITOR
438 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
439 #endif
440 
441 		net->partial_bytes_acked = 0;
442 	}
443 }
444 
445 static int
446 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
447     struct sctp_nets *net,
448     struct sctp_nets *alt,
449     int window_probe,
450     int *num_marked)
451 {
452 
453 	/*
454 	 * Mark all chunks (well, not quite all) that were sent to *net for
455 	 * retransmission. Move them to alt for their destination as well...
456 	 * We only mark chunks that have been outstanding long enough to
457 	 * have received feedback.
458 	 */
459 	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
460 	struct sctp_nets *lnets;
461 	struct timeval now, min_wait, tv;
462 	int cur_rtt;
463 	int orig_rwnd, audit_tf, num_mk, fir;
464 	unsigned int cnt_mk;
465 	uint32_t orig_flight;
466 	uint32_t tsnlast, tsnfirst;
467 
468 	/*
469 	 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
470 	 * then pick dest with largest ssthresh for any retransmission.
471 	 * (iyengar@cis.udel.edu, 2005/08/12)
472 	 */
473 	if (sctp_cmt_on_off) {
474 		alt = sctp_find_alternate_net(stcb, net, 1);
475 		/*
476 		 * CUCv2: If a different dest is picked for the
477 		 * retransmission, then new (rtx-)pseudo_cumack needs to be
478 		 * tracked for orig dest. Let CUCv2 track new (rtx-)
479 		 * pseudo-cumack always.
480 		 */
481 		net->find_pseudo_cumack = 1;
482 		net->find_rtx_pseudo_cumack = 1;
483 	}
484 	/* none in flight now */
485 	audit_tf = 0;
486 	fir = 0;
487 	/*
488 	 * figure out how long a data chunk must be pending before we can
489 	 * mark it.
490 	 */
491 	SCTP_GETTIME_TIMEVAL(&now);
492 	/* get cur rto in micro-seconds */
493 	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
494 	cur_rtt *= 1000;
495 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
496 	sctp_log_fr(cur_rtt,
497 	    stcb->asoc.peers_rwnd,
498 	    window_probe,
499 	    SCTP_FR_T3_MARK_TIME);
500 	sctp_log_fr(net->flight_size,
501 	    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
502 	    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
503 	    SCTP_FR_CWND_REPORT);
504 	sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
505 #endif
506 	tv.tv_sec = cur_rtt / 1000000;
507 	tv.tv_usec = cur_rtt % 1000000;
508 	min_wait = now;
509 	timevalsub(&min_wait, &tv);
510 	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
511 		/*
512 		 * if we hit here, we don't have enough seconds on the clock
513 		 * to account for the RTO. We just let the lower seconds be
514 		 * the bounds and don't worry about it. This may mean we
515 		 * will mark a lot more than we should.
516 		 */
517 		min_wait.tv_sec = min_wait.tv_usec = 0;
518 	}
519 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
520 	sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
521 	sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
522 #endif
523 	/*
524 	 * Our rwnd will be incorrect here since we are not adding back the
525 	 * cnt * mbuf but we will fix that down below.
526 	 */
527 	orig_rwnd = stcb->asoc.peers_rwnd;
528 	orig_flight = net->flight_size;
529 	net->fast_retran_ip = 0;
530 	/* Now on to each chunk */
531 	num_mk = cnt_mk = 0;
532 	tsnfirst = tsnlast = 0;
533 	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
534 	for (; chk != NULL; chk = tp2) {
535 		tp2 = TAILQ_NEXT(chk, sctp_next);
536 		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
537 		    chk->rec.data.TSN_seq,
538 		    MAX_TSN)) ||
539 		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
540 			/* Strange case our list got out of order? */
541 			printf("Our list is out of order?\n");
542 			panic("Out of order list");
543 		}
544 		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
545 			/*
546 			 * found one to mark: If it is less than
547 			 * DATAGRAM_ACKED it MUST not be a skipped or marked
548 			 * TSN but instead one that is either already set
549 			 * for retransmission OR one that needs
550 			 * retransmission.
551 			 */
552 
553 			/* validate it's been outstanding long enough */
554 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
555 			sctp_log_fr(chk->rec.data.TSN_seq,
556 			    chk->sent_rcv_time.tv_sec,
557 			    chk->sent_rcv_time.tv_usec,
558 			    SCTP_FR_T3_MARK_TIME);
559 #endif
560 			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
561 				/*
562 				 * we have reached a chunk that was sent
563 				 * after our minimum boundary; forget it, we
564 				 * will find no more to send.
565 				 */
566 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
567 				sctp_log_fr(0,
568 				    chk->sent_rcv_time.tv_sec,
569 				    chk->sent_rcv_time.tv_usec,
570 				    SCTP_FR_T3_STOPPED);
571 #endif
572 				continue;
573 			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
574 			    (window_probe == 0)) {
575 				/*
576 				 * we must look at the microseconds to
577 				 * know.
578 				 */
579 				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
580 					/*
581 					 * ok it was sent after our boundary
582 					 * time.
583 					 */
584 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
585 					sctp_log_fr(0,
586 					    chk->sent_rcv_time.tv_sec,
587 					    chk->sent_rcv_time.tv_usec,
588 					    SCTP_FR_T3_STOPPED);
589 #endif
590 					continue;
591 				}
592 			}
593 			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
594 				/* Is it expired? */
595 				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
596 				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
597 				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
598 					/* Yes so drop it */
599 					if (chk->data) {
600 						sctp_release_pr_sctp_chunk(stcb,
601 						    chk,
602 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
603 						    &stcb->asoc.sent_queue);
604 					}
605 					continue;
606 				}
607 			}
608 			if (PR_SCTP_RTX_ENABLED(chk->flags)) {
609 				/* Has it been retransmitted tv_sec times? */
610 				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
611 					if (chk->data) {
612 						sctp_release_pr_sctp_chunk(stcb,
613 						    chk,
614 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
615 						    &stcb->asoc.sent_queue);
616 					}
617 					continue;
618 				}
619 			}
620 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
621 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
622 				num_mk++;
623 				if (fir == 0) {
624 					fir = 1;
625 					tsnfirst = chk->rec.data.TSN_seq;
626 				}
627 				tsnlast = chk->rec.data.TSN_seq;
628 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
629 				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
630 				    0, SCTP_FR_T3_MARKED);
631 
632 #endif
633 			}
634 			if (stcb->asoc.total_flight_count > 0)
635 				stcb->asoc.total_flight_count--;
636 			if (chk->rec.data.chunk_was_revoked) {
637 				/* deflate the cwnd */
638 				chk->whoTo->cwnd -= chk->book_size;
639 				chk->rec.data.chunk_was_revoked = 0;
640 			}
641 			chk->sent = SCTP_DATAGRAM_RESEND;
642 			SCTP_STAT_INCR(sctps_markedretrans);
643 			net->marked_retrans++;
644 			stcb->asoc.marked_retrans++;
645 #ifdef SCTP_FLIGHT_LOGGING
646 			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
647 			    chk->whoTo->flight_size,
648 			    chk->book_size,
649 			    (uintptr_t) stcb,
650 			    chk->rec.data.TSN_seq);
651 #endif
652 
653 			if (net->flight_size >= chk->book_size)
654 				net->flight_size -= chk->book_size;
655 			else
656 				net->flight_size = 0;
657 
658 			stcb->asoc.peers_rwnd += chk->send_size;
659 			stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
660 
661 			/* reset the TSN for striking and other FR stuff */
662 			chk->rec.data.doing_fast_retransmit = 0;
663 			/* Clear any time so NO RTT is being done */
664 			chk->do_rtt = 0;
665 			if (alt != net) {
666 				sctp_free_remote_addr(chk->whoTo);
667 				chk->no_fr_allowed = 1;
668 				chk->whoTo = alt;
669 				atomic_add_int(&alt->ref_count, 1);
670 			} else {
671 				chk->no_fr_allowed = 0;
672 				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
673 					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
674 				} else {
675 					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
676 				}
677 			}
678 			if (sctp_cmt_on_off == 1) {
679 				chk->no_fr_allowed = 1;
680 			}
681 		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
682 			/* remember highest acked one */
683 			could_be_sent = chk;
684 		}
685 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
686 			cnt_mk++;
687 		}
688 	}
689 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
690 	sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
691 #endif
692 
693 	if (stcb->asoc.total_flight >= (orig_flight - net->flight_size)) {
694 		stcb->asoc.total_flight -= (orig_flight - net->flight_size);
695 	} else {
696 		stcb->asoc.total_flight = 0;
697 		stcb->asoc.total_flight_count = 0;
698 		audit_tf = 1;
699 	}
700 
701 #ifdef SCTP_DEBUG
702 	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
703 		if (num_mk) {
704 			printf("LAST TSN marked was %x\n", tsnlast);
705 			printf("Num marked for retransmission was %d peer-rwd:%ld\n",
706 			    num_mk, (u_long)stcb->asoc.peers_rwnd);
712 		}
713 	}
714 #endif
715 	*num_marked = num_mk;
716 	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
717 		/* fix it so we retransmit the highest acked anyway */
718 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
719 		cnt_mk++;
720 		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
721 	}
722 	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
723 #ifdef INVARIANTS
724 		printf("Local Audit says there are %d for retran asoc cnt:%d\n",
725 		    cnt_mk, stcb->asoc.sent_queue_retran_cnt);
726 #endif
727 #ifndef SCTP_AUDITING_ENABLED
728 		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
729 #endif
730 	}
731 	/* Now check for an ECN Echo that may be stranded */
732 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
733 		if ((chk->whoTo == net) &&
734 		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
735 			sctp_free_remote_addr(chk->whoTo);
736 			chk->whoTo = alt;
737 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
738 				chk->sent = SCTP_DATAGRAM_RESEND;
739 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
740 			}
741 			atomic_add_int(&alt->ref_count, 1);
742 		}
743 	}
744 	if (audit_tf) {
745 #ifdef SCTP_DEBUG
746 		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
747 			printf("Audit total flight due to negative value net:%p\n",
748 			    net);
749 		}
750 #endif				/* SCTP_DEBUG */
751 		stcb->asoc.total_flight = 0;
752 		stcb->asoc.total_flight_count = 0;
753 		/* Clear all networks' flight sizes */
754 		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
755 			lnets->flight_size = 0;
756 #ifdef SCTP_DEBUG
757 			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
758 				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
759 				    lnets, lnets->cwnd, lnets->ssthresh);
760 			}
761 #endif				/* SCTP_DEBUG */
762 		}
763 		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
764 			if (chk->sent < SCTP_DATAGRAM_RESEND) {
765 #ifdef SCTP_FLIGHT_LOGGING
766 				sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
767 				    chk->whoTo->flight_size,
768 				    chk->book_size,
769 				    (uintptr_t) stcb,
770 				    chk->rec.data.TSN_seq);
771 #endif
772 				stcb->asoc.total_flight += chk->book_size;
773 				chk->whoTo->flight_size += chk->book_size;
774 				stcb->asoc.total_flight_count++;
775 			}
776 		}
777 	}
778 	/*
779 	 * Set up the ECN nonce re-sync point. We do this since
780 	 * retransmissions are NOT set up for ECN. This means that, due to
781 	 * Karn's rule, we don't know the total of the peer's ECN bits.
782 	 */
783 	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
784 	if (chk == NULL) {
785 		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
786 	} else {
787 		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
788 	}
789 	stcb->asoc.nonce_wait_for_ecne = 0;
790 	stcb->asoc.nonce_sum_check = 0;
791 	/* Intended to return 1 when only a window probe is outstanding; currently we always return 0 */
792 	return (0);
793 }
794 
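/*
 * Re-target everything still queued to the failed net (the stream queues
 * and the pending send queue) onto the chosen alternate, adjusting the
 * net reference counts as we go.
 */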
795 static void
796 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
797     struct sctp_nets *net,
798     struct sctp_nets *alt)
799 {
800 	struct sctp_association *asoc;
801 	struct sctp_stream_out *outs;
802 	struct sctp_tmit_chunk *chk;
803 	struct sctp_stream_queue_pending *sp;
804 
805 	if (net == alt)
806 		/* nothing to do */
807 		return;
808 
809 	asoc = &stcb->asoc;
810 
811 	/*
812 	 * now go through all the streams checking for chunks sent to our
813 	 * bad network.
814 	 */
815 	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
816 		/* now clean up any chunks here */
817 		TAILQ_FOREACH(sp, &outs->outqueue, next) {
818 			if (sp->net == net) {
819 				sctp_free_remote_addr(sp->net);
820 				sp->net = alt;
821 				atomic_add_int(&alt->ref_count, 1);
822 			}
823 		}
824 	}
825 	/* Now check the pending queue */
826 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
827 		if (chk->whoTo == net) {
828 			sctp_free_remote_addr(chk->whoTo);
829 			chk->whoTo = alt;
830 			atomic_add_int(&alt->ref_count, 1);
831 		}
832 	}
833 
834 }
835 
836 int
837 sctp_t3rxt_timer(struct sctp_inpcb *inp,
838     struct sctp_tcb *stcb,
839     struct sctp_nets *net)
840 {
841 	struct sctp_nets *alt;
842 	int win_probe, num_mk;
843 
844 #ifdef SCTP_FR_LOGGING
845 	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
846 #ifdef SCTP_CWND_LOGGING
847 	{
848 		struct sctp_nets *lnet;
849 
850 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
851 			if (net == lnet) {
852 				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
853 			} else {
854 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
855 			}
856 		}
857 	}
858 #endif
859 #endif
860 	/* Find an alternate and mark those for retransmission */
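	/*
	 * A zero peer rwnd with less than one MTU in flight means the
	 * outstanding data is a window probe rather than evidence of loss.
	 */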
861 	if ((stcb->asoc.peers_rwnd == 0) &&
862 	    (stcb->asoc.total_flight < net->mtu)) {
863 		SCTP_STAT_INCR(sctps_timowindowprobe);
864 		win_probe = 1;
865 	} else {
866 		win_probe = 0;
867 	}
868 	alt = sctp_find_alternate_net(stcb, net, 0);
869 	sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
870 	/* FR Loss recovery just ended with the T3. */
871 	stcb->asoc.fast_retran_loss_recovery = 0;
872 
873 	/* CMT FR loss recovery ended with the T3 */
874 	net->fast_retran_loss_recovery = 0;
875 
876 	/*
877 	 * setup the sat loss recovery that prevents satellite cwnd advance.
878 	 */
879 	stcb->asoc.sat_t3_loss_recovery = 1;
880 	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
881 
882 	/* Backoff the timer and cwnd */
883 	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
884 	if (win_probe == 0) {
885 		/* We don't do normal threshold management on window probes */
886 		if (sctp_threshold_management(inp, stcb, net,
887 		    stcb->asoc.max_send_times)) {
888 			/* Association was destroyed */
889 			return (1);
890 		} else {
891 			if (net != stcb->asoc.primary_destination) {
892 				/* send an immediate HB if our RTO is stale */
893 				struct timeval now;
894 				unsigned int ms_goneby;
895 
896 				SCTP_GETTIME_TIMEVAL(&now);
897 				if (net->last_sent_time.tv_sec) {
898 					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
899 				} else {
900 					ms_goneby = 0;
901 				}
902 				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
903 					/*
904 					 * no recent feedback in an RTO or
905 					 * more, request an RTT update
906 					 */
907 					sctp_send_hb(stcb, 1, net);
908 				}
909 			}
910 		}
911 	} else {
912 		/*
913 		 * For a window probe we don't penalize the net but only
914 		 * the association. This may fail it if SACKs are not coming
915 		 * back. If SACKs are coming with rwnd locked at 0, we will
916 		 * continue to hold things waiting for rwnd to rise
917 		 */
918 		if (sctp_threshold_management(inp, stcb, NULL,
919 		    stcb->asoc.max_send_times)) {
920 			/* Association was destroyed */
921 			return (1);
922 		}
923 	}
924 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
925 		/* Move all pending over too */
926 		sctp_move_all_chunks_to_alt(stcb, net, alt);
927 		/* Was it our primary? */
928 		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
929 			/*
930 			 * Yes, note it as such and find an alternate. Note:
931 			 * this means the HB code must use this flag to reset
932 			 * the primary if it goes active again, AND if someone
933 			 * does a change-primary then this flag must be
934 			 * cleared from any net structures.
935 			 */
936 			if (sctp_set_primary_addr(stcb,
937 			    (struct sockaddr *)NULL,
938 			    alt) == 0) {
939 				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
940 				if (net->ro._s_addr) {
941 					sctp_free_ifa(net->ro._s_addr);
942 					net->ro._s_addr = NULL;
943 				}
944 				net->src_addr_selected = 0;
945 			}
946 		}
947 	}
948 	/*
949 	 * Special case for the cookie-echoed state: we don't do output but
950 	 * must await the COOKIE-ACK before retransmission
951 	 */
952 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
953 		/*
954 		 * Here we just reset the timer and start again since we
955 		 * have not established the asoc
956 		 */
957 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
958 		return (0);
959 	}
960 	if (stcb->asoc.peer_supports_prsctp) {
961 		struct sctp_tmit_chunk *lchk;
962 
963 		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
964 		/* C3. See if we need to send a Fwd-TSN */
965 		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
966 		    stcb->asoc.last_acked_seq, MAX_TSN)) {
967 			/*
968 			 * ISSUE with ECN, see FWD-TSN processing for notes
969 			 * on issues that will occur when the ECN NONCE
970 			 * stuff is put into SCTP for cross checking.
971 			 */
972 			send_forward_tsn(stcb, &stcb->asoc);
973 			if (lchk) {
974 				/* Assure a timer is up */
975 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
976 			}
977 		}
978 	}
979 #ifdef SCTP_CWND_MONITOR
980 	sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
981 #endif
982 	return (0);
983 }
984 
985 int
986 sctp_t1init_timer(struct sctp_inpcb *inp,
987     struct sctp_tcb *stcb,
988     struct sctp_nets *net)
989 {
990 	/* bump the thresholds */
991 	if (stcb->asoc.delayed_connection) {
992 		/*
993 		 * special hook for delayed connection. The library did NOT
994 		 * complete the rest of its sends.
995 		 */
996 		stcb->asoc.delayed_connection = 0;
997 		sctp_send_initiate(inp, stcb);
998 		return (0);
999 	}
1000 	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
1001 		return (0);
1002 	}
1003 	if (sctp_threshold_management(inp, stcb, net,
1004 	    stcb->asoc.max_init_times)) {
1005 		/* Association was destroyed */
1006 		return (1);
1007 	}
1008 	stcb->asoc.dropped_special_cnt = 0;
1009 	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
1010 	if (stcb->asoc.initial_init_rto_max < net->RTO) {
1011 		net->RTO = stcb->asoc.initial_init_rto_max;
1012 	}
1013 	if (stcb->asoc.numnets > 1) {
1014 		/* If we have more than one addr use it */
1015 		struct sctp_nets *alt;
1016 
1017 		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
1018 		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
1019 			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
1020 			stcb->asoc.primary_destination = alt;
1021 		}
1022 	}
1023 	/* Send out a new init */
1024 	sctp_send_initiate(inp, stcb);
1025 	return (0);
1026 }
1027 
1028 /*
1029  * For cookie and asconf we actually need to find and mark for resend, then
1030  * increment the resend counter (after all the threshold management stuff of
1031  * course).
1032  */
1033 int
1034 sctp_cookie_timer(struct sctp_inpcb *inp,
1035     struct sctp_tcb *stcb,
1036     struct sctp_nets *net)
1037 {
1038 	struct sctp_nets *alt;
1039 	struct sctp_tmit_chunk *cookie;
1040 
1041 	/* first before all else we must find the cookie */
1042 	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
1043 		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
1044 			break;
1045 		}
1046 	}
1047 	if (cookie == NULL) {
1048 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
1049 			/* FOOBAR! */
1050 			struct mbuf *oper;
1051 
1052 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1053 			    0, M_DONTWAIT, 1, MT_DATA);
1054 			if (oper) {
1055 				struct sctp_paramhdr *ph;
1056 				uint32_t *ippp;
1057 
1058 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1059 				    sizeof(uint32_t);
1060 				ph = mtod(oper, struct sctp_paramhdr *);
1061 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1062 				ph->param_length = htons(SCTP_BUF_LEN(oper));
1063 				ippp = (uint32_t *) (ph + 1);
1064 				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
1065 			}
1066 			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
1067 			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
1068 			    oper);
1069 		} else {
1070 #ifdef INVARIANTS
1071 			panic("Cookie timer expires in wrong state?");
1072 #else
1073 			printf("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
1074 			return (0);
1075 #endif
1076 		}
1077 		return (0);
1078 	}
1079 	/* Ok we found the cookie, threshold management next */
1080 	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
1081 	    stcb->asoc.max_init_times)) {
1082 		/* Assoc is over */
1083 		return (1);
1084 	}
1085 	/*
1086 	 * cleared threshold management, now let's backoff the address &
1087 	 * select an alternate
1088 	 */
1089 	stcb->asoc.dropped_special_cnt = 0;
1090 	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
1091 	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
1092 	if (alt != cookie->whoTo) {
1093 		sctp_free_remote_addr(cookie->whoTo);
1094 		cookie->whoTo = alt;
1095 		atomic_add_int(&alt->ref_count, 1);
1096 	}
1097 	/* Now mark the retran info */
1098 	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
1099 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1100 	}
1101 	cookie->sent = SCTP_DATAGRAM_RESEND;
1102 	/*
1103 	 * Now call the output routine to kick out the cookie again. Note we
1104 	 * don't mark any chunks for retran so that FR will need to kick in
1105 	 * to move these (or a send timer).
1106 	 */
1107 	return (0);
1108 }
1109 
1110 int
1111 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1112     struct sctp_nets *net)
1113 {
1114 	struct sctp_nets *alt;
1115 	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
1116 
1117 	if (stcb->asoc.stream_reset_outstanding == 0) {
1118 		return (0);
1119 	}
1120 	/* find the existing STRRESET; we use the seq number we sent out on */
1121 	sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
1122 	if (strrst == NULL) {
1123 		return (0);
1124 	}
1125 	/* do threshold management */
1126 	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
1127 	    stcb->asoc.max_send_times)) {
1128 		/* Assoc is over */
1129 		return (1);
1130 	}
1131 	/*
1132 	 * cleared threshold management, now let's backoff the address &
1133 	 * select an alternate
1134 	 */
1135 	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1136 	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
1137 	sctp_free_remote_addr(strrst->whoTo);
1138 	strrst->whoTo = alt;
1139 	atomic_add_int(&alt->ref_count, 1);
1140 
1141 	/* See if an ECN Echo is also stranded */
1142 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1143 		if ((chk->whoTo == net) &&
1144 		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1145 			sctp_free_remote_addr(chk->whoTo);
1146 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
1147 				chk->sent = SCTP_DATAGRAM_RESEND;
1148 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1149 			}
1150 			chk->whoTo = alt;
1151 			atomic_add_int(&alt->ref_count, 1);
1152 		}
1153 	}
1154 	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1155 		/*
1156 		 * If the address went unreachable, we need to move to
1157 		 * alternates for ALL chk's in queue
1158 		 */
1159 		sctp_move_all_chunks_to_alt(stcb, net, alt);
1160 	}
1161 	/* mark the retran info */
1162 	if (strrst->sent != SCTP_DATAGRAM_RESEND)
1163 		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1164 	strrst->sent = SCTP_DATAGRAM_RESEND;
1165 
1166 	/* restart the timer */
1167 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1168 	return (0);
1169 }
1170 
1171 int
1172 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1173     struct sctp_nets *net)
1174 {
1175 	struct sctp_nets *alt;
1176 	struct sctp_tmit_chunk *asconf, *chk;
1177 
1178 	/* is this the first send, or a retransmission? */
1179 	if (stcb->asoc.asconf_sent == 0) {
1180 		/* compose a new ASCONF chunk and send it */
1181 		sctp_send_asconf(stcb, net);
1182 	} else {
1183 		/* Retransmission of the existing ASCONF needed... */
1184 
1185 		/* find the existing ASCONF */
1186 		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1187 		    sctp_next) {
1188 			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
1189 				break;
1190 			}
1191 		}
1192 		if (asconf == NULL) {
1193 			return (0);
1194 		}
1195 		/* do threshold management */
1196 		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1197 		    stcb->asoc.max_send_times)) {
1198 			/* Assoc is over */
1199 			return (1);
1200 		}
1201 		/*
1202 		 * PETER? FIX? How will the following code ever run? If the
1203 		 * max_send_times is hit, threshold management will blow away
1204 		 * the association?
1205 		 */
1206 		if (asconf->snd_count > stcb->asoc.max_send_times) {
1207 			/*
1208 			 * Something is rotten; the peer is not responding to
1209 			 * ASCONFs but maybe is to data, e.g. it is not
1210 			 * properly handling the chunk type upper bits. Mark
1211 			 * this peer as ASCONF-incapable and clean up.
1212 			 */
1213 #ifdef SCTP_DEBUG
1214 			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1215 				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1216 			}
1217 #endif				/* SCTP_DEBUG */
1218 			sctp_asconf_cleanup(stcb, net);
1219 			return (0);
1220 		}
1221 		/*
1222 		 * cleared threshold management, now let's backoff the
1223 		 * address & select an alternate
1224 		 */
1225 		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1226 		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
1227 		sctp_free_remote_addr(asconf->whoTo);
1228 		asconf->whoTo = alt;
1229 		atomic_add_int(&alt->ref_count, 1);
1230 
1231 		/* See if an ECN Echo is also stranded */
1232 		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1233 			if ((chk->whoTo == net) &&
1234 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1235 				sctp_free_remote_addr(chk->whoTo);
1236 				chk->whoTo = alt;
1237 				if (chk->sent != SCTP_DATAGRAM_RESEND) {
1238 					chk->sent = SCTP_DATAGRAM_RESEND;
1239 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1240 				}
1241 				atomic_add_int(&alt->ref_count, 1);
1242 			}
1243 		}
1244 		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1245 			/*
1246 			 * If the address went unreachable, we need to move
1247 			 * to alternates for ALL chk's in queue
1248 			 */
1249 			sctp_move_all_chunks_to_alt(stcb, net, alt);
1250 		}
1251 		/* mark the retran info */
1252 		if (asconf->sent != SCTP_DATAGRAM_RESEND)
1253 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1254 		asconf->sent = SCTP_DATAGRAM_RESEND;
1255 	}
1256 	return (0);
1257 }
1258 
1259 /*
1260  * For the shutdown and shutdown-ack, we do not keep one around on the
1261  * control queue. This means we must generate a new one and call the general
1262  * chunk output routine, AFTER having done threshold management.
1263  */
1264 int
1265 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1266     struct sctp_nets *net)
1267 {
1268 	struct sctp_nets *alt;
1269 
1270 	/* first threshold management */
1271 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1272 		/* Assoc is over */
1273 		return (1);
1274 	}
1275 	/* second select an alternative */
1276 	alt = sctp_find_alternate_net(stcb, net, 0);
1277 
1278 	/* third generate a shutdown into the queue for our net */
1279 	if (alt) {
1280 		sctp_send_shutdown(stcb, alt);
1281 	} else {
1282 		/*
1283 		 * if alt is NULL, there is no dest to send to??
1284 		 */
1285 		return (0);
1286 	}
1287 	/* fourth restart timer */
1288 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1289 	return (0);
1290 }
1291 
1292 int
1293 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1294     struct sctp_nets *net)
1295 {
1296 	struct sctp_nets *alt;
1297 
1298 	/* first threshold management */
1299 	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1300 		/* Assoc is over */
1301 		return (1);
1302 	}
1303 	/* second select an alternative */
1304 	alt = sctp_find_alternate_net(stcb, net, 0);
1305 
1306 	/* third generate a shutdown into the queue for our net */
1307 	sctp_send_shutdown_ack(stcb, alt);
1308 
1309 	/* fourth restart timer */
1310 	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1311 	return (0);
1312 }
1313 
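/*
 * Consistency helper, called only when the send/sent queues are empty
 * but the output-queue byte count is non-zero: re-hang any stream
 * "spokes" that fell off the wheel and reconcile the queue counters.
 */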
1314 static void
1315 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1316     struct sctp_tcb *stcb)
1317 {
1318 	struct sctp_stream_out *outs;
1319 	struct sctp_stream_queue_pending *sp;
1320 	unsigned int chks_in_queue = 0;
1321 	int being_filled = 0;
1322 
1323 	/*
1324 	 * This function is ONLY called when the send/sent queues are empty.
1325 	 */
1326 	if ((stcb == NULL) || (inp == NULL))
1327 		return;
1328 
1329 	if (stcb->asoc.sent_queue_retran_cnt) {
1330 		printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1331 		    stcb->asoc.sent_queue_retran_cnt);
1332 		stcb->asoc.sent_queue_retran_cnt = 0;
1333 	}
1334 	SCTP_TCB_SEND_LOCK(stcb);
1335 	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1336 		int i, cnt = 0;
1337 
1338 		/* Check to see if a spoke fell off the wheel */
1339 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1340 			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
1341 				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
1342 				cnt++;
1343 			}
1344 		}
1345 		if (cnt) {
1346 			/* yep, we lost a spoke or two */
1347 			printf("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
1348 		} else {
1349 			/* no spokes lost, */
1350 			stcb->asoc.total_output_queue_size = 0;
1351 		}
1352 		SCTP_TCB_SEND_UNLOCK(stcb);
1353 		return;
1354 	}
1355 	SCTP_TCB_SEND_UNLOCK(stcb);
1356 	/* Check to see if some data is queued; if so, report it */
1357 	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1358 		if (!TAILQ_EMPTY(&outs->outqueue)) {
1359 			TAILQ_FOREACH(sp, &outs->outqueue, next) {
1360 				if (sp->msg_is_complete)
1361 					being_filled++;
1362 				chks_in_queue++;
1363 			}
1364 		}
1365 	}
1366 	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1367 		printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1368 		    stcb->asoc.stream_queue_cnt, chks_in_queue);
1369 	}
1370 	if (chks_in_queue) {
1371 		/* call the output queue function */
1372 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1373 		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1374 		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1375 			/*
1376 			 * Probably should go in and make it go back through
1377 			 * and add fragments allowed
1378 			 */
1379 			if (being_filled == 0) {
1380 				printf("Still nothing moved %d chunks are stuck\n",
1381 				    chks_in_queue);
1382 			}
1383 		}
1384 	} else {
1385 		printf("Found no chunks on any queue tot:%lu\n",
1386 		    (u_long)stcb->asoc.total_output_queue_size);
1387 		stcb->asoc.total_output_queue_size = 0;
1388 	}
1389 }
1390 
1391 int
1392 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1393     struct sctp_nets *net, int cnt_of_unconf)
1394 {
1395 	if (net) {
1396 		if (net->hb_responded == 0) {
1397 			if (net->ro._s_addr) {
1398 				/*
1399 				 * Invalidate the src address if we did not
1400 				 * get a response last time.
1401 				 */
1402 				sctp_free_ifa(net->ro._s_addr);
1403 				net->ro._s_addr = NULL;
1404 				net->src_addr_selected = 0;
1405 			}
1406 			sctp_backoff_on_timeout(stcb, net, 1, 0);
1407 		}
1408 		/* Zero PBA, if it needs it */
1409 		if (net->partial_bytes_acked) {
1410 			net->partial_bytes_acked = 0;
1411 		}
1412 	}
1413 	if ((stcb->asoc.total_output_queue_size > 0) &&
1414 	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1415 	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1416 		sctp_audit_stream_queues_for_size(inp, stcb);
1417 	}
1418 	/* Send a new HB; this will do threshold management and pick a new dest */
1419 	if (cnt_of_unconf == 0) {
1420 		if (sctp_send_hb(stcb, 0, NULL) < 0) {
1421 			return (1);
1422 		}
1423 	} else {
1424 		/*
1425 		 * this will send out extra hb's up to maxburst if there are
1426 		 * any unconfirmed addresses.
1427 		 */
1428 		int cnt_sent = 0;
1429 
1430 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1431 			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1432 			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
1433 				cnt_sent++;
1434 				if (net->hb_responded == 0) {
1435 					/* Did the peer respond to our last HB? */
1436 					if (net->ro._s_addr) {
1437 						sctp_free_ifa(net->ro._s_addr);
1438 						net->ro._s_addr = NULL;
1439 						net->src_addr_selected = 0;
1440 					}
1441 				}
1442 				if (sctp_send_hb(stcb, 1, net) == 0) {
1443 					break;
1444 				}
1445 				if (cnt_sent >= sctp_hb_maxburst)
1446 					break;
1447 			}
1448 		}
1449 	}
1450 	return (0);
1451 }
1452 
1453 int
1454 sctp_is_hb_timer_running(struct sctp_tcb *stcb)
1455 {
1456 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
1457 		/* it's running */
1458 		return (1);
1459 	} else {
1460 		/* nope */
1461 		return (0);
1462 	}
1463 }
1464 
1465 int
1466 sctp_is_sack_timer_running(struct sctp_tcb *stcb)
1467 {
1468 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
1469 		/* it's running */
1470 		return (1);
1471 	} else {
1472 		/* nope */
1473 		return (0);
1474 	}
1475 }
1476 
1477 #define SCTP_NUMBER_OF_MTU_SIZES 18
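/*
 * Common link MTU sizes in ascending order (e.g. 1500 for Ethernet,
 * 1492 for PPPoE); the path MTU timer probes upward one entry at a time.
 */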
1478 static uint32_t mtu_sizes[] = {
1479 	68,
1480 	296,
1481 	508,
1482 	512,
1483 	544,
1484 	576,
1485 	1006,
1486 	1492,
1487 	1500,
1488 	1536,
1489 	2002,
1490 	2048,
1491 	4352,
1492 	4464,
1493 	8166,
1494 	17914,
1495 	32000,
1496 	65535
1497 };
1498 
1499 
1500 static uint32_t
1501 sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
1502 {
1503 	/* select another MTU that is just bigger than this one */
1504 	int i;
1505 
1506 	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1507 		if (cur_mtu < mtu_sizes[i]) {
1508 			/* this is the next size up from cur_mtu */
1509 			return (mtu_sizes[i]);
1510 		}
1511 	}
1512 	/* here return the highest allowable */
1513 	return (cur_mtu);
1514 }
1515 
1516 
1517 void
1518 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1519     struct sctp_tcb *stcb,
1520     struct sctp_nets *net)
1521 {
1522 	uint32_t next_mtu;
1523 
1524 	/* restart the timer in any case */
1525 	next_mtu = sctp_getnext_mtu(inp, net->mtu);
1526 	if (next_mtu <= net->mtu) {
1527 		/* nothing to do */
1528 		return;
1529 	}
1530 	if (net->ro.ro_rt != NULL) {
1531 		/*
1532 		 * only if we have a route and interface do we set anything.
1533 		 * Note we always restart the timer though just in case it
1534 		 * is updated (i.e. the ifp) or route/ifp is populated.
1535 		 */
1536 		if (net->ro.ro_rt->rt_ifp != NULL) {
1537 			if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
1538 				/* ok it will fit out the door */
1539 				net->mtu = next_mtu;
1540 			}
1541 		}
1542 	}
1543 	/* restart the timer */
1544 	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1545 }
1546 
1547 void
1548 sctp_autoclose_timer(struct sctp_inpcb *inp,
1549     struct sctp_tcb *stcb,
1550     struct sctp_nets *net)
1551 {
1552 	struct timeval tn, *tim_touse;
1553 	struct sctp_association *asoc;
1554 	int ticks_gone_by;
1555 
1556 	SCTP_GETTIME_TIMEVAL(&tn);
1557 	if (stcb->asoc.sctp_autoclose_ticks &&
1558 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1559 		/* Auto close is on */
1560 		asoc = &stcb->asoc;
1561 		/* pick the time to use */
1562 		if (asoc->time_last_rcvd.tv_sec >
1563 		    asoc->time_last_sent.tv_sec) {
1564 			tim_touse = &asoc->time_last_rcvd;
1565 		} else {
1566 			tim_touse = &asoc->time_last_sent;
1567 		}
1568 		/* Now has long enough transpired to autoclose? */
1569 		/* Has enough time transpired to autoclose? */
1570 		if ((ticks_gone_by > 0) &&
1571 		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1572 			/*
1573 			 * autoclose time has hit, call the output routine,
1574 			 * which should do nothing just to be SURE we don't
1575 			 * have hanging data. We can then safely check the
1576 			 * queues and know that we are clear to send
1577 			 * shutdown
1578 			 */
1579 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
1580 			/* Are we clean? */
1581 			if (TAILQ_EMPTY(&asoc->send_queue) &&
1582 			    TAILQ_EMPTY(&asoc->sent_queue)) {
1583 				/*
1584 				 * there is nothing queued to send, so I'm
1585 				 * done...
1586 				 */
1587 				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1588 					/* only send SHUTDOWN 1st time thru */
1589 					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1590 					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1591 					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1592 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1593 					}
1594 					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1595 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1596 					    stcb->sctp_ep, stcb,
1597 					    asoc->primary_destination);
1598 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1599 					    stcb->sctp_ep, stcb,
1600 					    asoc->primary_destination);
1601 				}
1602 			}
1603 		} else {
1604 			/*
1605 			 * No auto close at this time, reset t-o to check
1606 			 * later
1607 			 */
1608 			int tmp;
1609 
1610 			/* fool the timer startup to use the time left */
1611 			tmp = asoc->sctp_autoclose_ticks;
1612 			asoc->sctp_autoclose_ticks -= ticks_gone_by;
1613 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1614 			    net);
1615 			/* restore the real tick value */
1616 			asoc->sctp_autoclose_ticks = tmp;
1617 		}
1618 	}
1619 }
1620 
1621 void
1622 sctp_iterator_timer(struct sctp_iterator *it)
1623 {
1624 	int iteration_count = 0;
1625 	int inp_skip = 0;
1626 
1627 	/*
1628 	 * only one iterator can run at a time. This is the only way we can
1629 	 * cleanly pull ep's from underneath all the running iterators when
1630 	 * an ep is freed.
1631 	 */
1632 	SCTP_ITERATOR_LOCK();
1633 	if (it->inp == NULL) {
1634 		/* iterator is complete */
1635 done_with_iterator:
1636 		SCTP_ITERATOR_UNLOCK();
1637 		SCTP_INP_INFO_WLOCK();
1638 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1639 		/* stopping the callout is not needed, in theory */
1640 		SCTP_INP_INFO_WUNLOCK();
1641 		SCTP_OS_TIMER_STOP(&it->tmr.timer);
1642 		if (it->function_atend != NULL) {
1643 			(*it->function_atend) (it->pointer, it->val);
1644 		}
1645 		SCTP_FREE(it);
1646 		return;
1647 	}
1648 select_a_new_ep:
1649 	SCTP_INP_WLOCK(it->inp);
1650 	while (((it->pcb_flags) &&
1651 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1652 	    ((it->pcb_features) &&
1653 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1654 		/* endpoint flags or features don't match, so keep looking */
1655 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1656 			SCTP_INP_WUNLOCK(it->inp);
1657 			goto done_with_iterator;
1658 		}
1659 		SCTP_INP_WUNLOCK(it->inp);
1660 		it->inp = LIST_NEXT(it->inp, sctp_list);
1661 		if (it->inp == NULL) {
1662 			goto done_with_iterator;
1663 		}
1664 		SCTP_INP_WLOCK(it->inp);
1665 	}
1666 	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1667 	    (it->inp->inp_starting_point_for_iterator != it)) {
1668 		printf("Iterator collision, waiting for one at %p\n",
1669 		    it->inp);
1670 		SCTP_INP_WUNLOCK(it->inp);
1671 		goto start_timer_return;
1672 	}
1673 	/* mark the current iterator on the endpoint */
1674 	it->inp->inp_starting_point_for_iterator = it;
1675 	SCTP_INP_WUNLOCK(it->inp);
1676 	SCTP_INP_RLOCK(it->inp);
1677 	/* now go through each assoc which is in the desired state */
1678 	if (it->done_current_ep == 0) {
1679 		if (it->function_inp != NULL)
1680 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1681 		it->done_current_ep = 1;
1682 	}
1683 	if (it->stcb == NULL) {
1684 		/* run the per instance function */
1685 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1686 	}
1687 	SCTP_INP_RUNLOCK(it->inp);
1688 	if ((inp_skip) || it->stcb == NULL) {
1689 		if (it->function_inp_end != NULL) {
1690 			inp_skip = (*it->function_inp_end) (it->inp,
1691 			    it->pointer,
1692 			    it->val);
1693 		}
1694 		goto no_stcb;
1695 	}
1696 	if ((it->stcb) &&
1697 	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
1698 		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1699 	}
1700 	while (it->stcb) {
1701 		SCTP_TCB_LOCK(it->stcb);
1702 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1703 			/* not in the right state... keep looking */
1704 			SCTP_TCB_UNLOCK(it->stcb);
1705 			goto next_assoc;
1706 		}
1707 		/* mark the current iterator on the assoc */
1708 		it->stcb->asoc.stcb_starting_point_for_iterator = it;
1709 		/* see if we have limited out the iterator loop */
1710 		iteration_count++;
1711 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1712 	start_timer_return:
1713 			/* set a timer to continue this later */
1714 			SCTP_TCB_UNLOCK(it->stcb);
1715 			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
1716 			    (struct sctp_inpcb *)it, NULL, NULL);
1717 			SCTP_ITERATOR_UNLOCK();
1718 			return;
1719 		}
1720 		/* run function on this one */
1721 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1722 
1723 		/*
1724 		 * we lie here, it really needs to have its own type but
1725 		 * first I must verify that this won't affect things :-0
1726 		 */
1727 		if (it->no_chunk_output == 0)
1728 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
1729 
1730 		SCTP_TCB_UNLOCK(it->stcb);
1731 next_assoc:
1732 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1733 		if (it->stcb == NULL) {
1734 			if (it->function_inp_end != NULL) {
1735 				inp_skip = (*it->function_inp_end) (it->inp,
1736 				    it->pointer,
1737 				    it->val);
1738 			}
1739 		}
1740 	}
1741 no_stcb:
1742 	/* done with all assocs on this endpoint, move on to next endpoint */
1743 	it->done_current_ep = 0;
1744 	SCTP_INP_WLOCK(it->inp);
1745 	it->inp->inp_starting_point_for_iterator = NULL;
1746 	SCTP_INP_WUNLOCK(it->inp);
1747 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1748 		it->inp = NULL;
1749 	} else {
1750 		SCTP_INP_INFO_RLOCK();
1751 		it->inp = LIST_NEXT(it->inp, sctp_list);
1752 		SCTP_INP_INFO_RUNLOCK();
1753 	}
1754 	if (it->inp == NULL) {
1755 		goto done_with_iterator;
1756 	}
1757 	goto select_a_new_ep;
1758 }
1759