xref: /freebsd/sys/netinet/sctp_timer.c (revision 588ff6c0cc9aaf10ba19080d9f8acbd8be36abf3)
/*-
 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>


#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;

#endif				/* SCTP_DEBUG */


extern unsigned int sctp_early_fr_msec;

void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm, no RTT estimate yet? */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {
		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
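	/*
	 * Editorial note (assumption): lastsa/lastsv appear to be the
	 * TCP-style scaled smoothed-RTT and variance maintained by
	 * sctp_calculate_rto(), so the expression above recovers an
	 * RTT-sized value in milliseconds from the scaled state. The
	 * exact scaling factors live in sctputil.c.
	 */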
	if (cur_rtt < sctp_early_fr_msec) {
		cur_rtt = sctp_early_fr_msec;
	}
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough time on the clock
		 * to account for the RTO. Clamp the bound at zero and
		 * don't worry about it; this may mean we mark more
		 * chunks than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
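	/*
	 * Only chunks sent at or before min_wait (i.e. at least one
	 * RTT-estimate ago, floored at sctp_early_fr_msec) are old enough
	 * to mark: anything newer has not had time to be SACKed. For
	 * example, with a 120 ms estimate, a chunk sent 50 ms ago is
	 * skipped while one sent 200 ms ago is eligible.
	 */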
	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * this chunk was sent more recently than
				 * our minimum wait; keep walking toward
				 * older chunks.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok, it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
#ifdef SCTP_EARLYFR_LOGGING
			sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
			    4, SCTP_FR_MARKED_EARLY);
#endif
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Marked all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
#ifdef SCTP_CWND_MONITOR
		int old_cwnd;

		old_cwnd = net->cwnd;
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
		/*
		 * make a small adjustment to cwnd and force it into
		 * congestion avoidance.
		 */
		if (net->cwnd > net->mtu)
			/* drop down one MTU after sending */
			net->cwnd -= net->mtu;
		if (net->cwnd < net->ssthresh)
			/* still in slow start, move to CA */
			net->ssthresh = net->cwnd - 1;
#ifdef SCTP_CWND_MONITOR
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
#endif
	} else if (cnt_resend) {
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
	}
	/* Restart it? */
	if (net->flight_size < net->cwnd) {
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}

void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
		    asoc->sent_queue_retran_cnt,
		    asoc->sent_queue_cnt);
	}
#endif				/* SCTP_DEBUG */
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Audit completes retran:%d onqueue:%d\n",
		    asoc->sent_queue_retran_cnt,
		    asoc->sent_queue_cnt);
	}
#endif				/* SCTP_DEBUG */
}

int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Error count for %p now %d thresh:%d\n",
			    net, net->error_count,
			    net->failure_threshold);
		}
#endif				/* SCTP_DEBUG */
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			stcb->asoc.overall_error_count++;
		}
	} else {
		stcb->asoc.overall_error_count++;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Overall error count for %p now %d thresh:%u state:%x\n",
		    &stcb->asoc,
		    stcb->asoc.overall_error_count,
		    (uint32_t) threshold,
		    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	}
#endif				/* SCTP_DEBUG */
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
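		/*
		 * The cause built above is a standard operation-error
		 * TLV: a sctp_paramhdr (type/length) followed by one
		 * 32-bit "location" code identifying which spot in this
		 * file raised the abort, so peer-side dumps are
		 * traceable.
		 */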
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
		return (1);
	}
	return (0);
}

struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int highest_ssthresh)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *hthresh = NULL;
	int once;
	uint32_t val = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	if (highest_ssthresh) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
			    ) {
				/* skip nets that are unreachable or unconfirmed */
				continue;
			}
			if (mnet->ssthresh > val) {
				/* largest ssthresh seen so far wins */
				hthresh = mnet;
				val = mnet->ssthresh;
			} else if (val == mnet->ssthresh) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
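				/*
				 * hb_random_values holds one 32-bit draw
				 * sliced into four bytes; each ssthresh
				 * tie consumes one byte, and an odd value
				 * below moves the choice to the
				 * later-scanned net.
				 */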
				if (this_random % 2) {
					hthresh = mnet;
					val = mnet->ssthresh;
				}
			}
		}
		if (hthresh) {
			return (hthresh);
		}
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		if (alt->ro.ro_rt == NULL) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
			if (sin6->sin6_family == AF_INET6) {
				(void)sa6_embedscope(sin6, ip6_use_defzone);
			}
			rtalloc_ign((struct route *)&alt->ro, 0UL);
			if (sin6->sin6_family == AF_INET6) {
				(void)sa6_recoverscope(sin6);
			}
			alt->src_addr_selected = 0;
		}
		if (
		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
		    ) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/*
		 * Case where NO in-service network exists (dormant
		 * state); we rotate destinations.
		 */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}

static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked)
{
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
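	/*
	 * Classic exponential backoff (RFC 4960 rule E2): each timeout
	 * doubles the RTO, capped at RTO.Max. E.g. a 1 s RTO becomes
	 * 2 s, then 4 s, 8 s, ... until maxrto.
	 */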
	if ((win_probe == 0) && num_marked) {
		/* We don't apply penalty to window probe scenarios */
#ifdef SCTP_CWND_MONITOR
		int old_cwnd = net->cwnd;

#endif
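		/*
		 * Timeout congestion response: ssthresh drops to half of
		 * cwnd and cwnd collapses to a single MTU. The floor here
		 * is two MTUs, per the original RFC 2960 text; RFC 4960
		 * later raised that floor to four MTUs.
		 */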
		net->ssthresh = net->cwnd >> 1;
		if (net->ssthresh < (net->mtu << 1)) {
			net->ssthresh = (net->mtu << 1);
		}
		net->cwnd = net->mtu;
		/* floor of 1 mtu */
		if (net->cwnd < net->mtu)
			net->cwnd = net->mtu;
#ifdef SCTP_CWND_MONITOR
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
#endif

		net->partial_bytes_acked = 0;
	}
}

extern int sctp_peer_chunk_oh;

static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked)
{

	/*
	 * Mark all chunks (well, not quite all) that were sent to *net
	 * for retransmission and move them to alt as their destination
	 * as well. We only mark chunks that have been outstanding long
	 * enough to have received feedback.
	 */
	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int orig_rwnd, audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight;
	uint32_t tsnlast, tsnfirst;

	/*
	 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
	 * then pick the dest with the largest ssthresh for any
	 * retransmission. (iyengar@cis.udel.edu, 2005/08/12)
	 */
	if (sctp_cmt_on_off) {
		alt = sctp_find_alternate_net(stcb, net, 1);
		/*
		 * CUCv2: If a different dest is picked for the
		 * retransmission, then a new (rtx-)pseudo_cumack needs to
		 * be tracked for the orig dest. Let CUCv2 track a new
		 * (rtx-)pseudo-cumack always.
		 */
		net->find_pseudo_cumack = 1;
		net->find_rtx_pseudo_cumack = 1;
	}
	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it.
	 */
	SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(cur_rtt,
	    stcb->asoc.peers_rwnd,
	    window_probe,
	    SCTP_FR_T3_MARK_TIME);
	sctp_log_fr(net->flight_size,
	    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
	    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
	    SCTP_FR_CWND_REPORT);
	sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
#endif
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough time on the clock
		 * to account for the RTO. Clamp the bound at zero and
		 * don't worry about it; this may mean we mark more
		 * chunks than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
	sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
#endif
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf, but we will fix that down below.
	 */
	orig_rwnd = stcb->asoc.peers_rwnd;
	orig_flight = net->flight_size;
	net->rto_pending = 0;
	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* Strange case, our list got out of order? */
			printf("Our list is out of order?\n");
			panic("Out of order list");
		}
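		/*
		 * Invariant behind the panic above: the sent queue only
		 * holds TSNs strictly above the cumulative ack point
		 * (last_acked_seq), so a TSN at or below it means the
		 * queue is corrupt.
		 */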
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate it has been outstanding long enough */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
			sctp_log_fr(chk->rec.data.TSN_seq,
			    chk->sent_rcv_time.tv_sec,
			    chk->sent_rcv_time.tv_usec,
			    SCTP_FR_T3_MARK_TIME);
#endif
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min; forget it, we
				 * will find no more to send.
				 */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
				sctp_log_fr(0,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_STOPPED);
#endif
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok, it was sent after our boundary
					 * time.
					 */
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
#endif
					continue;
				}
			}
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
					/* Yes, so drop it */
					if (chk->data) {
						sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue);
					}
					continue;
				}
			}
			if (PR_SCTP_RTX_ENABLED(chk->flags)) {
				/*
				 * Has it hit its retransmission limit? For
				 * this policy the limit is carried in
				 * timetodrop.tv_sec.
				 */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue);
					}
					continue;
				}
			}
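			/*
			 * To recap the two PR-SCTP policies above: the
			 * timed policy stores an absolute expiry in
			 * rec.data.timetodrop, while the
			 * retransmission-limited policy reuses
			 * timetodrop.tv_sec as a maximum snd_count.
			 * Abandoned chunks are released instead of being
			 * marked for resend.
			 */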
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    0, SCTP_FR_T3_MARKED);

#endif
			}
			if (stcb->asoc.total_flight_count > 0)
				stcb->asoc.total_flight_count--;
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);
			net->marked_retrans++;
			stcb->asoc.marked_retrans++;
#ifdef SCTP_FLIGHT_LOGGING
			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
			    chk->whoTo->flight_size,
			    chk->book_size,
			    (uintptr_t) stcb,
			    chk->rec.data.TSN_seq);
#endif

			if (net->flight_size >= chk->book_size)
				net->flight_size -= chk->book_size;
			else
				net->flight_size = 0;

			stcb->asoc.peers_rwnd += chk->send_size;
			stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
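			/*
			 * Once marked for retransmission the chunk is no
			 * longer counted as in flight, so its bytes (plus
			 * the configurable per-chunk overhead,
			 * sctp_peer_chunk_oh) are credited back to our
			 * view of the peer's receive window.
			 */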

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time, so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			if (sctp_cmt_on_off == 1) {
				chk->no_fr_allowed = 1;
			}
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
#endif

	if (stcb->asoc.total_flight >= (orig_flight - net->flight_size)) {
		stcb->asoc.total_flight -= (orig_flight - net->flight_size);
	} else {
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		audit_tf = 1;
	}

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
		if (num_mk) {
			printf("LAST TSN marked was %x\n", tsnlast);
			printf("Num marked for retransmission was %d peer-rwd:%lu\n",
			    num_mk, (u_long)stcb->asoc.peers_rwnd);
		}
	}
#endif
	*num_marked = num_mk;
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		printf("Local Audit says there are %d for retran asoc cnt:%d\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	/* Now check for an ECN Echo that may be stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (audit_tf) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Audit total flight due to negative value net:%p\n",
			    net);
		}
#endif				/* SCTP_DEBUG */
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks' flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
				printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
				    lnets, lnets->cwnd, lnets->ssthresh);
			}
#endif				/* SCTP_DEBUG */
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
#ifdef SCTP_FLIGHT_LOGGING
				sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
				    chk->whoTo->flight_size,
				    chk->book_size,
				    (uintptr_t) stcb,
				    chk->rec.data.TSN_seq);
#endif
				stcb->asoc.total_flight += chk->book_size;
				chk->whoTo->flight_size += chk->book_size;
				stcb->asoc.total_flight_count++;
			}
		}
	}
	/*
	 * Set up the ECN nonce re-sync point. We do this since
	 * retransmissions are NOT set up for ECN. This means that, due to
	 * Karn's rule, we don't know the total of the peer's ECN bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/*
	 * Note: despite an older comment about returning 1 for a lone
	 * window probe, this function always returns 0; the caller
	 * tracks the window-probe case itself.
	 */
	return (0);
}

static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;

	if (net == alt)
		/* nothing to do */
		return;

	asoc = &stcb->asoc;

	/*
	 * now run through all the streams, checking for chunks sent to
	 * our bad network.
	 */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		TAILQ_FOREACH(sp, &outs->outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
	/* Now check the pending queue */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}

}

int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

#ifdef SCTP_FR_LOGGING
	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
#ifdef SCTP_CWND_LOGGING
	{
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
#endif
#endif
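	/*
	 * T3-rtx expiry, roughly RFC 4960 6.3.3: detect the window-probe
	 * case, pick an alternate destination, mark eligible chunks for
	 * retransmission, back off RTO/cwnd, and run threshold
	 * management, which may abort the association.
	 */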
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}
	alt = sctp_find_alternate_net(stcb, net, 0);
	sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
	/* FR loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;

	/*
	 * set up the sat loss recovery that prevents satellite cwnd
	 * advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send an immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/*
					 * no recent feedback in an RTO or
					 * more, request an RTT update
					 */
					sctp_send_hb(stcb, 1, net);
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the nets, only the
		 * association. This may fail it if SACKs are not coming
		 * back. If SACKs are coming back with rwnd locked at 0,
		 * we will continue to hold things, waiting for rwnd to
		 * rise.
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and switch the primary to
			 * the alternate. Note: the HB code must use this
			 * flag to reset the primary if it goes active
			 * again, and a user change-primary must clear this
			 * flag from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				net->src_addr_selected = 0;
			}
		}
	}
	/*
	 * Special case for the cookie-echoed case: we don't do output but
	 * must await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

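		/*
		 * PR-SCTP (RFC 3758): after a T3 we may be able to
		 * advance the peer's ack point past chunks that were
		 * abandoned above; if so, a FORWARD-TSN tells the peer
		 * to skip them.
		 */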
		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
#ifdef SCTP_CWND_MONITOR
	sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
#endif
	return (0);
}

int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb);
		return (0);
	}
	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	    stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			printf("Strange, in state %d, not cookie-echoed yet, c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok, we found the cookie; threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management; now let's back off the address
	 * and select an alternate.
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * The output routine will kick the cookie back out later. Note we
	 * don't mark any data chunks for retran, so FR (or a send timer)
	 * will need to kick in to move those.
	 */
	return (0);
}

int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET; we use the seq number we sent out on */
	sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management; now let's back off the address
	 * and select an alternate.
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if an ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this the first send, or a retransmission? */
	if (stcb->asoc.asconf_sent == 0) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net);
	} else {
		/* Retransmission of the existing ASCONF needed... */

		/* find the existing ASCONF */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		/*
		 * PETER? FIX? How will the following code ever run? If
		 * the max_send_times is hit, threshold management will
		 * blow away the association?
		 */
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: the peer is not responding
			 * to ASCONFs but maybe is to data etc., e.g. it is
			 * not properly handling the chunk type upper bits.
			 * Mark this peer as ASCONF-incapable and clean up.
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			}
#endif				/* SCTP_DEBUG */
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * Cleared threshold management; now let's back off the
		 * address and select an alternate.
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		sctp_free_remote_addr(asconf->whoTo);
		asconf->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);

		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to alternates for ALL chk's in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;
	}
	return (0);
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first, threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second, select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third, generate a shutdown into the queue for our net */
	if (alt) {
		sctp_send_shutdown(stcb, alt);
	} else {
		/*
		 * if alt is NULL, there is no dest to send to??
		 */
		return (0);
	}
	/* fourth, restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first, threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second, select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third, generate a shutdown-ack into the queue for our net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth, restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			printf("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data is queued; if so, report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				printf("Still nothing moved, %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		printf("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	if (net) {
		if (net->hb_responded == 0) {
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB; this does threshold management and picks a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's, up to maxburst, if there
		 * are any unconfirmed addresses.
		 */
		int cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (sctp_send_hb(stcb, 1, net) == 0) {
					break;
				}
				if (cnt_sent >= stcb->asoc.max_burst)
					break;
			}
		}
	}
	return (0);
}

int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
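/*
 * Common link MTUs in increasing order; SCTP_NUMBER_OF_MTU_SIZES must
 * match the number of entries (18 here), since the probe loop below
 * bounds its index by that constant.
 */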


static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* first table entry bigger than the current MTU */
			return (mtu_sizes[i]);
		}
	}
	/* nothing in the table is bigger; keep the current MTU */
	return (cur_mtu);
}
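/*
 * Illustration of the probe step: with cur_mtu == 1500 the loop returns
 * 1536; with cur_mtu == 65535 it falls through and returns 65535
 * unchanged, which sctp_pathmtu_timer below treats as "nothing to do".
 */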


void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu;

	/* find the next bigger MTU to try */
	next_mtu = sctp_getnext_mtu(inp, net->mtu);
	if (next_mtu <= net->mtu) {
		/* nothing to do */
		return;
	}
	if (net->ro.ro_rt != NULL) {
		/*
		 * only if we have a route and interface do we set
		 * anything. Note we always restart the timer, though,
		 * just in case it (i.e. the ifp) is updated or the
		 * route/ifp is populated.
		 */
		if (net->ro.ro_rt->rt_ifp != NULL) {
			if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
				/* ok, it will fit out the door */
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit. Call the output routine,
			 * which should do nothing, just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time; reset the timeout to
			 * check later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
1596 
1597 void
1598 sctp_iterator_timer(struct sctp_iterator *it)
1599 {
1600 	int iteration_count = 0;
1601 
1602 	/*
1603 	 * only one iterator can run at a time. This is the only way we can
1604 	 * cleanly pull ep's from underneath all the running interators when
1605 	 * a ep is freed.
1606 	 */
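	/*
	 * Design note: to bound the time spent holding locks, the
	 * iterator processes at most SCTP_ITERATOR_MAX_AT_ONCE
	 * associations per invocation and then re-arms its timer to
	 * resume where it left off (see start_timer_return below).
	 */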
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		LIST_REMOVE(it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory */
		SCTP_INP_INFO_WUNLOCK();
		SCTP_OS_TIMER_STOP(&it->tmr.timer);
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		printf("Iterator collision, waiting for one at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* now go through each assoc which is in the desired state */
	if (it->stcb == NULL) {
		/* run the per-instance function */
		if (it->function_inp != NULL)
			(*it->function_inp) (it->inp, it->pointer, it->val);

		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	SCTP_INP_RUNLOCK(it->inp);
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
	start_timer_return:
			/* set a timer to continue this later */
			SCTP_TCB_UNLOCK(it->stcb);
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
			    (struct sctp_inpcb *)it, NULL, NULL);
			SCTP_ITERATOR_UNLOCK();
			return;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
	}
	/* done with all assocs on this endpoint, move on to the next endpoint */
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}