/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
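
/*
 * Editorial overview (not part of the original source): this file
 * implements the SCTP timer expiry handlers -- early fast retransmit,
 * T3-rxt, T1-init, cookie, stream reset, ASCONF, shutdown and
 * shutdown-ack, heartbeat, path-MTU raise, autoclose, and the PCB
 * iterator timer -- plus the shared threshold-management and
 * alternate-net selection helpers they rely on.
 */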

void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_tmit_chunk *chk, *tp2;
    struct timeval now, min_wait, tv;
    unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

    /* an early FR is occurring. */
    (void)SCTP_GETTIME_TIMEVAL(&now);
    /* get cur rto in micro-seconds */
    if (net->lastsa == 0) {
        /* Hmm, no RTT estimate yet? */
        cur_rtt = stcb->asoc.initial_rto >> 2;
    } else {
        cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
    }
    if (cur_rtt < sctp_early_fr_msec) {
        cur_rtt = sctp_early_fr_msec;
    }
    cur_rtt *= 1000;
    tv.tv_sec = cur_rtt / 1000000;
    tv.tv_usec = cur_rtt % 1000000;
    min_wait = now;
    timevalsub(&min_wait, &tv);
    if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
        /*
         * if we hit here, we don't have enough seconds on the clock
         * to account for the RTO. We just let the lower seconds be
         * the bounds and don't worry about it. This may mean we
         * will mark a lot more than we should.
         */
        min_wait.tv_sec = min_wait.tv_usec = 0;
    }
    chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
    for (; chk != NULL; chk = tp2) {
        tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
        if (chk->whoTo != net) {
            continue;
        }
        if (chk->sent == SCTP_DATAGRAM_RESEND)
            cnt_resend++;
        else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
            (chk->sent < SCTP_DATAGRAM_RESEND)) {
            /* pending, may need retran */
            if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
                /*
                 * we have reached a chunk that was sent
                 * some seconds past our min.. forget it we
                 * will find no more to send.
                 */
                continue;
            } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
                /*
                 * we must look at the micro seconds to
                 * know.
                 */
                if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
                    /*
                     * ok it was sent after our boundary
                     * time.
                     */
                    continue;
                }
            }
            if (sctp_logging_level & SCTP_EARLYFR_LOGGING_ENABLE) {
                sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
                    4, SCTP_FR_MARKED_EARLY);
            }
            SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
            chk->sent = SCTP_DATAGRAM_RESEND;
            sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
            /* double book size since we are doing an early FR */
            chk->book_size_scale++;
            cnt += chk->send_size;
            if ((cnt + net->flight_size) > net->cwnd) {
                /* Mark all we could possibly resend */
                break;
            }
        }
    }
    if (cnt) {
        /*
         * JRS - Use the congestion control given in the congestion
         * control module
         */
        stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
    } else if (cnt_resend) {
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
    }
    /* Restart it? */
    if (net->flight_size < net->cwnd) {
        SCTP_STAT_INCR(sctps_earlyfrstrtmr);
        sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
    }
}
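
/*
 * Worked example (illustrative only, not from the original source):
 * with net->lastsa = 400 and net->lastsv = 100 the estimate above is
 * ((400 >> 2) + 100) >> 1 = 100 ms.  Assuming a floor of
 * sctp_early_fr_msec = 250, cur_rtt becomes 250 ms = 250000 us and
 * min_wait = now - 250 ms, so only chunks already outstanding for at
 * least that long are eligible to be marked for early retransmission,
 * and marking stops once the marked bytes would overflow cwnd.
 */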

void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;

    SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
        asoc->sent_queue_retran_cnt,
        asoc->sent_queue_cnt);
    asoc->sent_queue_retran_cnt = 0;
    asoc->sent_queue_cnt = 0;
    TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            sctp_ucount_incr(asoc->sent_queue_retran_cnt);
        }
        asoc->sent_queue_cnt++;
    }
    TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            sctp_ucount_incr(asoc->sent_queue_retran_cnt);
        }
    }
    SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
        asoc->sent_queue_retran_cnt,
        asoc->sent_queue_cnt);
}

int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
    if (net) {
        net->error_count++;
        SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
            net, net->error_count,
            net->failure_threshold);
        if (net->error_count > net->failure_threshold) {
            /* We had a threshold failure */
            if (net->dest_state & SCTP_ADDR_REACHABLE) {
                net->dest_state &= ~SCTP_ADDR_REACHABLE;
                net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
                net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
                if (net == stcb->asoc.primary_destination) {
                    net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
                }
                /*
                 * JRS 5/14/07 - If a destination is
                 * unreachable, the PF bit is turned off.
                 * This allows an unambiguous use of the PF
                 * bit for destinations that are reachable
                 * but potentially failed. If the
                 * destination is set to the unreachable
                 * state, also set the destination to the PF
                 * state.
                 */
                /*
                 * Add debug message here if destination is
                 * not in PF state.
                 */
                /* Stop any running T3 timers here? */
                if (sctp_cmt_on_off && sctp_cmt_pf) {
                    net->dest_state &= ~SCTP_ADDR_PF;
                    SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
                        net);
                }
                sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
                    stcb,
                    SCTP_FAILED_THRESHOLD,
                    (void *)net, SCTP_SO_NOT_LOCKED);
            }
        }
        /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
         *********ROUTING CODE
         */
        /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
         *********ROUTING CODE
         */
    }
    if (stcb == NULL)
        return (0);

    if (net) {
        if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
            if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                sctp_misc_ints(SCTP_THRESHOLD_INCR,
                    stcb->asoc.overall_error_count,
                    (stcb->asoc.overall_error_count + 1),
                    SCTP_FROM_SCTP_TIMER,
                    __LINE__);
            }
            stcb->asoc.overall_error_count++;
        }
    } else {
        if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
            sctp_misc_ints(SCTP_THRESHOLD_INCR,
                stcb->asoc.overall_error_count,
                (stcb->asoc.overall_error_count + 1),
                SCTP_FROM_SCTP_TIMER,
                __LINE__);
        }
        stcb->asoc.overall_error_count++;
    }
    SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
        &stcb->asoc, stcb->asoc.overall_error_count,
        (uint32_t) threshold,
        ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
    /*
     * We specifically do not do >= to give the assoc one more chance
     * before we fail it.
     */
    if (stcb->asoc.overall_error_count > threshold) {
        /* Abort notification sends a ULP notify */
        struct mbuf *oper;

        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                sizeof(uint32_t);
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
        }
        inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
        sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
        return (1);
    }
    return (0);
}
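
/*
 * Illustrative note (not from the original source): since both tests
 * use ">" rather than ">=", a threshold of 5 tolerates five
 * consecutive errors and fails on the sixth, when the count first
 * exceeds the threshold; the same one-extra-chance rule applies to
 * the per-net failure_threshold check above.
 */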

struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
    /* Find and return an alternate network if possible */
    struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
    int once;

    /* JRS 5/14/07 - Initialize min_errors to an impossible value. */
    int min_errors = -1;
    uint32_t max_cwnd = 0;

    if (stcb->asoc.numnets == 1) {
        /* No others but net */
        return (TAILQ_FIRST(&stcb->asoc.nets));
    }
    /*
     * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
     * net algorithm. This algorithm chooses the active destination (not
     * in PF state) with the largest cwnd value. If all destinations are
     * in PF state, unreachable, or unconfirmed, choose the destination
     * that is in PF state with the lowest error count. In case of a
     * tie, choose the destination that was most recently active.
     */
    if (mode == 2) {
        TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
            /*
             * JRS 5/14/07 - If the destination is unreachable
             * or unconfirmed, skip it.
             */
            if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
                (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
                continue;
            }
            /*
             * JRS 5/14/07 - If the destination is reachable
             * but in PF state, compare the error count of the
             * destination to the minimum error count seen thus
             * far. Store the destination with the lower error
             * count. If the error counts are equal, store the
             * destination that was most recently active.
             */
            if (mnet->dest_state & SCTP_ADDR_PF) {
                /*
                 * JRS 5/14/07 - If the destination under
                 * consideration is the current destination,
                 * work as if the error count is one higher.
                 * The actual error count will not be
                 * incremented until later in the t3
                 * handler.
                 */
                if (mnet == net) {
                    if (min_errors == -1) {
                        min_errors = mnet->error_count + 1;
                        min_errors_net = mnet;
                    } else if (mnet->error_count + 1 < min_errors) {
                        min_errors = mnet->error_count + 1;
                        min_errors_net = mnet;
                    } else if (mnet->error_count + 1 == min_errors
                        && mnet->last_active > min_errors_net->last_active) {
                        min_errors_net = mnet;
                        min_errors = mnet->error_count + 1;
                    }
                    continue;
                } else {
                    if (min_errors == -1) {
                        min_errors = mnet->error_count;
                        min_errors_net = mnet;
                    } else if (mnet->error_count < min_errors) {
                        min_errors = mnet->error_count;
                        min_errors_net = mnet;
                    } else if (mnet->error_count == min_errors
                        && mnet->last_active > min_errors_net->last_active) {
                        min_errors_net = mnet;
                        min_errors = mnet->error_count;
                    }
                    continue;
                }
            }
            /*
             * JRS 5/14/07 - If the destination is reachable and
             * not in PF state, compare the cwnd of the
             * destination to the highest cwnd seen thus far.
             * Store the destination with the higher cwnd value.
             * If the cwnd values are equal, randomly choose one
             * of the two destinations.
             */
            if (max_cwnd < mnet->cwnd) {
                max_cwnd_net = mnet;
                max_cwnd = mnet->cwnd;
            } else if (max_cwnd == mnet->cwnd) {
                uint32_t rndval;
                uint8_t this_random;

                if (stcb->asoc.hb_random_idx > 3) {
                    rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
                    memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
                    this_random = stcb->asoc.hb_random_values[0];
                    stcb->asoc.hb_random_idx++;
                    stcb->asoc.hb_ect_randombit = 0;
                } else {
                    this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
                    stcb->asoc.hb_random_idx++;
                    stcb->asoc.hb_ect_randombit = 0;
                }
                if (this_random % 2 == 1) {
                    max_cwnd_net = mnet;
                    max_cwnd = mnet->cwnd;	/* Useless? */
                }
            }
        }
        /*
         * JRS 5/14/07 - After all destinations have been considered
         * as alternates, check to see if there was some active
         * destination (not in PF state). If not, check to see if
         * there was some PF destination with the minimum number of
         * errors. If not, return the original destination. If
         * there is a min_errors_net, remove the PF flag from that
         * destination, set the cwnd to one or two MTUs, and return
         * the destination as an alt. If there was some active
         * destination with a highest cwnd, return the destination
         * as an alt.
         */
        if (max_cwnd_net == NULL) {
            if (min_errors_net == NULL) {
                return (net);
            }
            min_errors_net->dest_state &= ~SCTP_ADDR_PF;
            min_errors_net->cwnd = min_errors_net->mtu * sctp_cmt_pf;
            if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, min_errors_net,
                    SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
            }
            SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n",
                min_errors_net, min_errors_net->error_count);
            return (min_errors_net);
        } else {
            return (max_cwnd_net);
        }
    }
    /*
     * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
     * choosing an alternate net.
     */
    else if (mode == 1) {
        TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
            if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
                (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
                /*
                 * will skip ones that are not-reachable or
                 * unconfirmed
                 */
                continue;
            }
            if (max_cwnd < mnet->cwnd) {
                max_cwnd_net = mnet;
                max_cwnd = mnet->cwnd;
            } else if (max_cwnd == mnet->cwnd) {
                uint32_t rndval;
                uint8_t this_random;

                if (stcb->asoc.hb_random_idx > 3) {
                    rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
                    memcpy(stcb->asoc.hb_random_values, &rndval,
                        sizeof(stcb->asoc.hb_random_values));
                    this_random = stcb->asoc.hb_random_values[0];
                    stcb->asoc.hb_random_idx = 0;
                    stcb->asoc.hb_ect_randombit = 0;
                } else {
                    this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
                    stcb->asoc.hb_random_idx++;
                    stcb->asoc.hb_ect_randombit = 0;
                }
                if (this_random % 2) {
                    max_cwnd_net = mnet;
                    max_cwnd = mnet->cwnd;
                }
            }
        }
        if (max_cwnd_net) {
            return (max_cwnd_net);
        }
    }
    mnet = net;
    once = 0;

    if (mnet == NULL) {
        mnet = TAILQ_FIRST(&stcb->asoc.nets);
    }
    do {
        alt = TAILQ_NEXT(mnet, sctp_next);
        if (alt == NULL) {
            once++;
            if (once > 1) {
                break;
            }
            alt = TAILQ_FIRST(&stcb->asoc.nets);
        }
        if (alt->ro.ro_rt == NULL) {
            if (alt->ro._s_addr) {
                sctp_free_ifa(alt->ro._s_addr);
                alt->ro._s_addr = NULL;
            }
            alt->src_addr_selected = 0;
        }
        if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
            (alt->ro.ro_rt != NULL) &&
            /* sa_ignore NO_NULL_CHK */
            (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
            /* Found a reachable address */
            break;
        }
        mnet = alt;
    } while (alt != NULL);

    if (alt == NULL) {
        /* Case where NO in-service network exists (dormant state) */
        /* we rotate destinations */
        once = 0;
        mnet = net;
        do {
            alt = TAILQ_NEXT(mnet, sctp_next);
            if (alt == NULL) {
                once++;
                if (once > 1) {
                    break;
                }
                alt = TAILQ_FIRST(&stcb->asoc.nets);
            }
            /* sa_ignore NO_NULL_CHK */
            if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
                (alt != net)) {
                /* Found an alternate address */
                break;
            }
            mnet = alt;
        } while (alt != NULL);
    }
    if (alt == NULL) {
        return (net);
    }
    return (alt);
}

static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked)
{
    if (net->RTO == 0) {
        net->RTO = stcb->asoc.minrto;
    }
    net->RTO <<= 1;
    if (net->RTO > stcb->asoc.maxrto) {
        net->RTO = stcb->asoc.maxrto;
    }
    if ((win_probe == 0) && num_marked) {
        /* We don't apply penalty to window probe scenarios */
        /* JRS - Use the congestion control given in the CC module */
        stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
    }
}

static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked)
{
    /*
     * Mark all chunks (well not all) that were sent to *net for
     * retransmission. Move them to alt for their destination as well...
     * We only mark chunks that have been outstanding long enough to
     * have received feed-back.
     */
    struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
    struct sctp_nets *lnets;
    struct timeval now, min_wait, tv;
    int cur_rtt;
    int audit_tf, num_mk, fir;
    unsigned int cnt_mk;
    uint32_t orig_flight, orig_tf;
    uint32_t tsnlast, tsnfirst;

    /* none in flight now */
    audit_tf = 0;
    fir = 0;
    /*
     * figure out how long a data chunk must be pending before we can
     * mark it ..
     */
    (void)SCTP_GETTIME_TIMEVAL(&now);
    /* get cur rto in micro-seconds */
    cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
    cur_rtt *= 1000;
    if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
        sctp_log_fr(cur_rtt,
            stcb->asoc.peers_rwnd,
            window_probe,
            SCTP_FR_T3_MARK_TIME);
        sctp_log_fr(net->flight_size,
            SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
            SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
            SCTP_FR_CWND_REPORT);
        sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
    }
    tv.tv_sec = cur_rtt / 1000000;
    tv.tv_usec = cur_rtt % 1000000;
    min_wait = now;
    timevalsub(&min_wait, &tv);
    if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
        /*
         * if we hit here, we don't have enough seconds on the clock
         * to account for the RTO. We just let the lower seconds be
         * the bounds and don't worry about it. This may mean we
         * will mark a lot more than we should.
         */
        min_wait.tv_sec = min_wait.tv_usec = 0;
    }
    if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
        sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
        sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
    }
    /*
     * Our rwnd will be incorrect here since we are not adding back the
     * cnt * mbuf but we will fix that down below.
     */
    orig_flight = net->flight_size;
    orig_tf = stcb->asoc.total_flight;

    net->fast_retran_ip = 0;
    /* Now on to each chunk */
    num_mk = cnt_mk = 0;
    tsnfirst = tsnlast = 0;
    chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
    for (; chk != NULL; chk = tp2) {
        tp2 = TAILQ_NEXT(chk, sctp_next);
        if ((compare_with_wrap(stcb->asoc.last_acked_seq,
            chk->rec.data.TSN_seq,
            MAX_TSN)) ||
            (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
            /* Strange case our list got out of order? */
            SCTP_PRINTF("Our list is out of order?\n");
            panic("Out of order list");
        }
        if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
            /*
             * found one to mark: If it is less than
             * DATAGRAM_ACKED it MUST not be a skipped or marked
             * TSN but instead one that is either already set
             * for retransmission OR one that needs
             * retransmission.
             */

            /* validate it's been outstanding long enough */
            if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
                sctp_log_fr(chk->rec.data.TSN_seq,
                    chk->sent_rcv_time.tv_sec,
                    chk->sent_rcv_time.tv_usec,
                    SCTP_FR_T3_MARK_TIME);
            }
            if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
                /*
                 * we have reached a chunk that was sent
                 * some seconds past our min.. forget it we
                 * will find no more to send.
                 */
                if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
                    sctp_log_fr(0,
                        chk->sent_rcv_time.tv_sec,
                        chk->sent_rcv_time.tv_usec,
                        SCTP_FR_T3_STOPPED);
                }
                continue;
            } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
                (window_probe == 0)) {
                /*
                 * we must look at the micro seconds to
                 * know.
                 */
                if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
                    /*
                     * ok it was sent after our boundary
                     * time.
                     */
                    if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
                        sctp_log_fr(0,
                            chk->sent_rcv_time.tv_sec,
                            chk->sent_rcv_time.tv_usec,
                            SCTP_FR_T3_STOPPED);
                    }
                    continue;
                }
            }
            if (PR_SCTP_TTL_ENABLED(chk->flags)) {
                /* Is it expired? */
                if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
                    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
                    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
                    /* Yes so drop it */
                    if (chk->data) {
                        (void)sctp_release_pr_sctp_chunk(stcb,
                            chk,
                            (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                            &stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
                    }
                }
                continue;
            }
            if (PR_SCTP_RTX_ENABLED(chk->flags)) {
                /* Has it been retransmitted tv_sec times? */
                if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
                    if (chk->data) {
                        (void)sctp_release_pr_sctp_chunk(stcb,
                            chk,
                            (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                            &stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
                    }
                }
                continue;
            }
            if (chk->sent < SCTP_DATAGRAM_RESEND) {
                sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
                num_mk++;
                if (fir == 0) {
                    fir = 1;
                    tsnfirst = chk->rec.data.TSN_seq;
                }
                tsnlast = chk->rec.data.TSN_seq;
                if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
                    sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
                        0, SCTP_FR_T3_MARKED);
                }
                if (chk->rec.data.chunk_was_revoked) {
                    /* deflate the cwnd */
                    chk->whoTo->cwnd -= chk->book_size;
                    chk->rec.data.chunk_was_revoked = 0;
                }
                net->marked_retrans++;
                stcb->asoc.marked_retrans++;
                if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
                    sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
                        chk->whoTo->flight_size,
                        chk->book_size,
                        (uintptr_t) chk->whoTo,
                        chk->rec.data.TSN_seq);
                }
                sctp_flight_size_decrease(chk);
                sctp_total_flight_decrease(stcb, chk);
                stcb->asoc.peers_rwnd += chk->send_size;
                stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
            }
            chk->sent = SCTP_DATAGRAM_RESEND;
            SCTP_STAT_INCR(sctps_markedretrans);

            /* reset the TSN for striking and other FR stuff */
            chk->rec.data.doing_fast_retransmit = 0;
            /* Clear any time so NO RTT is being done */
            chk->do_rtt = 0;
            if (alt != net) {
                sctp_free_remote_addr(chk->whoTo);
                chk->no_fr_allowed = 1;
                chk->whoTo = alt;
                atomic_add_int(&alt->ref_count, 1);
            } else {
                chk->no_fr_allowed = 0;
                if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
                    chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
                } else {
                    chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
                }
            }
            /*
             * CMT: Do not allow FRs on retransmitted TSNs.
             */
            if (sctp_cmt_on_off == 1) {
                chk->no_fr_allowed = 1;
            }
        } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
            /* remember highest acked one */
            could_be_sent = chk;
        }
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            cnt_mk++;
        }
    }
    if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
        /* we did not subtract the same things? */
        audit_tf = 1;
    }
    if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
        sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
    }
#ifdef SCTP_DEBUG
    if (num_mk) {
        SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
            tsnlast);
        SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
            num_mk, (u_long)stcb->asoc.peers_rwnd);
        SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
            tsnlast);
        SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
            num_mk,
            (int)stcb->asoc.peers_rwnd);
    }
#endif
    *num_marked = num_mk;
    if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
        /* fix it so we retransmit the highest acked anyway */
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
        cnt_mk++;
        could_be_sent->sent = SCTP_DATAGRAM_RESEND;
    }
    if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
        SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
            cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
        stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
    }
    /* Now check for an ECN Echo that may be stranded */
    TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
        if ((chk->whoTo == net) &&
            (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
            sctp_free_remote_addr(chk->whoTo);
            chk->whoTo = alt;
            if (chk->sent != SCTP_DATAGRAM_RESEND) {
                chk->sent = SCTP_DATAGRAM_RESEND;
                sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
            }
            atomic_add_int(&alt->ref_count, 1);
        }
    }
    if (audit_tf) {
        SCTPDBG(SCTP_DEBUG_TIMER4,
            "Audit total flight due to negative value net:%p\n",
            net);
        stcb->asoc.total_flight = 0;
        stcb->asoc.total_flight_count = 0;
        /* Clear all networks flight size */
        TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
            lnets->flight_size = 0;
            SCTPDBG(SCTP_DEBUG_TIMER4,
                "Net:%p c-f cwnd:%d ssthresh:%d\n",
                lnets, lnets->cwnd, lnets->ssthresh);
        }
        TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
            if (chk->sent < SCTP_DATAGRAM_RESEND) {
                if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
                    sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
                        chk->whoTo->flight_size,
                        chk->book_size,
                        (uintptr_t) chk->whoTo,
                        chk->rec.data.TSN_seq);
                }
                sctp_flight_size_increase(chk);
                sctp_total_flight_increase(stcb, chk);
            }
        }
    }
    /*
     * Setup the ECN nonce re-sync point. We do this since
     * retransmissions are NOT setup for ECN. This means that due to
     * Karn's rule, we don't know the total of the peer's ECN bits.
     */
    chk = TAILQ_FIRST(&stcb->asoc.send_queue);
    if (chk == NULL) {
        stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
    } else {
        stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
    }
    stcb->asoc.nonce_wait_for_ecne = 0;
    stcb->asoc.nonce_sum_check = 0;
    /* We return 1 if we only have a window probe outstanding */
    return (0);
}

static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt)
{
    struct sctp_association *asoc;
    struct sctp_stream_out *outs;
    struct sctp_tmit_chunk *chk;
    struct sctp_stream_queue_pending *sp;

    if (net == alt)
        /* nothing to do */
        return;

    asoc = &stcb->asoc;

    /*
     * now through all the streams checking for chunks sent to our bad
     * network.
     */
    TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
        /* now clean up any chunks here */
        TAILQ_FOREACH(sp, &outs->outqueue, next) {
            if (sp->net == net) {
                sctp_free_remote_addr(sp->net);
                sp->net = alt;
                atomic_add_int(&alt->ref_count, 1);
            }
        }
    }
    /* Now check the pending queue */
    TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
        if (chk->whoTo == net) {
            sctp_free_remote_addr(chk->whoTo);
            chk->whoTo = alt;
            atomic_add_int(&alt->ref_count, 1);
        }
    }
}

int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;
    int win_probe, num_mk;

    if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
        sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
    }
    if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
        struct sctp_nets *lnet;

        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            if (net == lnet) {
                sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
            } else {
                sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
            }
        }
    }
    /* Find an alternate and mark those for retransmission */
    if ((stcb->asoc.peers_rwnd == 0) &&
        (stcb->asoc.total_flight < net->mtu)) {
        SCTP_STAT_INCR(sctps_timowindowprobe);
        win_probe = 1;
    } else {
        win_probe = 0;
    }

    /*
     * JRS 5/14/07 - If CMT PF is on and the destination is not already
     * in PF state, set the destination to PF state and store the
     * current time as the time that the destination was last active. In
     * addition, find an alternate destination with PF-based
     * find_alt_net().
     */
    if (sctp_cmt_on_off && sctp_cmt_pf) {
        if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
            net->dest_state |= SCTP_ADDR_PF;
            net->last_active = sctp_get_tick_count();
            SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
                net);
        }
        alt = sctp_find_alternate_net(stcb, net, 2);
    } else if (sctp_cmt_on_off) {
        /*
         * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
         * used, then pick dest with largest ssthresh for any
         * retransmission.
         */
        alt = net;
        alt = sctp_find_alternate_net(stcb, alt, 1);
        /*
         * CUCv2: If a different dest is picked for the
         * retransmission, then new (rtx-)pseudo_cumack needs to be
         * tracked for orig dest. Let CUCv2 track new (rtx-)
         * pseudo-cumack always.
         */
        net->find_pseudo_cumack = 1;
        net->find_rtx_pseudo_cumack = 1;
    } else {		/* CMT is OFF */
        alt = sctp_find_alternate_net(stcb, net, 0);
    }

    (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
    /* FR Loss recovery just ended with the T3. */
    stcb->asoc.fast_retran_loss_recovery = 0;

    /* CMT FR loss recovery ended with the T3 */
    net->fast_retran_loss_recovery = 0;

    /*
     * setup the sat loss recovery that prevents satellite cwnd advance.
     */
    stcb->asoc.sat_t3_loss_recovery = 1;
    stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

    /* Backoff the timer and cwnd */
    sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
    if (win_probe == 0) {
        /* We don't do normal threshold management on window probes */
        if (sctp_threshold_management(inp, stcb, net,
            stcb->asoc.max_send_times)) {
            /* Association was destroyed */
            return (1);
        } else {
            if (net != stcb->asoc.primary_destination) {
                /* send an immediate HB if our RTO is stale */
                struct timeval now;
                unsigned int ms_goneby;

                (void)SCTP_GETTIME_TIMEVAL(&now);
                if (net->last_sent_time.tv_sec) {
                    ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
                } else {
                    ms_goneby = 0;
                }
                if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
                    /*
                     * no recent feedback in an RTO or
                     * more, request an RTT update
                     */
                    if (sctp_send_hb(stcb, 1, net) < 0)
                        return (1);
                }
            }
        }
    } else {
        /*
         * For a window probe we don't penalize the net's but only
         * the association. This may fail the association if SACKs
         * are not coming back. If SACKs are coming with rwnd locked
         * at 0, we will continue to hold things waiting for rwnd to
         * rise.
         */
        if (sctp_threshold_management(inp, stcb, NULL,
            stcb->asoc.max_send_times)) {
            /* Association was destroyed */
            return (1);
        }
    }
    if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
        /* Move all pending over too */
        sctp_move_all_chunks_to_alt(stcb, net, alt);

        /*
         * Get the address that failed, to force a new src address
         * selection and a route allocation.
         */
        if (net->ro._s_addr) {
            sctp_free_ifa(net->ro._s_addr);
            net->ro._s_addr = NULL;
        }
        net->src_addr_selected = 0;

        /* Force a route allocation too */
        if (net->ro.ro_rt) {
            RTFREE(net->ro.ro_rt);
            net->ro.ro_rt = NULL;
        }
        /* Was it our primary? */
        if ((stcb->asoc.primary_destination == net) && (alt != net)) {
            /*
             * Yes, note it as such and find an alternate note:
             * this means HB code must use this to restore the
             * primary if it goes active AND if someone does a
             * change-primary then this flag must be cleared
             * from any net structures.
             */
            if (sctp_set_primary_addr(stcb,
                (struct sockaddr *)NULL,
                alt) == 0) {
                net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
            }
        }
    } else if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
        /*
         * JRS 5/14/07 - If the destination hasn't failed completely
         * but is in PF state, a PF-heartbeat needs to be sent
         * manually.
         */
        if (sctp_send_hb(stcb, 1, net) < 0)
            return (1);
    }
    /*
     * Special case for cookie-echo'ed case, we don't do output but must
     * await the COOKIE-ACK before retransmission
     */
    if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
        /*
         * Here we just reset the timer and start again since we
         * have not established the asoc
         */
        sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
        return (0);
    }
    if (stcb->asoc.peer_supports_prsctp) {
        struct sctp_tmit_chunk *lchk;

        lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
            stcb->asoc.last_acked_seq, MAX_TSN)) {
            /*
             * ISSUE with ECN, see FWD-TSN processing for notes
             * on issues that will occur when the ECN NONCE
             * stuff is put into SCTP for cross checking.
             */
            send_forward_tsn(stcb, &stcb->asoc);
            if (lchk) {
                /* Assure a timer is up */
                sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
            }
        }
    }
    if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
    return (0);
}
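
/*
 * Illustrative sketch (values assumed, not from the original source):
 * sctp_backoff_on_timeout() doubles the RTO on every expiry, so with
 * asoc.minrto = 1000 ms and asoc.maxrto = 60000 ms successive T3 fires
 * yield RTOs of 1000, 2000, 4000, ... capped at 60000 ms.  A window
 * probe (peers_rwnd == 0 with less than an MTU in flight) still backs
 * the timer off, but skips the cwnd penalty and charges only the
 * association-wide error count, never the individual net.
 */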

int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    /* bump the thresholds */
    if (stcb->asoc.delayed_connection) {
        /*
         * special hook for delayed connection. The library did NOT
         * complete the rest of its sends.
         */
        stcb->asoc.delayed_connection = 0;
        sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    }
    if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
        return (0);
    }
    if (sctp_threshold_management(inp, stcb, net,
        stcb->asoc.max_init_times)) {
        /* Association was destroyed */
        return (1);
    }
    stcb->asoc.dropped_special_cnt = 0;
    sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
    if (stcb->asoc.initial_init_rto_max < net->RTO) {
        net->RTO = stcb->asoc.initial_init_rto_max;
    }
    if (stcb->asoc.numnets > 1) {
        /* If we have more than one addr use it */
        struct sctp_nets *alt;

        alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
        if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
            sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
            stcb->asoc.primary_destination = alt;
        }
    }
    /* Send out a new init */
    sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
    return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;
    struct sctp_tmit_chunk *cookie;

    /* first before all else we must find the cookie */
    TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
        if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
            break;
        }
    }
    if (cookie == NULL) {
        if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
            /* FOOBAR! */
            struct mbuf *oper;

            oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                0, M_DONTWAIT, 1, MT_DATA);
            if (oper) {
                struct sctp_paramhdr *ph;
                uint32_t *ippp;

                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                    sizeof(uint32_t);
                ph = mtod(oper, struct sctp_paramhdr *);
                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                ph->param_length = htons(SCTP_BUF_LEN(oper));
                ippp = (uint32_t *) (ph + 1);
                *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
            }
            inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
            sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
                oper, SCTP_SO_NOT_LOCKED);
        } else {
#ifdef INVARIANTS
            panic("Cookie timer expires in wrong state?");
#else
            SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
            return (0);
#endif
        }
        return (0);
    }
    /* Ok we found the cookie, threshold management next */
    if (sctp_threshold_management(inp, stcb, cookie->whoTo,
        stcb->asoc.max_init_times)) {
        /* Assoc is over */
        return (1);
    }
    /*
     * Cleared threshold management, now let's backoff the address and
     * select an alternate
     */
    stcb->asoc.dropped_special_cnt = 0;
    sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
    alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
    if (alt != cookie->whoTo) {
        sctp_free_remote_addr(cookie->whoTo);
        cookie->whoTo = alt;
        atomic_add_int(&alt->ref_count, 1);
    }
    /* Now mark the retran info */
    if (cookie->sent != SCTP_DATAGRAM_RESEND) {
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
    }
    cookie->sent = SCTP_DATAGRAM_RESEND;
    /*
     * Now call the output routine to kick out the cookie again, Note we
     * don't mark any chunks for retran so that FR will need to kick in
     * to move these (or a send timer).
     */
    return (0);
}

int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;
    struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

    if (stcb->asoc.stream_reset_outstanding == 0) {
        return (0);
    }
    /* find the existing STRRESET, we use the seq number we sent out on */
    (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
    if (strrst == NULL) {
        return (0);
    }
    /* do threshold management */
    if (sctp_threshold_management(inp, stcb, strrst->whoTo,
        stcb->asoc.max_send_times)) {
        /* Assoc is over */
        return (1);
    }
    /*
     * Cleared threshold management, now let's backoff the address and
     * select an alternate
     */
    sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
    alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
    sctp_free_remote_addr(strrst->whoTo);
    strrst->whoTo = alt;
    atomic_add_int(&alt->ref_count, 1);

    /* See if an ECN Echo is also stranded */
    TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
        if ((chk->whoTo == net) &&
            (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
            sctp_free_remote_addr(chk->whoTo);
            if (chk->sent != SCTP_DATAGRAM_RESEND) {
                chk->sent = SCTP_DATAGRAM_RESEND;
                sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
            }
            chk->whoTo = alt;
            atomic_add_int(&alt->ref_count, 1);
        }
    }
    if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
        /*
         * If the address went un-reachable, we need to move to
         * alternates for ALL chk's in queue
         */
        sctp_move_all_chunks_to_alt(stcb, net, alt);
    }
    /* mark the retran info */
    if (strrst->sent != SCTP_DATAGRAM_RESEND)
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
    strrst->sent = SCTP_DATAGRAM_RESEND;

    /* restart the timer */
    sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
    return (0);
}

int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;
    struct sctp_tmit_chunk *asconf, *chk;

    /* is this a first send, or a retransmission? */
    if (stcb->asoc.asconf_sent == 0) {
        /* compose a new ASCONF chunk and send it */
        sctp_send_asconf(stcb, net);
    } else {
        /*
         * Retransmission of the existing ASCONF is needed
         */

        /* find the existing ASCONF */
        TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
            sctp_next) {
            if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
                break;
            }
        }
        if (asconf == NULL) {
            return (0);
        }
        /* do threshold management */
        if (sctp_threshold_management(inp, stcb, asconf->whoTo,
            stcb->asoc.max_send_times)) {
            /* Assoc is over */
            return (1);
        }
        if (asconf->snd_count > stcb->asoc.max_send_times) {
            /*
             * Something is rotten: our peer is not responding
             * to ASCONFs but apparently is to other chunks.
             * i.e. it is not properly handling the chunk type
             * upper bits. Mark this peer as ASCONF incapable
             * and cleanup.
             */
            SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
            sctp_asconf_cleanup(stcb, net);
            return (0);
        }
        /*
         * Cleared threshold management, so now backoff the net and
         * select an alternate
         */
        sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
        alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
        sctp_free_remote_addr(asconf->whoTo);
        asconf->whoTo = alt;
        atomic_add_int(&alt->ref_count, 1);

        /* See if an ECN Echo is also stranded */
        TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
            if ((chk->whoTo == net) &&
                (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
                sctp_free_remote_addr(chk->whoTo);
                chk->whoTo = alt;
                if (chk->sent != SCTP_DATAGRAM_RESEND) {
                    chk->sent = SCTP_DATAGRAM_RESEND;
                    sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
                }
                atomic_add_int(&alt->ref_count, 1);
            }
        }
        if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
            /*
             * If the address went un-reachable, we need to move
             * to the alternate for ALL chunks in queue
             */
            sctp_move_all_chunks_to_alt(stcb, net, alt);
        }
        /* mark the retran info */
        if (asconf->sent != SCTP_DATAGRAM_RESEND)
            sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
        asconf->sent = SCTP_DATAGRAM_RESEND;
    }
    return (0);
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;

    /* first threshold management */
    if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
        /* Assoc is over */
        return (1);
    }
    /* second select an alternative */
    alt = sctp_find_alternate_net(stcb, net, 0);

    /* third generate a shutdown into the queue for our net */
    if (alt) {
        sctp_send_shutdown(stcb, alt);
    } else {
        /*
         * if alt is NULL, there is no dest to send to??
         */
        return (0);
    }
    /* fourth restart timer */
    sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
    return (0);
}

int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_nets *alt;

    /* first threshold management */
    if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
        /* Assoc is over */
        return (1);
    }
    /* second select an alternative */
    alt = sctp_find_alternate_net(stcb, net, 0);

    /* third generate a shutdown-ack into the queue for our net */
    sctp_send_shutdown_ack(stcb, alt);

    /* fourth restart timer */
    sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
    return (0);
}

static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
    struct sctp_stream_out *outs;
    struct sctp_stream_queue_pending *sp;
    unsigned int chks_in_queue = 0;
    int being_filled = 0;

    /*
     * This function is ONLY called when the send/sent queues are empty.
     */
    if ((stcb == NULL) || (inp == NULL))
        return;

    if (stcb->asoc.sent_queue_retran_cnt) {
        SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
            stcb->asoc.sent_queue_retran_cnt);
        stcb->asoc.sent_queue_retran_cnt = 0;
    }
    SCTP_TCB_SEND_LOCK(stcb);
    if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
        int i, cnt = 0;

        /* Check to see if a spoke fell off the wheel */
        for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
            if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
                sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
                cnt++;
            }
        }
        if (cnt) {
            /* yep, we lost a spoke or two */
            SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
        } else {
            /* no spokes lost, */
            stcb->asoc.total_output_queue_size = 0;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);
        return;
    }
    SCTP_TCB_SEND_UNLOCK(stcb);
    /* Check to see if some data is queued, if so report it */
    TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
        if (!TAILQ_EMPTY(&outs->outqueue)) {
            TAILQ_FOREACH(sp, &outs->outqueue, next) {
                if (sp->msg_is_complete)
                    being_filled++;
                chks_in_queue++;
            }
        }
    }
    if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
        SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
            stcb->asoc.stream_queue_cnt, chks_in_queue);
    }
    if (chks_in_queue) {
        /* call the output queue function */
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
            (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
            /*
             * Probably should go in and make it go back through
             * and add fragments allowed
             */
            if (being_filled == 0) {
                SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
                    chks_in_queue);
            }
        }
    } else {
        SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
            (u_long)stcb->asoc.total_output_queue_size);
        stcb->asoc.total_output_queue_size = 0;
    }
}

int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
    int ret;

    if (net) {
        if (net->hb_responded == 0) {
            if (net->ro._s_addr) {
                /*
                 * Invalidate the src address if we did not
                 * get a response last time.
                 */
                sctp_free_ifa(net->ro._s_addr);
                net->ro._s_addr = NULL;
                net->src_addr_selected = 0;
            }
            sctp_backoff_on_timeout(stcb, net, 1, 0);
        }
        /* Zero PBA, if it needs it */
        if (net->partial_bytes_acked) {
            net->partial_bytes_acked = 0;
        }
    }
    if ((stcb->asoc.total_output_queue_size > 0) &&
        (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
        (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
        sctp_audit_stream_queues_for_size(inp, stcb);
    }
    /* Send a new HB, this will do threshold management, pick a new dest */
    if (cnt_of_unconf == 0) {
        if (sctp_send_hb(stcb, 0, NULL) < 0) {
            return (1);
        }
    } else {
        /*
         * this will send out extra hb's up to maxburst if there are
         * any unconfirmed addresses.
         */
        uint32_t cnt_sent = 0;

        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                (net->dest_state & SCTP_ADDR_REACHABLE)) {
                cnt_sent++;
                if (net->hb_responded == 0) {
                    /* Did the peer respond last time? */
                    if (net->ro._s_addr) {
                        sctp_free_ifa(net->ro._s_addr);
                        net->ro._s_addr = NULL;
                        net->src_addr_selected = 0;
                    }
                }
                ret = sctp_send_hb(stcb, 1, net);
                if (ret < 0)
                    return (1);
                else if (ret == 0) {
                    break;
                }
                if (cnt_sent >= sctp_hb_maxburst)
                    break;
            }
        }
    }
    return (0);
}

int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
    if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
        /* it's running */
        return (1);
    } else {
        /* nope */
        return (0);
    }
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
    if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
        /* it's running */
        return (1);
    } else {
        /* nope */
        return (0);
    }
}

#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1006,
    1492,
    1500,
    1536,
    2002,
    2048,
    4352,
    4464,
    8166,
    17914,
    32000,
    65535
};

static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
    /* select another MTU that is just bigger than this one */
    int i;

    for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
        if (cur_mtu < mtu_sizes[i]) {
            /* no max_mtu is bigger than this one */
            return (mtu_sizes[i]);
        }
    }
    /* here return the highest allowable */
    return (cur_mtu);
}
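
/*
 * Usage example (illustrative): sctp_getnext_mtu(inp, 1500) returns
 * 1536, the first table entry strictly greater than 1500, while
 * sctp_getnext_mtu(inp, 65535) finds nothing larger and returns the
 * current MTU unchanged, which sctp_pathmtu_timer() below treats as
 * "nothing to do".
 */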

void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint32_t next_mtu;

    /* restart the timer in any case */
    next_mtu = sctp_getnext_mtu(inp, net->mtu);
    if (next_mtu <= net->mtu) {
        /* nothing to do */
        return;
    }
    {
        uint32_t mtu;

        if ((net->src_addr_selected == 0) ||
            (net->ro._s_addr == NULL) ||
            (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
            if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
                sctp_free_ifa(net->ro._s_addr);
                net->ro._s_addr = NULL;
                net->src_addr_selected = 0;
            } else if (net->ro._s_addr == NULL) {
                net->ro._s_addr = sctp_source_address_selection(inp,
                    stcb,
                    (sctp_route_t *)&net->ro,
                    net, 0, stcb->asoc.vrf_id);
            }
            if (net->ro._s_addr)
                net->src_addr_selected = 1;
        }
        if (net->ro._s_addr) {
            mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr->address.sa, net->ro.ro_rt);
            if (mtu > next_mtu) {
                net->mtu = next_mtu;
            }
        }
    }
    /* restart the timer */
    sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct timeval tn, *tim_touse;
    struct sctp_association *asoc;
    int ticks_gone_by;

    (void)SCTP_GETTIME_TIMEVAL(&tn);
    if (stcb->asoc.sctp_autoclose_ticks &&
        sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
        /* Auto close is on */
        asoc = &stcb->asoc;
        /* pick the time to use */
        if (asoc->time_last_rcvd.tv_sec >
            asoc->time_last_sent.tv_sec) {
            tim_touse = &asoc->time_last_rcvd;
        } else {
            tim_touse = &asoc->time_last_sent;
        }
        /* Now, has long enough transpired to autoclose? */
        ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
        if ((ticks_gone_by > 0) &&
            (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
            /*
             * autoclose time has hit, call the output routine,
             * which should do nothing just to be SURE we don't
             * have hanging data. We can then safely check the
             * queues and know that we are clear to send
             * shutdown
             */
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
            /* Are we clean? */
            if (TAILQ_EMPTY(&asoc->send_queue) &&
                TAILQ_EMPTY(&asoc->sent_queue)) {
                /*
                 * there is nothing queued to send, so I'm
                 * done...
                 */
                if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
                    /* only send SHUTDOWN 1st time thru */
                    sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
                    if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
                        (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                    }
                    SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                    sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                        stcb->sctp_ep, stcb,
                        asoc->primary_destination);
                    sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                        stcb->sctp_ep, stcb,
                        asoc->primary_destination);
                }
            }
        } else {
            /*
             * No auto close at this time, reset t-o to check
             * later
             */
            int tmp;

            /* fool the timer startup to use the time left */
            tmp = asoc->sctp_autoclose_ticks;
            asoc->sctp_autoclose_ticks -= ticks_gone_by;
            sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
                net);
            /* restore the real tick value */
            asoc->sctp_autoclose_ticks = tmp;
        }
    }
}

void
sctp_iterator_timer(struct sctp_iterator *it)
{
    int iteration_count = 0;
    int inp_skip = 0;

    /*
     * only one iterator can run at a time. This is the only way we can
     * cleanly pull ep's from underneath all the running iterators when
     * an ep is freed.
     */
    SCTP_ITERATOR_LOCK();
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_WLOCK();
        TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
        /* stopping the callout is not needed, in theory */
        SCTP_INP_INFO_WUNLOCK();
        (void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        return;
    }
select_a_new_ep:
    SCTP_INP_WLOCK(it->inp);
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_WUNLOCK(it->inp);
            goto done_with_iterator;
        }
        SCTP_INP_WUNLOCK(it->inp);
        it->inp = LIST_NEXT(it->inp, sctp_list);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_WLOCK(it->inp);
    }
    if ((it->inp->inp_starting_point_for_iterator != NULL) &&
        (it->inp->inp_starting_point_for_iterator != it)) {
        SCTP_PRINTF("Iterator collision, waiting for one at %p\n",
            it->inp);
        SCTP_INP_WUNLOCK(it->inp);
        goto start_timer_return;
    }
    /* mark the current iterator on the endpoint */
    it->inp->inp_starting_point_for_iterator = it;
    SCTP_INP_WUNLOCK(it->inp);
    SCTP_INP_RLOCK(it->inp);
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    SCTP_INP_RUNLOCK(it->inp);
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        goto no_stcb;
    }
    if ((it->stcb) &&
        (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
        it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* mark the current iterator on the assoc */
        it->stcb->asoc.stcb_starting_point_for_iterator = it;
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
start_timer_return:
            /* set a timer to continue this later */
            if (it->stcb)
                SCTP_TCB_UNLOCK(it->stcb);
            sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
                (struct sctp_inpcb *)it, NULL, NULL);
            SCTP_ITERATOR_UNLOCK();
            return;
        }
        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    SCTP_INP_WLOCK(it->inp);
    it->inp->inp_starting_point_for_iterator = NULL;
    SCTP_INP_WUNLOCK(it->inp);
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        SCTP_INP_INFO_RLOCK();
        it->inp = LIST_NEXT(it->inp, sctp_list);
        SCTP_INP_INFO_RUNLOCK();
    }
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}