1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #define _IP_VHL 39 #include <netinet/sctp_os.h> 40 #include <netinet/sctp_pcb.h> 41 #ifdef INET6 42 #endif 43 #include <netinet/sctp_var.h> 44 #include <netinet/sctp_sysctl.h> 45 #include <netinet/sctp_timer.h> 46 #include <netinet/sctputil.h> 47 #include <netinet/sctp_output.h> 48 #include <netinet/sctp_header.h> 49 #include <netinet/sctp_indata.h> 50 #include <netinet/sctp_asconf.h> 51 #include <netinet/sctp_input.h> 52 #include <netinet/sctp.h> 53 #include <netinet/sctp_uio.h> 54 #include <netinet/udp.h> 55 56 57 void 58 sctp_early_fr_timer(struct sctp_inpcb *inp, 59 struct sctp_tcb *stcb, 60 struct sctp_nets *net) 61 { 62 struct sctp_tmit_chunk *chk, *pchk; 63 struct timeval now, min_wait, tv; 64 unsigned int cur_rto, cnt = 0, cnt_resend = 0; 65 66 /* an early FR is occuring. */ 67 (void)SCTP_GETTIME_TIMEVAL(&now); 68 /* get cur rto in micro-seconds */ 69 if (net->lastsa == 0) { 70 /* Hmm no rtt estimate yet? */ 71 cur_rto = stcb->asoc.initial_rto >> 2; 72 } else { 73 74 cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 75 } 76 if (cur_rto < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 77 cur_rto = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 78 } 79 cur_rto *= 1000; 80 tv.tv_sec = cur_rto / 1000000; 81 tv.tv_usec = cur_rto % 1000000; 82 min_wait = now; 83 timevalsub(&min_wait, &tv); 84 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 85 /* 86 * if we hit here, we don't have enough seconds on the clock 87 * to account for the RTO. We just let the lower seconds be 88 * the bounds and don't worry about it. This may mean we 89 * will mark a lot more than we should. 90 */ 91 min_wait.tv_sec = min_wait.tv_usec = 0; 92 } 93 TAILQ_FOREACH_REVERSE_SAFE(chk, &stcb->asoc.sent_queue, sctpchunk_listhead, sctp_next, pchk) { 94 if (chk->whoTo != net) { 95 continue; 96 } 97 if (chk->sent == SCTP_DATAGRAM_RESEND) 98 cnt_resend++; 99 else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 100 (chk->sent < SCTP_DATAGRAM_RESEND)) { 101 /* pending, may need retran */ 102 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 103 /* 104 * we have reached a chunk that was sent 105 * some seconds past our min.. forget it we 106 * will find no more to send. 107 */ 108 continue; 109 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 110 /* 111 * we must look at the micro seconds to 112 * know. 113 */ 114 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 115 /* 116 * ok it was sent after our boundary 117 * time. 118 */ 119 continue; 120 } 121 } 122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) { 123 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 124 4, SCTP_FR_MARKED_EARLY); 125 } 126 SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 127 chk->sent = SCTP_DATAGRAM_RESEND; 128 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 129 /* double book size since we are doing an early FR */ 130 chk->book_size_scale++; 131 cnt += chk->send_size; 132 if ((cnt + net->flight_size) > net->cwnd) { 133 /* Mark all we could possibly resend */ 134 break; 135 } 136 } 137 } 138 if (cnt) { 139 /* 140 * JRS - Use the congestion control given in the congestion 141 * control module 142 */ 143 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net); 144 } else if (cnt_resend) { 145 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 146 } 147 /* Restart it? */ 148 if (net->flight_size < net->cwnd) { 149 SCTP_STAT_INCR(sctps_earlyfrstrtmr); 150 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 151 } 152 } 153 154 void 155 sctp_audit_retranmission_queue(struct sctp_association *asoc) 156 { 157 struct sctp_tmit_chunk *chk; 158 159 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 160 asoc->sent_queue_retran_cnt, 161 asoc->sent_queue_cnt); 162 asoc->sent_queue_retran_cnt = 0; 163 asoc->sent_queue_cnt = 0; 164 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 165 if (chk->sent == SCTP_DATAGRAM_RESEND) { 166 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 167 } 168 asoc->sent_queue_cnt++; 169 } 170 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 171 if (chk->sent == SCTP_DATAGRAM_RESEND) { 172 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 173 } 174 } 175 TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 176 if (chk->sent == SCTP_DATAGRAM_RESEND) { 177 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 178 } 179 } 180 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 181 asoc->sent_queue_retran_cnt, 182 asoc->sent_queue_cnt); 183 } 184 185 int 186 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 187 struct sctp_nets *net, uint16_t threshold) 188 { 189 if (net) { 190 net->error_count++; 191 SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 192 net, net->error_count, 193 net->failure_threshold); 194 if (net->error_count > net->failure_threshold) { 195 /* We had a threshold failure */ 196 if (net->dest_state & SCTP_ADDR_REACHABLE) { 197 net->dest_state &= ~SCTP_ADDR_REACHABLE; 198 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 199 net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 200 if (net == stcb->asoc.primary_destination) { 201 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 202 } 203 /* 204 * JRS 5/14/07 - If a destination is 205 * unreachable, the PF bit is turned off. 206 * This allows an unambiguous use of the PF 207 * bit for destinations that are reachable 208 * but potentially failed. If the 209 * destination is set to the unreachable 210 * state, also set the destination to the PF 211 * state. 212 */ 213 /* 214 * Add debug message here if destination is 215 * not in PF state. 216 */ 217 /* Stop any running T3 timers here? */ 218 if ((stcb->asoc.sctp_cmt_on_off > 0) && 219 (stcb->asoc.sctp_cmt_pf > 0)) { 220 net->dest_state &= ~SCTP_ADDR_PF; 221 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 222 net); 223 } 224 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 225 stcb, 226 SCTP_FAILED_THRESHOLD, 227 (void *)net, SCTP_SO_NOT_LOCKED); 228 } 229 } 230 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 231 *********ROUTING CODE 232 */ 233 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 234 *********ROUTING CODE 235 */ 236 } 237 if (stcb == NULL) 238 return (0); 239 240 if (net) { 241 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 243 sctp_misc_ints(SCTP_THRESHOLD_INCR, 244 stcb->asoc.overall_error_count, 245 (stcb->asoc.overall_error_count + 1), 246 SCTP_FROM_SCTP_TIMER, 247 __LINE__); 248 } 249 stcb->asoc.overall_error_count++; 250 } 251 } else { 252 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 253 sctp_misc_ints(SCTP_THRESHOLD_INCR, 254 stcb->asoc.overall_error_count, 255 (stcb->asoc.overall_error_count + 1), 256 SCTP_FROM_SCTP_TIMER, 257 __LINE__); 258 } 259 stcb->asoc.overall_error_count++; 260 } 261 SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 262 &stcb->asoc, stcb->asoc.overall_error_count, 263 (uint32_t) threshold, 264 ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); 265 /* 266 * We specifically do not do >= to give the assoc one more change 267 * before we fail it. 268 */ 269 if (stcb->asoc.overall_error_count > threshold) { 270 /* Abort notification sends a ULP notify */ 271 struct mbuf *oper; 272 273 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 274 0, M_DONTWAIT, 1, MT_DATA); 275 if (oper) { 276 struct sctp_paramhdr *ph; 277 uint32_t *ippp; 278 279 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 280 sizeof(uint32_t); 281 ph = mtod(oper, struct sctp_paramhdr *); 282 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 283 ph->param_length = htons(SCTP_BUF_LEN(oper)); 284 ippp = (uint32_t *) (ph + 1); 285 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 286 } 287 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 288 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED); 289 return (1); 290 } 291 return (0); 292 } 293 294 /* 295 * sctp_find_alternate_net() returns a non-NULL pointer as long 296 * the argument net is non-NULL. 297 */ 298 struct sctp_nets * 299 sctp_find_alternate_net(struct sctp_tcb *stcb, 300 struct sctp_nets *net, 301 int mode) 302 { 303 /* Find and return an alternate network if possible */ 304 struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL; 305 int once; 306 307 /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ 308 int min_errors = -1; 309 uint32_t max_cwnd = 0; 310 311 if (stcb->asoc.numnets == 1) { 312 /* No others but net */ 313 return (TAILQ_FIRST(&stcb->asoc.nets)); 314 } 315 /* 316 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate 317 * net algorithm. This algorithm chooses the active destination (not 318 * in PF state) with the largest cwnd value. If all destinations are 319 * in PF state, unreachable, or unconfirmed, choose the desination 320 * that is in PF state with the lowest error count. In case of a 321 * tie, choose the destination that was most recently active. 322 */ 323 if (mode == 2) { 324 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 325 /* 326 * JRS 5/14/07 - If the destination is unreachable 327 * or unconfirmed, skip it. 328 */ 329 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 330 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 331 continue; 332 } 333 /* 334 * JRS 5/14/07 - If the destination is reachable 335 * but in PF state, compare the error count of the 336 * destination to the minimum error count seen thus 337 * far. Store the destination with the lower error 338 * count. If the error counts are equal, store the 339 * destination that was most recently active. 340 */ 341 if (mnet->dest_state & SCTP_ADDR_PF) { 342 /* 343 * JRS 5/14/07 - If the destination under 344 * consideration is the current destination, 345 * work as if the error count is one higher. 346 * The actual error count will not be 347 * incremented until later in the t3 348 * handler. 349 */ 350 if (mnet == net) { 351 if (min_errors == -1) { 352 min_errors = mnet->error_count + 1; 353 min_errors_net = mnet; 354 } else if (mnet->error_count + 1 < min_errors) { 355 min_errors = mnet->error_count + 1; 356 min_errors_net = mnet; 357 } else if (mnet->error_count + 1 == min_errors 358 && mnet->last_active > min_errors_net->last_active) { 359 min_errors_net = mnet; 360 min_errors = mnet->error_count + 1; 361 } 362 continue; 363 } else { 364 if (min_errors == -1) { 365 min_errors = mnet->error_count; 366 min_errors_net = mnet; 367 } else if (mnet->error_count < min_errors) { 368 min_errors = mnet->error_count; 369 min_errors_net = mnet; 370 } else if (mnet->error_count == min_errors 371 && mnet->last_active > min_errors_net->last_active) { 372 min_errors_net = mnet; 373 min_errors = mnet->error_count; 374 } 375 continue; 376 } 377 } 378 /* 379 * JRS 5/14/07 - If the destination is reachable and 380 * not in PF state, compare the cwnd of the 381 * destination to the highest cwnd seen thus far. 382 * Store the destination with the higher cwnd value. 383 * If the cwnd values are equal, randomly choose one 384 * of the two destinations. 385 */ 386 if (max_cwnd < mnet->cwnd) { 387 max_cwnd_net = mnet; 388 max_cwnd = mnet->cwnd; 389 } else if (max_cwnd == mnet->cwnd) { 390 uint32_t rndval; 391 uint8_t this_random; 392 393 if (stcb->asoc.hb_random_idx > 3) { 394 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 395 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 396 this_random = stcb->asoc.hb_random_values[0]; 397 stcb->asoc.hb_random_idx++; 398 stcb->asoc.hb_ect_randombit = 0; 399 } else { 400 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 401 stcb->asoc.hb_random_idx++; 402 stcb->asoc.hb_ect_randombit = 0; 403 } 404 if (this_random % 2 == 1) { 405 max_cwnd_net = mnet; 406 max_cwnd = mnet->cwnd; /* Useless? */ 407 } 408 } 409 } 410 /* 411 * JRS 5/14/07 - After all destination have been considered 412 * as alternates, check to see if there was some active 413 * destination (not in PF state). If not, check to see if 414 * there was some PF destination with the minimum number of 415 * errors. If not, return the original destination. If 416 * there is a min_errors_net, remove the PF flag from that 417 * destination, set the cwnd to one or two MTUs, and return 418 * the destination as an alt. If there was some active 419 * destination with a highest cwnd, return the destination 420 * as an alt. 421 */ 422 if (max_cwnd_net == NULL) { 423 if (min_errors_net == NULL) { 424 return (net); 425 } 426 min_errors_net->dest_state &= ~SCTP_ADDR_PF; 427 min_errors_net->cwnd = min_errors_net->mtu * stcb->asoc.sctp_cmt_pf; 428 if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 429 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 430 stcb, min_errors_net, 431 SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 432 } 433 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 434 min_errors_net, min_errors_net->error_count); 435 return (min_errors_net); 436 } else { 437 return (max_cwnd_net); 438 } 439 } 440 /* 441 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 442 * choosing an alternate net. 443 */ 444 else if (mode == 1) { 445 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 446 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 447 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 448 /* 449 * will skip ones that are not-reachable or 450 * unconfirmed 451 */ 452 continue; 453 } 454 if (max_cwnd < mnet->cwnd) { 455 max_cwnd_net = mnet; 456 max_cwnd = mnet->cwnd; 457 } else if (max_cwnd == mnet->cwnd) { 458 uint32_t rndval; 459 uint8_t this_random; 460 461 if (stcb->asoc.hb_random_idx > 3) { 462 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 463 memcpy(stcb->asoc.hb_random_values, &rndval, 464 sizeof(stcb->asoc.hb_random_values)); 465 this_random = stcb->asoc.hb_random_values[0]; 466 stcb->asoc.hb_random_idx = 0; 467 stcb->asoc.hb_ect_randombit = 0; 468 } else { 469 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 470 stcb->asoc.hb_random_idx++; 471 stcb->asoc.hb_ect_randombit = 0; 472 } 473 if (this_random % 2) { 474 max_cwnd_net = mnet; 475 max_cwnd = mnet->cwnd; 476 } 477 } 478 } 479 if (max_cwnd_net) { 480 return (max_cwnd_net); 481 } 482 } 483 mnet = net; 484 once = 0; 485 486 if (mnet == NULL) { 487 mnet = TAILQ_FIRST(&stcb->asoc.nets); 488 if (mnet == NULL) { 489 return (NULL); 490 } 491 } 492 do { 493 alt = TAILQ_NEXT(mnet, sctp_next); 494 if (alt == NULL) { 495 once++; 496 if (once > 1) { 497 break; 498 } 499 alt = TAILQ_FIRST(&stcb->asoc.nets); 500 if (alt == NULL) { 501 return (NULL); 502 } 503 } 504 if (alt->ro.ro_rt == NULL) { 505 if (alt->ro._s_addr) { 506 sctp_free_ifa(alt->ro._s_addr); 507 alt->ro._s_addr = NULL; 508 } 509 alt->src_addr_selected = 0; 510 } 511 /* sa_ignore NO_NULL_CHK */ 512 if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 513 (alt->ro.ro_rt != NULL) && 514 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) { 515 /* Found a reachable address */ 516 break; 517 } 518 mnet = alt; 519 } while (alt != NULL); 520 521 if (alt == NULL) { 522 /* Case where NO insv network exists (dormant state) */ 523 /* we rotate destinations */ 524 once = 0; 525 mnet = net; 526 do { 527 if (mnet == NULL) { 528 return (TAILQ_FIRST(&stcb->asoc.nets)); 529 } 530 alt = TAILQ_NEXT(mnet, sctp_next); 531 if (alt == NULL) { 532 once++; 533 if (once > 1) { 534 break; 535 } 536 alt = TAILQ_FIRST(&stcb->asoc.nets); 537 } 538 /* sa_ignore NO_NULL_CHK */ 539 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 540 (alt != net)) { 541 /* Found an alternate address */ 542 break; 543 } 544 mnet = alt; 545 } while (alt != NULL); 546 } 547 if (alt == NULL) { 548 return (net); 549 } 550 return (alt); 551 } 552 553 static void 554 sctp_backoff_on_timeout(struct sctp_tcb *stcb, 555 struct sctp_nets *net, 556 int win_probe, 557 int num_marked, int num_abandoned) 558 { 559 if (net->RTO == 0) { 560 net->RTO = stcb->asoc.minrto; 561 } 562 net->RTO <<= 1; 563 if (net->RTO > stcb->asoc.maxrto) { 564 net->RTO = stcb->asoc.maxrto; 565 } 566 if ((win_probe == 0) && (num_marked || num_abandoned)) { 567 /* We don't apply penalty to window probe scenarios */ 568 /* JRS - Use the congestion control given in the CC module */ 569 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 570 } 571 } 572 573 #ifndef INVARIANTS 574 static void 575 sctp_recover_sent_list(struct sctp_tcb *stcb) 576 { 577 struct sctp_tmit_chunk *chk, *nchk; 578 struct sctp_association *asoc; 579 580 asoc = &stcb->asoc; 581 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 582 if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) { 583 SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 584 chk, chk->rec.data.TSN_seq, asoc->last_acked_seq); 585 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 586 if (chk->pr_sctp_on) { 587 if (asoc->pr_sctp_cnt != 0) 588 asoc->pr_sctp_cnt--; 589 } 590 if (chk->data) { 591 /* sa_ignore NO_NULL_CHK */ 592 sctp_free_bufspace(stcb, asoc, chk, 1); 593 sctp_m_freem(chk->data); 594 chk->data = NULL; 595 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 596 asoc->sent_queue_cnt_removeable--; 597 } 598 } 599 asoc->sent_queue_cnt--; 600 sctp_free_a_chunk(stcb, chk); 601 } 602 } 603 SCTP_PRINTF("after recover order is as follows\n"); 604 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 605 SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 606 } 607 } 608 609 #endif 610 611 static int 612 sctp_mark_all_for_resend(struct sctp_tcb *stcb, 613 struct sctp_nets *net, 614 struct sctp_nets *alt, 615 int window_probe, 616 int *num_marked, 617 int *num_abandoned) 618 { 619 620 /* 621 * Mark all chunks (well not all) that were sent to *net for 622 * retransmission. Move them to alt for there destination as well... 623 * We only mark chunks that have been outstanding long enough to 624 * have received feed-back. 625 */ 626 struct sctp_tmit_chunk *chk, *nchk; 627 struct sctp_nets *lnets; 628 struct timeval now, min_wait, tv; 629 int cur_rto; 630 int cnt_abandoned; 631 int audit_tf, num_mk, fir; 632 unsigned int cnt_mk; 633 uint32_t orig_flight, orig_tf; 634 uint32_t tsnlast, tsnfirst; 635 int recovery_cnt = 0; 636 637 638 /* none in flight now */ 639 audit_tf = 0; 640 fir = 0; 641 /* 642 * figure out how long a data chunk must be pending before we can 643 * mark it .. 644 */ 645 (void)SCTP_GETTIME_TIMEVAL(&now); 646 /* get cur rto in micro-seconds */ 647 cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 648 cur_rto *= 1000; 649 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 650 sctp_log_fr(cur_rto, 651 stcb->asoc.peers_rwnd, 652 window_probe, 653 SCTP_FR_T3_MARK_TIME); 654 sctp_log_fr(net->flight_size, 655 SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 656 SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 657 SCTP_FR_CWND_REPORT); 658 sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 659 } 660 tv.tv_sec = cur_rto / 1000000; 661 tv.tv_usec = cur_rto % 1000000; 662 min_wait = now; 663 timevalsub(&min_wait, &tv); 664 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 665 /* 666 * if we hit here, we don't have enough seconds on the clock 667 * to account for the RTO. We just let the lower seconds be 668 * the bounds and don't worry about it. This may mean we 669 * will mark a lot more than we should. 670 */ 671 min_wait.tv_sec = min_wait.tv_usec = 0; 672 } 673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 674 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 675 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 676 } 677 /* 678 * Our rwnd will be incorrect here since we are not adding back the 679 * cnt * mbuf but we will fix that down below. 680 */ 681 orig_flight = net->flight_size; 682 orig_tf = stcb->asoc.total_flight; 683 684 net->fast_retran_ip = 0; 685 /* Now on to each chunk */ 686 cnt_abandoned = 0; 687 num_mk = cnt_mk = 0; 688 tsnfirst = tsnlast = 0; 689 #ifndef INVARIANTS 690 start_again: 691 #endif 692 TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) { 693 if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) { 694 /* Strange case our list got out of order? */ 695 SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x", 696 (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 697 recovery_cnt++; 698 #ifdef INVARIANTS 699 panic("last acked >= chk on sent-Q"); 700 #else 701 SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 702 sctp_recover_sent_list(stcb); 703 if (recovery_cnt < 10) { 704 goto start_again; 705 } else { 706 SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 707 } 708 #endif 709 } 710 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 711 /* 712 * found one to mark: If it is less than 713 * DATAGRAM_ACKED it MUST not be a skipped or marked 714 * TSN but instead one that is either already set 715 * for retransmission OR one that needs 716 * retransmission. 717 */ 718 719 /* validate its been outstanding long enough */ 720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 721 sctp_log_fr(chk->rec.data.TSN_seq, 722 chk->sent_rcv_time.tv_sec, 723 chk->sent_rcv_time.tv_usec, 724 SCTP_FR_T3_MARK_TIME); 725 } 726 if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 727 /* 728 * we have reached a chunk that was sent 729 * some seconds past our min.. forget it we 730 * will find no more to send. 731 */ 732 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 733 sctp_log_fr(0, 734 chk->sent_rcv_time.tv_sec, 735 chk->sent_rcv_time.tv_usec, 736 SCTP_FR_T3_STOPPED); 737 } 738 continue; 739 } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 740 (window_probe == 0)) { 741 /* 742 * we must look at the micro seconds to 743 * know. 744 */ 745 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 746 /* 747 * ok it was sent after our boundary 748 * time. 749 */ 750 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 751 sctp_log_fr(0, 752 chk->sent_rcv_time.tv_sec, 753 chk->sent_rcv_time.tv_usec, 754 SCTP_FR_T3_STOPPED); 755 } 756 continue; 757 } 758 } 759 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 760 /* Is it expired? */ 761 if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) { 762 /* Yes so drop it */ 763 if (chk->data) { 764 (void)sctp_release_pr_sctp_chunk(stcb, 765 chk, 766 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 767 SCTP_SO_NOT_LOCKED); 768 cnt_abandoned++; 769 } 770 continue; 771 } 772 } 773 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 774 /* Has it been retransmitted tv_sec times? */ 775 if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 776 if (chk->data) { 777 (void)sctp_release_pr_sctp_chunk(stcb, 778 chk, 779 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 780 SCTP_SO_NOT_LOCKED); 781 cnt_abandoned++; 782 } 783 continue; 784 } 785 } 786 if (chk->sent < SCTP_DATAGRAM_RESEND) { 787 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 788 num_mk++; 789 if (fir == 0) { 790 fir = 1; 791 tsnfirst = chk->rec.data.TSN_seq; 792 } 793 tsnlast = chk->rec.data.TSN_seq; 794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 795 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 796 0, SCTP_FR_T3_MARKED); 797 } 798 if (chk->rec.data.chunk_was_revoked) { 799 /* deflate the cwnd */ 800 chk->whoTo->cwnd -= chk->book_size; 801 chk->rec.data.chunk_was_revoked = 0; 802 } 803 net->marked_retrans++; 804 stcb->asoc.marked_retrans++; 805 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 806 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 807 chk->whoTo->flight_size, 808 chk->book_size, 809 (uintptr_t) chk->whoTo, 810 chk->rec.data.TSN_seq); 811 } 812 sctp_flight_size_decrease(chk); 813 sctp_total_flight_decrease(stcb, chk); 814 stcb->asoc.peers_rwnd += chk->send_size; 815 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 816 } 817 chk->sent = SCTP_DATAGRAM_RESEND; 818 SCTP_STAT_INCR(sctps_markedretrans); 819 820 /* reset the TSN for striking and other FR stuff */ 821 chk->rec.data.doing_fast_retransmit = 0; 822 /* Clear any time so NO RTT is being done */ 823 chk->do_rtt = 0; 824 if (alt != net) { 825 sctp_free_remote_addr(chk->whoTo); 826 chk->no_fr_allowed = 1; 827 chk->whoTo = alt; 828 atomic_add_int(&alt->ref_count, 1); 829 } else { 830 chk->no_fr_allowed = 0; 831 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 832 chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 833 } else { 834 chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 835 } 836 } 837 /* 838 * CMT: Do not allow FRs on retransmitted TSNs. 839 */ 840 if (stcb->asoc.sctp_cmt_on_off > 0) { 841 chk->no_fr_allowed = 1; 842 } 843 #ifdef THIS_SHOULD_NOT_BE_DONE 844 } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 845 /* remember highest acked one */ 846 could_be_sent = chk; 847 #endif 848 } 849 if (chk->sent == SCTP_DATAGRAM_RESEND) { 850 cnt_mk++; 851 } 852 } 853 if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 854 /* we did not subtract the same things? */ 855 audit_tf = 1; 856 } 857 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 858 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 859 } 860 #ifdef SCTP_DEBUG 861 if (num_mk) { 862 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 863 tsnlast); 864 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 865 num_mk, (u_long)stcb->asoc.peers_rwnd); 866 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 867 tsnlast); 868 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 869 num_mk, 870 (int)stcb->asoc.peers_rwnd); 871 } 872 #endif 873 *num_marked = num_mk; 874 *num_abandoned = cnt_abandoned; 875 /* 876 * Now check for a ECN Echo that may be stranded And include the 877 * cnt_mk'd to have all resends in the control queue. 878 */ 879 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 880 if (chk->sent == SCTP_DATAGRAM_RESEND) { 881 cnt_mk++; 882 } 883 if ((chk->whoTo == net) && 884 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 885 sctp_free_remote_addr(chk->whoTo); 886 chk->whoTo = alt; 887 if (chk->sent != SCTP_DATAGRAM_RESEND) { 888 chk->sent = SCTP_DATAGRAM_RESEND; 889 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 890 cnt_mk++; 891 } 892 atomic_add_int(&alt->ref_count, 1); 893 } 894 } 895 #ifdef THIS_SHOULD_NOT_BE_DONE 896 if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 897 /* fix it so we retransmit the highest acked anyway */ 898 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 899 cnt_mk++; 900 could_be_sent->sent = SCTP_DATAGRAM_RESEND; 901 } 902 #endif 903 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 904 #ifdef INVARIANTS 905 SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 906 cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 907 #endif 908 #ifndef SCTP_AUDITING_ENABLED 909 stcb->asoc.sent_queue_retran_cnt = cnt_mk; 910 #endif 911 } 912 if (audit_tf) { 913 SCTPDBG(SCTP_DEBUG_TIMER4, 914 "Audit total flight due to negative value net:%p\n", 915 net); 916 stcb->asoc.total_flight = 0; 917 stcb->asoc.total_flight_count = 0; 918 /* Clear all networks flight size */ 919 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 920 lnets->flight_size = 0; 921 SCTPDBG(SCTP_DEBUG_TIMER4, 922 "Net:%p c-f cwnd:%d ssthresh:%d\n", 923 lnets, lnets->cwnd, lnets->ssthresh); 924 } 925 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 926 if (chk->sent < SCTP_DATAGRAM_RESEND) { 927 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 928 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 929 chk->whoTo->flight_size, 930 chk->book_size, 931 (uintptr_t) chk->whoTo, 932 chk->rec.data.TSN_seq); 933 } 934 sctp_flight_size_increase(chk); 935 sctp_total_flight_increase(stcb, chk); 936 } 937 } 938 } 939 /* We return 1 if we only have a window probe outstanding */ 940 return (0); 941 } 942 943 944 int 945 sctp_t3rxt_timer(struct sctp_inpcb *inp, 946 struct sctp_tcb *stcb, 947 struct sctp_nets *net) 948 { 949 struct sctp_nets *alt; 950 int win_probe, num_mk, num_abandoned; 951 952 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 953 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 954 } 955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 956 struct sctp_nets *lnet; 957 958 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 959 if (net == lnet) { 960 sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 961 } else { 962 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 963 } 964 } 965 } 966 /* Find an alternate and mark those for retransmission */ 967 if ((stcb->asoc.peers_rwnd == 0) && 968 (stcb->asoc.total_flight < net->mtu)) { 969 SCTP_STAT_INCR(sctps_timowindowprobe); 970 win_probe = 1; 971 } else { 972 win_probe = 0; 973 } 974 975 /* 976 * JRS 5/14/07 - If CMT PF is on and the destination if not already 977 * in PF state, set the destination to PF state and store the 978 * current time as the time that the destination was last active. In 979 * addition, find an alternate destination with PF-based 980 * find_alt_net(). 981 */ 982 if ((stcb->asoc.sctp_cmt_on_off > 0) && 983 (stcb->asoc.sctp_cmt_pf > 0)) { 984 if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 985 net->dest_state |= SCTP_ADDR_PF; 986 net->last_active = sctp_get_tick_count(); 987 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 988 net); 989 } 990 alt = sctp_find_alternate_net(stcb, net, 2); 991 } else if (stcb->asoc.sctp_cmt_on_off > 0) { 992 /* 993 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 994 * used, then pick dest with largest ssthresh for any 995 * retransmission. 996 */ 997 alt = sctp_find_alternate_net(stcb, net, 1); 998 /* 999 * CUCv2: If a different dest is picked for the 1000 * retransmission, then new (rtx-)pseudo_cumack needs to be 1001 * tracked for orig dest. Let CUCv2 track new (rtx-) 1002 * pseudo-cumack always. 1003 */ 1004 net->find_pseudo_cumack = 1; 1005 net->find_rtx_pseudo_cumack = 1; 1006 } else { /* CMT is OFF */ 1007 alt = sctp_find_alternate_net(stcb, net, 0); 1008 } 1009 num_mk = 0; 1010 num_abandoned = 0; 1011 (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, 1012 &num_mk, &num_abandoned); 1013 /* FR Loss recovery just ended with the T3. */ 1014 stcb->asoc.fast_retran_loss_recovery = 0; 1015 1016 /* CMT FR loss recovery ended with the T3 */ 1017 net->fast_retran_loss_recovery = 0; 1018 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && 1019 (net->flight_size == 0)) { 1020 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net); 1021 } 1022 /* 1023 * setup the sat loss recovery that prevents satellite cwnd advance. 1024 */ 1025 stcb->asoc.sat_t3_loss_recovery = 1; 1026 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1027 1028 /* Backoff the timer and cwnd */ 1029 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); 1030 if (win_probe == 0) { 1031 /* We don't do normal threshold management on window probes */ 1032 if (sctp_threshold_management(inp, stcb, net, 1033 stcb->asoc.max_send_times)) { 1034 /* Association was destroyed */ 1035 return (1); 1036 } else { 1037 if (net != stcb->asoc.primary_destination) { 1038 /* send a immediate HB if our RTO is stale */ 1039 struct timeval now; 1040 unsigned int ms_goneby; 1041 1042 (void)SCTP_GETTIME_TIMEVAL(&now); 1043 if (net->last_sent_time.tv_sec) { 1044 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1045 } else { 1046 ms_goneby = 0; 1047 } 1048 if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1049 /* 1050 * no recent feed back in an RTO or 1051 * more, request a RTT update 1052 */ 1053 if (sctp_send_hb(stcb, 1, net) < 0) 1054 /* 1055 * Less than 0 means we lost 1056 * the assoc 1057 */ 1058 return (1); 1059 } 1060 } 1061 } 1062 } else { 1063 /* 1064 * For a window probe we don't penalize the net's but only 1065 * the association. This may fail it if SACKs are not coming 1066 * back. If sack's are coming with rwnd locked at 0, we will 1067 * continue to hold things waiting for rwnd to raise 1068 */ 1069 if (sctp_threshold_management(inp, stcb, NULL, 1070 stcb->asoc.max_send_times)) { 1071 /* Association was destroyed */ 1072 return (1); 1073 } 1074 } 1075 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1076 /* Move all pending over too */ 1077 sctp_move_chunks_from_net(stcb, net); 1078 1079 /* 1080 * Get the address that failed, to force a new src address 1081 * selecton and a route allocation. 1082 */ 1083 if (net->ro._s_addr) { 1084 sctp_free_ifa(net->ro._s_addr); 1085 net->ro._s_addr = NULL; 1086 } 1087 net->src_addr_selected = 0; 1088 1089 /* Force a route allocation too */ 1090 if (net->ro.ro_rt) { 1091 RTFREE(net->ro.ro_rt); 1092 net->ro.ro_rt = NULL; 1093 } 1094 /* Was it our primary? */ 1095 if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1096 /* 1097 * Yes, note it as such and find an alternate note: 1098 * this means HB code must use this to resent the 1099 * primary if it goes active AND if someone does a 1100 * change-primary then this flag must be cleared 1101 * from any net structures. 1102 */ 1103 if (sctp_set_primary_addr(stcb, 1104 (struct sockaddr *)NULL, 1105 alt) == 0) { 1106 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1107 } 1108 } 1109 } else if ((stcb->asoc.sctp_cmt_on_off > 0) && 1110 (stcb->asoc.sctp_cmt_pf > 0) && 1111 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 1112 /* 1113 * JRS 5/14/07 - If the destination hasn't failed completely 1114 * but is in PF state, a PF-heartbeat needs to be sent 1115 * manually. 1116 */ 1117 if (sctp_send_hb(stcb, 1, net) < 0) 1118 /* Return less than 0 means we lost the association */ 1119 return (1); 1120 } 1121 /* 1122 * Special case for cookie-echo'ed case, we don't do output but must 1123 * await the COOKIE-ACK before retransmission 1124 */ 1125 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1126 /* 1127 * Here we just reset the timer and start again since we 1128 * have not established the asoc 1129 */ 1130 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1131 return (0); 1132 } 1133 if (stcb->asoc.peer_supports_prsctp) { 1134 struct sctp_tmit_chunk *lchk; 1135 1136 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1137 /* C3. See if we need to send a Fwd-TSN */ 1138 if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { 1139 send_forward_tsn(stcb, &stcb->asoc); 1140 if (lchk) { 1141 /* Assure a timer is up */ 1142 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1143 } 1144 } 1145 } 1146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1147 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1148 } 1149 return (0); 1150 } 1151 1152 int 1153 sctp_t1init_timer(struct sctp_inpcb *inp, 1154 struct sctp_tcb *stcb, 1155 struct sctp_nets *net) 1156 { 1157 /* bump the thresholds */ 1158 if (stcb->asoc.delayed_connection) { 1159 /* 1160 * special hook for delayed connection. The library did NOT 1161 * complete the rest of its sends. 1162 */ 1163 stcb->asoc.delayed_connection = 0; 1164 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1165 return (0); 1166 } 1167 if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1168 return (0); 1169 } 1170 if (sctp_threshold_management(inp, stcb, net, 1171 stcb->asoc.max_init_times)) { 1172 /* Association was destroyed */ 1173 return (1); 1174 } 1175 stcb->asoc.dropped_special_cnt = 0; 1176 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1177 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1178 net->RTO = stcb->asoc.initial_init_rto_max; 1179 } 1180 if (stcb->asoc.numnets > 1) { 1181 /* If we have more than one addr use it */ 1182 struct sctp_nets *alt; 1183 1184 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1185 if (alt != stcb->asoc.primary_destination) { 1186 sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination); 1187 stcb->asoc.primary_destination = alt; 1188 } 1189 } 1190 /* Send out a new init */ 1191 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1192 return (0); 1193 } 1194 1195 /* 1196 * For cookie and asconf we actually need to find and mark for resend, then 1197 * increment the resend counter (after all the threshold management stuff of 1198 * course). 1199 */ 1200 int 1201 sctp_cookie_timer(struct sctp_inpcb *inp, 1202 struct sctp_tcb *stcb, 1203 struct sctp_nets *net) 1204 { 1205 struct sctp_nets *alt; 1206 struct sctp_tmit_chunk *cookie; 1207 1208 /* first before all else we must find the cookie */ 1209 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1210 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1211 break; 1212 } 1213 } 1214 if (cookie == NULL) { 1215 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1216 /* FOOBAR! */ 1217 struct mbuf *oper; 1218 1219 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1220 0, M_DONTWAIT, 1, MT_DATA); 1221 if (oper) { 1222 struct sctp_paramhdr *ph; 1223 uint32_t *ippp; 1224 1225 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1226 sizeof(uint32_t); 1227 ph = mtod(oper, struct sctp_paramhdr *); 1228 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1229 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1230 ippp = (uint32_t *) (ph + 1); 1231 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); 1232 } 1233 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4; 1234 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR, 1235 oper, SCTP_SO_NOT_LOCKED); 1236 } else { 1237 #ifdef INVARIANTS 1238 panic("Cookie timer expires in wrong state?"); 1239 #else 1240 SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); 1241 return (0); 1242 #endif 1243 } 1244 return (0); 1245 } 1246 /* Ok we found the cookie, threshold management next */ 1247 if (sctp_threshold_management(inp, stcb, cookie->whoTo, 1248 stcb->asoc.max_init_times)) { 1249 /* Assoc is over */ 1250 return (1); 1251 } 1252 /* 1253 * cleared theshold management now lets backoff the address & select 1254 * an alternate 1255 */ 1256 stcb->asoc.dropped_special_cnt = 0; 1257 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0); 1258 alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); 1259 if (alt != cookie->whoTo) { 1260 sctp_free_remote_addr(cookie->whoTo); 1261 cookie->whoTo = alt; 1262 atomic_add_int(&alt->ref_count, 1); 1263 } 1264 /* Now mark the retran info */ 1265 if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1266 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1267 } 1268 cookie->sent = SCTP_DATAGRAM_RESEND; 1269 /* 1270 * Now call the output routine to kick out the cookie again, Note we 1271 * don't mark any chunks for retran so that FR will need to kick in 1272 * to move these (or a send timer). 1273 */ 1274 return (0); 1275 } 1276 1277 int 1278 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1279 struct sctp_nets *net) 1280 { 1281 struct sctp_nets *alt; 1282 struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1283 1284 if (stcb->asoc.stream_reset_outstanding == 0) { 1285 return (0); 1286 } 1287 /* find the existing STRRESET, we use the seq number we sent out on */ 1288 (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1289 if (strrst == NULL) { 1290 return (0); 1291 } 1292 /* do threshold management */ 1293 if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1294 stcb->asoc.max_send_times)) { 1295 /* Assoc is over */ 1296 return (1); 1297 } 1298 /* 1299 * cleared theshold management now lets backoff the address & select 1300 * an alternate 1301 */ 1302 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); 1303 alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1304 sctp_free_remote_addr(strrst->whoTo); 1305 strrst->whoTo = alt; 1306 atomic_add_int(&alt->ref_count, 1); 1307 1308 /* See if a ECN Echo is also stranded */ 1309 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1310 if ((chk->whoTo == net) && 1311 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1312 sctp_free_remote_addr(chk->whoTo); 1313 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1314 chk->sent = SCTP_DATAGRAM_RESEND; 1315 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1316 } 1317 chk->whoTo = alt; 1318 atomic_add_int(&alt->ref_count, 1); 1319 } 1320 } 1321 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1322 /* 1323 * If the address went un-reachable, we need to move to 1324 * alternates for ALL chk's in queue 1325 */ 1326 sctp_move_chunks_from_net(stcb, net); 1327 } 1328 /* mark the retran info */ 1329 if (strrst->sent != SCTP_DATAGRAM_RESEND) 1330 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1331 strrst->sent = SCTP_DATAGRAM_RESEND; 1332 1333 /* restart the timer */ 1334 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1335 return (0); 1336 } 1337 1338 int 1339 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1340 struct sctp_nets *net) 1341 { 1342 struct sctp_nets *alt; 1343 struct sctp_tmit_chunk *asconf, *chk; 1344 1345 /* is this a first send, or a retransmission? */ 1346 if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1347 /* compose a new ASCONF chunk and send it */ 1348 sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1349 } else { 1350 /* 1351 * Retransmission of the existing ASCONF is needed 1352 */ 1353 1354 /* find the existing ASCONF */ 1355 asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1356 if (asconf == NULL) { 1357 return (0); 1358 } 1359 /* do threshold management */ 1360 if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1361 stcb->asoc.max_send_times)) { 1362 /* Assoc is over */ 1363 return (1); 1364 } 1365 if (asconf->snd_count > stcb->asoc.max_send_times) { 1366 /* 1367 * Something is rotten: our peer is not responding 1368 * to ASCONFs but apparently is to other chunks. 1369 * i.e. it is not properly handling the chunk type 1370 * upper bits. Mark this peer as ASCONF incapable 1371 * and cleanup. 1372 */ 1373 SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1374 sctp_asconf_cleanup(stcb, net); 1375 return (0); 1376 } 1377 /* 1378 * cleared threshold management, so now backoff the net and 1379 * select an alternate 1380 */ 1381 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); 1382 alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1383 if (asconf->whoTo != alt) { 1384 sctp_free_remote_addr(asconf->whoTo); 1385 asconf->whoTo = alt; 1386 atomic_add_int(&alt->ref_count, 1); 1387 } 1388 /* See if an ECN Echo is also stranded */ 1389 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1390 if ((chk->whoTo == net) && 1391 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1392 sctp_free_remote_addr(chk->whoTo); 1393 chk->whoTo = alt; 1394 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1395 chk->sent = SCTP_DATAGRAM_RESEND; 1396 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1397 } 1398 atomic_add_int(&alt->ref_count, 1); 1399 } 1400 } 1401 TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) { 1402 if (chk->whoTo != alt) { 1403 sctp_free_remote_addr(chk->whoTo); 1404 chk->whoTo = alt; 1405 atomic_add_int(&alt->ref_count, 1); 1406 } 1407 if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1408 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1409 chk->sent = SCTP_DATAGRAM_RESEND; 1410 } 1411 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1412 /* 1413 * If the address went un-reachable, we need to move 1414 * to the alternate for ALL chunks in queue 1415 */ 1416 sctp_move_chunks_from_net(stcb, net); 1417 } 1418 /* mark the retran info */ 1419 if (asconf->sent != SCTP_DATAGRAM_RESEND) 1420 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1421 asconf->sent = SCTP_DATAGRAM_RESEND; 1422 1423 /* send another ASCONF if any and we can do */ 1424 sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1425 } 1426 return (0); 1427 } 1428 1429 /* Mobility adaptation */ 1430 void 1431 sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1432 struct sctp_nets *net) 1433 { 1434 if (stcb->asoc.deleted_primary == NULL) { 1435 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1436 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1437 return; 1438 } 1439 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1440 SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1441 sctp_free_remote_addr(stcb->asoc.deleted_primary); 1442 stcb->asoc.deleted_primary = NULL; 1443 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1444 return; 1445 } 1446 1447 /* 1448 * For the shutdown and shutdown-ack, we do not keep one around on the 1449 * control queue. This means we must generate a new one and call the general 1450 * chunk output routine, AFTER having done threshold management. 1451 * It is assumed that net is non-NULL. 1452 */ 1453 int 1454 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1455 struct sctp_nets *net) 1456 { 1457 struct sctp_nets *alt; 1458 1459 /* first threshold managment */ 1460 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1461 /* Assoc is over */ 1462 return (1); 1463 } 1464 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1465 /* second select an alternative */ 1466 alt = sctp_find_alternate_net(stcb, net, 0); 1467 1468 /* third generate a shutdown into the queue for out net */ 1469 sctp_send_shutdown(stcb, alt); 1470 1471 /* fourth restart timer */ 1472 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1473 return (0); 1474 } 1475 1476 int 1477 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1478 struct sctp_nets *net) 1479 { 1480 struct sctp_nets *alt; 1481 1482 /* first threshold managment */ 1483 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1484 /* Assoc is over */ 1485 return (1); 1486 } 1487 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1488 /* second select an alternative */ 1489 alt = sctp_find_alternate_net(stcb, net, 0); 1490 1491 /* third generate a shutdown into the queue for out net */ 1492 sctp_send_shutdown_ack(stcb, alt); 1493 1494 /* fourth restart timer */ 1495 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1496 return (0); 1497 } 1498 1499 static void 1500 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1501 struct sctp_tcb *stcb) 1502 { 1503 struct sctp_stream_queue_pending *sp; 1504 unsigned int i, chks_in_queue = 0; 1505 int being_filled = 0; 1506 1507 /* 1508 * This function is ONLY called when the send/sent queues are empty. 1509 */ 1510 if ((stcb == NULL) || (inp == NULL)) 1511 return; 1512 1513 if (stcb->asoc.sent_queue_retran_cnt) { 1514 SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1515 stcb->asoc.sent_queue_retran_cnt); 1516 stcb->asoc.sent_queue_retran_cnt = 0; 1517 } 1518 if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) { 1519 /* No stream scheduler information, initialize scheduler */ 1520 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0); 1521 if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) { 1522 /* yep, we lost a stream or two */ 1523 SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n"); 1524 } else { 1525 /* no streams lost */ 1526 stcb->asoc.total_output_queue_size = 0; 1527 } 1528 } 1529 /* Check to see if some data queued, if so report it */ 1530 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1531 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1532 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { 1533 if (sp->msg_is_complete) 1534 being_filled++; 1535 chks_in_queue++; 1536 } 1537 } 1538 } 1539 if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1540 SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1541 stcb->asoc.stream_queue_cnt, chks_in_queue); 1542 } 1543 if (chks_in_queue) { 1544 /* call the output queue function */ 1545 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1546 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1547 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1548 /* 1549 * Probably should go in and make it go back through 1550 * and add fragments allowed 1551 */ 1552 if (being_filled == 0) { 1553 SCTP_PRINTF("Still nothing moved %d chunks are stuck\n", 1554 chks_in_queue); 1555 } 1556 } 1557 } else { 1558 SCTP_PRINTF("Found no chunks on any queue tot:%lu\n", 1559 (u_long)stcb->asoc.total_output_queue_size); 1560 stcb->asoc.total_output_queue_size = 0; 1561 } 1562 } 1563 1564 int 1565 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1566 struct sctp_nets *net, int cnt_of_unconf) 1567 { 1568 int ret; 1569 1570 if (net) { 1571 if (net->hb_responded == 0) { 1572 if (net->ro._s_addr) { 1573 /* 1574 * Invalidate the src address if we did not 1575 * get a response last time. 1576 */ 1577 sctp_free_ifa(net->ro._s_addr); 1578 net->ro._s_addr = NULL; 1579 net->src_addr_selected = 0; 1580 } 1581 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1582 } 1583 /* Zero PBA, if it needs it */ 1584 if (net->partial_bytes_acked) { 1585 net->partial_bytes_acked = 0; 1586 } 1587 } 1588 if ((stcb->asoc.total_output_queue_size > 0) && 1589 (TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1590 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1591 sctp_audit_stream_queues_for_size(inp, stcb); 1592 } 1593 /* Send a new HB, this will do threshold managment, pick a new dest */ 1594 if (cnt_of_unconf == 0) { 1595 if (sctp_send_hb(stcb, 0, NULL) < 0) { 1596 return (1); 1597 } 1598 } else { 1599 /* 1600 * this will send out extra hb's up to maxburst if there are 1601 * any unconfirmed addresses. 1602 */ 1603 uint32_t cnt_sent = 0; 1604 1605 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1606 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1607 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1608 cnt_sent++; 1609 if (net->hb_responded == 0) { 1610 /* Did we respond last time? */ 1611 if (net->ro._s_addr) { 1612 sctp_free_ifa(net->ro._s_addr); 1613 net->ro._s_addr = NULL; 1614 net->src_addr_selected = 0; 1615 } 1616 } 1617 ret = sctp_send_hb(stcb, 1, net); 1618 if (ret < 0) 1619 return 1; 1620 else if (ret == 0) { 1621 break; 1622 } 1623 if (SCTP_BASE_SYSCTL(sctp_hb_maxburst) && 1624 (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))) 1625 break; 1626 } 1627 } 1628 } 1629 return (0); 1630 } 1631 1632 void 1633 sctp_pathmtu_timer(struct sctp_inpcb *inp, 1634 struct sctp_tcb *stcb, 1635 struct sctp_nets *net) 1636 { 1637 uint32_t next_mtu, mtu; 1638 1639 next_mtu = sctp_get_next_mtu(inp, net->mtu); 1640 1641 if ((next_mtu > net->mtu) && (net->port == 0)) { 1642 if ((net->src_addr_selected == 0) || 1643 (net->ro._s_addr == NULL) || 1644 (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1645 if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1646 sctp_free_ifa(net->ro._s_addr); 1647 net->ro._s_addr = NULL; 1648 net->src_addr_selected = 0; 1649 } else if (net->ro._s_addr == NULL) { 1650 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1651 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1652 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1653 1654 /* KAME hack: embed scopeid */ 1655 (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1656 } 1657 #endif 1658 1659 net->ro._s_addr = sctp_source_address_selection(inp, 1660 stcb, 1661 (sctp_route_t *) & net->ro, 1662 net, 0, stcb->asoc.vrf_id); 1663 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1664 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1665 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1666 1667 (void)sa6_recoverscope(sin6); 1668 } 1669 #endif /* INET6 */ 1670 } 1671 if (net->ro._s_addr) 1672 net->src_addr_selected = 1; 1673 } 1674 if (net->ro._s_addr) { 1675 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1676 if (net->port) { 1677 mtu -= sizeof(struct udphdr); 1678 } 1679 if (mtu > next_mtu) { 1680 net->mtu = next_mtu; 1681 } 1682 } 1683 } 1684 /* restart the timer */ 1685 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1686 } 1687 1688 void 1689 sctp_autoclose_timer(struct sctp_inpcb *inp, 1690 struct sctp_tcb *stcb, 1691 struct sctp_nets *net) 1692 { 1693 struct timeval tn, *tim_touse; 1694 struct sctp_association *asoc; 1695 int ticks_gone_by; 1696 1697 (void)SCTP_GETTIME_TIMEVAL(&tn); 1698 if (stcb->asoc.sctp_autoclose_ticks && 1699 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1700 /* Auto close is on */ 1701 asoc = &stcb->asoc; 1702 /* pick the time to use */ 1703 if (asoc->time_last_rcvd.tv_sec > 1704 asoc->time_last_sent.tv_sec) { 1705 tim_touse = &asoc->time_last_rcvd; 1706 } else { 1707 tim_touse = &asoc->time_last_sent; 1708 } 1709 /* Now has long enough transpired to autoclose? */ 1710 ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1711 if ((ticks_gone_by > 0) && 1712 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1713 /* 1714 * autoclose time has hit, call the output routine, 1715 * which should do nothing just to be SURE we don't 1716 * have hanging data. We can then safely check the 1717 * queues and know that we are clear to send 1718 * shutdown 1719 */ 1720 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1721 /* Are we clean? */ 1722 if (TAILQ_EMPTY(&asoc->send_queue) && 1723 TAILQ_EMPTY(&asoc->sent_queue)) { 1724 /* 1725 * there is nothing queued to send, so I'm 1726 * done... 1727 */ 1728 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1729 /* only send SHUTDOWN 1st time thru */ 1730 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1731 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1732 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1733 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1734 } 1735 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1736 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1737 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1738 stcb->sctp_ep, stcb, 1739 asoc->primary_destination); 1740 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1741 stcb->sctp_ep, stcb, 1742 asoc->primary_destination); 1743 } 1744 } 1745 } else { 1746 /* 1747 * No auto close at this time, reset t-o to check 1748 * later 1749 */ 1750 int tmp; 1751 1752 /* fool the timer startup to use the time left */ 1753 tmp = asoc->sctp_autoclose_ticks; 1754 asoc->sctp_autoclose_ticks -= ticks_gone_by; 1755 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1756 net); 1757 /* restore the real tick value */ 1758 asoc->sctp_autoclose_ticks = tmp; 1759 } 1760 } 1761 } 1762