/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#if defined(INET) || defined(INET6)
#include <netinet/udp.h>
#endif


void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}

int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    (void *)net, net->error_count,
		    net->failure_threshold);
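		/*
		 * Two-level thresholds: exceeding failure_threshold marks
		 * the destination unreachable and notifies the ULP, while
		 * exceeding only the lower pf_threshold moves it into the
		 * potentially-failed (PF) state and probes it with an
		 * immediate heartbeat.
		 */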
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				net->dest_state &= ~SCTP_ADDR_PF;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, 0,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		} else if ((net->pf_threshold < net->failure_threshold) &&
		    (net->error_count > net->pf_threshold)) {
			if (!(net->dest_state & SCTP_ADDR_PF)) {
				net->dest_state |= SCTP_ADDR_PF;
				net->last_active = sctp_get_tick_count();
				sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
				sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
			}
		}
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count + 1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count + 1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    (void *)&stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t) threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *op_err;

		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Association error counter exceeded");
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}

/*
 * sctp_find_alternate_net() returns a non-NULL pointer as long as the
 * argument net is non-NULL.
 */
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
	int once;

	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
	int min_errors = -1;
	uint32_t max_cwnd = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	/*
	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
	 * net algorithm. This algorithm chooses the active destination (not
	 * in PF state) with the largest cwnd value. If all destinations are
	 * in PF state, unreachable, or unconfirmed, choose the destination
	 * that is in PF state with the lowest error count. In case of a
	 * tie, choose the destination that was most recently active.
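	 * In short, the preference order is: active (non-PF) destination
	 * with the largest cwnd, then PF destination with the fewest
	 * errors, then the passed-in net itself.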
	 */
	if (mode == 2) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			/*
			 * JRS 5/14/07 - If the destination is unreachable
			 * or unconfirmed, skip it.
			 */
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				continue;
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable
			 * but in PF state, compare the error count of the
			 * destination to the minimum error count seen thus
			 * far. Store the destination with the lower error
			 * count. If the error counts are equal, store the
			 * destination that was most recently active.
			 */
			if (mnet->dest_state & SCTP_ADDR_PF) {
				/*
				 * JRS 5/14/07 - If the destination under
				 * consideration is the current destination,
				 * work as if the error count is one higher.
				 * The actual error count will not be
				 * incremented until later in the t3
				 * handler.
				 */
				if (mnet == net) {
					if (min_errors == -1) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 < min_errors) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 == min_errors &&
					    mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count + 1;
					}
					continue;
				} else {
					if (min_errors == -1) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count < min_errors) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count == min_errors &&
					    mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count;
					}
					continue;
				}
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable and
			 * not in PF state, compare the cwnd of the
			 * destination to the highest cwnd seen thus far.
			 * Store the destination with the higher cwnd value.
			 * If the cwnd values are equal, randomly choose one
			 * of the two destinations.
			 */
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2 == 1) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;	/* Useless? */
				}
			}
		}
		if (max_cwnd_net == NULL) {
			if (min_errors_net == NULL) {
				return (net);
			}
			return (min_errors_net);
		} else {
			return (max_cwnd_net);
		}
	}
	/*
	 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
	 * choosing an alternate net.
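	 * Only the cwnd is compared here; unreachable and unconfirmed
	 * destinations are skipped, and if no candidate is found the
	 * round-robin fallback below is used instead.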
	 */
	else if (mode == 1) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/*
				 * will skip ones that are not-reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;
				}
			}
		}
		if (max_cwnd_net) {
			return (max_cwnd_net);
		}
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
		if (mnet == NULL) {
			return (NULL);
		}
	}
	for (;;) {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
			if (alt == NULL) {
				return (NULL);
			}
		}
		if (alt->ro.ro_rt == NULL) {
			if (alt->ro._s_addr) {
				sctp_free_ifa(alt->ro._s_addr);
				alt->ro._s_addr = NULL;
			}
			alt->src_addr_selected = 0;
		}
		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	}

	if (alt == NULL) {
		/* Case where NO in-service network exists (dormant state) */
		/* we rotate destinations */
		once = 0;
		mnet = net;
		for (;;) {
			if (mnet == NULL) {
				return (TAILQ_FIRST(&stcb->asoc.nets));
			}
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
				if (alt == NULL) {
					break;
				}
			}
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		}
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}

static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked, int num_abandoned)
{
	if (net->RTO == 0) {
		net->RTO = stcb->asoc.minrto;
	}
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && (num_marked || num_abandoned)) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}

#ifndef INVARIANTS
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_association *asoc;

	asoc = &stcb->asoc;
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) {
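			/*
			 * Chunks at or below the cumulative ack point
			 * should already have been removed from the sent
			 * queue; purge any stragglers here so the queue is
			 * consistent again.
			 */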
			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
			    (void *)chk, chk->rec.data.TSN_seq, asoc->last_acked_seq);
			if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
				}
			}
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			if (PR_SCTP_ENABLED(chk->flags)) {
				if (asoc->pr_sctp_cnt != 0)
					asoc->pr_sctp_cnt--;
			}
			if (chk->data) {
				/* sa_ignore NO_NULL_CHK */
				sctp_free_bufspace(stcb, asoc, chk, 1);
				sctp_m_freem(chk->data);
				chk->data = NULL;
				if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
					asoc->sent_queue_cnt_removeable--;
				}
			}
			asoc->sent_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
	SCTP_PRINTF("after recover order is as follows\n");
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.TSN_seq);
	}
}

#endif

static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked,
    int *num_abandoned)
{
	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for their destination as
	 * well... We only mark chunks that have been outstanding long
	 * enough to have received feedback.
	 */
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rto;
	int cnt_abandoned;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
	int recovery_cnt = 0;

	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	cur_rto *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rto / 1000000;
	tv.tv_usec = cur_rto % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
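	/*
	 * Snapshot the per-net and total flight sizes; the audit check
	 * after the marking loop compares how much each was reduced and
	 * flags a full flight-size rebuild if they disagree.
	 */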
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	cnt_abandoned = 0;
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
			    (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
			recovery_cnt++;
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate it's been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.TSN_seq,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    1,
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
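				/*
				 * For the RTX policy, timetodrop.tv_sec
				 * carries the retransmission limit rather
				 * than a timestamp, hence the comparison
				 * against snd_count below.
				 */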
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    1,
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}
				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */

			if (chk->do_rtt) {
				if (chk->whoTo->rto_needed == 0) {
					chk->whoTo->rto_needed = 1;
				}
			}
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (stcb->asoc.sctp_cmt_on_off > 0) {
				chk->no_fr_allowed = 1;
			}
#ifdef THIS_SHOULD_NOT_BE_DONE
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
#endif
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
		    num_mk, (u_long)stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	*num_abandoned = cnt_abandoned;
	/*
	 * Now check for an ECN Echo that may be stranded, and include the
	 * cnt_mk'd to have all resends in the control queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cnt_mk++;
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
#ifdef THIS_SHOULD_NOT_BE_DONE
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
#endif
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	if (audit_tf) {
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    (void *)net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    (void *)lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/* The return value is unused; callers ignore it. */
	return (0);
}

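/*
 * T3-rtx timer expiration: performs threshold management, picks an
 * alternate destination, marks outstanding chunks on the timed-out
 * destination for retransmission, and backs off the RTO and cwnd.
 */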
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk, num_abandoned;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
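	/*
	 * A window probe is outstanding when the peer has closed its rwnd
	 * and we have less than one MTU in flight; such a timeout reflects
	 * probing rather than loss.
	 */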
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send an immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((net->dest_state & SCTP_ADDR_PF) == 0) {
					if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
						/*
						 * no recent feedback in an
						 * RTO or more, request an
						 * RTT update
						 */
						sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
					}
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the nets but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If SACKs are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (stcb->asoc.sctp_cmt_on_off > 0) {
		if (net->pf_threshold < net->failure_threshold) {
			alt = sctp_find_alternate_net(stcb, net, 2);
		} else {
			/*
			 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is
			 * being used, then pick dest with largest ssthresh
			 * for any retransmission.
			 */
			alt = sctp_find_alternate_net(stcb, net, 1);
			/*
			 * CUCv2: If a different dest is picked for the
			 * retransmission, then new (rtx-)pseudo_cumack
			 * needs to be tracked for orig dest. Let CUCv2
			 * track new (rtx-) pseudo-cumack always.
			 */
			net->find_pseudo_cumack = 1;
			net->find_rtx_pseudo_cumack = 1;
		}
	} else {
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	num_mk = 0;
	num_abandoned = 0;
	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
	    &num_mk, &num_abandoned);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;
	if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
	    (net->flight_size == 0)) {
		(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
	}
	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
	if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
	    (net->dest_state & SCTP_ADDR_PF)) {
		/* Move all pending over too */
		sctp_move_chunks_from_net(stcb, net);

		/*
		 * Get the address that failed, to force a new src address
		 * selection and a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate.
			 * Note: this means the HB code must use this to
			 * resend the primary if it goes active AND if
			 * someone does a change-primary then this flag must
			 * be cleared from any net structures.
			 */
			if (stcb->asoc.alternate) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
		}
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.prsctp_supported) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}

int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	}
	if (SCTP_GET_STATE(&stcb->asoc) != SCTP_STATE_COOKIE_WAIT) {
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	    stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if (alt != stcb->asoc.primary_destination) {
			sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net SCTP_UNUSED)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
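			/*
			 * In COOKIE_ECHOED state a COOKIE-ECHO chunk must
			 * be on the control queue; not finding one is an
			 * internal inconsistency, so abort the association.
			 */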
			struct mbuf *op_err;

			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "Cookie timer expired, but no cookie");
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now let's backoff the address and
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now let's backoff the address and
	 * select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if an ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_chunks_from_net(stcb, net);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

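/*
 * ASCONF timer: if the ASCONF queue is empty a new ASCONF is composed and
 * sent; otherwise the queued ASCONF is retransmitted to an alternate
 * destination after threshold management and backoff. A peer that never
 * responds to repeated ASCONFs is marked ASCONF-incapable.
 */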
int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding
			 * to ASCONFs but apparently is to other chunks.
			 * i.e. it is not properly handling the chunk type
			 * upper bits. Mark this peer as ASCONF incapable
			 * and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		if (asconf->whoTo != alt) {
			sctp_free_remote_addr(asconf->whoTo);
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
		}
		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_chunks_from_net(stcb, net);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;

		/* send another ASCONF if any and we can do so */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}

/* Mobility adaptation */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net SCTP_UNUSED)
{
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
	return;
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 * It is assumed that net is non-NULL.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for our net */
	sctp_send_shutdown(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown-ack into the queue for our net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

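/*
 * Sanity-check the stream output queues when the send and sent queues are
 * empty but the total output queue size is non-zero: reconcile the queued
 * chunk accounting and, if data really is waiting, kick the output path.
 */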
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_queue_pending *sp;
	unsigned int i, chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
		/* No stream scheduler information, initialize scheduler */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0);
		if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
			/* yep, we lost a stream or two */
			SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
		} else {
			/* no streams lost */
			stcb->asoc.total_output_queue_size = 0;
		}
	}
	/* Check to see if some data queued, if so report it */
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
			TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

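/*
 * Heartbeat timer: if the last heartbeat went unanswered, back off the RTO
 * and run threshold management; then send a new heartbeat if the
 * destination allows them and enough time has passed (or it is in PF
 * state).
 */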
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint8_t net_was_pf;

	if (net->dest_state & SCTP_ADDR_PF) {
		net_was_pf = 1;
	} else {
		net_was_pf = 0;
	}
	if (net->hb_responded == 0) {
		if (net->ro._s_addr) {
			/*
			 * Invalidate the src address if we did not get a
			 * response last time.
			 */
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
			net->src_addr_selected = 0;
		}
		sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
	}
	/* Zero PBA, if it needs it */
	if (net->partial_bytes_acked) {
		net->partial_bytes_acked = 0;
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	if (!(net->dest_state & SCTP_ADDR_NOHB) &&
	    !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) {
		/*
		 * When moving to PF during threshold management, a HB has
		 * already been queued in that routine.
		 */
		uint32_t ms_gone_by;

		if ((net->last_sent_time.tv_sec > 0) ||
		    (net->last_sent_time.tv_usec > 0)) {
			struct timeval diff;

			SCTP_GETTIME_TIMEVAL(&diff);
			timevalsub(&diff, &net->last_sent_time);
			ms_gone_by = (uint32_t) (diff.tv_sec * 1000) +
			    (uint32_t) (diff.tv_usec / 1000);
		} else {
			ms_gone_by = 0xffffffff;
		}
		if ((ms_gone_by >= net->heart_beat_delay) ||
		    (net->dest_state & SCTP_ADDR_PF)) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
	}
	return (0);
}

void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_get_next_mtu(net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *)&net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
#if defined(INET) || defined(INET6)
			if (net->port) {
				mtu -= sizeof(struct udphdr);
			}
#endif
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					struct sctp_nets *netp;

					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_stop_timers_for_shutdown(stcb);
					if (stcb->asoc.alternate) {
						netp = stcb->asoc.alternate;
					} else {
						netp = stcb->asoc.primary_destination;
					}
					sctp_send_shutdown(stcb, netp);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    netp);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    netp);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}