/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <netinet/udp.h>


void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm, no RTT estimate yet? */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {
		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
	if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
		cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
	}
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
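	/*
	 * Editor's note (illustrative, not from the original source): with
	 * the usual shifted fixed-point encoding of lastsa/lastsv, the
	 * expression above yields an RTT estimate in milliseconds.  For
	 * example, an estimate of roughly 120 ms becomes cur_rtt = 120000
	 * microseconds, so min_wait = now - 120 ms.  Only chunks sent
	 * before min_wait (i.e. outstanding for about one RTT or more)
	 * are candidates for early-FR marking in the scan below.
	 */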
	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    4, SCTP_FR_MARKED_EARLY);
			}
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Mark all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
		/*
		 * JRS - Use the congestion control given in the congestion
		 * control module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
	} else if (cnt_resend) {
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	}
	/* Restart it? */
	if (net->flight_size < net->cwnd) {
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}
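/*
 * Editor's note (summary added for clarity): the audit below does not
 * trust the cached counters; it zeroes sent_queue_retran_cnt and
 * sent_queue_cnt and recounts them from scratch by walking the sent,
 * control and ASCONF send queues, so any drift is corrected.
 */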
void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}
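/*
 * Editor's note (summary added for clarity): sctp_threshold_management()
 * below bumps the per-destination error count (when net != NULL) and the
 * association-wide overall_error_count, marks a destination unreachable
 * once its own failure_threshold is crossed, and returns 1 only when the
 * whole association exceeds `threshold` and is aborted; callers must not
 * touch the stcb again after a return of 1.
 */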
int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    net, net->error_count,
		    net->failure_threshold);
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				/*
				 * JRS 5/14/07 - If a destination is
				 * unreachable, the PF bit is turned off.
				 * This allows an unambiguous use of the PF
				 * bit for destinations that are reachable
				 * but potentially failed. If the
				 * destination is set to the unreachable
				 * state, also set the destination to the PF
				 * state.
				 */
				/*
				 * Add debug message here if destination is
				 * not in PF state.
				 */
				/* Stop any running T3 timers here? */
				if ((stcb->asoc.sctp_cmt_on_off == 1) &&
				    (stcb->asoc.sctp_cmt_pf > 0)) {
					net->dest_state &= ~SCTP_ADDR_PF;
					SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
					    net);
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count + 1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count + 1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    &stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t) threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
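/*
 * Editor's note (summary added for clarity): sctp_find_alternate_net()
 * below implements three selection policies keyed off `mode`: mode 2
 * (CMT-PF) prefers the active destination with the largest cwnd and
 * falls back to the PF destination with the fewest errors; mode 1
 * (plain CMT) picks the reachable, confirmed destination with the
 * largest cwnd, breaking ties randomly; mode 0 simply walks the net
 * list round-robin looking for a usable alternate.
 */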
324 */ 325 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 326 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 327 continue; 328 } 329 /* 330 * JRS 5/14/07 - If the destination is reachable 331 * but in PF state, compare the error count of the 332 * destination to the minimum error count seen thus 333 * far. Store the destination with the lower error 334 * count. If the error counts are equal, store the 335 * destination that was most recently active. 336 */ 337 if (mnet->dest_state & SCTP_ADDR_PF) { 338 /* 339 * JRS 5/14/07 - If the destination under 340 * consideration is the current destination, 341 * work as if the error count is one higher. 342 * The actual error count will not be 343 * incremented until later in the t3 344 * handler. 345 */ 346 if (mnet == net) { 347 if (min_errors == -1) { 348 min_errors = mnet->error_count + 1; 349 min_errors_net = mnet; 350 } else if (mnet->error_count + 1 < min_errors) { 351 min_errors = mnet->error_count + 1; 352 min_errors_net = mnet; 353 } else if (mnet->error_count + 1 == min_errors 354 && mnet->last_active > min_errors_net->last_active) { 355 min_errors_net = mnet; 356 min_errors = mnet->error_count + 1; 357 } 358 continue; 359 } else { 360 if (min_errors == -1) { 361 min_errors = mnet->error_count; 362 min_errors_net = mnet; 363 } else if (mnet->error_count < min_errors) { 364 min_errors = mnet->error_count; 365 min_errors_net = mnet; 366 } else if (mnet->error_count == min_errors 367 && mnet->last_active > min_errors_net->last_active) { 368 min_errors_net = mnet; 369 min_errors = mnet->error_count; 370 } 371 continue; 372 } 373 } 374 /* 375 * JRS 5/14/07 - If the destination is reachable and 376 * not in PF state, compare the cwnd of the 377 * destination to the highest cwnd seen thus far. 378 * Store the destination with the higher cwnd value. 379 * If the cwnd values are equal, randomly choose one 380 * of the two destinations. 381 */ 382 if (max_cwnd < mnet->cwnd) { 383 max_cwnd_net = mnet; 384 max_cwnd = mnet->cwnd; 385 } else if (max_cwnd == mnet->cwnd) { 386 uint32_t rndval; 387 uint8_t this_random; 388 389 if (stcb->asoc.hb_random_idx > 3) { 390 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 391 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 392 this_random = stcb->asoc.hb_random_values[0]; 393 stcb->asoc.hb_random_idx++; 394 stcb->asoc.hb_ect_randombit = 0; 395 } else { 396 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 397 stcb->asoc.hb_random_idx++; 398 stcb->asoc.hb_ect_randombit = 0; 399 } 400 if (this_random % 2 == 1) { 401 max_cwnd_net = mnet; 402 max_cwnd = mnet->cwnd; /* Useless? */ 403 } 404 } 405 } 406 /* 407 * JRS 5/14/07 - After all destination have been considered 408 * as alternates, check to see if there was some active 409 * destination (not in PF state). If not, check to see if 410 * there was some PF destination with the minimum number of 411 * errors. If not, return the original destination. If 412 * there is a min_errors_net, remove the PF flag from that 413 * destination, set the cwnd to one or two MTUs, and return 414 * the destination as an alt. If there was some active 415 * destination with a highest cwnd, return the destination 416 * as an alt. 
417 */ 418 if (max_cwnd_net == NULL) { 419 if (min_errors_net == NULL) { 420 return (net); 421 } 422 min_errors_net->dest_state &= ~SCTP_ADDR_PF; 423 min_errors_net->cwnd = min_errors_net->mtu * stcb->asoc.sctp_cmt_pf; 424 if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 425 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 426 stcb, min_errors_net, 427 SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 428 } 429 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 430 min_errors_net, min_errors_net->error_count); 431 return (min_errors_net); 432 } else { 433 return (max_cwnd_net); 434 } 435 } 436 /* 437 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 438 * choosing an alternate net. 439 */ 440 else if (mode == 1) { 441 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 442 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 443 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED) 444 ) { 445 /* 446 * will skip ones that are not-reachable or 447 * unconfirmed 448 */ 449 continue; 450 } 451 if (max_cwnd < mnet->cwnd) { 452 max_cwnd_net = mnet; 453 max_cwnd = mnet->cwnd; 454 } else if (max_cwnd == mnet->cwnd) { 455 uint32_t rndval; 456 uint8_t this_random; 457 458 if (stcb->asoc.hb_random_idx > 3) { 459 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 460 memcpy(stcb->asoc.hb_random_values, &rndval, 461 sizeof(stcb->asoc.hb_random_values)); 462 this_random = stcb->asoc.hb_random_values[0]; 463 stcb->asoc.hb_random_idx = 0; 464 stcb->asoc.hb_ect_randombit = 0; 465 } else { 466 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 467 stcb->asoc.hb_random_idx++; 468 stcb->asoc.hb_ect_randombit = 0; 469 } 470 if (this_random % 2) { 471 max_cwnd_net = mnet; 472 max_cwnd = mnet->cwnd; 473 } 474 } 475 } 476 if (max_cwnd_net) { 477 return (max_cwnd_net); 478 } 479 } 480 mnet = net; 481 once = 0; 482 483 if (mnet == NULL) { 484 mnet = TAILQ_FIRST(&stcb->asoc.nets); 485 } 486 do { 487 alt = TAILQ_NEXT(mnet, sctp_next); 488 if (alt == NULL) { 489 once++; 490 if (once > 1) { 491 break; 492 } 493 alt = TAILQ_FIRST(&stcb->asoc.nets); 494 } 495 if (alt->ro.ro_rt == NULL) { 496 if (alt->ro._s_addr) { 497 sctp_free_ifa(alt->ro._s_addr); 498 alt->ro._s_addr = NULL; 499 } 500 alt->src_addr_selected = 0; 501 } 502 if ( 503 ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 504 (alt->ro.ro_rt != NULL) && 505 /* sa_ignore NO_NULL_CHK */ 506 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 507 ) { 508 /* Found a reachable address */ 509 break; 510 } 511 mnet = alt; 512 } while (alt != NULL); 513 514 if (alt == NULL) { 515 /* Case where NO insv network exists (dormant state) */ 516 /* we rotate destinations */ 517 once = 0; 518 mnet = net; 519 do { 520 alt = TAILQ_NEXT(mnet, sctp_next); 521 if (alt == NULL) { 522 once++; 523 if (once > 1) { 524 break; 525 } 526 alt = TAILQ_FIRST(&stcb->asoc.nets); 527 } 528 /* sa_ignore NO_NULL_CHK */ 529 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 530 (alt != net)) { 531 /* Found an alternate address */ 532 break; 533 } 534 mnet = alt; 535 } while (alt != NULL); 536 } 537 if (alt == NULL) { 538 return (net); 539 } 540 return (alt); 541 } 542 543 544 545 static void 546 sctp_backoff_on_timeout(struct sctp_tcb *stcb, 547 struct sctp_nets *net, 548 int win_probe, 549 int num_marked, int num_abandoned) 550 { 551 if (net->RTO == 0) { 552 net->RTO = stcb->asoc.minrto; 553 } 554 net->RTO <<= 1; 555 if (net->RTO > stcb->asoc.maxrto) { 556 net->RTO = stcb->asoc.maxrto; 557 } 558 if 
static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked, int num_abandoned)
{
	if (net->RTO == 0) {
		net->RTO = stcb->asoc.minrto;
	}
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && (num_marked || num_abandoned)) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}

#ifndef INVARIANTS
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct sctp_association *asoc;

	asoc = &stcb->asoc;
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {

			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
			    chk, chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq);
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			if (chk->pr_sctp_on) {
				if (asoc->pr_sctp_cnt != 0)
					asoc->pr_sctp_cnt--;
			}
			if (chk->data) {
				/* sa_ignore NO_NULL_CHK */
				sctp_free_bufspace(stcb, asoc, chk, 1);
				sctp_m_freem(chk->data);
				if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) {
					asoc->sent_queue_cnt_removeable--;
				}
			}
			chk->data = NULL;
			asoc->sent_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
		}
	}
	SCTP_PRINTF("after recover order is as follows\n");
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq);
	}
}

#endif
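/*
 * Editor's note (summary added for clarity): sctp_mark_all_for_resend()
 * walks the sent queue and flags for retransmission every chunk bound
 * for *net that has been outstanding for roughly one RTT or more,
 * re-homing it to *alt, abandoning PR-SCTP chunks whose lifetime or
 * retransmit budget is spent, and reporting the counts back through
 * num_marked/num_abandoned so the T3 handler can drive the backoff.
 */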
static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked,
    int *num_abandoned)
{

	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for their destination as well...
	 * We only mark chunks that have been outstanding long enough to
	 * have received feedback.
	 */
	struct sctp_tmit_chunk *chk, *tp2;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int cnt_abandoned;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
	int recovery_cnt = 0;


	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size,
		    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
		    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
		    SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	cnt_abandoned = 0;
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x",
			    (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
			recovery_cnt++;
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate its been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
				sctp_log_fr(chk->rec.data.TSN_seq,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the microseconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
						sctp_log_fr(0,
						    chk->sent_rcv_time.tv_sec,
						    chk->sent_rcv_time.tv_usec,
						    SCTP_FR_T3_STOPPED);
					}
					continue;
				}
			}
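			/*
			 * Editor's note (summary of the RFC 3758 PR-SCTP
			 * behaviour as used here): the two checks below
			 * abandon a chunk instead of queueing it again,
			 * either because its time-to-live has lapsed
			 * (timed reliability) or because snd_count has
			 * exhausted the retransmission budget that the
			 * RTX policy keeps in timetodrop.tv_sec.
			 */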
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}
				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (stcb->asoc.sctp_cmt_on_off == 1) {
				chk->no_fr_allowed = 1;
			}
#ifdef THIS_SHOULD_NOT_BE_DONE
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
#endif
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
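	/*
	 * Editor's note (added for clarity): every flight-size decrement
	 * above was mirrored on the association total, so the per-net and
	 * total deltas must match; if they do not, audit_tf forces the
	 * full rebuild of all flight sizes further below.
	 */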
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
		    num_mk, (u_long)stcb->asoc.peers_rwnd);
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
		    num_mk,
		    (int)stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	*num_abandoned = cnt_abandoned;
	/*
	 * Now check for an ECN Echo that may be stranded And include the
	 * cnt_mk'd to have all resends in the control queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cnt_mk++;
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
#ifdef THIS_SHOULD_NOT_BE_DONE
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
#endif
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	if (audit_tf) {
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/*
	 * Setup the ecn nonce re-sync point. We do this since
	 * retransmissions are NOT set up for ECN. This means that due to
	 * Karn's rule, we don't know the total of the peer's ECN bits.
	 */
950 */ 951 chk = TAILQ_FIRST(&stcb->asoc.send_queue); 952 if (chk == NULL) { 953 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 954 } else { 955 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq; 956 } 957 stcb->asoc.nonce_wait_for_ecne = 0; 958 stcb->asoc.nonce_sum_check = 0; 959 /* We return 1 if we only have a window probe outstanding */ 960 return (0); 961 } 962 963 static void 964 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb, 965 struct sctp_nets *net, 966 struct sctp_nets *alt) 967 { 968 struct sctp_association *asoc; 969 struct sctp_stream_out *outs; 970 struct sctp_tmit_chunk *chk; 971 struct sctp_stream_queue_pending *sp; 972 973 if (net == alt) 974 /* nothing to do */ 975 return; 976 977 asoc = &stcb->asoc; 978 979 /* 980 * now through all the streams checking for chunks sent to our bad 981 * network. 982 */ 983 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 984 /* now clean up any chunks here */ 985 TAILQ_FOREACH(sp, &outs->outqueue, next) { 986 if (sp->net == net) { 987 sctp_free_remote_addr(sp->net); 988 sp->net = alt; 989 atomic_add_int(&alt->ref_count, 1); 990 } 991 } 992 } 993 /* Now check the pending queue */ 994 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 995 if (chk->whoTo == net) { 996 sctp_free_remote_addr(chk->whoTo); 997 chk->whoTo = alt; 998 atomic_add_int(&alt->ref_count, 1); 999 } 1000 } 1001 1002 } 1003 1004 int 1005 sctp_t3rxt_timer(struct sctp_inpcb *inp, 1006 struct sctp_tcb *stcb, 1007 struct sctp_nets *net) 1008 { 1009 struct sctp_nets *alt; 1010 int win_probe, num_mk, num_abandoned; 1011 1012 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1013 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 1014 } 1015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1016 struct sctp_nets *lnet; 1017 1018 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1019 if (net == lnet) { 1020 sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 1021 } else { 1022 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 1023 } 1024 } 1025 } 1026 /* Find an alternate and mark those for retransmission */ 1027 if ((stcb->asoc.peers_rwnd == 0) && 1028 (stcb->asoc.total_flight < net->mtu)) { 1029 SCTP_STAT_INCR(sctps_timowindowprobe); 1030 win_probe = 1; 1031 } else { 1032 win_probe = 0; 1033 } 1034 1035 /* 1036 * JRS 5/14/07 - If CMT PF is on and the destination if not already 1037 * in PF state, set the destination to PF state and store the 1038 * current time as the time that the destination was last active. In 1039 * addition, find an alternate destination with PF-based 1040 * find_alt_net(). 1041 */ 1042 if ((stcb->asoc.sctp_cmt_on_off == 1) && 1043 (stcb->asoc.sctp_cmt_pf > 0)) { 1044 if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 1045 net->dest_state |= SCTP_ADDR_PF; 1046 net->last_active = sctp_get_tick_count(); 1047 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 1048 net); 1049 } 1050 alt = sctp_find_alternate_net(stcb, net, 2); 1051 } else if (stcb->asoc.sctp_cmt_on_off == 1) { 1052 /* 1053 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 1054 * used, then pick dest with largest ssthresh for any 1055 * retransmission. 1056 */ 1057 alt = net; 1058 alt = sctp_find_alternate_net(stcb, alt, 1); 1059 /* 1060 * CUCv2: If a different dest is picked for the 1061 * retransmission, then new (rtx-)pseudo_cumack needs to be 1062 * tracked for orig dest. Let CUCv2 track new (rtx-) 1063 * pseudo-cumack always. 
1064 */ 1065 net->find_pseudo_cumack = 1; 1066 net->find_rtx_pseudo_cumack = 1; 1067 } else { /* CMT is OFF */ 1068 alt = sctp_find_alternate_net(stcb, net, 0); 1069 } 1070 num_mk = 0; 1071 num_abandoned = 0; 1072 (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, 1073 &num_mk, &num_abandoned); 1074 /* FR Loss recovery just ended with the T3. */ 1075 stcb->asoc.fast_retran_loss_recovery = 0; 1076 1077 /* CMT FR loss recovery ended with the T3 */ 1078 net->fast_retran_loss_recovery = 0; 1079 1080 /* 1081 * setup the sat loss recovery that prevents satellite cwnd advance. 1082 */ 1083 stcb->asoc.sat_t3_loss_recovery = 1; 1084 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1085 1086 /* Backoff the timer and cwnd */ 1087 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); 1088 if (win_probe == 0) { 1089 /* We don't do normal threshold management on window probes */ 1090 if (sctp_threshold_management(inp, stcb, net, 1091 stcb->asoc.max_send_times)) { 1092 /* Association was destroyed */ 1093 return (1); 1094 } else { 1095 if (net != stcb->asoc.primary_destination) { 1096 /* send a immediate HB if our RTO is stale */ 1097 struct timeval now; 1098 unsigned int ms_goneby; 1099 1100 (void)SCTP_GETTIME_TIMEVAL(&now); 1101 if (net->last_sent_time.tv_sec) { 1102 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1103 } else { 1104 ms_goneby = 0; 1105 } 1106 if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1107 /* 1108 * no recent feed back in an RTO or 1109 * more, request a RTT update 1110 */ 1111 if (sctp_send_hb(stcb, 1, net) < 0) 1112 /* 1113 * Less than 0 means we lost 1114 * the assoc 1115 */ 1116 return (1); 1117 } 1118 } 1119 } 1120 } else { 1121 /* 1122 * For a window probe we don't penalize the net's but only 1123 * the association. This may fail it if SACKs are not coming 1124 * back. If sack's are coming with rwnd locked at 0, we will 1125 * continue to hold things waiting for rwnd to raise 1126 */ 1127 if (sctp_threshold_management(inp, stcb, NULL, 1128 stcb->asoc.max_send_times)) { 1129 /* Association was destroyed */ 1130 return (1); 1131 } 1132 } 1133 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1134 /* Move all pending over too */ 1135 sctp_move_all_chunks_to_alt(stcb, net, alt); 1136 1137 /* 1138 * Get the address that failed, to force a new src address 1139 * selecton and a route allocation. 1140 */ 1141 if (net->ro._s_addr) { 1142 sctp_free_ifa(net->ro._s_addr); 1143 net->ro._s_addr = NULL; 1144 } 1145 net->src_addr_selected = 0; 1146 1147 /* Force a route allocation too */ 1148 if (net->ro.ro_rt) { 1149 RTFREE(net->ro.ro_rt); 1150 net->ro.ro_rt = NULL; 1151 } 1152 /* Was it our primary? */ 1153 if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1154 /* 1155 * Yes, note it as such and find an alternate note: 1156 * this means HB code must use this to resent the 1157 * primary if it goes active AND if someone does a 1158 * change-primary then this flag must be cleared 1159 * from any net structures. 1160 */ 1161 if (sctp_set_primary_addr(stcb, 1162 (struct sockaddr *)NULL, 1163 alt) == 0) { 1164 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1165 } 1166 } 1167 } else if ((stcb->asoc.sctp_cmt_on_off == 1) && 1168 (stcb->asoc.sctp_cmt_pf > 0) && 1169 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 1170 /* 1171 * JRS 5/14/07 - If the destination hasn't failed completely 1172 * but is in PF state, a PF-heartbeat needs to be sent 1173 * manually. 
1174 */ 1175 if (sctp_send_hb(stcb, 1, net) < 0) 1176 /* Return less than 0 means we lost the association */ 1177 return (1); 1178 } 1179 /* 1180 * Special case for cookie-echo'ed case, we don't do output but must 1181 * await the COOKIE-ACK before retransmission 1182 */ 1183 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1184 /* 1185 * Here we just reset the timer and start again since we 1186 * have not established the asoc 1187 */ 1188 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1189 return (0); 1190 } 1191 if (stcb->asoc.peer_supports_prsctp) { 1192 struct sctp_tmit_chunk *lchk; 1193 1194 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1195 /* C3. See if we need to send a Fwd-TSN */ 1196 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point, 1197 stcb->asoc.last_acked_seq, MAX_TSN)) { 1198 /* 1199 * ISSUE with ECN, see FWD-TSN processing for notes 1200 * on issues that will occur when the ECN NONCE 1201 * stuff is put into SCTP for cross checking. 1202 */ 1203 send_forward_tsn(stcb, &stcb->asoc); 1204 if (lchk) { 1205 /* Assure a timer is up */ 1206 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1207 } 1208 } 1209 } 1210 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1211 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1212 } 1213 return (0); 1214 } 1215 1216 int 1217 sctp_t1init_timer(struct sctp_inpcb *inp, 1218 struct sctp_tcb *stcb, 1219 struct sctp_nets *net) 1220 { 1221 /* bump the thresholds */ 1222 if (stcb->asoc.delayed_connection) { 1223 /* 1224 * special hook for delayed connection. The library did NOT 1225 * complete the rest of its sends. 1226 */ 1227 stcb->asoc.delayed_connection = 0; 1228 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1229 return (0); 1230 } 1231 if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1232 return (0); 1233 } 1234 if (sctp_threshold_management(inp, stcb, net, 1235 stcb->asoc.max_init_times)) { 1236 /* Association was destroyed */ 1237 return (1); 1238 } 1239 stcb->asoc.dropped_special_cnt = 0; 1240 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1241 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1242 net->RTO = stcb->asoc.initial_init_rto_max; 1243 } 1244 if (stcb->asoc.numnets > 1) { 1245 /* If we have more than one addr use it */ 1246 struct sctp_nets *alt; 1247 1248 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1249 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) { 1250 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt); 1251 stcb->asoc.primary_destination = alt; 1252 } 1253 } 1254 /* Send out a new init */ 1255 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1256 return (0); 1257 } 1258 1259 /* 1260 * For cookie and asconf we actually need to find and mark for resend, then 1261 * increment the resend counter (after all the threshold management stuff of 1262 * course). 1263 */ 1264 int 1265 sctp_cookie_timer(struct sctp_inpcb *inp, 1266 struct sctp_tcb *stcb, 1267 struct sctp_nets *net) 1268 { 1269 struct sctp_nets *alt; 1270 struct sctp_tmit_chunk *cookie; 1271 1272 /* first before all else we must find the cookie */ 1273 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1274 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1275 break; 1276 } 1277 } 1278 if (cookie == NULL) { 1279 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1280 /* FOOBAR! 
/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now let's backoff the address and
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again. Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
1337 */ 1338 return (0); 1339 } 1340 1341 int 1342 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1343 struct sctp_nets *net) 1344 { 1345 struct sctp_nets *alt; 1346 struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1347 1348 if (stcb->asoc.stream_reset_outstanding == 0) { 1349 return (0); 1350 } 1351 /* find the existing STRRESET, we use the seq number we sent out on */ 1352 (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1353 if (strrst == NULL) { 1354 return (0); 1355 } 1356 /* do threshold management */ 1357 if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1358 stcb->asoc.max_send_times)) { 1359 /* Assoc is over */ 1360 return (1); 1361 } 1362 /* 1363 * cleared theshold management now lets backoff the address & select 1364 * an alternate 1365 */ 1366 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); 1367 alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1368 sctp_free_remote_addr(strrst->whoTo); 1369 strrst->whoTo = alt; 1370 atomic_add_int(&alt->ref_count, 1); 1371 1372 /* See if a ECN Echo is also stranded */ 1373 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1374 if ((chk->whoTo == net) && 1375 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1376 sctp_free_remote_addr(chk->whoTo); 1377 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1378 chk->sent = SCTP_DATAGRAM_RESEND; 1379 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1380 } 1381 chk->whoTo = alt; 1382 atomic_add_int(&alt->ref_count, 1); 1383 } 1384 } 1385 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1386 /* 1387 * If the address went un-reachable, we need to move to 1388 * alternates for ALL chk's in queue 1389 */ 1390 sctp_move_all_chunks_to_alt(stcb, net, alt); 1391 } 1392 /* mark the retran info */ 1393 if (strrst->sent != SCTP_DATAGRAM_RESEND) 1394 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1395 strrst->sent = SCTP_DATAGRAM_RESEND; 1396 1397 /* restart the timer */ 1398 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1399 return (0); 1400 } 1401 1402 int 1403 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1404 struct sctp_nets *net) 1405 { 1406 struct sctp_nets *alt; 1407 struct sctp_tmit_chunk *asconf, *chk, *nchk; 1408 1409 /* is this a first send, or a retransmission? */ 1410 if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1411 /* compose a new ASCONF chunk and send it */ 1412 sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1413 } else { 1414 /* 1415 * Retransmission of the existing ASCONF is needed 1416 */ 1417 1418 /* find the existing ASCONF */ 1419 asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1420 if (asconf == NULL) { 1421 return (0); 1422 } 1423 /* do threshold management */ 1424 if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1425 stcb->asoc.max_send_times)) { 1426 /* Assoc is over */ 1427 return (1); 1428 } 1429 if (asconf->snd_count > stcb->asoc.max_send_times) { 1430 /* 1431 * Something is rotten: our peer is not responding 1432 * to ASCONFs but apparently is to other chunks. 1433 * i.e. it is not properly handling the chunk type 1434 * upper bits. Mark this peer as ASCONF incapable 1435 * and cleanup. 
1436 */ 1437 SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1438 sctp_asconf_cleanup(stcb, net); 1439 return (0); 1440 } 1441 /* 1442 * cleared threshold management, so now backoff the net and 1443 * select an alternate 1444 */ 1445 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); 1446 alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1447 if (asconf->whoTo != alt) { 1448 sctp_free_remote_addr(asconf->whoTo); 1449 asconf->whoTo = alt; 1450 atomic_add_int(&alt->ref_count, 1); 1451 } 1452 /* See if an ECN Echo is also stranded */ 1453 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1454 if ((chk->whoTo == net) && 1455 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1456 sctp_free_remote_addr(chk->whoTo); 1457 chk->whoTo = alt; 1458 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1459 chk->sent = SCTP_DATAGRAM_RESEND; 1460 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1461 } 1462 atomic_add_int(&alt->ref_count, 1); 1463 } 1464 } 1465 for (chk = asconf; chk; chk = nchk) { 1466 nchk = TAILQ_NEXT(chk, sctp_next); 1467 if (chk->whoTo != alt) { 1468 sctp_free_remote_addr(chk->whoTo); 1469 chk->whoTo = alt; 1470 atomic_add_int(&alt->ref_count, 1); 1471 } 1472 if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1473 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1474 chk->sent = SCTP_DATAGRAM_RESEND; 1475 } 1476 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1477 /* 1478 * If the address went un-reachable, we need to move 1479 * to the alternate for ALL chunks in queue 1480 */ 1481 sctp_move_all_chunks_to_alt(stcb, net, alt); 1482 net = alt; 1483 } 1484 /* mark the retran info */ 1485 if (asconf->sent != SCTP_DATAGRAM_RESEND) 1486 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1487 asconf->sent = SCTP_DATAGRAM_RESEND; 1488 1489 /* send another ASCONF if any and we can do */ 1490 sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1491 } 1492 return (0); 1493 } 1494 1495 /* Mobility adaptation */ 1496 void 1497 sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1498 struct sctp_nets *net) 1499 { 1500 if (stcb->asoc.deleted_primary == NULL) { 1501 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1502 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1503 return; 1504 } 1505 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1506 SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1507 sctp_free_remote_addr(stcb->asoc.deleted_primary); 1508 stcb->asoc.deleted_primary = NULL; 1509 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1510 return; 1511 } 1512 1513 /* 1514 * For the shutdown and shutdown-ack, we do not keep one around on the 1515 * control queue. This means we must generate a new one and call the general 1516 * chunk output routine, AFTER having done threshold management. 
1517 */ 1518 int 1519 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1520 struct sctp_nets *net) 1521 { 1522 struct sctp_nets *alt; 1523 1524 /* first threshold managment */ 1525 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1526 /* Assoc is over */ 1527 return (1); 1528 } 1529 /* second select an alternative */ 1530 alt = sctp_find_alternate_net(stcb, net, 0); 1531 1532 /* third generate a shutdown into the queue for out net */ 1533 if (alt) { 1534 sctp_send_shutdown(stcb, alt); 1535 } else { 1536 /* 1537 * if alt is NULL, there is no dest to send to?? 1538 */ 1539 return (0); 1540 } 1541 /* fourth restart timer */ 1542 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1543 return (0); 1544 } 1545 1546 int 1547 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1548 struct sctp_nets *net) 1549 { 1550 struct sctp_nets *alt; 1551 1552 /* first threshold managment */ 1553 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1554 /* Assoc is over */ 1555 return (1); 1556 } 1557 /* second select an alternative */ 1558 alt = sctp_find_alternate_net(stcb, net, 0); 1559 1560 /* third generate a shutdown into the queue for out net */ 1561 sctp_send_shutdown_ack(stcb, alt); 1562 1563 /* fourth restart timer */ 1564 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1565 return (0); 1566 } 1567 1568 static void 1569 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1570 struct sctp_tcb *stcb) 1571 { 1572 struct sctp_stream_out *outs; 1573 struct sctp_stream_queue_pending *sp; 1574 unsigned int chks_in_queue = 0; 1575 int being_filled = 0; 1576 1577 /* 1578 * This function is ONLY called when the send/sent queues are empty. 1579 */ 1580 if ((stcb == NULL) || (inp == NULL)) 1581 return; 1582 1583 if (stcb->asoc.sent_queue_retran_cnt) { 1584 SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1585 stcb->asoc.sent_queue_retran_cnt); 1586 stcb->asoc.sent_queue_retran_cnt = 0; 1587 } 1588 SCTP_TCB_SEND_LOCK(stcb); 1589 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) { 1590 int i, cnt = 0; 1591 1592 /* Check to see if a spoke fell off the wheel */ 1593 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1594 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1595 sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1); 1596 cnt++; 1597 } 1598 } 1599 if (cnt) { 1600 /* yep, we lost a spoke or two */ 1601 SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt); 1602 } else { 1603 /* no spokes lost, */ 1604 stcb->asoc.total_output_queue_size = 0; 1605 } 1606 SCTP_TCB_SEND_UNLOCK(stcb); 1607 return; 1608 } 1609 SCTP_TCB_SEND_UNLOCK(stcb); 1610 /* Check to see if some data queued, if so report it */ 1611 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) { 1612 if (!TAILQ_EMPTY(&outs->outqueue)) { 1613 TAILQ_FOREACH(sp, &outs->outqueue, next) { 1614 if (sp->msg_is_complete) 1615 being_filled++; 1616 chks_in_queue++; 1617 } 1618 } 1619 } 1620 if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1621 SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1622 stcb->asoc.stream_queue_cnt, chks_in_queue); 1623 } 1624 if (chks_in_queue) { 1625 /* call the output queue function */ 1626 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1627 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1628 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1629 /* 1630 * Probably should go in and make it go back through 1631 * and add fragments 
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did the peer respond last time? */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return (1);
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}

int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};


static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* no max_mtu is bigger than this one */
			return (mtu_sizes[i]);
		}
	}
	/* here return the highest allowable */
	return (cur_mtu);
}

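/*
 * Editor's note (worked example, illustrative only): the table above is
 * a ladder of common link MTUs, so sctp_getnext_mtu() probes upward one
 * rung at a time; e.g. a current MTU of 1500 returns 1536, 1536 returns
 * 2002, and anything at or above 65535 just returns cur_mtu unchanged.
 */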
void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_getnext_mtu(inp, net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *) & net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
			if (net->port) {
				mtu -= sizeof(struct udphdr);
			}
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}
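/*
 * Editor's note (summary added for clarity): the autoclose timer below
 * measures idle time from the later of the last receive and last send;
 * once the association has been idle for sctp_autoclose_ticks and
 * nothing remains queued, it initiates SHUTDOWN, otherwise it re-arms
 * the timer with just the remaining time.
 */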
1871 */ 1872 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1873 /* only send SHUTDOWN 1st time thru */ 1874 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1875 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1876 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1877 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1878 } 1879 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1880 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1881 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1882 stcb->sctp_ep, stcb, 1883 asoc->primary_destination); 1884 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1885 stcb->sctp_ep, stcb, 1886 asoc->primary_destination); 1887 } 1888 } 1889 } else { 1890 /* 1891 * No auto close at this time, reset t-o to check 1892 * later 1893 */ 1894 int tmp; 1895 1896 /* fool the timer startup to use the time left */ 1897 tmp = asoc->sctp_autoclose_ticks; 1898 asoc->sctp_autoclose_ticks -= ticks_gone_by; 1899 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1900 net); 1901 /* restore the real tick value */ 1902 asoc->sctp_autoclose_ticks = tmp; 1903 } 1904 } 1905 } 1906