1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #define _IP_VHL 37 #include <netinet/sctp_os.h> 38 #include <netinet/sctp_pcb.h> 39 #ifdef INET6 40 #endif 41 #include <netinet/sctp_var.h> 42 #include <netinet/sctp_sysctl.h> 43 #include <netinet/sctp_timer.h> 44 #include <netinet/sctputil.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_header.h> 47 #include <netinet/sctp_indata.h> 48 #include <netinet/sctp_asconf.h> 49 #include <netinet/sctp_input.h> 50 #include <netinet/sctp.h> 51 #include <netinet/sctp_uio.h> 52 #include <netinet/udp.h> 53 54 55 void 56 sctp_early_fr_timer(struct sctp_inpcb *inp, 57 struct sctp_tcb *stcb, 58 struct sctp_nets *net) 59 { 60 struct sctp_tmit_chunk *chk, *tp2; 61 struct timeval now, min_wait, tv; 62 unsigned int cur_rtt, cnt = 0, cnt_resend = 0; 63 64 /* an early FR is occuring. */ 65 (void)SCTP_GETTIME_TIMEVAL(&now); 66 /* get cur rto in micro-seconds */ 67 if (net->lastsa == 0) { 68 /* Hmm no rtt estimate yet? */ 69 cur_rtt = stcb->asoc.initial_rto >> 2; 70 } else { 71 72 cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 73 } 74 if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 75 cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 76 } 77 cur_rtt *= 1000; 78 tv.tv_sec = cur_rtt / 1000000; 79 tv.tv_usec = cur_rtt % 1000000; 80 min_wait = now; 81 timevalsub(&min_wait, &tv); 82 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 83 /* 84 * if we hit here, we don't have enough seconds on the clock 85 * to account for the RTO. We just let the lower seconds be 86 * the bounds and don't worry about it. This may mean we 87 * will mark a lot more than we should. 88 */ 89 min_wait.tv_sec = min_wait.tv_usec = 0; 90 } 91 chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead); 92 for (; chk != NULL; chk = tp2) { 93 tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next); 94 if (chk->whoTo != net) { 95 continue; 96 } 97 if (chk->sent == SCTP_DATAGRAM_RESEND) 98 cnt_resend++; 99 else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 100 (chk->sent < SCTP_DATAGRAM_RESEND)) { 101 /* pending, may need retran */ 102 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 103 /* 104 * we have reached a chunk that was sent 105 * some seconds past our min.. forget it we 106 * will find no more to send. 107 */ 108 continue; 109 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 110 /* 111 * we must look at the micro seconds to 112 * know. 113 */ 114 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 115 /* 116 * ok it was sent after our boundary 117 * time. 118 */ 119 continue; 120 } 121 } 122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) { 123 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 124 4, SCTP_FR_MARKED_EARLY); 125 } 126 SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 127 chk->sent = SCTP_DATAGRAM_RESEND; 128 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 129 /* double book size since we are doing an early FR */ 130 chk->book_size_scale++; 131 cnt += chk->send_size; 132 if ((cnt + net->flight_size) > net->cwnd) { 133 /* Mark all we could possibly resend */ 134 break; 135 } 136 } 137 } 138 if (cnt) { 139 /* 140 * JRS - Use the congestion control given in the congestion 141 * control module 142 */ 143 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net); 144 } else if (cnt_resend) { 145 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 146 } 147 /* Restart it? */ 148 if (net->flight_size < net->cwnd) { 149 SCTP_STAT_INCR(sctps_earlyfrstrtmr); 150 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 151 } 152 } 153 154 void 155 sctp_audit_retranmission_queue(struct sctp_association *asoc) 156 { 157 struct sctp_tmit_chunk *chk; 158 159 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 160 asoc->sent_queue_retran_cnt, 161 asoc->sent_queue_cnt); 162 asoc->sent_queue_retran_cnt = 0; 163 asoc->sent_queue_cnt = 0; 164 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 165 if (chk->sent == SCTP_DATAGRAM_RESEND) { 166 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 167 } 168 asoc->sent_queue_cnt++; 169 } 170 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 171 if (chk->sent == SCTP_DATAGRAM_RESEND) { 172 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 173 } 174 } 175 TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 176 if (chk->sent == SCTP_DATAGRAM_RESEND) { 177 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 178 } 179 } 180 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 181 asoc->sent_queue_retran_cnt, 182 asoc->sent_queue_cnt); 183 } 184 185 int 186 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 187 struct sctp_nets *net, uint16_t threshold) 188 { 189 if (net) { 190 net->error_count++; 191 SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 192 net, net->error_count, 193 net->failure_threshold); 194 if (net->error_count > net->failure_threshold) { 195 /* We had a threshold failure */ 196 if (net->dest_state & SCTP_ADDR_REACHABLE) { 197 net->dest_state &= ~SCTP_ADDR_REACHABLE; 198 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 199 net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 200 if (net == stcb->asoc.primary_destination) { 201 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 202 } 203 /* 204 * JRS 5/14/07 - If a destination is 205 * unreachable, the PF bit is turned off. 206 * This allows an unambiguous use of the PF 207 * bit for destinations that are reachable 208 * but potentially failed. If the 209 * destination is set to the unreachable 210 * state, also set the destination to the PF 211 * state. 212 */ 213 /* 214 * Add debug message here if destination is 215 * not in PF state. 216 */ 217 /* Stop any running T3 timers here? */ 218 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 219 net->dest_state &= ~SCTP_ADDR_PF; 220 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 221 net); 222 } 223 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 224 stcb, 225 SCTP_FAILED_THRESHOLD, 226 (void *)net, SCTP_SO_NOT_LOCKED); 227 } 228 } 229 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 230 *********ROUTING CODE 231 */ 232 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 233 *********ROUTING CODE 234 */ 235 } 236 if (stcb == NULL) 237 return (0); 238 239 if (net) { 240 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 241 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 242 sctp_misc_ints(SCTP_THRESHOLD_INCR, 243 stcb->asoc.overall_error_count, 244 (stcb->asoc.overall_error_count + 1), 245 SCTP_FROM_SCTP_TIMER, 246 __LINE__); 247 } 248 stcb->asoc.overall_error_count++; 249 } 250 } else { 251 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 252 sctp_misc_ints(SCTP_THRESHOLD_INCR, 253 stcb->asoc.overall_error_count, 254 (stcb->asoc.overall_error_count + 1), 255 SCTP_FROM_SCTP_TIMER, 256 __LINE__); 257 } 258 stcb->asoc.overall_error_count++; 259 } 260 SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 261 &stcb->asoc, stcb->asoc.overall_error_count, 262 (uint32_t) threshold, 263 ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); 264 /* 265 * We specifically do not do >= to give the assoc one more change 266 * before we fail it. 267 */ 268 if (stcb->asoc.overall_error_count > threshold) { 269 /* Abort notification sends a ULP notify */ 270 struct mbuf *oper; 271 272 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 273 0, M_DONTWAIT, 1, MT_DATA); 274 if (oper) { 275 struct sctp_paramhdr *ph; 276 uint32_t *ippp; 277 278 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 279 sizeof(uint32_t); 280 ph = mtod(oper, struct sctp_paramhdr *); 281 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 282 ph->param_length = htons(SCTP_BUF_LEN(oper)); 283 ippp = (uint32_t *) (ph + 1); 284 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 285 } 286 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 287 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED); 288 return (1); 289 } 290 return (0); 291 } 292 293 struct sctp_nets * 294 sctp_find_alternate_net(struct sctp_tcb *stcb, 295 struct sctp_nets *net, 296 int mode) 297 { 298 /* Find and return an alternate network if possible */ 299 struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL; 300 int once; 301 302 /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ 303 int min_errors = -1; 304 uint32_t max_cwnd = 0; 305 306 if (stcb->asoc.numnets == 1) { 307 /* No others but net */ 308 return (TAILQ_FIRST(&stcb->asoc.nets)); 309 } 310 /* 311 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate 312 * net algorithm. This algorithm chooses the active destination (not 313 * in PF state) with the largest cwnd value. If all destinations are 314 * in PF state, unreachable, or unconfirmed, choose the desination 315 * that is in PF state with the lowest error count. In case of a 316 * tie, choose the destination that was most recently active. 317 */ 318 if (mode == 2) { 319 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 320 /* 321 * JRS 5/14/07 - If the destination is unreachable 322 * or unconfirmed, skip it. 323 */ 324 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 325 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 326 continue; 327 } 328 /* 329 * JRS 5/14/07 - If the destination is reachable 330 * but in PF state, compare the error count of the 331 * destination to the minimum error count seen thus 332 * far. Store the destination with the lower error 333 * count. If the error counts are equal, store the 334 * destination that was most recently active. 335 */ 336 if (mnet->dest_state & SCTP_ADDR_PF) { 337 /* 338 * JRS 5/14/07 - If the destination under 339 * consideration is the current destination, 340 * work as if the error count is one higher. 341 * The actual error count will not be 342 * incremented until later in the t3 343 * handler. 344 */ 345 if (mnet == net) { 346 if (min_errors == -1) { 347 min_errors = mnet->error_count + 1; 348 min_errors_net = mnet; 349 } else if (mnet->error_count + 1 < min_errors) { 350 min_errors = mnet->error_count + 1; 351 min_errors_net = mnet; 352 } else if (mnet->error_count + 1 == min_errors 353 && mnet->last_active > min_errors_net->last_active) { 354 min_errors_net = mnet; 355 min_errors = mnet->error_count + 1; 356 } 357 continue; 358 } else { 359 if (min_errors == -1) { 360 min_errors = mnet->error_count; 361 min_errors_net = mnet; 362 } else if (mnet->error_count < min_errors) { 363 min_errors = mnet->error_count; 364 min_errors_net = mnet; 365 } else if (mnet->error_count == min_errors 366 && mnet->last_active > min_errors_net->last_active) { 367 min_errors_net = mnet; 368 min_errors = mnet->error_count; 369 } 370 continue; 371 } 372 } 373 /* 374 * JRS 5/14/07 - If the destination is reachable and 375 * not in PF state, compare the cwnd of the 376 * destination to the highest cwnd seen thus far. 377 * Store the destination with the higher cwnd value. 378 * If the cwnd values are equal, randomly choose one 379 * of the two destinations. 380 */ 381 if (max_cwnd < mnet->cwnd) { 382 max_cwnd_net = mnet; 383 max_cwnd = mnet->cwnd; 384 } else if (max_cwnd == mnet->cwnd) { 385 uint32_t rndval; 386 uint8_t this_random; 387 388 if (stcb->asoc.hb_random_idx > 3) { 389 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 390 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 391 this_random = stcb->asoc.hb_random_values[0]; 392 stcb->asoc.hb_random_idx++; 393 stcb->asoc.hb_ect_randombit = 0; 394 } else { 395 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 396 stcb->asoc.hb_random_idx++; 397 stcb->asoc.hb_ect_randombit = 0; 398 } 399 if (this_random % 2 == 1) { 400 max_cwnd_net = mnet; 401 max_cwnd = mnet->cwnd; /* Useless? */ 402 } 403 } 404 } 405 /* 406 * JRS 5/14/07 - After all destination have been considered 407 * as alternates, check to see if there was some active 408 * destination (not in PF state). If not, check to see if 409 * there was some PF destination with the minimum number of 410 * errors. If not, return the original destination. If 411 * there is a min_errors_net, remove the PF flag from that 412 * destination, set the cwnd to one or two MTUs, and return 413 * the destination as an alt. If there was some active 414 * destination with a highest cwnd, return the destination 415 * as an alt. 416 */ 417 if (max_cwnd_net == NULL) { 418 if (min_errors_net == NULL) { 419 return (net); 420 } 421 min_errors_net->dest_state &= ~SCTP_ADDR_PF; 422 min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf); 423 if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 424 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 425 stcb, min_errors_net, 426 SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 427 } 428 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 429 min_errors_net, min_errors_net->error_count); 430 return (min_errors_net); 431 } else { 432 return (max_cwnd_net); 433 } 434 } 435 /* 436 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 437 * choosing an alternate net. 438 */ 439 else if (mode == 1) { 440 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 441 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 442 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED) 443 ) { 444 /* 445 * will skip ones that are not-reachable or 446 * unconfirmed 447 */ 448 continue; 449 } 450 if (max_cwnd < mnet->cwnd) { 451 max_cwnd_net = mnet; 452 max_cwnd = mnet->cwnd; 453 } else if (max_cwnd == mnet->cwnd) { 454 uint32_t rndval; 455 uint8_t this_random; 456 457 if (stcb->asoc.hb_random_idx > 3) { 458 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 459 memcpy(stcb->asoc.hb_random_values, &rndval, 460 sizeof(stcb->asoc.hb_random_values)); 461 this_random = stcb->asoc.hb_random_values[0]; 462 stcb->asoc.hb_random_idx = 0; 463 stcb->asoc.hb_ect_randombit = 0; 464 } else { 465 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 466 stcb->asoc.hb_random_idx++; 467 stcb->asoc.hb_ect_randombit = 0; 468 } 469 if (this_random % 2) { 470 max_cwnd_net = mnet; 471 max_cwnd = mnet->cwnd; 472 } 473 } 474 } 475 if (max_cwnd_net) { 476 return (max_cwnd_net); 477 } 478 } 479 mnet = net; 480 once = 0; 481 482 if (mnet == NULL) { 483 mnet = TAILQ_FIRST(&stcb->asoc.nets); 484 } 485 do { 486 alt = TAILQ_NEXT(mnet, sctp_next); 487 if (alt == NULL) { 488 once++; 489 if (once > 1) { 490 break; 491 } 492 alt = TAILQ_FIRST(&stcb->asoc.nets); 493 } 494 if (alt->ro.ro_rt == NULL) { 495 if (alt->ro._s_addr) { 496 sctp_free_ifa(alt->ro._s_addr); 497 alt->ro._s_addr = NULL; 498 } 499 alt->src_addr_selected = 0; 500 } 501 if ( 502 ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 503 (alt->ro.ro_rt != NULL) && 504 /* sa_ignore NO_NULL_CHK */ 505 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 506 ) { 507 /* Found a reachable address */ 508 break; 509 } 510 mnet = alt; 511 } while (alt != NULL); 512 513 if (alt == NULL) { 514 /* Case where NO insv network exists (dormant state) */ 515 /* we rotate destinations */ 516 once = 0; 517 mnet = net; 518 do { 519 alt = TAILQ_NEXT(mnet, sctp_next); 520 if (alt == NULL) { 521 once++; 522 if (once > 1) { 523 break; 524 } 525 alt = TAILQ_FIRST(&stcb->asoc.nets); 526 } 527 /* sa_ignore NO_NULL_CHK */ 528 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 529 (alt != net)) { 530 /* Found an alternate address */ 531 break; 532 } 533 mnet = alt; 534 } while (alt != NULL); 535 } 536 if (alt == NULL) { 537 return (net); 538 } 539 return (alt); 540 } 541 542 543 544 static void 545 sctp_backoff_on_timeout(struct sctp_tcb *stcb, 546 struct sctp_nets *net, 547 int win_probe, 548 int num_marked, int num_abandoned) 549 { 550 if (net->RTO == 0) { 551 net->RTO = stcb->asoc.minrto; 552 } 553 net->RTO <<= 1; 554 if (net->RTO > stcb->asoc.maxrto) { 555 net->RTO = stcb->asoc.maxrto; 556 } 557 if ((win_probe == 0) && (num_marked || num_abandoned)) { 558 /* We don't apply penalty to window probe scenarios */ 559 /* JRS - Use the congestion control given in the CC module */ 560 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 561 } 562 } 563 564 #ifndef INVARIANTS 565 static void 566 sctp_recover_sent_list(struct sctp_tcb *stcb) 567 { 568 struct sctp_tmit_chunk *chk, *tp2; 569 struct sctp_association *asoc; 570 571 asoc = &stcb->asoc; 572 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 573 for (; chk != NULL; chk = tp2) { 574 tp2 = TAILQ_NEXT(chk, sctp_next); 575 if ((compare_with_wrap(stcb->asoc.last_acked_seq, 576 chk->rec.data.TSN_seq, 577 MAX_TSN)) || 578 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 579 580 SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 581 chk, chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq); 582 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 583 if (chk->pr_sctp_on) { 584 if (asoc->pr_sctp_cnt != 0) 585 asoc->pr_sctp_cnt--; 586 } 587 if (chk->data) { 588 /* sa_ignore NO_NULL_CHK */ 589 sctp_free_bufspace(stcb, asoc, chk, 1); 590 sctp_m_freem(chk->data); 591 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 592 asoc->sent_queue_cnt_removeable--; 593 } 594 } 595 chk->data = NULL; 596 asoc->sent_queue_cnt--; 597 sctp_free_a_chunk(stcb, chk); 598 } 599 } 600 SCTP_PRINTF("after recover order is as follows\n"); 601 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 602 for (; chk != NULL; chk = tp2) { 603 tp2 = TAILQ_NEXT(chk, sctp_next); 604 SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 605 } 606 } 607 608 #endif 609 610 static int 611 sctp_mark_all_for_resend(struct sctp_tcb *stcb, 612 struct sctp_nets *net, 613 struct sctp_nets *alt, 614 int window_probe, 615 int *num_marked, 616 int *num_abandoned) 617 { 618 619 /* 620 * Mark all chunks (well not all) that were sent to *net for 621 * retransmission. Move them to alt for there destination as well... 622 * We only mark chunks that have been outstanding long enough to 623 * have received feed-back. 624 */ 625 struct sctp_tmit_chunk *chk, *tp2; 626 struct sctp_nets *lnets; 627 struct timeval now, min_wait, tv; 628 int cur_rtt; 629 int cnt_abandoned; 630 int audit_tf, num_mk, fir; 631 unsigned int cnt_mk; 632 uint32_t orig_flight, orig_tf; 633 uint32_t tsnlast, tsnfirst; 634 int recovery_cnt = 0; 635 636 637 /* none in flight now */ 638 audit_tf = 0; 639 fir = 0; 640 /* 641 * figure out how long a data chunk must be pending before we can 642 * mark it .. 643 */ 644 (void)SCTP_GETTIME_TIMEVAL(&now); 645 /* get cur rto in micro-seconds */ 646 cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1); 647 cur_rtt *= 1000; 648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 649 sctp_log_fr(cur_rtt, 650 stcb->asoc.peers_rwnd, 651 window_probe, 652 SCTP_FR_T3_MARK_TIME); 653 sctp_log_fr(net->flight_size, 654 SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 655 SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 656 SCTP_FR_CWND_REPORT); 657 sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 658 } 659 tv.tv_sec = cur_rtt / 1000000; 660 tv.tv_usec = cur_rtt % 1000000; 661 min_wait = now; 662 timevalsub(&min_wait, &tv); 663 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 664 /* 665 * if we hit here, we don't have enough seconds on the clock 666 * to account for the RTO. We just let the lower seconds be 667 * the bounds and don't worry about it. This may mean we 668 * will mark a lot more than we should. 669 */ 670 min_wait.tv_sec = min_wait.tv_usec = 0; 671 } 672 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 673 sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 674 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 675 } 676 /* 677 * Our rwnd will be incorrect here since we are not adding back the 678 * cnt * mbuf but we will fix that down below. 679 */ 680 orig_flight = net->flight_size; 681 orig_tf = stcb->asoc.total_flight; 682 683 net->fast_retran_ip = 0; 684 /* Now on to each chunk */ 685 cnt_abandoned = 0; 686 num_mk = cnt_mk = 0; 687 tsnfirst = tsnlast = 0; 688 #ifndef INVARIANTS 689 start_again: 690 #endif 691 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 692 for (; chk != NULL; chk = tp2) { 693 tp2 = TAILQ_NEXT(chk, sctp_next); 694 if ((compare_with_wrap(stcb->asoc.last_acked_seq, 695 chk->rec.data.TSN_seq, 696 MAX_TSN)) || 697 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 698 /* Strange case our list got out of order? */ 699 SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x", 700 (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 701 recovery_cnt++; 702 #ifdef INVARIANTS 703 panic("last acked >= chk on sent-Q"); 704 #else 705 SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 706 sctp_recover_sent_list(stcb); 707 if (recovery_cnt < 10) { 708 goto start_again; 709 } else { 710 SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 711 } 712 #endif 713 } 714 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 715 /* 716 * found one to mark: If it is less than 717 * DATAGRAM_ACKED it MUST not be a skipped or marked 718 * TSN but instead one that is either already set 719 * for retransmission OR one that needs 720 * retransmission. 721 */ 722 723 /* validate its been outstanding long enough */ 724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 725 sctp_log_fr(chk->rec.data.TSN_seq, 726 chk->sent_rcv_time.tv_sec, 727 chk->sent_rcv_time.tv_usec, 728 SCTP_FR_T3_MARK_TIME); 729 } 730 if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 731 /* 732 * we have reached a chunk that was sent 733 * some seconds past our min.. forget it we 734 * will find no more to send. 735 */ 736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 737 sctp_log_fr(0, 738 chk->sent_rcv_time.tv_sec, 739 chk->sent_rcv_time.tv_usec, 740 SCTP_FR_T3_STOPPED); 741 } 742 continue; 743 } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 744 (window_probe == 0)) { 745 /* 746 * we must look at the micro seconds to 747 * know. 748 */ 749 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 750 /* 751 * ok it was sent after our boundary 752 * time. 753 */ 754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 755 sctp_log_fr(0, 756 chk->sent_rcv_time.tv_sec, 757 chk->sent_rcv_time.tv_usec, 758 SCTP_FR_T3_STOPPED); 759 } 760 continue; 761 } 762 } 763 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 764 /* Is it expired? */ 765 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) || 766 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) && 767 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) { 768 /* Yes so drop it */ 769 if (chk->data) { 770 (void)sctp_release_pr_sctp_chunk(stcb, 771 chk, 772 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 773 SCTP_SO_NOT_LOCKED); 774 cnt_abandoned++; 775 } 776 continue; 777 } 778 } 779 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 780 /* Has it been retransmitted tv_sec times? */ 781 if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 782 if (chk->data) { 783 (void)sctp_release_pr_sctp_chunk(stcb, 784 chk, 785 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 786 SCTP_SO_NOT_LOCKED); 787 cnt_abandoned++; 788 } 789 continue; 790 } 791 } 792 if (chk->sent < SCTP_DATAGRAM_RESEND) { 793 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 794 num_mk++; 795 if (fir == 0) { 796 fir = 1; 797 tsnfirst = chk->rec.data.TSN_seq; 798 } 799 tsnlast = chk->rec.data.TSN_seq; 800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 801 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 802 0, SCTP_FR_T3_MARKED); 803 } 804 if (chk->rec.data.chunk_was_revoked) { 805 /* deflate the cwnd */ 806 chk->whoTo->cwnd -= chk->book_size; 807 chk->rec.data.chunk_was_revoked = 0; 808 } 809 net->marked_retrans++; 810 stcb->asoc.marked_retrans++; 811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 812 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 813 chk->whoTo->flight_size, 814 chk->book_size, 815 (uintptr_t) chk->whoTo, 816 chk->rec.data.TSN_seq); 817 } 818 sctp_flight_size_decrease(chk); 819 sctp_total_flight_decrease(stcb, chk); 820 stcb->asoc.peers_rwnd += chk->send_size; 821 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 822 } 823 chk->sent = SCTP_DATAGRAM_RESEND; 824 SCTP_STAT_INCR(sctps_markedretrans); 825 826 /* reset the TSN for striking and other FR stuff */ 827 chk->rec.data.doing_fast_retransmit = 0; 828 /* Clear any time so NO RTT is being done */ 829 chk->do_rtt = 0; 830 if (alt != net) { 831 sctp_free_remote_addr(chk->whoTo); 832 chk->no_fr_allowed = 1; 833 chk->whoTo = alt; 834 atomic_add_int(&alt->ref_count, 1); 835 } else { 836 chk->no_fr_allowed = 0; 837 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 838 chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 839 } else { 840 chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 841 } 842 } 843 /* 844 * CMT: Do not allow FRs on retransmitted TSNs. 845 */ 846 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) { 847 chk->no_fr_allowed = 1; 848 } 849 #ifdef THIS_SHOULD_NOT_BE_DONE 850 } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 851 /* remember highest acked one */ 852 could_be_sent = chk; 853 #endif 854 } 855 if (chk->sent == SCTP_DATAGRAM_RESEND) { 856 cnt_mk++; 857 } 858 } 859 if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 860 /* we did not subtract the same things? */ 861 audit_tf = 1; 862 } 863 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 864 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 865 } 866 #ifdef SCTP_DEBUG 867 if (num_mk) { 868 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 869 tsnlast); 870 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 871 num_mk, (u_long)stcb->asoc.peers_rwnd); 872 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 873 tsnlast); 874 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 875 num_mk, 876 (int)stcb->asoc.peers_rwnd); 877 } 878 #endif 879 *num_marked = num_mk; 880 *num_abandoned = cnt_abandoned; 881 /* 882 * Now check for a ECN Echo that may be stranded And include the 883 * cnt_mk'd to have all resends in the control queue. 884 */ 885 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 886 if (chk->sent == SCTP_DATAGRAM_RESEND) { 887 cnt_mk++; 888 } 889 if ((chk->whoTo == net) && 890 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 891 sctp_free_remote_addr(chk->whoTo); 892 chk->whoTo = alt; 893 if (chk->sent != SCTP_DATAGRAM_RESEND) { 894 chk->sent = SCTP_DATAGRAM_RESEND; 895 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 896 cnt_mk++; 897 } 898 atomic_add_int(&alt->ref_count, 1); 899 } 900 } 901 #ifdef THIS_SHOULD_NOT_BE_DONE 902 if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 903 /* fix it so we retransmit the highest acked anyway */ 904 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 905 cnt_mk++; 906 could_be_sent->sent = SCTP_DATAGRAM_RESEND; 907 } 908 #endif 909 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 910 #ifdef INVARIANTS 911 SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 912 cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 913 #endif 914 #ifndef SCTP_AUDITING_ENABLED 915 stcb->asoc.sent_queue_retran_cnt = cnt_mk; 916 #endif 917 } 918 if (audit_tf) { 919 SCTPDBG(SCTP_DEBUG_TIMER4, 920 "Audit total flight due to negative value net:%p\n", 921 net); 922 stcb->asoc.total_flight = 0; 923 stcb->asoc.total_flight_count = 0; 924 /* Clear all networks flight size */ 925 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 926 lnets->flight_size = 0; 927 SCTPDBG(SCTP_DEBUG_TIMER4, 928 "Net:%p c-f cwnd:%d ssthresh:%d\n", 929 lnets, lnets->cwnd, lnets->ssthresh); 930 } 931 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 932 if (chk->sent < SCTP_DATAGRAM_RESEND) { 933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 934 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 935 chk->whoTo->flight_size, 936 chk->book_size, 937 (uintptr_t) chk->whoTo, 938 chk->rec.data.TSN_seq); 939 } 940 sctp_flight_size_increase(chk); 941 sctp_total_flight_increase(stcb, chk); 942 } 943 } 944 } 945 /* 946 * Setup the ecn nonce re-sync point. We do this since 947 * retranmissions are NOT setup for ECN. This means that do to 948 * Karn's rule, we don't know the total of the peers ecn bits. 949 */ 950 chk = TAILQ_FIRST(&stcb->asoc.send_queue); 951 if (chk == NULL) { 952 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 953 } else { 954 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq; 955 } 956 stcb->asoc.nonce_wait_for_ecne = 0; 957 stcb->asoc.nonce_sum_check = 0; 958 /* We return 1 if we only have a window probe outstanding */ 959 return (0); 960 } 961 962 static void 963 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb, 964 struct sctp_nets *net, 965 struct sctp_nets *alt) 966 { 967 struct sctp_association *asoc; 968 struct sctp_stream_out *outs; 969 struct sctp_tmit_chunk *chk; 970 struct sctp_stream_queue_pending *sp; 971 972 if (net == alt) 973 /* nothing to do */ 974 return; 975 976 asoc = &stcb->asoc; 977 978 /* 979 * now through all the streams checking for chunks sent to our bad 980 * network. 981 */ 982 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 983 /* now clean up any chunks here */ 984 TAILQ_FOREACH(sp, &outs->outqueue, next) { 985 if (sp->net == net) { 986 sctp_free_remote_addr(sp->net); 987 sp->net = alt; 988 atomic_add_int(&alt->ref_count, 1); 989 } 990 } 991 } 992 /* Now check the pending queue */ 993 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 994 if (chk->whoTo == net) { 995 sctp_free_remote_addr(chk->whoTo); 996 chk->whoTo = alt; 997 atomic_add_int(&alt->ref_count, 1); 998 } 999 } 1000 1001 } 1002 1003 int 1004 sctp_t3rxt_timer(struct sctp_inpcb *inp, 1005 struct sctp_tcb *stcb, 1006 struct sctp_nets *net) 1007 { 1008 struct sctp_nets *alt; 1009 int win_probe, num_mk, num_abandoned; 1010 1011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1012 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 1013 } 1014 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1015 struct sctp_nets *lnet; 1016 1017 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1018 if (net == lnet) { 1019 sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 1020 } else { 1021 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 1022 } 1023 } 1024 } 1025 /* Find an alternate and mark those for retransmission */ 1026 if ((stcb->asoc.peers_rwnd == 0) && 1027 (stcb->asoc.total_flight < net->mtu)) { 1028 SCTP_STAT_INCR(sctps_timowindowprobe); 1029 win_probe = 1; 1030 } else { 1031 win_probe = 0; 1032 } 1033 1034 /* 1035 * JRS 5/14/07 - If CMT PF is on and the destination if not already 1036 * in PF state, set the destination to PF state and store the 1037 * current time as the time that the destination was last active. In 1038 * addition, find an alternate destination with PF-based 1039 * find_alt_net(). 1040 */ 1041 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 1042 if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 1043 net->dest_state |= SCTP_ADDR_PF; 1044 net->last_active = sctp_get_tick_count(); 1045 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 1046 net); 1047 } 1048 alt = sctp_find_alternate_net(stcb, net, 2); 1049 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 1050 /* 1051 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 1052 * used, then pick dest with largest ssthresh for any 1053 * retransmission. 1054 */ 1055 alt = net; 1056 alt = sctp_find_alternate_net(stcb, alt, 1); 1057 /* 1058 * CUCv2: If a different dest is picked for the 1059 * retransmission, then new (rtx-)pseudo_cumack needs to be 1060 * tracked for orig dest. Let CUCv2 track new (rtx-) 1061 * pseudo-cumack always. 1062 */ 1063 net->find_pseudo_cumack = 1; 1064 net->find_rtx_pseudo_cumack = 1; 1065 } else { /* CMT is OFF */ 1066 alt = sctp_find_alternate_net(stcb, net, 0); 1067 } 1068 num_mk = 0; 1069 num_abandoned = 0; 1070 (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, 1071 &num_mk, &num_abandoned); 1072 /* FR Loss recovery just ended with the T3. */ 1073 stcb->asoc.fast_retran_loss_recovery = 0; 1074 1075 /* CMT FR loss recovery ended with the T3 */ 1076 net->fast_retran_loss_recovery = 0; 1077 1078 /* 1079 * setup the sat loss recovery that prevents satellite cwnd advance. 1080 */ 1081 stcb->asoc.sat_t3_loss_recovery = 1; 1082 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1083 1084 /* Backoff the timer and cwnd */ 1085 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); 1086 if (win_probe == 0) { 1087 /* We don't do normal threshold management on window probes */ 1088 if (sctp_threshold_management(inp, stcb, net, 1089 stcb->asoc.max_send_times)) { 1090 /* Association was destroyed */ 1091 return (1); 1092 } else { 1093 if (net != stcb->asoc.primary_destination) { 1094 /* send a immediate HB if our RTO is stale */ 1095 struct timeval now; 1096 unsigned int ms_goneby; 1097 1098 (void)SCTP_GETTIME_TIMEVAL(&now); 1099 if (net->last_sent_time.tv_sec) { 1100 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1101 } else { 1102 ms_goneby = 0; 1103 } 1104 if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1105 /* 1106 * no recent feed back in an RTO or 1107 * more, request a RTT update 1108 */ 1109 if (sctp_send_hb(stcb, 1, net) < 0) 1110 /* 1111 * Less than 0 means we lost 1112 * the assoc 1113 */ 1114 return (1); 1115 } 1116 } 1117 } 1118 } else { 1119 /* 1120 * For a window probe we don't penalize the net's but only 1121 * the association. This may fail it if SACKs are not coming 1122 * back. If sack's are coming with rwnd locked at 0, we will 1123 * continue to hold things waiting for rwnd to raise 1124 */ 1125 if (sctp_threshold_management(inp, stcb, NULL, 1126 stcb->asoc.max_send_times)) { 1127 /* Association was destroyed */ 1128 return (1); 1129 } 1130 } 1131 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1132 /* Move all pending over too */ 1133 sctp_move_all_chunks_to_alt(stcb, net, alt); 1134 1135 /* 1136 * Get the address that failed, to force a new src address 1137 * selecton and a route allocation. 1138 */ 1139 if (net->ro._s_addr) { 1140 sctp_free_ifa(net->ro._s_addr); 1141 net->ro._s_addr = NULL; 1142 } 1143 net->src_addr_selected = 0; 1144 1145 /* Force a route allocation too */ 1146 if (net->ro.ro_rt) { 1147 RTFREE(net->ro.ro_rt); 1148 net->ro.ro_rt = NULL; 1149 } 1150 /* Was it our primary? */ 1151 if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1152 /* 1153 * Yes, note it as such and find an alternate note: 1154 * this means HB code must use this to resent the 1155 * primary if it goes active AND if someone does a 1156 * change-primary then this flag must be cleared 1157 * from any net structures. 1158 */ 1159 if (sctp_set_primary_addr(stcb, 1160 (struct sockaddr *)NULL, 1161 alt) == 0) { 1162 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1163 } 1164 } 1165 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) { 1166 /* 1167 * JRS 5/14/07 - If the destination hasn't failed completely 1168 * but is in PF state, a PF-heartbeat needs to be sent 1169 * manually. 1170 */ 1171 if (sctp_send_hb(stcb, 1, net) < 0) 1172 /* Return less than 0 means we lost the association */ 1173 return (1); 1174 } 1175 /* 1176 * Special case for cookie-echo'ed case, we don't do output but must 1177 * await the COOKIE-ACK before retransmission 1178 */ 1179 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1180 /* 1181 * Here we just reset the timer and start again since we 1182 * have not established the asoc 1183 */ 1184 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1185 return (0); 1186 } 1187 if (stcb->asoc.peer_supports_prsctp) { 1188 struct sctp_tmit_chunk *lchk; 1189 1190 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1191 /* C3. See if we need to send a Fwd-TSN */ 1192 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point, 1193 stcb->asoc.last_acked_seq, MAX_TSN)) { 1194 /* 1195 * ISSUE with ECN, see FWD-TSN processing for notes 1196 * on issues that will occur when the ECN NONCE 1197 * stuff is put into SCTP for cross checking. 1198 */ 1199 send_forward_tsn(stcb, &stcb->asoc); 1200 if (lchk) { 1201 /* Assure a timer is up */ 1202 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1203 } 1204 } 1205 } 1206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1207 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1208 } 1209 return (0); 1210 } 1211 1212 int 1213 sctp_t1init_timer(struct sctp_inpcb *inp, 1214 struct sctp_tcb *stcb, 1215 struct sctp_nets *net) 1216 { 1217 /* bump the thresholds */ 1218 if (stcb->asoc.delayed_connection) { 1219 /* 1220 * special hook for delayed connection. The library did NOT 1221 * complete the rest of its sends. 1222 */ 1223 stcb->asoc.delayed_connection = 0; 1224 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1225 return (0); 1226 } 1227 if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1228 return (0); 1229 } 1230 if (sctp_threshold_management(inp, stcb, net, 1231 stcb->asoc.max_init_times)) { 1232 /* Association was destroyed */ 1233 return (1); 1234 } 1235 stcb->asoc.dropped_special_cnt = 0; 1236 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1237 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1238 net->RTO = stcb->asoc.initial_init_rto_max; 1239 } 1240 if (stcb->asoc.numnets > 1) { 1241 /* If we have more than one addr use it */ 1242 struct sctp_nets *alt; 1243 1244 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1245 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) { 1246 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt); 1247 stcb->asoc.primary_destination = alt; 1248 } 1249 } 1250 /* Send out a new init */ 1251 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1252 return (0); 1253 } 1254 1255 /* 1256 * For cookie and asconf we actually need to find and mark for resend, then 1257 * increment the resend counter (after all the threshold management stuff of 1258 * course). 1259 */ 1260 int 1261 sctp_cookie_timer(struct sctp_inpcb *inp, 1262 struct sctp_tcb *stcb, 1263 struct sctp_nets *net) 1264 { 1265 struct sctp_nets *alt; 1266 struct sctp_tmit_chunk *cookie; 1267 1268 /* first before all else we must find the cookie */ 1269 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1270 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1271 break; 1272 } 1273 } 1274 if (cookie == NULL) { 1275 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1276 /* FOOBAR! */ 1277 struct mbuf *oper; 1278 1279 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1280 0, M_DONTWAIT, 1, MT_DATA); 1281 if (oper) { 1282 struct sctp_paramhdr *ph; 1283 uint32_t *ippp; 1284 1285 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1286 sizeof(uint32_t); 1287 ph = mtod(oper, struct sctp_paramhdr *); 1288 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1289 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1290 ippp = (uint32_t *) (ph + 1); 1291 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); 1292 } 1293 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4; 1294 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR, 1295 oper, SCTP_SO_NOT_LOCKED); 1296 } else { 1297 #ifdef INVARIANTS 1298 panic("Cookie timer expires in wrong state?"); 1299 #else 1300 SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); 1301 return (0); 1302 #endif 1303 } 1304 return (0); 1305 } 1306 /* Ok we found the cookie, threshold management next */ 1307 if (sctp_threshold_management(inp, stcb, cookie->whoTo, 1308 stcb->asoc.max_init_times)) { 1309 /* Assoc is over */ 1310 return (1); 1311 } 1312 /* 1313 * cleared theshold management now lets backoff the address & select 1314 * an alternate 1315 */ 1316 stcb->asoc.dropped_special_cnt = 0; 1317 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0); 1318 alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); 1319 if (alt != cookie->whoTo) { 1320 sctp_free_remote_addr(cookie->whoTo); 1321 cookie->whoTo = alt; 1322 atomic_add_int(&alt->ref_count, 1); 1323 } 1324 /* Now mark the retran info */ 1325 if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1326 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1327 } 1328 cookie->sent = SCTP_DATAGRAM_RESEND; 1329 /* 1330 * Now call the output routine to kick out the cookie again, Note we 1331 * don't mark any chunks for retran so that FR will need to kick in 1332 * to move these (or a send timer). 1333 */ 1334 return (0); 1335 } 1336 1337 int 1338 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1339 struct sctp_nets *net) 1340 { 1341 struct sctp_nets *alt; 1342 struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1343 1344 if (stcb->asoc.stream_reset_outstanding == 0) { 1345 return (0); 1346 } 1347 /* find the existing STRRESET, we use the seq number we sent out on */ 1348 (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1349 if (strrst == NULL) { 1350 return (0); 1351 } 1352 /* do threshold management */ 1353 if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1354 stcb->asoc.max_send_times)) { 1355 /* Assoc is over */ 1356 return (1); 1357 } 1358 /* 1359 * cleared theshold management now lets backoff the address & select 1360 * an alternate 1361 */ 1362 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); 1363 alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1364 sctp_free_remote_addr(strrst->whoTo); 1365 strrst->whoTo = alt; 1366 atomic_add_int(&alt->ref_count, 1); 1367 1368 /* See if a ECN Echo is also stranded */ 1369 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1370 if ((chk->whoTo == net) && 1371 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1372 sctp_free_remote_addr(chk->whoTo); 1373 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1374 chk->sent = SCTP_DATAGRAM_RESEND; 1375 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1376 } 1377 chk->whoTo = alt; 1378 atomic_add_int(&alt->ref_count, 1); 1379 } 1380 } 1381 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1382 /* 1383 * If the address went un-reachable, we need to move to 1384 * alternates for ALL chk's in queue 1385 */ 1386 sctp_move_all_chunks_to_alt(stcb, net, alt); 1387 } 1388 /* mark the retran info */ 1389 if (strrst->sent != SCTP_DATAGRAM_RESEND) 1390 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1391 strrst->sent = SCTP_DATAGRAM_RESEND; 1392 1393 /* restart the timer */ 1394 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1395 return (0); 1396 } 1397 1398 int 1399 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1400 struct sctp_nets *net) 1401 { 1402 struct sctp_nets *alt; 1403 struct sctp_tmit_chunk *asconf, *chk, *nchk; 1404 1405 /* is this a first send, or a retransmission? */ 1406 if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1407 /* compose a new ASCONF chunk and send it */ 1408 sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1409 } else { 1410 /* 1411 * Retransmission of the existing ASCONF is needed 1412 */ 1413 1414 /* find the existing ASCONF */ 1415 asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1416 if (asconf == NULL) { 1417 return (0); 1418 } 1419 /* do threshold management */ 1420 if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1421 stcb->asoc.max_send_times)) { 1422 /* Assoc is over */ 1423 return (1); 1424 } 1425 if (asconf->snd_count > stcb->asoc.max_send_times) { 1426 /* 1427 * Something is rotten: our peer is not responding 1428 * to ASCONFs but apparently is to other chunks. 1429 * i.e. it is not properly handling the chunk type 1430 * upper bits. Mark this peer as ASCONF incapable 1431 * and cleanup. 1432 */ 1433 SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1434 sctp_asconf_cleanup(stcb, net); 1435 return (0); 1436 } 1437 /* 1438 * cleared threshold management, so now backoff the net and 1439 * select an alternate 1440 */ 1441 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); 1442 alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1443 if (asconf->whoTo != alt) { 1444 sctp_free_remote_addr(asconf->whoTo); 1445 asconf->whoTo = alt; 1446 atomic_add_int(&alt->ref_count, 1); 1447 } 1448 /* See if an ECN Echo is also stranded */ 1449 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1450 if ((chk->whoTo == net) && 1451 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1452 sctp_free_remote_addr(chk->whoTo); 1453 chk->whoTo = alt; 1454 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1455 chk->sent = SCTP_DATAGRAM_RESEND; 1456 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1457 } 1458 atomic_add_int(&alt->ref_count, 1); 1459 } 1460 } 1461 for (chk = asconf; chk; chk = nchk) { 1462 nchk = TAILQ_NEXT(chk, sctp_next); 1463 if (chk->whoTo != alt) { 1464 sctp_free_remote_addr(chk->whoTo); 1465 chk->whoTo = alt; 1466 atomic_add_int(&alt->ref_count, 1); 1467 } 1468 if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1469 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1470 chk->sent = SCTP_DATAGRAM_RESEND; 1471 } 1472 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1473 /* 1474 * If the address went un-reachable, we need to move 1475 * to the alternate for ALL chunks in queue 1476 */ 1477 sctp_move_all_chunks_to_alt(stcb, net, alt); 1478 net = alt; 1479 } 1480 /* mark the retran info */ 1481 if (asconf->sent != SCTP_DATAGRAM_RESEND) 1482 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1483 asconf->sent = SCTP_DATAGRAM_RESEND; 1484 1485 /* send another ASCONF if any and we can do */ 1486 sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1487 } 1488 return (0); 1489 } 1490 1491 /* Mobility adaptation */ 1492 void 1493 sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1494 struct sctp_nets *net) 1495 { 1496 if (stcb->asoc.deleted_primary == NULL) { 1497 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1498 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1499 return; 1500 } 1501 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1502 SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1503 sctp_free_remote_addr(stcb->asoc.deleted_primary); 1504 stcb->asoc.deleted_primary = NULL; 1505 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1506 return; 1507 } 1508 1509 /* 1510 * For the shutdown and shutdown-ack, we do not keep one around on the 1511 * control queue. This means we must generate a new one and call the general 1512 * chunk output routine, AFTER having done threshold management. 1513 */ 1514 int 1515 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1516 struct sctp_nets *net) 1517 { 1518 struct sctp_nets *alt; 1519 1520 /* first threshold managment */ 1521 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1522 /* Assoc is over */ 1523 return (1); 1524 } 1525 /* second select an alternative */ 1526 alt = sctp_find_alternate_net(stcb, net, 0); 1527 1528 /* third generate a shutdown into the queue for out net */ 1529 if (alt) { 1530 sctp_send_shutdown(stcb, alt); 1531 } else { 1532 /* 1533 * if alt is NULL, there is no dest to send to?? 1534 */ 1535 return (0); 1536 } 1537 /* fourth restart timer */ 1538 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1539 return (0); 1540 } 1541 1542 int 1543 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1544 struct sctp_nets *net) 1545 { 1546 struct sctp_nets *alt; 1547 1548 /* first threshold managment */ 1549 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1550 /* Assoc is over */ 1551 return (1); 1552 } 1553 /* second select an alternative */ 1554 alt = sctp_find_alternate_net(stcb, net, 0); 1555 1556 /* third generate a shutdown into the queue for out net */ 1557 sctp_send_shutdown_ack(stcb, alt); 1558 1559 /* fourth restart timer */ 1560 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1561 return (0); 1562 } 1563 1564 static void 1565 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1566 struct sctp_tcb *stcb) 1567 { 1568 struct sctp_stream_out *outs; 1569 struct sctp_stream_queue_pending *sp; 1570 unsigned int chks_in_queue = 0; 1571 int being_filled = 0; 1572 1573 /* 1574 * This function is ONLY called when the send/sent queues are empty. 1575 */ 1576 if ((stcb == NULL) || (inp == NULL)) 1577 return; 1578 1579 if (stcb->asoc.sent_queue_retran_cnt) { 1580 SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1581 stcb->asoc.sent_queue_retran_cnt); 1582 stcb->asoc.sent_queue_retran_cnt = 0; 1583 } 1584 SCTP_TCB_SEND_LOCK(stcb); 1585 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) { 1586 int i, cnt = 0; 1587 1588 /* Check to see if a spoke fell off the wheel */ 1589 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1590 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1591 sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1); 1592 cnt++; 1593 } 1594 } 1595 if (cnt) { 1596 /* yep, we lost a spoke or two */ 1597 SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt); 1598 } else { 1599 /* no spokes lost, */ 1600 stcb->asoc.total_output_queue_size = 0; 1601 } 1602 SCTP_TCB_SEND_UNLOCK(stcb); 1603 return; 1604 } 1605 SCTP_TCB_SEND_UNLOCK(stcb); 1606 /* Check to see if some data queued, if so report it */ 1607 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) { 1608 if (!TAILQ_EMPTY(&outs->outqueue)) { 1609 TAILQ_FOREACH(sp, &outs->outqueue, next) { 1610 if (sp->msg_is_complete) 1611 being_filled++; 1612 chks_in_queue++; 1613 } 1614 } 1615 } 1616 if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1617 SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1618 stcb->asoc.stream_queue_cnt, chks_in_queue); 1619 } 1620 if (chks_in_queue) { 1621 /* call the output queue function */ 1622 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1623 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1624 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1625 /* 1626 * Probably should go in and make it go back through 1627 * and add fragments allowed 1628 */ 1629 if (being_filled == 0) { 1630 SCTP_PRINTF("Still nothing moved %d chunks are stuck\n", 1631 chks_in_queue); 1632 } 1633 } 1634 } else { 1635 SCTP_PRINTF("Found no chunks on any queue tot:%lu\n", 1636 (u_long)stcb->asoc.total_output_queue_size); 1637 stcb->asoc.total_output_queue_size = 0; 1638 } 1639 } 1640 1641 int 1642 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1643 struct sctp_nets *net, int cnt_of_unconf) 1644 { 1645 int ret; 1646 1647 if (net) { 1648 if (net->hb_responded == 0) { 1649 if (net->ro._s_addr) { 1650 /* 1651 * Invalidate the src address if we did not 1652 * get a response last time. 1653 */ 1654 sctp_free_ifa(net->ro._s_addr); 1655 net->ro._s_addr = NULL; 1656 net->src_addr_selected = 0; 1657 } 1658 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1659 } 1660 /* Zero PBA, if it needs it */ 1661 if (net->partial_bytes_acked) { 1662 net->partial_bytes_acked = 0; 1663 } 1664 } 1665 if ((stcb->asoc.total_output_queue_size > 0) && 1666 (TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1667 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1668 sctp_audit_stream_queues_for_size(inp, stcb); 1669 } 1670 /* Send a new HB, this will do threshold managment, pick a new dest */ 1671 if (cnt_of_unconf == 0) { 1672 if (sctp_send_hb(stcb, 0, NULL) < 0) { 1673 return (1); 1674 } 1675 } else { 1676 /* 1677 * this will send out extra hb's up to maxburst if there are 1678 * any unconfirmed addresses. 1679 */ 1680 uint32_t cnt_sent = 0; 1681 1682 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1683 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1684 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1685 cnt_sent++; 1686 if (net->hb_responded == 0) { 1687 /* Did we respond last time? */ 1688 if (net->ro._s_addr) { 1689 sctp_free_ifa(net->ro._s_addr); 1690 net->ro._s_addr = NULL; 1691 net->src_addr_selected = 0; 1692 } 1693 } 1694 ret = sctp_send_hb(stcb, 1, net); 1695 if (ret < 0) 1696 return 1; 1697 else if (ret == 0) { 1698 break; 1699 } 1700 if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst)) 1701 break; 1702 } 1703 } 1704 } 1705 return (0); 1706 } 1707 1708 int 1709 sctp_is_hb_timer_running(struct sctp_tcb *stcb) 1710 { 1711 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) { 1712 /* its running */ 1713 return (1); 1714 } else { 1715 /* nope */ 1716 return (0); 1717 } 1718 } 1719 1720 int 1721 sctp_is_sack_timer_running(struct sctp_tcb *stcb) 1722 { 1723 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1724 /* its running */ 1725 return (1); 1726 } else { 1727 /* nope */ 1728 return (0); 1729 } 1730 } 1731 1732 #define SCTP_NUMBER_OF_MTU_SIZES 18 1733 static uint32_t mtu_sizes[] = { 1734 68, 1735 296, 1736 508, 1737 512, 1738 544, 1739 576, 1740 1006, 1741 1492, 1742 1500, 1743 1536, 1744 2002, 1745 2048, 1746 4352, 1747 4464, 1748 8166, 1749 17914, 1750 32000, 1751 65535 1752 }; 1753 1754 1755 static uint32_t 1756 sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu) 1757 { 1758 /* select another MTU that is just bigger than this one */ 1759 int i; 1760 1761 for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) { 1762 if (cur_mtu < mtu_sizes[i]) { 1763 /* no max_mtu is bigger than this one */ 1764 return (mtu_sizes[i]); 1765 } 1766 } 1767 /* here return the highest allowable */ 1768 return (cur_mtu); 1769 } 1770 1771 1772 void 1773 sctp_pathmtu_timer(struct sctp_inpcb *inp, 1774 struct sctp_tcb *stcb, 1775 struct sctp_nets *net) 1776 { 1777 uint32_t next_mtu, mtu; 1778 1779 next_mtu = sctp_getnext_mtu(inp, net->mtu); 1780 1781 if ((next_mtu > net->mtu) && (net->port == 0)) { 1782 if ((net->src_addr_selected == 0) || 1783 (net->ro._s_addr == NULL) || 1784 (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1785 if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1786 sctp_free_ifa(net->ro._s_addr); 1787 net->ro._s_addr = NULL; 1788 net->src_addr_selected = 0; 1789 } else if (net->ro._s_addr == NULL) { 1790 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1791 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1792 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1793 1794 /* KAME hack: embed scopeid */ 1795 (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1796 } 1797 #endif 1798 1799 net->ro._s_addr = sctp_source_address_selection(inp, 1800 stcb, 1801 (sctp_route_t *) & net->ro, 1802 net, 0, stcb->asoc.vrf_id); 1803 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1804 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1805 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1806 1807 (void)sa6_recoverscope(sin6); 1808 } 1809 #endif /* INET6 */ 1810 } 1811 if (net->ro._s_addr) 1812 net->src_addr_selected = 1; 1813 } 1814 if (net->ro._s_addr) { 1815 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1816 if (net->port) { 1817 mtu -= sizeof(struct udphdr); 1818 } 1819 if (mtu > next_mtu) { 1820 net->mtu = next_mtu; 1821 } 1822 } 1823 } 1824 /* restart the timer */ 1825 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1826 } 1827 1828 void 1829 sctp_autoclose_timer(struct sctp_inpcb *inp, 1830 struct sctp_tcb *stcb, 1831 struct sctp_nets *net) 1832 { 1833 struct timeval tn, *tim_touse; 1834 struct sctp_association *asoc; 1835 int ticks_gone_by; 1836 1837 (void)SCTP_GETTIME_TIMEVAL(&tn); 1838 if (stcb->asoc.sctp_autoclose_ticks && 1839 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1840 /* Auto close is on */ 1841 asoc = &stcb->asoc; 1842 /* pick the time to use */ 1843 if (asoc->time_last_rcvd.tv_sec > 1844 asoc->time_last_sent.tv_sec) { 1845 tim_touse = &asoc->time_last_rcvd; 1846 } else { 1847 tim_touse = &asoc->time_last_sent; 1848 } 1849 /* Now has long enough transpired to autoclose? */ 1850 ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1851 if ((ticks_gone_by > 0) && 1852 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1853 /* 1854 * autoclose time has hit, call the output routine, 1855 * which should do nothing just to be SURE we don't 1856 * have hanging data. We can then safely check the 1857 * queues and know that we are clear to send 1858 * shutdown 1859 */ 1860 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1861 /* Are we clean? */ 1862 if (TAILQ_EMPTY(&asoc->send_queue) && 1863 TAILQ_EMPTY(&asoc->sent_queue)) { 1864 /* 1865 * there is nothing queued to send, so I'm 1866 * done... 1867 */ 1868 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1869 /* only send SHUTDOWN 1st time thru */ 1870 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1871 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1872 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1873 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1874 } 1875 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1876 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1877 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1878 stcb->sctp_ep, stcb, 1879 asoc->primary_destination); 1880 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1881 stcb->sctp_ep, stcb, 1882 asoc->primary_destination); 1883 } 1884 } 1885 } else { 1886 /* 1887 * No auto close at this time, reset t-o to check 1888 * later 1889 */ 1890 int tmp; 1891 1892 /* fool the timer startup to use the time left */ 1893 tmp = asoc->sctp_autoclose_ticks; 1894 asoc->sctp_autoclose_ticks -= ticks_gone_by; 1895 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1896 net); 1897 /* restore the real tick value */ 1898 asoc->sctp_autoclose_ticks = tmp; 1899 } 1900 } 1901 } 1902