/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_dtrace_declare.h>
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *assoc;
	uint32_t cwnd_in_mtu;

	assoc = &stcb->asoc;
	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
	if (cwnd_in_mtu == 0) {
		/* Using 0 means that the value of RFC 4960 is used. */
		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	} else {
		/*
		 * We take the minimum of the burst limit and the initial
		 * congestion window.
		 */
		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
			cwnd_in_mtu = assoc->max_burst;
		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
	}
	if (stcb->asoc.sctp_cmt_on_off == 2) {
		/* In case of resource pooling initialize appropriately */
		net->cwnd /= assoc->numnets;
		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
			net->cwnd = net->mtu - sizeof(struct sctphdr);
		}
	}
	net->ssthresh = assoc->peers_rwnd;

	SDT_PROBE(sctp, cwnd, net, init,
	    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
	    0, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
	    (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}
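
/*
 * Worked example for the RFC 4960 default path above, assuming a
 * 1500-byte MTU and the 4380-byte SCTP_INITIAL_CWND of RFC 4960,
 * section 7.2.1: min(4 * 1500, max(2 * 1500, 4380)) =
 * min(6000, 4380) = 4380 bytes. With a nonzero sctp_initial_cwnd
 * sysctl, the window is instead that many (MTU - header) payloads,
 * capped by the association's burst limit.
 */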

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;
	uint32_t t_ssthresh, t_cwnd;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if (asoc->sctp_cmt_on_off == 2) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
		}
	}
	/*-
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				if (asoc->sctp_cmt_on_off == 2) {
					net->ssthresh = (uint32_t) (((uint64_t) 4 *
					    (uint64_t) net->mtu *
					    (uint64_t) net->ssthresh) /
					    (uint64_t) t_ssthresh);
					if ((net->cwnd > t_cwnd / 2) &&
					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
						net->ssthresh = net->cwnd - t_cwnd / 2;
					}
					if (net->ssthresh < net->mtu) {
						net->ssthresh = net->mtu;
					}
				} else {
					net->ssthresh = net->cwnd / 2;
					if (net->ssthresh < (net->mtu * 2)) {
						net->ssthresh = 2 * net->mtu;
					}
				}
				net->cwnd = net->ssthresh;
				SDT_PROBE(sctp, cwnd, net, fr,
				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
				    old_cwnd, net->cwnd);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
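
/*
 * Worked example for the CMT resource-pooling branch above, assuming two
 * destinations with ssthresh values of 30000 and 10000 bytes and a
 * 1500-byte MTU: t_ssthresh = 40000, so the first destination gets
 * ssthresh = 4 * 1500 * 30000 / 40000 = 4500 bytes. The RFC 2582 style
 * halving is replaced by a cut proportional to this path's share of the
 * pooled ssthresh.
 */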

/* RTCC Algorithm to limit growth of cwnd, return
 * true if you want to NOT allow cwnd growth
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
	uint64_t bw_offset, rtt_offset, rtt, vtag, probepoint;

	/*-
	 * Here we need to see if we want
	 * to limit cwnd growth due to increase
	 * in overall rtt but no increase in bw.
	 * We use the following table to figure
	 * out what we should do. When we return
	 * 0, cc update goes on as planned. If we
	 * return 1, then no cc update happens and cwnd
	 * stays where it is.
	 * ----------------------------------
	 *   BW  |  RTT  | Action
	 * *********************************
	 *   INC |  INC  | return 0
	 * ----------------------------------
	 *   INC |  SAME | return 0
	 * ----------------------------------
	 *   INC |  DECR | return 0
	 * ----------------------------------
	 *  SAME |  INC  | return 1
	 * ----------------------------------
	 *  SAME |  SAME | return 1
	 * ----------------------------------
	 *  SAME |  DECR | return 0
	 * ----------------------------------
	 *  DECR |  INC  | return 0 or 1 based on if we caused it.
	 * ----------------------------------
	 *  DECR |  SAME | return 0
	 * ----------------------------------
	 *  DECR |  DECR | return 0
	 * ----------------------------------
	 *
	 * We are a bit fuzzy on what an increase or
	 * decrease is. For BW it is the same if
	 * it did not change within 1/64th. For
	 * RTT it stayed the same if it did not
	 * change within 1/32nd.
	 */
	rtt = stcb->asoc.my_vtag;
	vtag = (rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
	probepoint = (((uint64_t) net->cwnd) << 32);
	rtt = net->rtt;
	bw_offset = net->cc_mod.rtcc.lbw >> SCTP_BASE_SYSCTL(sctp_rttvar_bw);
	if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
		/*
		 * BW increased, so update and return 0, since all actions
		 * in our table say to do the normal CC update
		 */
		/* PROBE POINT 0 */
		SDT_PROBE(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    net->cc_mod.rtcc.lbw_rtt,
		    rtt,
		    probepoint);
		net->cc_mod.rtcc.lbw = nbw;
		net->cc_mod.rtcc.lbw_rtt = rtt;
		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
		return (0);
	}
	rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
	if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
		/* Bandwidth decreased. */
		if (rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
			/* rtt increased */
			/* Did we add more */
			if (net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) {
				/* We caused it maybe.. back off */
				/* PROBE POINT 1 */
				probepoint |= ((1 << 16) | 1);
				SDT_PROBE(sctp, cwnd, net, rttvar,
				    vtag,
				    ((net->cc_mod.rtcc.lbw << 32) | nbw),
				    net->cc_mod.rtcc.lbw_rtt,
				    rtt,
				    probepoint);

				net->cc_mod.rtcc.lbw = nbw;
				net->cc_mod.rtcc.lbw_rtt = rtt;
				net->cwnd = net->cc_mod.rtcc.cwnd_at_bw_set;
				if (net->cc_mod.rtcc.ret_from_eq) {
					/*
					 * Switch over to CA if we are less
					 * aggressive
					 */
					net->ssthresh = net->cwnd - 1;
					net->partial_bytes_acked = 0;
				}
				return (1);
			}
			/* Probe point 2 */
			probepoint |= ((2 << 16) | 0);
			SDT_PROBE(sctp, cwnd, net, rttvar,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    net->cc_mod.rtcc.lbw_rtt,
			    rtt,
			    probepoint);

			/* Someone else - fight for more? */
			net->cc_mod.rtcc.lbw = nbw;
			net->cc_mod.rtcc.lbw_rtt = rtt;
			net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
			return (0);
		} else if (rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
			/* rtt decreased */
			/* Probe point 3 */
			probepoint |= ((3 << 16) | 0);
			SDT_PROBE(sctp, cwnd, net, rttvar,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    net->cc_mod.rtcc.lbw_rtt,
			    rtt,
			    probepoint);
			net->cc_mod.rtcc.lbw = nbw;
			net->cc_mod.rtcc.lbw_rtt = rtt;
			net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
			return (0);
		}
		/* The bw decreased but rtt stayed the same */
		net->cc_mod.rtcc.lbw = nbw;
		net->cc_mod.rtcc.lbw_rtt = rtt;
		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
		/* Probe point 4 */
		probepoint |= ((4 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    net->cc_mod.rtcc.lbw_rtt,
		    rtt,
		    probepoint);
		return (0);
	}
	/*
	 * If we reach here then we are in a situation where the bw stayed
	 * the same.
	 */
	if (rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
		/*
		 * rtt increased; we don't update bw, so we don't update
		 * the rtt either.
		 */
		/* Probe point 5 */
		probepoint |= ((5 << 16) | 1);
		SDT_PROBE(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    net->cc_mod.rtcc.lbw_rtt,
		    rtt,
		    probepoint);
		return (1);
	}
	if (rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
		/*
		 * rtt decreased, there could be more room. We update both
		 * the bw and the rtt here.
		 */
		/* Probe point 6 */
		probepoint |= ((6 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    net->cc_mod.rtcc.lbw_rtt,
		    rtt,
		    probepoint);
		net->cc_mod.rtcc.lbw = nbw;
		net->cc_mod.rtcc.lbw_rtt = rtt;
		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
		return (0);
	}
	/*
	 * Ok, bw and rtt remained the same. No update to any, but save the
	 * latest cwnd.
	 */
	/* Probe point 7 */
	probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
	SDT_PROBE(sctp, cwnd, net, rttvar,
	    vtag,
	    ((net->cc_mod.rtcc.lbw << 32) | nbw),
	    net->cc_mod.rtcc.lbw_rtt,
	    rtt,
	    probepoint);
	return ((int)net->cc_mod.rtcc.ret_from_eq);
}
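
/*
 * Worked example for the "same" bands used above, taking the 1/64th and
 * 1/32nd tolerances from the comment (the actual shift amounts come from
 * the sctp_rttvar_bw and sctp_rttvar_rtt sysctls): with lbw = 64000
 * bytes/sec, bw_offset = 64000 / 64 = 1000, so a new measurement only
 * counts as increased above 65000 or decreased below 63000; with
 * lbw_rtt = 3200, rtt_offset = 3200 / 32 = 100, so any RTT in
 * [3100, 3300] is treated as unchanged.
 */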
369 */ 370 /* Probe point 7 */ 371 probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq); 372 SDT_PROBE(sctp, cwnd, net, rttvar, 373 vtag, 374 ((net->cc_mod.rtcc.lbw << 32) | nbw), 375 net->cc_mod.rtcc.lbw_rtt, 376 rtt, 377 probepoint); 378 return ((int)net->cc_mod.rtcc.ret_from_eq); 379 } 380 381 static void 382 sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, 383 struct sctp_association *asoc, 384 int accum_moved, int reneged_all, int will_exit, int use_rtcc) 385 { 386 struct sctp_nets *net; 387 int old_cwnd; 388 uint32_t t_ssthresh, t_cwnd, incr; 389 390 /* MT FIXME: Don't compute this over and over again */ 391 t_ssthresh = 0; 392 t_cwnd = 0; 393 if (stcb->asoc.sctp_cmt_on_off == 2) { 394 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 395 t_ssthresh += net->ssthresh; 396 t_cwnd += net->cwnd; 397 } 398 } 399 /******************************/ 400 /* update cwnd and Early FR */ 401 /******************************/ 402 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 403 404 #ifdef JANA_CMT_FAST_RECOVERY 405 /* 406 * CMT fast recovery code. Need to debug. 407 */ 408 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 409 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) || 410 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) { 411 net->will_exit_fast_recovery = 1; 412 } 413 } 414 #endif 415 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 416 /* 417 * So, first of all do we need to have a Early FR 418 * timer running? 419 */ 420 if ((!TAILQ_EMPTY(&asoc->sent_queue) && 421 (net->ref_count > 1) && 422 (net->flight_size < net->cwnd)) || 423 (reneged_all)) { 424 /* 425 * yes, so in this case stop it if its 426 * running, and then restart it. Reneging 427 * all is a special case where we want to 428 * run the Early FR timer and then force the 429 * last few unacked to be sent, causing us 430 * to illicit a sack with gaps to force out 431 * the others. 432 */ 433 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 434 SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 435 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 437 } 438 SCTP_STAT_INCR(sctps_earlyfrstrid); 439 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 440 } else { 441 /* No, stop it if its running */ 442 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 443 SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 444 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 445 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21); 446 } 447 } 448 } 449 /* if nothing was acked on this destination skip it */ 450 if (net->net_ack == 0) { 451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 452 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 453 } 454 continue; 455 } 456 if (net->net_ack2 > 0) { 457 /* 458 * Karn's rule applies to clearing error count, this 459 * is optional. 460 */ 461 net->error_count = 0; 462 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 463 SCTP_ADDR_NOT_REACHABLE) { 464 /* addr came good */ 465 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 466 net->dest_state |= SCTP_ADDR_REACHABLE; 467 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 468 SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED); 469 /* now was it the primary? 
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if ((asoc->sctp_cmt_on_off > 0) &&
			    (asoc->sctp_cmt_pf > 0) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				old_cwnd = net->cwnd;
				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
				SDT_PROBE(sctp, cwnd, net, ack,
				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
				    old_cwnd, net->cwnd);
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * Did any measurements go on for this network?
		 */
		if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
			uint64_t nbw;

			/*
			 * At this point our bw_bytes has been updated by
			 * incoming sack information.
			 *
			 * But our bw may not yet be set.
			 */
			if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
				nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
			} else {
				nbw = net->cc_mod.rtcc.bw_bytes;
			}
			if (net->cc_mod.rtcc.lbw) {
				if (cc_bw_limit(stcb, net, nbw)) {
					/* Hold here, no update */
					goto skip_cwnd_update;
				}
			} else {
				uint64_t vtag, probepoint;

				probepoint = (((uint64_t) net->cwnd) << 32);
				probepoint |= ((0xa << 16) | 0);
				vtag = (net->rtt << 32) |
				    (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
				    (stcb->rport);

				SDT_PROBE(sctp, cwnd, net, rttvar,
				    vtag,
				    nbw,
				    0,
				    net->rtt,
				    probepoint);
				net->cc_mod.rtcc.lbw = nbw;
				net->cc_mod.rtcc.lbw_rtt = net->rtt;
			}
		}
567 */ 568 if (accum_moved || 569 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 570 /* If the cumulative ack moved we can proceed */ 571 if (net->cwnd <= net->ssthresh) { 572 /* We are in slow start */ 573 if (net->flight_size + net->net_ack >= net->cwnd) { 574 old_cwnd = net->cwnd; 575 if (stcb->asoc.sctp_cmt_on_off == 2) { 576 uint32_t limit; 577 578 limit = (uint32_t) (((uint64_t) net->mtu * 579 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) * 580 (uint64_t) net->ssthresh) / 581 (uint64_t) t_ssthresh); 582 incr = (uint32_t) (((uint64_t) net->net_ack * 583 (uint64_t) net->ssthresh) / 584 (uint64_t) t_ssthresh); 585 if (incr > limit) { 586 incr = limit; 587 } 588 if (incr == 0) { 589 incr = 1; 590 } 591 } else { 592 incr = net->net_ack; 593 if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) { 594 incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable); 595 } 596 } 597 net->cwnd += incr; 598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 599 sctp_log_cwnd(stcb, net, incr, 600 SCTP_CWND_LOG_FROM_SS); 601 } 602 SDT_PROBE(sctp, cwnd, net, ack, 603 stcb->asoc.my_vtag, 604 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 605 net, 606 old_cwnd, net->cwnd); 607 } else { 608 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 609 sctp_log_cwnd(stcb, net, net->net_ack, 610 SCTP_CWND_LOG_NOADV_SS); 611 } 612 } 613 } else { 614 /* We are in congestion avoidance */ 615 /* 616 * Add to pba 617 */ 618 net->partial_bytes_acked += net->net_ack; 619 620 if ((net->flight_size + net->net_ack >= net->cwnd) && 621 (net->partial_bytes_acked >= net->cwnd)) { 622 net->partial_bytes_acked -= net->cwnd; 623 old_cwnd = net->cwnd; 624 if (asoc->sctp_cmt_on_off == 2) { 625 incr = (uint32_t) (((uint64_t) net->mtu * 626 (uint64_t) net->ssthresh) / 627 (uint64_t) t_ssthresh); 628 if (incr == 0) { 629 incr = 1; 630 } 631 } else { 632 incr = net->mtu; 633 } 634 net->cwnd += incr; 635 SDT_PROBE(sctp, cwnd, net, ack, 636 stcb->asoc.my_vtag, 637 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 638 net, 639 old_cwnd, net->cwnd); 640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 641 sctp_log_cwnd(stcb, net, net->mtu, 642 SCTP_CWND_LOG_FROM_CA); 643 } 644 } else { 645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 646 sctp_log_cwnd(stcb, net, net->net_ack, 647 SCTP_CWND_LOG_NOADV_CA); 648 } 649 } 650 } 651 } else { 652 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 653 sctp_log_cwnd(stcb, net, net->mtu, 654 SCTP_CWND_LOG_NO_CUMACK); 655 } 656 } 657 skip_cwnd_update: 658 /* 659 * NOW, according to Karn's rule do we need to restore the 660 * RTO timer back? Check our net_ack2. If not set then we 661 * have a ambiguity.. i.e. all data ack'd was sent to more 662 * than one place. 
663 */ 664 if (net->net_ack2) { 665 /* restore any doubled timers */ 666 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 667 if (net->RTO < stcb->asoc.minrto) { 668 net->RTO = stcb->asoc.minrto; 669 } 670 if (net->RTO > stcb->asoc.maxrto) { 671 net->RTO = stcb->asoc.maxrto; 672 } 673 } 674 } 675 } 676 677 static void 678 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) 679 { 680 int old_cwnd = net->cwnd; 681 uint32_t t_ssthresh, t_cwnd; 682 683 /* MT FIXME: Don't compute this over and over again */ 684 t_ssthresh = 0; 685 t_cwnd = 0; 686 if (stcb->asoc.sctp_cmt_on_off == 2) { 687 struct sctp_nets *lnet; 688 689 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 690 t_ssthresh += lnet->ssthresh; 691 t_cwnd += lnet->cwnd; 692 } 693 net->ssthresh = (uint32_t) (((uint64_t) 4 * 694 (uint64_t) net->mtu * 695 (uint64_t) net->ssthresh) / 696 (uint64_t) t_ssthresh); 697 if ((net->cwnd > t_cwnd / 2) && 698 (net->ssthresh < net->cwnd - t_cwnd / 2)) { 699 net->ssthresh = net->cwnd - t_cwnd / 2; 700 } 701 if (net->ssthresh < net->mtu) { 702 net->ssthresh = net->mtu; 703 } 704 } else { 705 net->ssthresh = max(net->cwnd / 2, 4 * net->mtu); 706 } 707 net->cwnd = net->mtu; 708 net->partial_bytes_acked = 0; 709 SDT_PROBE(sctp, cwnd, net, to, 710 stcb->asoc.my_vtag, 711 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 712 net, 713 old_cwnd, net->cwnd); 714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 715 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX); 716 } 717 } 718 719 static void 720 sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net, 721 int in_window, int num_pkt_lost, int use_rtcc) 722 { 723 int old_cwnd = net->cwnd; 724 725 if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) { 726 /* Data center Congestion Control */ 727 if (in_window == 0) { 728 /* 729 * Go to CA with the cwnd at the point we sent the 730 * TSN that was marked with a CE. 

static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;
	uint32_t t_ssthresh, t_cwnd;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if (stcb->asoc.sctp_cmt_on_off == 2) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += lnet->ssthresh;
			t_cwnd += lnet->cwnd;
		}
		net->ssthresh = (uint32_t) (((uint64_t) 4 *
		    (uint64_t) net->mtu *
		    (uint64_t) net->ssthresh) /
		    (uint64_t) t_ssthresh);
		if ((net->cwnd > t_cwnd / 2) &&
		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
			net->ssthresh = net->cwnd - t_cwnd / 2;
		}
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
		}
	} else {
		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
	}
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	SDT_PROBE(sctp, cwnd, net, to,
	    stcb->asoc.my_vtag,
	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	    net,
	    old_cwnd, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
	int old_cwnd = net->cwnd;

	if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
		/* Data center Congestion Control */
		if (in_window == 0) {
			/*
			 * Go to CA with the cwnd at the point we sent the
			 * TSN that was marked with a CE.
			 */
			if (net->ecn_prev_cwnd < net->cwnd) {
				/* Restore to prev cwnd */
				net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
			} else {
				/* Just cut in 1/2 */
				net->cwnd /= 2;
			}
			/* Drop to CA */
			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		} else {
			/*
			 * Further tuning down required over the drastic
			 * original cut
			 */
			net->ssthresh -= (net->mtu * num_pkt_lost);
			net->cwnd -= (net->mtu * num_pkt_lost);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
	} else {
		if (in_window == 0) {
			SCTP_STAT_INCR(sctps_ecnereducedcwnd);
			net->ssthresh = net->cwnd / 2;
			if (net->ssthresh < net->mtu) {
				net->ssthresh = net->mtu;
				/*
				 * here back off the timer as well, to slow
				 * us down
				 */
				net->RTO <<= 1;
			}
			net->cwnd = net->ssthresh;
			SDT_PROBE(sctp, cwnd, net, ecn,
			    stcb->asoc.my_vtag,
			    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
			    net,
			    old_cwnd, net->cwnd);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
	}
}

static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t * bottle_bw, uint32_t * on_queue)
{
	uint32_t bw_avail;
	int rtt;
	unsigned int incr;
	int old_cwnd = net->cwnd;

	/* need real RTT in ms for this calc */
	rtt = net->rtt / 1000;
	/* get bottleneck bw */
	*bottle_bw = ntohl(cp->bottle_bw);
	/* and what's on queue */
	*on_queue = ntohl(cp->current_onq);
	/*
	 * adjust the on-queue if our flight is more; it could be that the
	 * router has not yet gotten data "in-flight" to it
	 */
	if (*on_queue < net->flight_size)
		*on_queue = net->flight_size;
	/* calculate the available space */
	bw_avail = (*bottle_bw * rtt) / 1000;
	if (bw_avail > *bottle_bw) {
		/*
		 * Cap the growth to no more than the bottleneck. This can
		 * happen as RTT slides up due to queues. It also means if
		 * you have more than a 1 second RTT with an empty queue
		 * you will be limited to the bottle_bw per second no
		 * matter if other points have 1/2 the RTT and you could
		 * get more out...
		 */
		bw_avail = *bottle_bw;
	}
	if (*on_queue > bw_avail) {
		/*
		 * No room for anything else, don't allow anything else to
		 * be "added to the fire".
		 */
		int seg_inflight, seg_onqueue, my_portion;

		net->partial_bytes_acked = 0;

		/* how much are we over queue size? */
		incr = *on_queue - bw_avail;
		if (stcb->asoc.seen_a_sack_this_pkt) {
			/*
			 * undo any cwnd adjustment that the sack might
			 * have made
			 */
			net->cwnd = net->prev_cwnd;
		}
		/* Now how much of that is mine? */
		seg_inflight = net->flight_size / net->mtu;
		seg_onqueue = *on_queue / net->mtu;
		my_portion = (incr * seg_inflight) / seg_onqueue;

		/* Have I made an adjustment already */
		if (net->cwnd > net->flight_size) {
			/*
			 * for this flight I made an adjustment; we need to
			 * decrease the portion by a share of our previous
			 * adjustment.
			 */
			int diff_adj;

			diff_adj = net->cwnd - net->flight_size;
			if (diff_adj > my_portion)
				my_portion = 0;
			else
				my_portion -= diff_adj;
		}
		/*
		 * back down to the previous cwnd (assume we have had a
		 * sack before this packet), minus whatever portion of the
		 * overage is my fault.
		 */
		net->cwnd -= my_portion;

		/* we will NOT back down more than 1 MTU */
		if (net->cwnd <= net->mtu) {
			net->cwnd = net->mtu;
		}
		/* force into CA */
		net->ssthresh = net->cwnd - 1;
	} else {
		/*
		 * Take 1/4 of the space left or the max burst, whichever
		 * is less.
		 */
		incr = (bw_avail - *on_queue) >> 2;
		if ((stcb->asoc.max_burst > 0) &&
		    (stcb->asoc.max_burst * net->mtu < incr)) {
			incr = stcb->asoc.max_burst * net->mtu;
		}
		net->cwnd += incr;
	}
	if (net->cwnd > bw_avail) {
		/* We can't exceed the pipe size */
		net->cwnd = bw_avail;
	}
	if (net->cwnd < net->mtu) {
		/* We always have 1 MTU */
		net->cwnd = net->mtu;
	}
	if (net->cwnd - old_cwnd != 0) {
		/* log only changes */
		SDT_PROBE(sctp, cwnd, net, pd,
		    stcb->asoc.my_vtag,
		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
		    net,
		    old_cwnd, net->cwnd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
			    SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
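
/*
 * Worked example for the bandwidth calculation above, assuming the peer
 * reports bottle_bw = 1000000 bytes/sec and our measured rtt is 50 ms:
 * bw_avail = (1000000 * 50) / 1000 = 50000 bytes may be in flight without
 * building a queue. If on_queue exceeds that, the overage is shared out
 * in proportion to our segments in flight versus the segments sitting on
 * the bottleneck queue.
 */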

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
	int old_cwnd = net->cwnd;

	if (net->ssthresh < net->cwnd)
		net->ssthresh = net->cwnd;
	if (burst_limit) {
		net->cwnd = (net->flight_size + (burst_limit * net->mtu));
		SDT_PROBE(sctp, cwnd, net, bl,
		    stcb->asoc.my_vtag,
		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
		    net,
		    old_cwnd, net->cwnd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
		}
	}
}

static void
sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	/*
	 * make a small adjustment to cwnd and force to CA.
	 */
	if (net->cwnd > net->mtu)
		/* drop down one MTU after sending */
		net->cwnd -= net->mtu;
	if (net->cwnd < net->ssthresh)
		/* still in SS move to CA */
		net->ssthresh = net->cwnd - 1;
	SDT_PROBE(sctp, cwnd, net, fr,
	    stcb->asoc.my_vtag,
	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	    net,
	    old_cwnd, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/* Passing zero as the last argument disables the rtcc algorithm */
	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	/* Passing zero as the last argument disables the rtcc algorithm */
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/* Here starts the RTCCVAR type CC invented by RRS which
 * is a slight mod to RFC2581. We reuse a common routine or
 * two since these algorithms are so close and need to
 * remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
	net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	if (net->cc_mod.rtcc.tls_needs_set > 0) {
		/* We had a bw measurement going on */
		struct timeval ltls;

		SCTP_GETPTIME_TIMEVAL(&ltls);
		timevalsub(&ltls, &net->cc_mod.rtcc.tls);
		net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
	}
}

static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint64_t vtag, probepoint;

	if (net->cc_mod.rtcc.lbw) {
		/* Clear the old bw.. we went to 0 in-flight */
		vtag = (net->rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
		    (stcb->rport);
		probepoint = (((uint64_t) net->cwnd) << 32);
		/* Probe point 8 */
		probepoint |= ((8 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | 0),
		    net->cc_mod.rtcc.lbw_rtt,
		    0,
		    probepoint);
		net->cc_mod.rtcc.lbw_rtt = 0;
		net->cc_mod.rtcc.cwnd_at_bw_set = 0;
		net->cc_mod.rtcc.lbw = 0;
		net->cc_mod.rtcc.bw_tot_time = 0;
		net->cc_mod.rtcc.bw_bytes = 0;
		net->cc_mod.rtcc.tls_needs_set = 0;
		if (net->cc_mod.rtcc.ret_from_eq) {
			/* less aggressive one - reset cwnd too */
			uint32_t cwnd_in_mtu, cwnd;

			cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
			if (cwnd_in_mtu == 0) {
				/*
				 * Using 0 means that the value of RFC 4960
				 * is used.
				 */
				cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
			} else {
				/*
				 * We take the minimum of the burst limit
				 * and the initial congestion window.
1041 */ 1042 if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst)) 1043 cwnd_in_mtu = stcb->asoc.max_burst; 1044 cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu; 1045 } 1046 if (net->cwnd > cwnd) { 1047 /* 1048 * Only set if we are not a timeout (i.e. 1049 * down to 1 mtu) 1050 */ 1051 net->cwnd = cwnd; 1052 } 1053 } 1054 } 1055 } 1056 1057 static void 1058 sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb, 1059 struct sctp_nets *net) 1060 { 1061 uint64_t vtag, probepoint; 1062 1063 sctp_set_initial_cc_param(stcb, net); 1064 stcb->asoc.use_precise_time = 1; 1065 probepoint = (((uint64_t) net->cwnd) << 32); 1066 probepoint |= ((9 << 16) | 0); 1067 vtag = (net->rtt << 32) | 1068 (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | 1069 (stcb->rport); 1070 SDT_PROBE(sctp, cwnd, net, rttvar, 1071 vtag, 1072 0, 1073 0, 1074 0, 1075 probepoint); 1076 net->cc_mod.rtcc.lbw_rtt = 0; 1077 net->cc_mod.rtcc.cwnd_at_bw_set = 0; 1078 net->cc_mod.rtcc.lbw = 0; 1079 net->cc_mod.rtcc.bw_tot_time = 0; 1080 net->cc_mod.rtcc.bw_bytes = 0; 1081 net->cc_mod.rtcc.tls_needs_set = 0; 1082 net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret); 1083 } 1084 1085 static int 1086 sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget, 1087 struct sctp_cc_option *cc_opt) 1088 { 1089 struct sctp_nets *net; 1090 1091 if (setorget == 1) { 1092 /* a set */ 1093 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1094 if ((cc_opt->aid_value.assoc_value != 0) && 1095 (cc_opt->aid_value.assoc_value != 1)) { 1096 return (EINVAL); 1097 } 1098 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1099 net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value; 1100 } 1101 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1102 if ((cc_opt->aid_value.assoc_value != 0) && 1103 (cc_opt->aid_value.assoc_value != 1)) { 1104 return (EINVAL); 1105 } 1106 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1107 net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value; 1108 } 1109 } else { 1110 return (EINVAL); 1111 } 1112 } else { 1113 /* a get */ 1114 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1115 net = TAILQ_FIRST(&stcb->asoc.nets); 1116 if (net == NULL) { 1117 return (EFAULT); 1118 } 1119 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq; 1120 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1121 net = TAILQ_FIRST(&stcb->asoc.nets); 1122 if (net == NULL) { 1123 return (EFAULT); 1124 } 1125 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn; 1126 } else { 1127 return (EINVAL); 1128 } 1129 } 1130 return (0); 1131 } 1132 1133 static void 1134 sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb, 1135 struct sctp_nets *net) 1136 { 1137 if (net->cc_mod.rtcc.tls_needs_set == 0) { 1138 SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls); 1139 net->cc_mod.rtcc.tls_needs_set = 2; 1140 } 1141 } 1142 1143 static void 1144 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb, 1145 struct sctp_association *asoc, 1146 int accum_moved, int reneged_all, int will_exit) 1147 { 1148 /* Passing a one argument at the last enables the rtcc algoritm */ 1149 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1); 1150 } 1151 1152 1153 /* Here starts Sally Floyds HS-TCP */ 1154 1155 struct sctp_hs_raise_drop { 1156 int32_t cwnd; 1157 int32_t increase; 1158 int32_t drop_percent; 1159 }; 1160 1161 #define SCTP_HS_TABLE_SIZE 73 1162 1163 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 1164 {38, 1, 50}, /* 0 */ 1165 
	{118, 2, 44},		/* 1 */
	{221, 3, 41},		/* 2 */
	{347, 4, 38},		/* 3 */
	{495, 5, 37},		/* 4 */
	{663, 6, 35},		/* 5 */
	{851, 7, 34},		/* 6 */
	{1058, 8, 33},		/* 7 */
	{1284, 9, 32},		/* 8 */
	{1529, 10, 31},		/* 9 */
	{1793, 11, 30},		/* 10 */
	{2076, 12, 29},		/* 11 */
	{2378, 13, 28},		/* 12 */
	{2699, 14, 28},		/* 13 */
	{3039, 15, 27},		/* 14 */
	{3399, 16, 27},		/* 15 */
	{3778, 17, 26},		/* 16 */
	{4177, 18, 26},		/* 17 */
	{4596, 19, 25},		/* 18 */
	{5036, 20, 25},		/* 19 */
	{5497, 21, 24},		/* 20 */
	{5979, 22, 24},		/* 21 */
	{6483, 23, 23},		/* 22 */
	{7009, 24, 23},		/* 23 */
	{7558, 25, 22},		/* 24 */
	{8130, 26, 22},		/* 25 */
	{8726, 27, 22},		/* 26 */
	{9346, 28, 21},		/* 27 */
	{9991, 29, 21},		/* 28 */
	{10661, 30, 21},	/* 29 */
	{11358, 31, 20},	/* 30 */
	{12082, 32, 20},	/* 31 */
	{12834, 33, 20},	/* 32 */
	{13614, 34, 19},	/* 33 */
	{14424, 35, 19},	/* 34 */
	{15265, 36, 19},	/* 35 */
	{16137, 37, 19},	/* 36 */
	{17042, 38, 18},	/* 37 */
	{17981, 39, 18},	/* 38 */
	{18955, 40, 18},	/* 39 */
	{19965, 41, 17},	/* 40 */
	{21013, 42, 17},	/* 41 */
	{22101, 43, 17},	/* 42 */
	{23230, 44, 17},	/* 43 */
	{24402, 45, 16},	/* 44 */
	{25618, 46, 16},	/* 45 */
	{26881, 47, 16},	/* 46 */
	{28193, 48, 16},	/* 47 */
	{29557, 49, 15},	/* 48 */
	{30975, 50, 15},	/* 49 */
	{32450, 51, 15},	/* 50 */
	{33986, 52, 15},	/* 51 */
	{35586, 53, 14},	/* 52 */
	{37253, 54, 14},	/* 53 */
	{38992, 55, 14},	/* 54 */
	{40808, 56, 14},	/* 55 */
	{42707, 57, 13},	/* 56 */
	{44694, 58, 13},	/* 57 */
	{46776, 59, 13},	/* 58 */
	{48961, 60, 13},	/* 59 */
	{51258, 61, 13},	/* 60 */
	{53677, 62, 12},	/* 61 */
	{56230, 63, 12},	/* 62 */
	{58932, 64, 12},	/* 63 */
	{61799, 65, 12},	/* 64 */
	{64851, 66, 11},	/* 65 */
	{68113, 67, 11},	/* 66 */
	{71617, 68, 11},	/* 67 */
	{75401, 69, 10},	/* 68 */
	{79517, 70, 10},	/* 69 */
	{84035, 71, 10},	/* 70 */
	{89053, 72, 10},	/* 71 */
	{94717, 73, 9}		/* 72 */
};
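
/*
 * Worked example for the table above: cur_val is the cwnd expressed in
 * kilobytes (cwnd >> 10). A 3 MB cwnd gives cur_val = 3072; the first row
 * whose cwnd column exceeds it is row 15 ({3399, 16, 27}), so each
 * full-window acknowledgment raises cwnd by 16 << 10 = 16384 bytes and a
 * loss event drops it by 27 percent, instead of AIMD's one MTU up and 50
 * percent down. Below row 0 (38 KB) the code falls back to normal mode.
 */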

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx, incr;

	cur_val = net->cwnd >> 10;
	indx = SCTP_HS_TABLE_SIZE - 1;
#ifdef SCTP_DEBUG
	printf("HS CC called.\n");
#endif
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		if (net->net_ack > net->mtu) {
			net->cwnd += net->mtu;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
			}
		} else {
			net->cwnd += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
			}
		}
	} else {
		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
				indx = i;
				break;
			}
		}
		net->last_hs_used = indx;
		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
		net->cwnd += incr;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
		}
	}
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx;
	int old_cwnd = net->cwnd;

	cur_val = net->cwnd >> 10;
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < (net->mtu * 2)) {
			net->ssthresh = 2 * net->mtu;
		}
		net->cwnd = net->ssthresh;
	} else {
		/* drop by the proper amount */
		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
		net->cwnd = net->ssthresh;
		/* now where are we */
		indx = net->last_hs_used;
		cur_val = net->cwnd >> 10;
		/* reset where we are in the table */
		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
			/* fell out of HS */
			net->last_hs_used = 0;
		} else {
			for (i = indx; i >= 1; i--) {
				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
					break;
				}
			}
			net->last_hs_used = indx;
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}

static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

				sctp_hs_cwnd_decrease(stcb, net);

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all do we need to have an Early FR
			 * timer running?
			 */
			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * Yes, so in this case stop it if it's
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked to be sent, causing us
				 * to elicit a sack with gaps to force out
				 * the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if it's running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if ((asoc->sctp_cmt_on_off > 0) &&
			    (asoc->sctp_cmt_pf > 0) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
1484 */ 1485 goto skip_cwnd_update; 1486 } 1487 } 1488 #ifdef JANA_CMT_FAST_RECOVERY 1489 /* 1490 * CMT fast recovery code 1491 */ 1492 /* 1493 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery 1494 * && net->will_exit_fast_recovery == 0) { @@@ Do something 1495 * } else if (sctp_cmt_on_off == 0 && 1496 * asoc->fast_retran_loss_recovery && will_exit == 0) { 1497 */ 1498 #endif 1499 1500 if (asoc->fast_retran_loss_recovery && 1501 (will_exit == 0) && 1502 (asoc->sctp_cmt_on_off == 0)) { 1503 /* 1504 * If we are in loss recovery we skip any cwnd 1505 * update 1506 */ 1507 goto skip_cwnd_update; 1508 } 1509 /* 1510 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 1511 * moved. 1512 */ 1513 if (accum_moved || 1514 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 1515 /* If the cumulative ack moved we can proceed */ 1516 if (net->cwnd <= net->ssthresh) { 1517 /* We are in slow start */ 1518 if (net->flight_size + net->net_ack >= net->cwnd) { 1519 1520 sctp_hs_cwnd_increase(stcb, net); 1521 1522 } else { 1523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1524 sctp_log_cwnd(stcb, net, net->net_ack, 1525 SCTP_CWND_LOG_NOADV_SS); 1526 } 1527 } 1528 } else { 1529 /* We are in congestion avoidance */ 1530 net->partial_bytes_acked += net->net_ack; 1531 if ((net->flight_size + net->net_ack >= net->cwnd) && 1532 (net->partial_bytes_acked >= net->cwnd)) { 1533 net->partial_bytes_acked -= net->cwnd; 1534 net->cwnd += net->mtu; 1535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1536 sctp_log_cwnd(stcb, net, net->mtu, 1537 SCTP_CWND_LOG_FROM_CA); 1538 } 1539 } else { 1540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1541 sctp_log_cwnd(stcb, net, net->net_ack, 1542 SCTP_CWND_LOG_NOADV_CA); 1543 } 1544 } 1545 } 1546 } else { 1547 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1548 sctp_log_cwnd(stcb, net, net->mtu, 1549 SCTP_CWND_LOG_NO_CUMACK); 1550 } 1551 } 1552 skip_cwnd_update: 1553 /* 1554 * NOW, according to Karn's rule do we need to restore the 1555 * RTO timer back? Check our net_ack2. If not set then we 1556 * have a ambiguity.. i.e. all data ack'd was sent to more 1557 * than one place. 1558 */ 1559 if (net->net_ack2) { 1560 /* restore any doubled timers */ 1561 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 1562 if (net->RTO < stcb->asoc.minrto) { 1563 net->RTO = stcb->asoc.minrto; 1564 } 1565 if (net->RTO > stcb->asoc.maxrto) { 1566 net->RTO = stcb->asoc.maxrto; 1567 } 1568 } 1569 } 1570 } 1571 1572 1573 /* 1574 * H-TCP congestion control. The algorithm is detailed in: 1575 * R.N.Shorten, D.J.Leith: 1576 * "H-TCP: TCP for high-speed and long-distance networks" 1577 * Proc. PFLDnet, Argonne, 2004. 

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return sctp_get_tick_count() - ca->last_cong;
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
	return htcp_cong_time(ca) / ca->minRTT;
}

static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu);
}

#endif

static inline void
measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}
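
/*
 * Illustrative summary of measure_rtt() above: minRTT tracks the smallest
 * smoothed RTT seen so far (a proxy for the propagation delay), while
 * maxRTT is only advanced outside fast retransmit, after more than three
 * minRTT intervals have elapsed since the last congestion event, and by
 * at most 20 ms per update, so a single queueing spike cannot inflate it.
 * Both feed htcp_beta_update() below, where the minRTT/maxRTT ratio sets
 * how gently H-TCP backs off.
 */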

static void
measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}
	net->cc_mod.htcp_ca.bytecount += net->net_ack;

	if (net->cc_mod.htcp_ca.bytecount >= net->cwnd - ((net->cc_mod.htcp_ca.alpha >> 7 ? : 1) * net->mtu)
	    && now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT
	    && net->cc_mod.htcp_ca.minRTT > 0) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}

static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}
	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t) hz) {
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}
	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		/* clamping ratio to interval [0.5,10]<<3 */
		scale = min(max(scale, 1U << 2), 10U << 3);
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}
	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
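
/*
 * Worked example for htcp_alpha_update() above, under stated assumptions:
 * take hz = 1000 ticks/sec, a congestion event exactly hz ticks ago (so
 * the diff > hz branch is skipped and factor stays 1), RTT scaling
 * disabled, and beta at BETA_MIN, assumed here to be 64, i.e. 0.5 in the
 * << 7 fixed point used throughout. Then alpha = 2 * 1 * (128 - 64) =
 * 128, i.e. 1.0 in the same scale, reproducing standard one-MTU-per-RTT
 * growth right after a backoff; as congestion-free time accumulates,
 * factor and hence alpha grow with it.
 */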
1742 */ 1743 static void 1744 htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net) 1745 { 1746 uint32_t minRTT = net->cc_mod.htcp_ca.minRTT; 1747 uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT; 1748 1749 htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT); 1750 htcp_alpha_update(&net->cc_mod.htcp_ca); 1751 1752 /* 1753 * add slowly fading memory for maxRTT to accommodate routing 1754 * changes etc 1755 */ 1756 if (minRTT > 0 && maxRTT > minRTT) 1757 net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100; 1758 } 1759 1760 static uint32_t 1761 htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net) 1762 { 1763 htcp_param_update(stcb, net); 1764 return max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu); 1765 } 1766 1767 static void 1768 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) 1769 { 1770 /*- 1771 * How to handle these functions? 1772 * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question. 1773 * return; 1774 */ 1775 if (net->cwnd <= net->ssthresh) { 1776 /* We are in slow start */ 1777 if (net->flight_size + net->net_ack >= net->cwnd) { 1778 if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) { 1779 net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)); 1780 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1781 sctp_log_cwnd(stcb, net, net->mtu, 1782 SCTP_CWND_LOG_FROM_SS); 1783 } 1784 } else { 1785 net->cwnd += net->net_ack; 1786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1787 sctp_log_cwnd(stcb, net, net->net_ack, 1788 SCTP_CWND_LOG_FROM_SS); 1789 } 1790 } 1791 } else { 1792 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1793 sctp_log_cwnd(stcb, net, net->net_ack, 1794 SCTP_CWND_LOG_NOADV_SS); 1795 } 1796 } 1797 } else { 1798 measure_rtt(stcb, net); 1799 1800 /* 1801 * In dangerous area, increase slowly. In theory this is 1802 * net->cwnd += alpha / net->cwnd 1803 */ 1804 /* What is snd_cwnd_cnt?? */ 1805 if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) { 1806 /*- 1807 * Does SCTP have a cwnd clamp? 1808 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS). 1809 */ 1810 net->cwnd += net->mtu; 1811 net->partial_bytes_acked = 0; 1812 htcp_alpha_update(&net->cc_mod.htcp_ca); 1813 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1814 sctp_log_cwnd(stcb, net, net->mtu, 1815 SCTP_CWND_LOG_FROM_CA); 1816 } 1817 } else { 1818 net->partial_bytes_acked += net->net_ack; 1819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1820 sctp_log_cwnd(stcb, net, net->net_ack, 1821 SCTP_CWND_LOG_NOADV_CA); 1822 } 1823 } 1824 1825 net->cc_mod.htcp_ca.bytes_acked = net->mtu; 1826 } 1827 } 1828 1829 #ifdef SCTP_NOT_USED 1830 /* Lower bound on congestion window. 
#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return net->ssthresh;
}

#endif

static void
htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Start with the RFC 4960 initial window: the minimum of 4 MTUs
	 * and the maximum of 2 MTUs and SCTP_INITIAL_CWND.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	htcp_init(stcb, net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all, do we need to have an Early FR
			 * timer running?
			 */
			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * Yes, so in this case stop it if it's
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked chunks to be sent,
				 * causing us to elicit a SACK with gaps to
				 * force out the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if it's running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
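		/*
		 * net_ack2 tracks the portion of the newly acked bytes
		 * that, as far as we can tell, was sent only once (never
		 * retransmitted), so a positive value means this SACK
		 * unambiguously covers fresh data on this destination.
		 */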
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing the error count;
			 * this is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if ((asoc->sctp_cmt_on_off > 0) &&
			    (asoc->sctp_cmt_pf > 0) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    will_exit == 0 &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(stcb, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule, do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have an ambiguity, i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}
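/*
 * Rough effect of the fast-retransmit handler below: instead of the RFC
 * 4960 halving, ssthresh is recomputed as cwnd * beta / 128 (clamped to
 * at least 2 MTUs) via htcp_recalc_ssthresh().  With beta at, say, 0.7 in
 * the <<7 representation, a 30 MTU window backs off to about 21 MTUs
 * rather than 15.
 */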
static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per section 7.2.3, were there any
				 * destinations that had a fast retransmit
				 * sent to them? If so, we need to adjust
				 * ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(stcb, net);
				net->cwnd = net->ssthresh;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}
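/*
 * For example, after a T3 timeout with cwnd == 20 MTUs and beta at its
 * 0.5 floor, the handler above sets ssthresh to about 10 MTUs and cwnd
 * to a single MTU, so the destination slow-starts back toward the
 * recalculated ssthresh.
 */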
static void
sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
	/*
	 * Make a small adjustment to cwnd and force to CA.
	 */
	if (net->cwnd > net->mtu)
		/* drop down one MTU after sending */
		net->cwnd -= net->mtu;
	if (net->cwnd < net->ssthresh)
		/* still in SS move to CA */
		net->ssthresh = net->cwnd - 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(stcb, net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
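/*
 * Dispatch table for the pluggable congestion control modules.  The entry
 * order matches the SCTP_CC_* selector values (RFC2581 standard CC,
 * HS-TCP, H-TCP, then RTCC); modules that do not need the extra RTCC
 * hooks simply leave those function pointers unset.
 */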
struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_cwnd_update_after_fr_timer
	},
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_cwnd_update_after_fr_timer
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_htcp_cwnd_update_after_fr_timer
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_cwnd_update_after_fr_timer,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option
	}
};
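/*
 * Illustrative userland sketch (not part of this file): an application
 * would typically select one of the modules above per socket with the
 * SCTP_PLUGGABLE_CC socket option, e.g. picking H-TCP; error handling is
 * omitted and the SCTP_CC_HTCP selector is assumed from <netinet/sctp.h>.
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));	\* assoc_id 0: socket default *\
 *	av.assoc_value = SCTP_CC_HTCP;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
 *	    &av, sizeof(av));
 */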