/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_dtrace_declare.h>
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *assoc;
    uint32_t cwnd_in_mtu;

    assoc = &stcb->asoc;
    cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
    if (cwnd_in_mtu == 0) {
        /* Using 0 means that the value of RFC 4960 is used. */
        net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    } else {
        /*
         * We take the minimum of the burst limit and the initial
         * congestion window.
         */
        if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
            cwnd_in_mtu = assoc->max_burst;
        net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
    }
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        /* In case of resource pooling initialize appropriately */
        net->cwnd /= assoc->numnets;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
    net->ssthresh = assoc->peers_rwnd;
    SDT_PROBE(sctp, cwnd, net, init,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        0, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
        (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}
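/*
 * Worked example for the RFC 4960 branch above (illustrative, not from the
 * original source; assumes SCTP_INITIAL_CWND is the RFC 4960 constant of
 * 4380 bytes): with a 1500-byte path MTU, the initial cwnd becomes
 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes.
 */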
static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            if (net->lastsa > 0) {
                t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) net->lastsa;
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
    }
    /*-
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* Are we out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per section 7.2.3, are there any destinations
                 * that had a fast retransmit to them? If so, we
                 * need to adjust ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
                    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
                        net->ssthresh = (uint32_t) (((uint64_t) 4 *
                            (uint64_t) net->mtu *
                            (uint64_t) net->ssthresh) /
                            (uint64_t) t_ssthresh);
                    }
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
                        uint32_t srtt;

                        srtt = net->lastsa;
                        /*
                         * lastsa >> 3; we don't need to
                         * divide ...
                         */
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        /*
                         * Short Version => Equal to
                         * Contel Version MBe
                         */
                        net->ssthresh = (uint32_t) (((uint64_t) 4 *
                            (uint64_t) net->mtu *
                            (uint64_t) net->cwnd) /
                            ((uint64_t) srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                    }
                    if ((net->cwnd > t_cwnd / 2) &&
                        (net->ssthresh < net->cwnd - t_cwnd / 2)) {
                        net->ssthresh = net->cwnd - t_cwnd / 2;
                    }
                    if (net->ssthresh < net->mtu) {
                        net->ssthresh = net->mtu;
                    }
                } else {
                    net->ssthresh = net->cwnd / 2;
                    if (net->ssthresh < (net->mtu * 2)) {
                        net->ssthresh = 2 * net->mtu;
                    }
                }
                net->cwnd = net->ssthresh;
                SDT_PROBE(sctp, cwnd, net, fr,
                    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
                    old_cwnd, net->cwnd);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1	/* Losing to other flows */
#define SCTP_INST_NEUTRAL 2	/* Neutral, no indication */
#define SCTP_INST_GAINING 3	/* Gaining, step down possible */
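/*
 * A reading aid for the SDT probes below (inferred from the code itself, not
 * from the original source): each "probepoint" value packs the current cwnd
 * into the upper 32 bits, a probe-point number into bits 16-31, and a
 * result/indication flag into the low 16 bits, e.g.
 * probepoint = (cwnd << 32) | (5 << 16) | 1.
 */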
static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    probepoint = (((uint64_t) net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /*
         * The rtt increased. We don't update the bw, so we don't
         * update the rtt either.
         */
        /* Probe point 5 */
        probepoint |= ((5 << 16) | 1);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
            if (net->cc_mod.rtcc.last_step_state == 5)
                net->cc_mod.rtcc.step_cnt++;
            else
                net->cc_mod.rtcc.step_cnt = 1;
            net->cc_mod.rtcc.last_step_state = 5;
            if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
                ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
                ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
                /* Try a step down */
                oth = net->cc_mod.rtcc.vol_reduce;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.step_cnt;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.last_step_state;
                SDT_PROBE(sctp, cwnd, net, rttstep,
                    vtag,
                    ((net->cc_mod.rtcc.lbw << 32) | nbw),
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    oth,
                    probepoint);
                if (net->cwnd > (4 * net->mtu)) {
                    net->cwnd -= net->mtu;
                    net->cc_mod.rtcc.vol_reduce++;
                } else {
                    net->cc_mod.rtcc.step_cnt = 0;
                }
            }
        }
        return (1);
    }
    if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /*
         * The rtt decreased, so there could be more room. We update
         * both the bw and the rtt here to lock this in as a good
         * step down.
         */
        /* Probe point 6 */
        probepoint |= ((6 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.last_step_state == 5) &&
                (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
                /* Step down worked */
                net->cc_mod.rtcc.step_cnt = 0;
                return (1);
            } else {
                net->cc_mod.rtcc.last_step_state = 6;
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
        net->cc_mod.rtcc.lbw = nbw;
        net->cc_mod.rtcc.lbw_rtt = net->rtt;
        net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
        if (inst_ind == SCTP_INST_GAINING)
            return (1);
        else if (inst_ind == SCTP_INST_NEUTRAL)
            return (1);
        else
            return (0);
    }
    /*
     * OK, bw and rtt remained the same .. no update to either.
     */
    /* Probe point 7 */
    probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
        if (net->cc_mod.rtcc.last_step_state == 5)
            net->cc_mod.rtcc.step_cnt++;
        else
            net->cc_mod.rtcc.step_cnt = 1;
        net->cc_mod.rtcc.last_step_state = 5;
        if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
            ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
            ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
            /* Try a step down */
            if (net->cwnd > (4 * net->mtu)) {
                net->cwnd -= net->mtu;
                net->cc_mod.rtcc.vol_reduce++;
                return (1);
            } else {
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
    }
    if (inst_ind == SCTP_INST_GAINING)
        return (1);
    else if (inst_ind == SCTP_INST_NEUTRAL)
        return (1);
    else
        return ((int)net->cc_mod.rtcc.ret_from_eq);
}

static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    /* Bandwidth decreased. */
    probepoint = (((uint64_t) net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /* rtt increased */
        /* Did we add more? */
        if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
            (inst_ind != SCTP_INST_LOOSING)) {
            /* We may have caused it; back off? */
            /* PROBE POINT 1 */
            probepoint |= ((1 << 16) | 1);
            SDT_PROBE(sctp, cwnd, net, rttvar,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                net->flight_size,
                probepoint);
            if (net->cc_mod.rtcc.ret_from_eq) {
                /*
                 * Switch over to CA if we are less
                 * aggressive.
                 */
                net->ssthresh = net->cwnd - 1;
                net->partial_bytes_acked = 0;
            }
            return (1);
        }
        /* Probe point 2 */
        probepoint |= ((2 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        /* Someone else - fight for more? */
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            /*
             * Did we voluntarily give up some? If so, take
             * one back.
             */
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 2;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /* bw & rtt decreased */
        /* Probe point 3 */
        probepoint |= ((3 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 3;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    }
    /* The bw decreased but rtt stayed the same */
    /* Probe point 4 */
    probepoint |= ((4 << 16) | 0);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        if ((net->cc_mod.rtcc.vol_reduce) &&
            (inst_ind != SCTP_INST_GAINING)) {
            net->cwnd += net->mtu;
            net->cc_mod.rtcc.vol_reduce--;
        }
        net->cc_mod.rtcc.last_step_state = 4;
        net->cc_mod.rtcc.step_cnt = 0;
    }
out_decision:
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    if (inst_ind == SCTP_INST_GAINING) {
        return (1);
    } else {
        return (0);
    }
}

static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
    uint64_t oth, probepoint;

    /*
     * BW increased, so update and return 0, since all actions in our
     * table say to do the normal CC update. Note that we pay no
     * attention to the inst_ind since our overall sum is increasing.
     */
    /* PROBE POINT 0 */
    probepoint = (((uint64_t) net->cwnd) << 32);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        net->cc_mod.rtcc.last_step_state = 0;
        net->cc_mod.rtcc.step_cnt = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
    }
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    return (0);
}

/* RTCC algorithm to limit growth of cwnd; return
 * true if you want to NOT allow cwnd growth.
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
    uint64_t bw_offset, rtt_offset;
    uint64_t probepoint, rtt, vtag;
    uint64_t bytes_for_this_rtt, inst_bw;
    uint64_t div, inst_off;
    int bw_shift;
    uint8_t inst_ind;
    int ret;

    /*-
     * Here we need to see if we want to limit cwnd growth
     * due to an increase in the overall rtt but no increase
     * in bw. We use the following table to figure out what
     * we should do. When we return 0, the cc update goes on
     * as planned. If we return 1, then no cc update happens
     * and the cwnd stays where it is.
     * ----------------------------------
     * BW   | RTT  | Action
     * *********************************
     * INC  | INC  | return 0
     * ----------------------------------
     * INC  | SAME | return 0
     * ----------------------------------
     * INC  | DECR | return 0
     * ----------------------------------
     * SAME | INC  | return 1
     * ----------------------------------
     * SAME | SAME | return 1
     * ----------------------------------
     * SAME | DECR | return 0
     * ----------------------------------
     * DECR | INC  | return 0 or 1 based on whether we caused it.
     * ----------------------------------
     * DECR | SAME | return 0
     * ----------------------------------
     * DECR | DECR | return 0
     * ----------------------------------
     *
     * We are a bit fuzzy on what an increase or decrease is.
     * For BW it is the same if it did not change within 1/64th.
     * For RTT it stayed the same if it did not change within
     * 1/32nd.
     */
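    /*
     * Illustration of those thresholds (not from the original source;
     * assumes the sctp_rttvar_bw sysctl is at a default of 6, i.e. 1/64th,
     * and sctp_rttvar_rtt at 5, i.e. 1/32nd): with a last measured
     * bandwidth lbw of 6,400,000 bytes, nbw counts as "same" while it stays
     * within +/- 100,000 bytes of lbw; likewise a last rtt of 64 ms counts
     * as "same" within +/- 2 ms.
     */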
    bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    rtt = stcb->asoc.my_vtag;
    vtag = (rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
    probepoint = (((uint64_t) net->cwnd) << 32);
    rtt = net->rtt;
    if (net->cc_mod.rtcc.rtt_set_this_sack) {
        net->cc_mod.rtcc.rtt_set_this_sack = 0;
        bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
        if (net->rtt) {
            div = net->rtt / 1000;
            if (div) {
                inst_bw = bytes_for_this_rtt / div;
                inst_off = inst_bw >> bw_shift;
                if (inst_bw > nbw)
                    inst_ind = SCTP_INST_GAINING;
                else if ((inst_bw + inst_off) < nbw)
                    inst_ind = SCTP_INST_LOOSING;
                else
                    inst_ind = SCTP_INST_NEUTRAL;
                probepoint |= ((0xb << 16) | inst_ind);
            } else {
                inst_ind = net->cc_mod.rtcc.last_inst_ind;
                inst_bw = bytes_for_this_rtt / (uint64_t) (net->rtt);
                /* Can't determine, do not change */
                probepoint |= ((0xc << 16) | inst_ind);
            }
        } else {
            inst_ind = net->cc_mod.rtcc.last_inst_ind;
            inst_bw = bytes_for_this_rtt;
            /* Can't determine, do not change */
            probepoint |= ((0xd << 16) | inst_ind);
        }
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((nbw << 32) | inst_bw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
            net->flight_size,
            probepoint);
    } else {
        /* No rtt measurement, use the last one */
        inst_ind = net->cc_mod.rtcc.last_inst_ind;
    }
    bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
        ret = cc_bw_increase(stcb, net, nbw, vtag);
        goto out;
    }
    rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
        ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
        goto out;
    }
    /*
     * If we reach here then the bw stayed the same.
     */
    ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
    net->cc_mod.rtcc.last_inst_ind = inst_ind;
    return (ret);
}

static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
    struct sctp_nets *net;
    int old_cwnd;
    uint32_t t_ssthresh, t_cwnd, incr;
    uint64_t t_ucwnd_sbw;
    uint64_t t_path_mptcp;
    uint64_t mptcp_like_alpha;
    uint32_t srtt;
    uint64_t max_path;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    t_path_mptcp = 0;
    mptcp_like_alpha = 1;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
        max_path = 0;
        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            /* lastsa >> 3; we don't need to divide ... */
            srtt = net->lastsa;
            if (srtt > 0) {
                uint64_t tmp;

                t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) srtt;
                t_path_mptcp += (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
                    (((uint64_t) net->mtu) * (uint64_t) srtt);
                tmp = (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_N) /
                    ((uint64_t) net->mtu * (uint64_t) (srtt * srtt));
                if (tmp > max_path) {
                    max_path = tmp;
                }
            }
        }
        if (t_path_mptcp > 0) {
            mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
        } else {
            mptcp_like_alpha = 1;
        }
    }
    if (t_ssthresh == 0) {
        t_ssthresh = 1;
    }
    if (t_ucwnd_sbw == 0) {
        t_ucwnd_sbw = 1;
    }
    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * Did any measurements go on for this network?
         */
        if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
            uint64_t nbw;

            /*
             * At this point our bw_bytes has been updated by
             * incoming sack information.
             *
             * But our bw may not yet be set.
             */
            if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
                nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
            } else {
                nbw = net->cc_mod.rtcc.bw_bytes;
            }
            if (net->cc_mod.rtcc.lbw) {
                if (cc_bw_limit(stcb, net, nbw)) {
                    /* Hold here, no update */
                    continue;
                }
            } else {
                uint64_t vtag, probepoint;

                probepoint = (((uint64_t) net->cwnd) << 32);
                probepoint |= ((0xa << 16) | 0);
                vtag = (net->rtt << 32) |
                    (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
                    (stcb->rport);

                SDT_PROBE(sctp, cwnd, net, rttvar,
                    vtag,
                    nbw,
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    net->flight_size,
                    probepoint);
                net->cc_mod.rtcc.lbw = nbw;
                net->cc_mod.rtcc.lbw_rtt = net->rtt;
                if (net->cc_mod.rtcc.rtt_set_this_sack) {
                    net->cc_mod.rtcc.rtt_set_this_sack = 0;
                    net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
                }
            }
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
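        /*
         * Illustration of the default (non-CMT) slow-start increment in
         * the switch below (not from the original source; assumes the
         * sctp_L2_abc_variable sysctl is at a default of 2, cf. Appropriate
         * Byte Counting, RFC 3465): with a 1500-byte MTU, acking 4500 new
         * bytes grows cwnd by min(4500, 2 * 1500) = 3000 bytes.
         */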
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    uint32_t limit;

                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        limit = (uint32_t) (((uint64_t) net->mtu *
                            (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t) net->ssthresh) /
                            (uint64_t) t_ssthresh);
                        incr = (uint32_t) (((uint64_t) net->net_ack *
                            (uint64_t) net->ssthresh) /
                            (uint64_t) t_ssthresh);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa >> 3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        limit = (uint32_t) (((uint64_t) net->mtu *
                            (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t) net->cwnd) /
                            ((uint64_t) srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        incr = (uint32_t) (((uint64_t) net->net_ack *
                            (uint64_t) net->cwnd) /
                            ((uint64_t) srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        limit = (uint32_t) (((uint64_t) net->mtu *
                            mptcp_like_alpha *
                            (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
                            SHIFT_MPTCP_MULTI);
                        incr = (uint32_t) (((uint64_t) net->net_ack *
                            mptcp_like_alpha) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr > net->net_ack) {
                            incr = net->net_ack;
                        }
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->net_ack;
                        if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
                            incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
                        }
                        break;
                    }
                    net->cwnd += incr;
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, incr,
                            SCTP_CWND_LOG_FROM_SS);
                    }
                    SDT_PROBE(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                /*
                 * Add to pba
                 */
                net->partial_bytes_acked += net->net_ack;

                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        incr = (uint32_t) (((uint64_t) net->mtu *
                            (uint64_t) net->ssthresh) /
                            (uint64_t) t_ssthresh);
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa >> 3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        incr = (uint32_t) ((uint64_t) net->mtu *
                            (uint64_t) net->cwnd /
                            ((uint64_t) srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        incr = (uint32_t) ((mptcp_like_alpha *
                            (uint64_t) net->cwnd) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->mtu;
                        break;
                    }
                    net->cwnd += incr;
                    SDT_PROBE(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd;

    old_cwnd = net->cwnd;
    net->cwnd = net->mtu;
    SDT_PROBE(sctp, cwnd, net, ack,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        old_cwnd, net->cwnd);
    SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
        net, net->cwnd);
}

static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        struct sctp_nets *lnet;
        uint32_t srtt;

        t_ucwnd_sbw = 0;
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += lnet->ssthresh;
            t_cwnd += lnet->cwnd;
            srtt = lnet->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt > 0) {
                t_ucwnd_sbw += (uint64_t) lnet->cwnd / (uint64_t) srtt;
            }
        }
        if (t_ssthresh < 1) {
            t_ssthresh = 1;
        }
        if (t_ucwnd_sbw < 1) {
            t_ucwnd_sbw = 1;
        }
        if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
            net->ssthresh = (uint32_t) (((uint64_t) 4 *
                (uint64_t) net->mtu *
                (uint64_t) net->ssthresh) /
                (uint64_t) t_ssthresh);
        } else {
            uint64_t cc_delta;

            srtt = net->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt == 0) {
                srtt = 1;
            }
            cc_delta = t_ucwnd_sbw * (uint64_t) srtt / 2;
            if (cc_delta < t_cwnd) {
                net->ssthresh = (uint32_t) ((uint64_t) t_cwnd - cc_delta);
            } else {
                net->ssthresh = net->mtu;
            }
        }
        if ((net->cwnd > t_cwnd / 2) &&
            (net->ssthresh < net->cwnd - t_cwnd / 2)) {
            net->ssthresh = net->cwnd - t_cwnd / 2;
        }
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
        }
    } else {
        net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
    }
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    SDT_PROBE(sctp, cwnd, net, to,
        stcb->asoc.my_vtag,
        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
        net,
        old_cwnd, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
    int old_cwnd = net->cwnd;

    if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
        /* Data center Congestion Control */
        if (in_window == 0) {
            /*
             * Go to CA with the cwnd at the point we sent the
             * TSN that was marked with a CE.
             */
            if (net->ecn_prev_cwnd < net->cwnd) {
                /* Restore to prev cwnd */
                net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
            } else {
                /* Just cut in 1/2 */
                net->cwnd /= 2;
            }
            /* Drop to CA */
            net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        } else {
            /*
             * Further tuning down required over the drastic
             * original cut.
             */
            net->ssthresh -= (net->mtu * num_pkt_lost);
            net->cwnd -= (net->mtu * num_pkt_lost);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
    } else {
        if (in_window == 0) {
            SCTP_STAT_INCR(sctps_ecnereducedcwnd);
            net->ssthresh = net->cwnd / 2;
            if (net->ssthresh < net->mtu) {
                net->ssthresh = net->mtu;
                /*
                 * Here, back off the timer as well to slow
                 * us down.
                 */
                net->RTO <<= 1;
            }
            net->cwnd = net->ssthresh;
            SDT_PROBE(sctp, cwnd, net, ecn,
                stcb->asoc.my_vtag,
                ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                net,
                old_cwnd, net->cwnd);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
    }
}

static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
    uint32_t bw_avail;
    int rtt;
    unsigned int incr;
    int old_cwnd = net->cwnd;

    /* need the real RTT in ms for this calc */
    rtt = net->rtt / 1000;
    /* get the bottleneck bw */
    *bottle_bw = ntohl(cp->bottle_bw);
    /* and what's on queue */
    *on_queue = ntohl(cp->current_onq);
    /*
     * Adjust the on-queue value if our flight is more; it could be
     * that the router has not yet gotten data "in-flight" to it.
     */
    if (*on_queue < net->flight_size)
        *on_queue = net->flight_size;
    /* calculate the available space */
    bw_avail = (*bottle_bw * rtt) / 1000;
    if (bw_avail > *bottle_bw) {
        /*
         * Cap the growth to no more than the bottleneck. This can
         * happen as RTT slides up due to queues. It also means that
         * if you have more than a 1 second RTT with an empty queue,
         * you will be limited to the bottle_bw per second no matter
         * if other points have 1/2 the RTT and you could get more
         * out...
         */
        bw_avail = *bottle_bw;
    }
    if (*on_queue > bw_avail) {
        /*
         * No room for anything else; don't allow anything else to be
         * "added to the fire".
         */
        int seg_inflight, seg_onqueue, my_portion;

        net->partial_bytes_acked = 0;

        /* how much are we over queue size? */
        incr = *on_queue - bw_avail;
        if (stcb->asoc.seen_a_sack_this_pkt) {
            /*
             * Undo any cwnd adjustment that the sack might have
             * made.
             */
            net->cwnd = net->prev_cwnd;
        }
        /* Now how much of that is mine? */
        seg_inflight = net->flight_size / net->mtu;
        seg_onqueue = *on_queue / net->mtu;
        my_portion = (incr * seg_inflight) / seg_onqueue;

        /* Have I made an adjustment already? */
        if (net->cwnd > net->flight_size) {
            /*
             * For this flight I made an adjustment; we need to
             * decrease the portion by a share of our previous
             * adjustment.
             */
            int diff_adj;

            diff_adj = net->cwnd - net->flight_size;
            if (diff_adj > my_portion)
                my_portion = 0;
            else
                my_portion -= diff_adj;
        }
        /*
         * Back down to the previous cwnd (assume we have had a sack
         * before this packet), minus whatever portion of the
         * overage is my fault.
         */
        net->cwnd -= my_portion;

        /* we will NOT back down more than 1 MTU */
        if (net->cwnd <= net->mtu) {
            net->cwnd = net->mtu;
        }
        /* force into CA */
        net->ssthresh = net->cwnd - 1;
    } else {
        /*
         * Take 1/4 of the space left or max burst up .. whichever
         * is less.
         */
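        /*
         * Illustration (not from the original source): with bw_avail =
         * 100,000 bytes and *on_queue = 20,000 bytes, the space left is
         * 80,000 bytes, so incr = 80,000 >> 2 = 20,000 bytes, subject to
         * the max_burst * MTU cap below.
         */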
        incr = (bw_avail - *on_queue) >> 2;
        if ((stcb->asoc.max_burst > 0) &&
            (stcb->asoc.max_burst * net->mtu < incr)) {
            incr = stcb->asoc.max_burst * net->mtu;
        }
        net->cwnd += incr;
    }
    if (net->cwnd > bw_avail) {
        /* We can't exceed the pipe size */
        net->cwnd = bw_avail;
    }
    if (net->cwnd < net->mtu) {
        /* We always have 1 MTU */
        net->cwnd = net->mtu;
    }
    if (net->cwnd - old_cwnd != 0) {
        /* log only changes */
        SDT_PROBE(sctp, cwnd, net, pd,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                SCTP_CWND_LOG_FROM_SAT);
        }
    }
}

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
    int old_cwnd = net->cwnd;

    if (net->ssthresh < net->cwnd)
        net->ssthresh = net->cwnd;
    if (burst_limit) {
        net->cwnd = (net->flight_size + (burst_limit * net->mtu));
        SDT_PROBE(sctp, cwnd, net, bl,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
        }
    }
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing zero as the last argument disables the RTCC algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    /* Passing zero as the last argument disables the RTCC algorithm */
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/* Here starts the RTCCVAR type CC invented by RRS, which
 * is a slight mod to RFC 2581. We reuse a common routine or
 * two since these algorithms are so close and need to
 * remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set > 0) {
        /* We had a bw measurement going on */
        struct timeval ltls;

        SCTP_GETPTIME_TIMEVAL(&ltls);
        timevalsub(&ltls, &net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
    }
}
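/*
 * Illustration of the measurement window above (not from the original
 * source): new_tot_time is kept in microseconds, and the SACK handler
 * computes nbw = bw_bytes / (new_tot_time / 1000), i.e. bytes per
 * millisecond. So 48,000 bytes acked over a 12 ms window yields
 * nbw = 4,000 bytes/ms.
 */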
static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    if (net->cc_mod.rtcc.lbw) {
        /* Clear the old bw.. we went to 0 in-flight */
        vtag = (net->rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
            (stcb->rport);
        probepoint = (((uint64_t) net->cwnd) << 32);
        /* Probe point 8 */
        probepoint |= ((8 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | 0),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        net->cc_mod.rtcc.lbw_rtt = 0;
        net->cc_mod.rtcc.cwnd_at_bw_set = 0;
        net->cc_mod.rtcc.lbw = 0;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
        net->cc_mod.rtcc.bw_tot_time = 0;
        net->cc_mod.rtcc.bw_bytes = 0;
        net->cc_mod.rtcc.tls_needs_set = 0;
        if (net->cc_mod.rtcc.steady_step) {
            net->cc_mod.rtcc.vol_reduce = 0;
            net->cc_mod.rtcc.step_cnt = 0;
            net->cc_mod.rtcc.last_step_state = 0;
        }
        if (net->cc_mod.rtcc.ret_from_eq) {
            /* less aggressive one - reset cwnd too */
            uint32_t cwnd_in_mtu, cwnd;

            cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
            if (cwnd_in_mtu == 0) {
                /*
                 * Using 0 means that the value of RFC 4960
                 * is used.
                 */
                cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
            } else {
                /*
                 * We take the minimum of the burst limit
                 * and the initial congestion window.
                 */
                if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
                    cwnd_in_mtu = stcb->asoc.max_burst;
                cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
            }
            if (net->cwnd > cwnd) {
                /*
                 * Only set if we are not a timeout (i.e.
                 * down to 1 mtu).
                 */
                net->cwnd = cwnd;
            }
        }
    }
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    sctp_set_initial_cc_param(stcb, net);
    stcb->asoc.use_precise_time = 1;
    probepoint = (((uint64_t) net->cwnd) << 32);
    probepoint |= ((9 << 16) | 0);
    vtag = (net->rtt << 32) |
        (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
        (stcb->rport);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        0,
        0,
        0,
        probepoint);
    net->cc_mod.rtcc.lbw_rtt = 0;
    net->cc_mod.rtcc.cwnd_at_bw_set = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.lbw = 0;
    net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
    net->cc_mod.rtcc.bw_tot_time = 0;
    net->cc_mod.rtcc.bw_bytes = 0;
    net->cc_mod.rtcc.tls_needs_set = 0;
    net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
    net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
    net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
    net->cc_mod.rtcc.step_cnt = 0;
    net->cc_mod.rtcc.last_step_state = 0;
}

static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
    struct sctp_nets *net;

    if (setorget == 1) {
        /* a set */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
            }
        } else {
            return (EINVAL);
        }
    } else {
        /* a get */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
        } else {
            return (EINVAL);
        }
    }
    return (0);
}

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set == 0) {
        SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.tls_needs_set = 2;
    }
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing one as the last argument enables the RTCC algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
    net->cc_mod.rtcc.rtt_set_this_sack = 1;
}

/* Here starts Sally Floyd's HS-TCP */

struct sctp_hs_raise_drop {
    int32_t cwnd;
    int32_t increase;
    int32_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73

struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
    {38, 1, 50},	/* 0 */
    {118, 2, 44},	/* 1 */
    {221, 3, 41},	/* 2 */
    {347, 4, 38},	/* 3 */
    {495, 5, 37},	/* 4 */
    {663, 6, 35},	/* 5 */
    {851, 7, 34},	/* 6 */
    {1058, 8, 33},	/* 7 */
    {1284, 9, 32},	/* 8 */
    {1529, 10, 31},	/* 9 */
    {1793, 11, 30},	/* 10 */
    {2076, 12, 29},	/* 11 */
    {2378, 13, 28},	/* 12 */
    {2699, 14, 28},	/* 13 */
    {3039, 15, 27},	/* 14 */
    {3399, 16, 27},	/* 15 */
    {3778, 17, 26},	/* 16 */
    {4177, 18, 26},	/* 17 */
    {4596, 19, 25},	/* 18 */
    {5036, 20, 25},	/* 19 */
    {5497, 21, 24},	/* 20 */
    {5979, 22, 24},	/* 21 */
    {6483, 23, 23},	/* 22 */
    {7009, 24, 23},	/* 23 */
    {7558, 25, 22},	/* 24 */
    {8130, 26, 22},	/* 25 */
    {8726, 27, 22},	/* 26 */
    {9346, 28, 21},	/* 27 */
    {9991, 29, 21},	/* 28 */
    {10661, 30, 21},	/* 29 */
    {11358, 31, 20},	/* 30 */
    {12082, 32, 20},	/* 31 */
    {12834, 33, 20},	/* 32 */
    {13614, 34, 19},	/* 33 */
    {14424, 35, 19},	/* 34 */
    {15265, 36, 19},	/* 35 */
    {16137, 37, 19},	/* 36 */
    {17042, 38, 18},	/* 37 */
    {17981, 39, 18},	/* 38 */
    {18955, 40, 18},	/* 39 */
    {19965, 41, 17},	/* 40 */
    {21013, 42, 17},	/* 41 */
    {22101, 43, 17},	/* 42 */
    {23230, 44, 17},	/* 43 */
    {24402, 45, 16},	/* 44 */
    {25618, 46, 16},	/* 45 */
    {26881, 47, 16},	/* 46 */
    {28193, 48, 16},	/* 47 */
    {29557, 49, 15},	/* 48 */
    {30975, 50, 15},	/* 49 */
    {32450, 51, 15},	/* 50 */
    {33986, 52, 15},	/* 51 */
    {35586, 53, 14},	/* 52 */
    {37253, 54, 14},	/* 53 */
    {38992, 55, 14},	/* 54 */
    {40808, 56, 14},	/* 55 */
    {42707, 57, 13},	/* 56 */
    {44694, 58, 13},	/* 57 */
    {46776, 59, 13},	/* 58 */
    {48961, 60, 13},	/* 59 */
    {51258, 61, 13},	/* 60 */
    {53677, 62, 12},	/* 61 */
    {56230, 63, 12},	/* 62 */
    {58932, 64, 12},	/* 63 */
    {61799, 65, 12},	/* 64 */
    {64851, 66, 11},	/* 65 */
    {68113, 67, 11},	/* 66 */
    {71617, 68, 11},	/* 67 */
    {75401, 69, 10},	/* 68 */
    {79517, 70, 10},	/* 69 */
    {84035, 71, 10},	/* 70 */
    {89053, 72, 10},	/* 71 */
    {94717, 73, 9}	/* 72 */
};

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx, incr;

    cur_val = net->cwnd >> 10;
    indx = SCTP_HS_TABLE_SIZE - 1;
#ifdef SCTP_DEBUG
    printf("HS CC called.\n");
#endif
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        if (net->net_ack > net->mtu) {
            net->cwnd += net->mtu;
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
            }
        } else {
            net->cwnd += net->net_ack;
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
            }
        }
    } else {
        for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
            if (cur_val < sctp_cwnd_adjust[i].cwnd) {
                indx = i;
                break;
            }
        }
        net->last_hs_used = indx;
        incr = ((sctp_cwnd_adjust[indx].increase) << 10);
        net->cwnd += incr;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
        }
    }
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        net->ssthresh = net->cwnd / 2;
        if (net->ssthresh < (net->mtu * 2)) {
            net->ssthresh = 2 * net->mtu;
        }
        net->cwnd = net->ssthresh;
    } else {
        /* drop by the proper amount */
        net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
            sctp_cwnd_adjust[net->last_hs_used].drop_percent);
        net->cwnd = net->ssthresh;
        /* now where are we */
        indx = net->last_hs_used;
        cur_val = net->cwnd >> 10;
        /* reset where we are in the table */
        if (cur_val < sctp_cwnd_adjust[0].cwnd) {
            /* fell out of hs */
            net->last_hs_used = 0;
        } else {
            for (i = indx; i >= 1; i--) {
                if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
                    break;
                }
            }
            /* use the index found by the scan above */
            net->last_hs_used = i;
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
    }
}
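/*
 * Worked example of the table lookup above (illustrative, not from the
 * original source): cur_val is cwnd in units of 1024 bytes, so a cwnd of
 * 2,500,000 bytes gives cur_val = 2441. Scanning up from last_hs_used, the
 * first row with cwnd > 2441 is row 13 ({2699, 14, 28}), so slow start adds
 * 14 * 1024 bytes per adjustment, and a loss event drops cwnd by 28%.
 */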
static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* Are we out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per section 7.2.3, are there any destinations
                 * that had a fast retransmit to them? If so, we
                 * need to adjust ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;

                sctp_hs_cwnd_decrease(stcb, net);

                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
    struct sctp_nets *net;

    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    sctp_hs_cwnd_increase(stcb, net);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                net->partial_bytes_acked += net->net_ack;
                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    net->cwnd += net->mtu;
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return (seq3 - seq2 >= seq1 - seq2);
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
    return (sctp_get_tick_count() - ca->last_cong);
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
    return (htcp_cong_time(ca) / ca->minRTT);
}

static inline void
htcp_reset(struct htcp *ca)
{
    ca->undo_last_cong = ca->last_cong;
    ca->undo_maxRTT = ca->maxRTT;
    ca->undo_old_maxB = ca->old_maxB;
    ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
    net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
    net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
    return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
    uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

    /* keep track of minimum RTT seen so far; minRTT is zero at first */
    if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
        net->cc_mod.htcp_ca.minRTT = srtt;

    /* max RTT */
    if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
        if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
            net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
        if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
            net->cc_mod.htcp_ca.maxRTT = srtt;
    }
}
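/*
 * A reading aid for the H-TCP code below (inferred from the code, not part
 * of the original source): alpha and beta are kept in fixed point with a
 * scale factor of 1 << 7 = 128. So beta = (minRTT << 7) / maxRTT encodes the
 * ratio minRTT/maxRTT, e.g. beta = 77 is roughly 0.6, and ((1 << 7) - beta)
 * stands for (1 - beta).
 */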
static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return (sctp_get_tick_count() - ca->last_cong);
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
	return (htcp_cong_time(ca) / ca->minRTT);
}

static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}

static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}
	net->cc_mod.htcp_ca.bytecount += net->net_ack;

	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - ((net->cc_mod.htcp_ca.alpha >> 7 ? : 1) * net->mtu)) &&
	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
	    (net->cc_mod.htcp_ca.minRTT > 0)) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}
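
/*
 * Units in the estimator above: bytecount / mtu is the number of full
 * packets acked in the sample, and scaling by hz / elapsed-ticks turns
 * that into packets per second.  For example, with mtu = 1460,
 * bytecount = 14600 (10 packets), hz = 1000 and 50 ticks (50 ms)
 * elapsed, cur_Bi = 10 * 1000 / 50 = 200 packets/sec.  Bi is then
 * smoothed as a 3/4-to-1/4 weighted moving average of itself and cur_Bi.
 */
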
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}
	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t) hz) {
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}
	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		/* clamping ratio to interval [0.5,10]<<3 */
		scale = min(max(scale, 1U << 2), 10U << 3);
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}
	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
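
/*
 * Both parameters above are kept in <<7 fixed point, so 128 represents
 * 1.0.  htcp_beta_update() computes beta = minRTT / maxRTT, clamped to
 * [BETA_MIN, BETA_MAX], and htcp_alpha_update() implements the H-TCP
 * increase function alpha = 2 * (1 - beta) * f(delta), where
 * f(delta) = 1 + 10 * (delta - 1) + ((delta - 1) / 2)^2 once more than
 * one second (delta > 1) has elapsed since the last congestion event
 * (diff is in ticks, hence the divisions by hz).  For example, with
 * factor = 1 and beta = 64 (0.5), alpha = 2 * 1 * (128 - 64) = 128,
 * i.e. the Reno-like one packet per RTT.
 */
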
/*
 * After we have the RTT data needed to calculate beta, we would still
 * prefer to wait one more RTT before adjusting it, to make sure we are
 * working from consistent data.
 *
 * This function should be called on a congestion event, since only at
 * that point do we have a real sense of maxRTT (the queues en route
 * were getting too full).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/*
	 * Add a slowly fading memory for maxRTT to accommodate routing
	 * changes etc.
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
}
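
/*
 * Worked example: with cwnd = 100 MTUs and beta = 64 (0.5 in <<7 fixed
 * point), htcp_recalc_ssthresh() returns (100 * 64) >> 7 = 50 MTUs; the
 * max() keeps ssthresh from ever dropping below 2 MTUs.
 */
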
static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *		return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start. */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(net);

		/*
		 * In the dangerous area, increase slowly.  In theory this
		 * is net->cwnd += alpha / net->cwnd.
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			htcp_alpha_update(&net->cc_mod.htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	}
}

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return (net->ssthresh);
}

#endif

static void
htcp_init(struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * We take the max of twice the MTU and SCTP_INITIAL_CWND, and
	 * then limit that to at most 4 MTUs of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* If nothing was acked on this destination, skip it. */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif
		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update.
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if the pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of an RFC 2582 fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per RFC 4960, Section 7.2.3: were there
				 * any destinations that had a fast
				 * retransmit sent to them?  If so, adjust
				 * ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if the state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on the fast recovery window. */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark the end of the window. */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark the end of the window. */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC 2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if the state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* Here, back off the timer as well to slow us down. */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
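
/*
 * Dispatch table for the pluggable congestion control modules.  The
 * entries are, in order: the RFC 4960 default, HighSpeed TCP (HS-TCP),
 * H-TCP, and the RTCC variant.  The indices are expected to line up
 * with the SCTP_CC_* constants selectable through the SCTP_PLUGGABLE_CC
 * socket option (see <netinet/sctp.h>).
 */
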
struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
		.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
	}
};
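
/*
 * Usage sketch (illustration only; this is userland code, so it is
 * shown as a comment): an application would select one of the modules
 * above with the SCTP_PLUGGABLE_CC socket option, along these lines:
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	av.assoc_value = SCTP_CC_HTCP;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
 *	    &av, sizeof(av));
 *
 * The constant names are taken from <netinet/sctp.h>; consult that
 * header if in doubt.
 */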