/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_kdtrace.h>

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8
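
/*
 * These shift amounts implement the fixed-point arithmetic of the
 * MPTCP-like coupled increase in sctp_cwnd_update_after_sack_common():
 * the per-path terms are scaled up by 2^16 and 2^40 before the integer
 * divisions (presumably to preserve precision), and the resulting
 * increments are scaled back down by 2^SHIFT_MPTCP_MULTI.
 */
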
static void
sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
{
    if ((assoc->max_cwnd > 0) &&
        (net->cwnd > assoc->max_cwnd) &&
        (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
        net->cwnd = assoc->max_cwnd;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
}
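
/*
 * Initial cwnd selection. With the sctp_initial_cwnd sysctl left at 0,
 * the RFC 4960 rule min(4*MTU, max(2*MTU, 4380 bytes)) is used below;
 * as an illustration, a 1500-byte path MTU gives
 * min(6000, max(3000, 4380)) = 4380 bytes.
 */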
static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *assoc;
    uint32_t cwnd_in_mtu;

    assoc = &stcb->asoc;
    cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
    if (cwnd_in_mtu == 0) {
        /* Using 0 means that the value of RFC 4960 is used. */
        net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    } else {
        /*
         * We take the minimum of the burst limit and the initial
         * congestion window.
         */
        if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
            cwnd_in_mtu = assoc->max_burst;
        net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
    }
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        /* In case of resource pooling initialize appropriately */
        net->cwnd /= assoc->numnets;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
    sctp_enforce_cwnd_limit(assoc, net);
    net->ssthresh = assoc->peers_rwnd;
    SDT_PROBE5(sctp, cwnd, net, init,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        0, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
        (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            if (net->lastsa > 0) {
                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
    }

    /*-
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per Section 7.2.3, check whether any
                 * destinations had a fast retransmit to
                 * them. If so, we need to adjust ssthresh
                 * and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
                    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                    }
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
                        uint32_t srtt;

                        srtt = net->lastsa;
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        /*
                         * Short Version => Equal to
                         * Contel Version MBe
                         */
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                    }
                    if ((net->cwnd > t_cwnd / 2) &&
                        (net->ssthresh < net->cwnd - t_cwnd / 2)) {
                        net->ssthresh = net->cwnd - t_cwnd / 2;
                    }
                    if (net->ssthresh < net->mtu) {
                        net->ssthresh = net->mtu;
                    }
                } else {
                    net->ssthresh = net->cwnd / 2;
                    if (net->ssthresh < (net->mtu * 2)) {
                        net->ssthresh = 2 * net->mtu;
                    }
                }
                net->cwnd = net->ssthresh;
                sctp_enforce_cwnd_limit(asoc, net);
                SDT_PROBE5(sctp, cwnd, net, fr,
                    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
                    old_cwnd, net->cwnd);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1	/* Losing to other flows */
#define SCTP_INST_NEUTRAL 2	/* Neutral, no indication */
#define SCTP_INST_GAINING 3	/* Gaining, step down possible */
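
/*
 * A note on the rttvar/rttstep probe points used throughout this file:
 * probepoint packs (cwnd << 32) | (probe id << 16) | flag into one
 * 64-bit word, and the bandwidth and RTT arguments carry the previous
 * value in the upper 32 bits and the new value in the lower 32 bits.
 */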
static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /*
         * The RTT increased; we don't update the bw, so we don't
         * update the rtt either.
         */
        /* Probe point 5 */
        probepoint |= ((5 << 16) | 1);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
            if (net->cc_mod.rtcc.last_step_state == 5)
                net->cc_mod.rtcc.step_cnt++;
            else
                net->cc_mod.rtcc.step_cnt = 1;
            net->cc_mod.rtcc.last_step_state = 5;
            if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
                ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
                ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
                /* Try a step down */
                oth = net->cc_mod.rtcc.vol_reduce;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.step_cnt;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.last_step_state;
                SDT_PROBE5(sctp, cwnd, net, rttstep,
                    vtag,
                    ((net->cc_mod.rtcc.lbw << 32) | nbw),
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    oth,
                    probepoint);
                if (net->cwnd > (4 * net->mtu)) {
                    net->cwnd -= net->mtu;
                    net->cc_mod.rtcc.vol_reduce++;
                } else {
                    net->cc_mod.rtcc.step_cnt = 0;
                }
            }
        }
        return (1);
    }
    if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /*
         * The RTT decreased, so there could be more room. We update
         * both the bw and the rtt here to lock this in as a good
         * step down.
         */
        /* Probe point 6 */
        probepoint |= ((6 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.last_step_state == 5) &&
                (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
                /* Step down worked */
                net->cc_mod.rtcc.step_cnt = 0;
                return (1);
            } else {
                net->cc_mod.rtcc.last_step_state = 6;
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
        net->cc_mod.rtcc.lbw = nbw;
        net->cc_mod.rtcc.lbw_rtt = net->rtt;
        net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
        if (inst_ind == SCTP_INST_GAINING)
            return (1);
        else if (inst_ind == SCTP_INST_NEUTRAL)
            return (1);
        else
            return (0);
    }
    /*
     * OK, bw and rtt remained the same; no update to either one.
     */
    /* Probe point 7 */
    probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
        if (net->cc_mod.rtcc.last_step_state == 5)
            net->cc_mod.rtcc.step_cnt++;
        else
            net->cc_mod.rtcc.step_cnt = 1;
        net->cc_mod.rtcc.last_step_state = 5;
        if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
            ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
            ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
            /* Try a step down */
            if (net->cwnd > (4 * net->mtu)) {
                net->cwnd -= net->mtu;
                net->cc_mod.rtcc.vol_reduce++;
                return (1);
            } else {
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
    }
    if (inst_ind == SCTP_INST_GAINING)
        return (1);
    else if (inst_ind == SCTP_INST_NEUTRAL)
        return (1);
    else
        return ((int)net->cc_mod.rtcc.ret_from_eq);
}
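
/*
 * Step-down bookkeeping used by the three decision routines: with a
 * non-zero steady_step setting, every steady_step consecutive
 * "no change" verdicts trigger a voluntary one-MTU reduction of cwnd,
 * counted in vol_reduce. The decrease paths below hand those MTUs
 * back one at a time when bandwidth drops without us having caused it.
 */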
static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    /* Bandwidth decreased. */
    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /* The RTT increased. Did we add more? */
        if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
            (inst_ind != SCTP_INST_LOOSING)) {
            /* We may have caused it; back off? */
            /* PROBE POINT 1 */
            probepoint |= ((1 << 16) | 1);
            SDT_PROBE5(sctp, cwnd, net, rttvar,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                net->flight_size,
                probepoint);
            if (net->cc_mod.rtcc.ret_from_eq) {
                /*
                 * Switch over to CA if we are less
                 * aggressive.
                 */
                net->ssthresh = net->cwnd - 1;
                net->partial_bytes_acked = 0;
            }
            return (1);
        }
        /* Probe point 2 */
        probepoint |= ((2 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        /* Someone else - fight for more? */
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            /*
             * Did we voluntarily give up some? If so, take one
             * back.
             */
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 2;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /* bw & rtt decreased */
        /* Probe point 3 */
        probepoint |= ((3 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 3;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    }
    /* The bw decreased but rtt stayed the same */
    /* Probe point 4 */
    probepoint |= ((4 << 16) | 0);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE5(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        if ((net->cc_mod.rtcc.vol_reduce) &&
            (inst_ind != SCTP_INST_GAINING)) {
            net->cwnd += net->mtu;
            sctp_enforce_cwnd_limit(&stcb->asoc, net);
            net->cc_mod.rtcc.vol_reduce--;
        }
        net->cc_mod.rtcc.last_step_state = 4;
        net->cc_mod.rtcc.step_cnt = 0;
    }
out_decision:
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    if (inst_ind == SCTP_INST_GAINING) {
        return (1);
    } else {
        return (0);
    }
}

static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
    uint64_t oth, probepoint;

    /*
     * BW increased, so update and return 0, since all actions in our
     * table say to do the normal CC update. Note that we pay no
     * attention to the inst_ind since our overall sum is increasing.
     */
    /* PROBE POINT 0 */
    probepoint = (((uint64_t)net->cwnd) << 32);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE5(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        net->cc_mod.rtcc.last_step_state = 0;
        net->cc_mod.rtcc.step_cnt = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
    }
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    return (0);
}
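
/*
 * A note on units: nbw and lbw are in bytes per millisecond, since
 * sctp_cwnd_update_after_sack_common() computes them as bw_bytes over
 * new_tot_time / 1000 with new_tot_time kept in microseconds; net->rtt
 * is in microseconds as well.
 */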
/* RTCC Algorithm to limit growth of cwnd, return
 * true if you want to NOT allow cwnd growth
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
    uint64_t bw_offset, rtt_offset;
    uint64_t probepoint, rtt, vtag;
    uint64_t bytes_for_this_rtt, inst_bw;
    uint64_t div, inst_off;
    int bw_shift;
    uint8_t inst_ind;
    int ret;

    /*-
     * Here we need to see if we want
     * to limit cwnd growth due to increase
     * in overall rtt but no increase in bw.
     * We use the following table to figure
     * out what we should do. When we return
     * 0, cc update goes on as planned. If we
     * return 1, then no cc update happens and cwnd
     * stays where it is at.
     * ----------------------------------
     *   BW  |  RTT  | Action
     * *********************************
     *   INC |  INC  | return 0
     * ----------------------------------
     *   INC | SAME  | return 0
     * ----------------------------------
     *   INC | DECR  | return 0
     * ----------------------------------
     *  SAME |  INC  | return 1
     * ----------------------------------
     *  SAME | SAME  | return 1
     * ----------------------------------
     *  SAME | DECR  | return 0
     * ----------------------------------
     *  DECR |  INC  | return 0 or 1 based on whether we caused it.
     * ----------------------------------
     *  DECR | SAME  | return 0
     * ----------------------------------
     *  DECR | DECR  | return 0
     * ----------------------------------
     *
     * We are a bit fuzzy about what counts as an increase or
     * decrease: the BW is considered the same if it did not change
     * within 1/64th, and the RTT is considered the same if it did
     * not change within 1/32nd (both thresholds come from the
     * sctp_rttvar_bw and sctp_rttvar_rtt sysctls).
     */
    bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    rtt = stcb->asoc.my_vtag;
    vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
    probepoint = (((uint64_t)net->cwnd) << 32);
    rtt = net->rtt;
    if (net->cc_mod.rtcc.rtt_set_this_sack) {
        net->cc_mod.rtcc.rtt_set_this_sack = 0;
        bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
        if (net->rtt) {
            div = net->rtt / 1000;
            if (div) {
                inst_bw = bytes_for_this_rtt / div;
                inst_off = inst_bw >> bw_shift;
                if (inst_bw > nbw)
                    inst_ind = SCTP_INST_GAINING;
                else if ((inst_bw + inst_off) < nbw)
                    inst_ind = SCTP_INST_LOOSING;
                else
                    inst_ind = SCTP_INST_NEUTRAL;
                probepoint |= ((0xb << 16) | inst_ind);
            } else {
                inst_ind = net->cc_mod.rtcc.last_inst_ind;
                inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
                /* Can't determine, do not change */
                probepoint |= ((0xc << 16) | inst_ind);
            }
        } else {
            inst_ind = net->cc_mod.rtcc.last_inst_ind;
            inst_bw = bytes_for_this_rtt;
            /* Can't determine, do not change */
            probepoint |= ((0xd << 16) | inst_ind);
        }
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((nbw << 32) | inst_bw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
            net->flight_size,
            probepoint);
    } else {
        /* No rtt measurement, use last one */
        inst_ind = net->cc_mod.rtcc.last_inst_ind;
    }
    bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
        ret = cc_bw_increase(stcb, net, nbw, vtag);
        goto out;
    }
    rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
        ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
        goto out;
    }
    /*
     * If we reach here then we are in a situation where the bw stayed
     * the same.
     */
    ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
    net->cc_mod.rtcc.last_inst_ind = inst_ind;
    return (ret);
}
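
/*
 * Common SACK processing for the RFC 4960 and RTCC variants: each
 * destination with newly acked data gets slow start (with Appropriate
 * Byte Counting, limited per ack by sctp_L2_abc_variable MTUs) while
 * cwnd <= ssthresh, and congestion avoidance via partial_bytes_acked
 * above that. The RPV1/RPV2/MPTCP modes couple the per-path increase
 * to totals computed across all destinations, and with use_rtcc set
 * the bandwidth/step logic above can veto the increase entirely.
 */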
static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
    struct sctp_nets *net;
    int old_cwnd;
    uint32_t t_ssthresh, t_cwnd, incr;
    uint64_t t_ucwnd_sbw;
    uint64_t t_path_mptcp;
    uint64_t mptcp_like_alpha;
    uint32_t srtt;
    uint64_t max_path;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    t_path_mptcp = 0;
    mptcp_like_alpha = 1;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
        max_path = 0;
        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            /* lastsa>>3; we don't need to divide ... */
            srtt = net->lastsa;
            if (srtt > 0) {
                uint64_t tmp;

                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
                t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
                    (((uint64_t)net->mtu) * (uint64_t)srtt);
                tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
                    ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
                if (tmp > max_path) {
                    max_path = tmp;
                }
            }
        }
        if (t_path_mptcp > 0) {
            mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
        } else {
            mptcp_like_alpha = 1;
        }
    }
    if (t_ssthresh == 0) {
        t_ssthresh = 1;
    }
    if (t_ucwnd_sbw == 0) {
        t_ucwnd_sbw = 1;
    }
    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update.
             */
            return;
        }
        /*
         * Did any measurements go on for this network?
         */
        if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
            uint64_t nbw;

            /*
             * At this point our bw_bytes has been updated by
             * incoming sack information.
             *
             * But our bw may not yet be set.
             */
            if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
                nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
            } else {
                nbw = net->cc_mod.rtcc.bw_bytes;
            }
            if (net->cc_mod.rtcc.lbw) {
                if (cc_bw_limit(stcb, net, nbw)) {
                    /* Hold here, no update */
                    continue;
                }
            } else {
                uint64_t vtag, probepoint;

                probepoint = (((uint64_t)net->cwnd) << 32);
                probepoint |= ((0xa << 16) | 0);
                vtag = (net->rtt << 32) |
                    (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
                    (stcb->rport);

                SDT_PROBE5(sctp, cwnd, net, rttvar,
                    vtag,
                    nbw,
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    net->flight_size,
                    probepoint);
                net->cc_mod.rtcc.lbw = nbw;
                net->cc_mod.rtcc.lbw_rtt = net->rtt;
                if (net->cc_mod.rtcc.rtt_set_this_sack) {
                    net->cc_mod.rtcc.rtt_set_this_sack = 0;
                    net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
                }
            }
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    uint32_t limit;

                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            mptcp_like_alpha *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
                            SHIFT_MPTCP_MULTI);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            mptcp_like_alpha) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr > net->net_ack) {
                            incr = net->net_ack;
                        }
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->net_ack;
                        if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
                            incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
                        }
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, incr,
                            SCTP_CWND_LOG_FROM_SS);
                    }
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                /*
                 * Add to pba
                 */
                net->partial_bytes_acked += net->net_ack;

                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        incr = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        incr = (uint32_t)((uint64_t)net->mtu *
                            (uint64_t)net->cwnd /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        incr = (uint32_t)((mptcp_like_alpha *
                            (uint64_t)net->cwnd) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->mtu;
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd;

    old_cwnd = net->cwnd;
    net->cwnd = net->mtu;
    SDT_PROBE5(sctp, cwnd, net, ack,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        old_cwnd, net->cwnd);
    SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
        (void *)net, net->cwnd);
}
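
/*
 * T3-RTX handling: in the default mode this is RFC 4960 Section 7.2.3,
 * ssthresh = max(cwnd / 2, 4 * MTU) and cwnd back to one MTU; the
 * RPV1/RPV2 branches instead derive ssthresh from totals across all
 * destinations.
 */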
static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        struct sctp_nets *lnet;
        uint32_t srtt;

        t_ucwnd_sbw = 0;
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += lnet->ssthresh;
            t_cwnd += lnet->cwnd;
            srtt = lnet->lastsa;
            /* lastsa>>3; we don't need to divide ... */
            if (srtt > 0) {
                t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
            }
        }
        if (t_ssthresh < 1) {
            t_ssthresh = 1;
        }
        if (t_ucwnd_sbw < 1) {
            t_ucwnd_sbw = 1;
        }
        if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
            net->ssthresh = (uint32_t)(((uint64_t)4 *
                (uint64_t)net->mtu *
                (uint64_t)net->ssthresh) /
                (uint64_t)t_ssthresh);
        } else {
            uint64_t cc_delta;

            srtt = net->lastsa;
            /* lastsa>>3; we don't need to divide ... */
            if (srtt == 0) {
                srtt = 1;
            }
            cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
            if (cc_delta < t_cwnd) {
                net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
            } else {
                net->ssthresh = net->mtu;
            }
        }
        if ((net->cwnd > t_cwnd / 2) &&
            (net->ssthresh < net->cwnd - t_cwnd / 2)) {
            net->ssthresh = net->cwnd - t_cwnd / 2;
        }
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
        }
    } else {
        net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
    }
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    SDT_PROBE5(sctp, cwnd, net, to,
        stcb->asoc.my_vtag,
        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
        net,
        old_cwnd, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
    int old_cwnd = net->cwnd;

    if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
        /* Data center Congestion Control */
        if (in_window == 0) {
            /*
             * Go to CA with the cwnd at the point we sent the
             * TSN that was marked with a CE.
             */
            if (net->ecn_prev_cwnd < net->cwnd) {
                /* Restore to prev cwnd */
                net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
            } else {
                /* Just cut in 1/2 */
                net->cwnd /= 2;
            }
            /* Drop to CA */
            net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        } else {
            /*
             * Further tuning down is required beyond the
             * drastic original cut.
             */
            net->ssthresh -= (net->mtu * num_pkt_lost);
            net->cwnd -= (net->mtu * num_pkt_lost);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
    } else {
        if (in_window == 0) {
            SCTP_STAT_INCR(sctps_ecnereducedcwnd);
            net->ssthresh = net->cwnd / 2;
            if (net->ssthresh < net->mtu) {
                net->ssthresh = net->mtu;
                /*
                 * here back off the timer as well, to slow
                 * us down
                 */
                net->RTO <<= 1;
            }
            net->cwnd = net->ssthresh;
            SDT_PROBE5(sctp, cwnd, net, ecn,
                stcb->asoc.my_vtag,
                ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                net,
                old_cwnd, net->cwnd);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
    }
}
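
/*
 * Packet-drop chunk handling. bw_avail below is the bandwidth-delay
 * product in bytes: bottle_bw (bytes per second) times net->rtt
 * (microseconds) divided by 10^6, i.e. roughly the most data that can
 * be in flight without queueing at the reported bottleneck.
 */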
static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
    uint32_t bw_avail;
    unsigned int incr;
    int old_cwnd = net->cwnd;

    /* get the bottleneck bw */
    *bottle_bw = ntohl(cp->bottle_bw);
    /* and what's on queue */
    *on_queue = ntohl(cp->current_onq);
    /*
     * Adjust the on-queue figure if our flight is larger; it could be
     * that the router has not yet gotten all of our data "in flight"
     * to it.
     */
    if (*on_queue < net->flight_size) {
        *on_queue = net->flight_size;
    }
    /* rtt is measured in micro seconds, bottle_bw in bytes per second */
    bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
    if (bw_avail > *bottle_bw) {
        /*
         * Cap the growth to no more than the bottleneck. This can
         * happen as RTT slides up due to queues. It also means that
         * if you have more than a 1 second RTT with an empty queue
         * you will be limited to the bottle_bw per second no matter
         * if other points have 1/2 the RTT and you could get more
         * out...
         */
        bw_avail = *bottle_bw;
    }
    if (*on_queue > bw_avail) {
        /*
         * No room for anything else; don't allow anything else to be
         * "added to the fire".
         */
        int seg_inflight, seg_onqueue, my_portion;

        net->partial_bytes_acked = 0;
        /* how much are we over queue size? */
        incr = *on_queue - bw_avail;
        if (stcb->asoc.seen_a_sack_this_pkt) {
            /*
             * undo any cwnd adjustment that the sack might have
             * made
             */
            net->cwnd = net->prev_cwnd;
        }
        /* Now how much of that is mine? */
        seg_inflight = net->flight_size / net->mtu;
        seg_onqueue = *on_queue / net->mtu;
        my_portion = (incr * seg_inflight) / seg_onqueue;

        /* Have I made an adjustment already? */
        if (net->cwnd > net->flight_size) {
            /*
             * For this flight we already made an adjustment, so
             * decrease the portion by a share of our previous
             * adjustment.
             */
            int diff_adj;

            diff_adj = net->cwnd - net->flight_size;
            if (diff_adj > my_portion)
                my_portion = 0;
            else
                my_portion -= diff_adj;
        }
        /*
         * Back down to the previous cwnd (assume we have had a sack
         * before this packet), minus whatever portion of the overage
         * is our fault.
         */
        net->cwnd -= my_portion;

        /* we will NOT back down more than 1 MTU */
        if (net->cwnd <= net->mtu) {
            net->cwnd = net->mtu;
        }
        /* force into CA */
        net->ssthresh = net->cwnd - 1;
    } else {
        /*
         * Take 1/4 of the space left or max_burst, whichever is
         * less.
         */
        incr = (bw_avail - *on_queue) >> 2;
        if ((stcb->asoc.max_burst > 0) &&
            (stcb->asoc.max_burst * net->mtu < incr)) {
            incr = stcb->asoc.max_burst * net->mtu;
        }
        net->cwnd += incr;
    }
    if (net->cwnd > bw_avail) {
        /* We can't exceed the pipe size */
        net->cwnd = bw_avail;
    }
    if (net->cwnd < net->mtu) {
        /* We always have 1 MTU */
        net->cwnd = net->mtu;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (net->cwnd - old_cwnd != 0) {
        /* log only changes */
        SDT_PROBE5(sctp, cwnd, net, pd,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                SCTP_CWND_LOG_FROM_SAT);
        }
    }
}

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
    int old_cwnd = net->cwnd;

    if (net->ssthresh < net->cwnd)
        net->ssthresh = net->cwnd;
    if (burst_limit) {
        net->cwnd = (net->flight_size + (burst_limit * net->mtu));
        sctp_enforce_cwnd_limit(&stcb->asoc, net);
        SDT_PROBE5(sctp, cwnd, net, bl,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
        }
    }
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing zero as the last argument disables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    /* Passing zero as the last argument disables the rtcc algorithm */
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/* Here starts the RTCCVAR type CC invented by RRS which
 * is a slight mod to RFC 2581. We reuse a common routine or
 * two since these algorithms are so close and need to
 * remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set > 0) {
        /* We had a bw measurement going on */
        struct timeval ltls;

        SCTP_GETPTIME_TIMEVAL(&ltls);
        timevalsub(&ltls, &net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
    }
}

static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    if (net->cc_mod.rtcc.lbw) {
        /* Clear the old bw; we went to 0 in-flight */
        vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
            (stcb->rport);
        probepoint = (((uint64_t)net->cwnd) << 32);
        /* Probe point 8 */
        probepoint |= ((8 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | 0),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        net->cc_mod.rtcc.lbw_rtt = 0;
        net->cc_mod.rtcc.cwnd_at_bw_set = 0;
        net->cc_mod.rtcc.lbw = 0;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
        net->cc_mod.rtcc.bw_tot_time = 0;
        net->cc_mod.rtcc.bw_bytes = 0;
        net->cc_mod.rtcc.tls_needs_set = 0;
        if (net->cc_mod.rtcc.steady_step) {
            net->cc_mod.rtcc.vol_reduce = 0;
            net->cc_mod.rtcc.step_cnt = 0;
            net->cc_mod.rtcc.last_step_state = 0;
        }
        if (net->cc_mod.rtcc.ret_from_eq) {
            /* less aggressive one - reset cwnd too */
            uint32_t cwnd_in_mtu, cwnd;

            cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
            if (cwnd_in_mtu == 0) {
                /*
                 * Using 0 means that the value of RFC 4960
                 * is used.
                 */
                cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
            } else {
                /*
                 * We take the minimum of the burst limit
                 * and the initial congestion window.
                 */
                if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
                    cwnd_in_mtu = stcb->asoc.max_burst;
                cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
            }
            if (net->cwnd > cwnd) {
                /*
                 * Only lower it if we are not coming out of
                 * a timeout (i.e. already down to 1 MTU).
                 */
                net->cwnd = cwnd;
            }
        }
    }
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    sctp_set_initial_cc_param(stcb, net);
    stcb->asoc.use_precise_time = 1;
    probepoint = (((uint64_t)net->cwnd) << 32);
    probepoint |= ((9 << 16) | 0);
    vtag = (net->rtt << 32) |
        (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
        (stcb->rport);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        0,
        0,
        0,
        probepoint);
    net->cc_mod.rtcc.lbw_rtt = 0;
    net->cc_mod.rtcc.cwnd_at_bw_set = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.lbw = 0;
    net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
    net->cc_mod.rtcc.bw_tot_time = 0;
    net->cc_mod.rtcc.bw_bytes = 0;
    net->cc_mod.rtcc.tls_needs_set = 0;
    net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
    net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
    net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
    net->cc_mod.rtcc.step_cnt = 0;
    net->cc_mod.rtcc.last_step_state = 0;
}

static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
    struct sctp_nets *net;

    if (setorget == 1) {
        /* a set */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
            }
        } else {
            return (EINVAL);
        }
    } else {
        /* a get */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
        } else {
            return (EINVAL);
        }
    }
    return (0);
}

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set == 0) {
        SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.tls_needs_set = 2;
    }
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing one as the last argument enables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
    net->cc_mod.rtcc.rtt_set_this_sack = 1;
}

/* Here starts Sally Floyd's HS-TCP */

struct sctp_hs_raise_drop {
    int32_t cwnd;
    int8_t increase;
    int8_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73
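
/*
 * The rows appear to follow the HighSpeed TCP response function of
 * RFC 3649: cwnd thresholds are in 1024-byte units (compared against
 * net->cwnd >> 10 below), increase is the number of 1024-byte units
 * added per adjustment, and drop_percent is the percentage cut applied
 * on loss.
 */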
static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
    {38, 1, 50},	/* 0 */
    {118, 2, 44},	/* 1 */
    {221, 3, 41},	/* 2 */
    {347, 4, 38},	/* 3 */
    {495, 5, 37},	/* 4 */
    {663, 6, 35},	/* 5 */
    {851, 7, 34},	/* 6 */
    {1058, 8, 33},	/* 7 */
    {1284, 9, 32},	/* 8 */
    {1529, 10, 31},	/* 9 */
    {1793, 11, 30},	/* 10 */
    {2076, 12, 29},	/* 11 */
    {2378, 13, 28},	/* 12 */
    {2699, 14, 28},	/* 13 */
    {3039, 15, 27},	/* 14 */
    {3399, 16, 27},	/* 15 */
    {3778, 17, 26},	/* 16 */
    {4177, 18, 26},	/* 17 */
    {4596, 19, 25},	/* 18 */
    {5036, 20, 25},	/* 19 */
    {5497, 21, 24},	/* 20 */
    {5979, 22, 24},	/* 21 */
    {6483, 23, 23},	/* 22 */
    {7009, 24, 23},	/* 23 */
    {7558, 25, 22},	/* 24 */
    {8130, 26, 22},	/* 25 */
    {8726, 27, 22},	/* 26 */
    {9346, 28, 21},	/* 27 */
    {9991, 29, 21},	/* 28 */
    {10661, 30, 21},	/* 29 */
    {11358, 31, 20},	/* 30 */
    {12082, 32, 20},	/* 31 */
    {12834, 33, 20},	/* 32 */
    {13614, 34, 19},	/* 33 */
    {14424, 35, 19},	/* 34 */
    {15265, 36, 19},	/* 35 */
    {16137, 37, 19},	/* 36 */
    {17042, 38, 18},	/* 37 */
    {17981, 39, 18},	/* 38 */
    {18955, 40, 18},	/* 39 */
    {19965, 41, 17},	/* 40 */
    {21013, 42, 17},	/* 41 */
    {22101, 43, 17},	/* 42 */
    {23230, 44, 17},	/* 43 */
    {24402, 45, 16},	/* 44 */
    {25618, 46, 16},	/* 45 */
    {26881, 47, 16},	/* 46 */
    {28193, 48, 16},	/* 47 */
    {29557, 49, 15},	/* 48 */
    {30975, 50, 15},	/* 49 */
    {32450, 51, 15},	/* 50 */
    {33986, 52, 15},	/* 51 */
    {35586, 53, 14},	/* 52 */
    {37253, 54, 14},	/* 53 */
    {38992, 55, 14},	/* 54 */
    {40808, 56, 14},	/* 55 */
    {42707, 57, 13},	/* 56 */
    {44694, 58, 13},	/* 57 */
    {46776, 59, 13},	/* 58 */
    {48961, 60, 13},	/* 59 */
    {51258, 61, 13},	/* 60 */
    {53677, 62, 12},	/* 61 */
    {56230, 63, 12},	/* 62 */
    {58932, 64, 12},	/* 63 */
    {61799, 65, 12},	/* 64 */
    {64851, 66, 11},	/* 65 */
    {68113, 67, 11},	/* 66 */
    {71617, 68, 11},	/* 67 */
    {75401, 69, 10},	/* 68 */
    {79517, 70, 10},	/* 69 */
    {84035, 71, 10},	/* 70 */
    {89053, 72, 10},	/* 71 */
    {94717, 73, 9}	/* 72 */
};

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx, incr;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    indx = SCTP_HS_TABLE_SIZE - 1;

    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        if (net->net_ack > net->mtu) {
            net->cwnd += net->mtu;
        } else {
            net->cwnd += net->net_ack;
        }
    } else {
        for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
            if (cur_val < sctp_cwnd_adjust[i].cwnd) {
                indx = i;
                break;
            }
        }
        net->last_hs_used = indx;
        incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
        net->cwnd += incr;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
    }
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        net->ssthresh = net->cwnd / 2;
        if (net->ssthresh < (net->mtu * 2)) {
            net->ssthresh = 2 * net->mtu;
        }
        net->cwnd = net->ssthresh;
    } else {
        /* drop by the proper amount */
        net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
            (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
        net->cwnd = net->ssthresh;
        /* now where are we */
        indx = net->last_hs_used;
        cur_val = net->cwnd >> 10;
        /* reset where we are in the table */
        if (cur_val < sctp_cwnd_adjust[0].cwnd) {
            /* fell out of HS */
            net->last_hs_used = 0;
        } else {
            for (i = indx; i >= 1; i--) {
                if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
                    break;
                }
            }
            net->last_hs_used = i;
        }
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
    }
}

static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per Section 7.2.3, check whether any
                 * destinations had a fast retransmit to
                 * them. If so, we need to adjust ssthresh
                 * and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;

                sctp_hs_cwnd_decrease(stcb, net);

                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
    struct sctp_nets *net;

    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update.
             */
            return;
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    sctp_hs_cwnd_increase(stcb, net);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                net->partial_bytes_acked += net->net_ack;
                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    net->cwnd += net->mtu;
                    sctp_enforce_cwnd_limit(asoc, net);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;
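
/*
 * Sequence-space helper: returns non-zero iff seq1 lies in the window
 * from seq2 up to seq3, evaluated with unsigned (mod 2^32) arithmetic
 * so that it is robust against wrap-around.
 */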
static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return (seq3 - seq2 >= seq1 - seq2);
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
    return (sctp_get_tick_count() - ca->last_cong);
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
    return (ca->minRTT == 0 ? htcp_cong_time(ca) : htcp_cong_time(ca) / ca->minRTT);
}

static inline void
htcp_reset(struct htcp *ca)
{
    ca->undo_last_cong = ca->last_cong;
    ca->undo_maxRTT = ca->maxRTT;
    ca->undo_old_maxB = ca->old_maxB;
    ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
    net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
    net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
    return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
    uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

    /* keep track of minimum RTT seen so far, minRTT is zero at first */
    if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
        net->cc_mod.htcp_ca.minRTT = srtt;

    /* max RTT */
    if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
        if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
            net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
        if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + sctp_msecs_to_ticks(20))
            net->cc_mod.htcp_ca.maxRTT = srtt;
    }
}

static void
measure_achieved_throughput(struct sctp_nets *net)
{
    uint32_t now = sctp_get_tick_count();

    if (net->fast_retran_ip == 0)
        net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

    if (!use_bandwidth_switch)
        return;

    /* achieved throughput calculations */
    /* JRS - not 100% sure of this statement */
    if (net->fast_retran_ip == 1) {
        net->cc_mod.htcp_ca.bytecount = 0;
        net->cc_mod.htcp_ca.lasttime = now;
        return;
    }

    net->cc_mod.htcp_ca.bytecount += net->net_ack;
    if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
        (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
        (net->cc_mod.htcp_ca.minRTT > 0)) {
        uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

        if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
            /* just after backoff */
            net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
        } else {
            net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
            if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
                net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
            if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
                net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
        }
        net->cc_mod.htcp_ca.bytecount = 0;
        net->cc_mod.htcp_ca.lasttime = now;
    }
}
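
/*
 * Adaptive backoff: beta is kept in Q7 fixed point (128 == 1.0) and
 * tracks minRTT / maxRTT, clamped to [BETA_MIN, BETA_MAX]. With the
 * bandwidth switch enabled, a measured throughput change of more than
 * 20% (the 4/5 .. 6/5 window below) forces beta back to BETA_MIN.
 */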
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
    if (use_bandwidth_switch) {
        uint32_t maxB = ca->maxB;
        uint32_t old_maxB = ca->old_maxB;

        ca->old_maxB = ca->maxB;

        if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
            ca->beta = BETA_MIN;
            ca->modeswitch = 0;
            return;
        }
    }

    if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) {
        ca->beta = (minRTT << 7) / maxRTT;
        if (ca->beta < BETA_MIN)
            ca->beta = BETA_MIN;
        else if (ca->beta > BETA_MAX)
            ca->beta = BETA_MAX;
    } else {
        ca->beta = BETA_MIN;
        ca->modeswitch = 1;
    }
}
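
/*
 * Additive-increase factor: this computes the H-TCP increase function,
 * alpha(delta) = 1 + 10(delta - 1) + ((delta - 1) / 2)^2 with delta
 * the time since the last congestion event in seconds (only applied
 * past one second), optionally scaled by the RTT, and finally
 * multiplied by 2 * (1 - beta) in Q7 fixed point.
 */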
*on_queue = net->flight_size; 1166 } 1167 /* rtt is measured in micro seconds, bottle_bw in bytes per second */ 1168 bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000); 1169 if (bw_avail > *bottle_bw) { 1170 /* 1171 * Cap the growth to no more than the bottle neck. This can 1172 * happen as RTT slides up due to queues. It also means if 1173 * you have more than a 1 second RTT with a empty queue you 1174 * will be limited to the bottle_bw per second no matter if 1175 * other points have 1/2 the RTT and you could get more 1176 * out... 1177 */ 1178 bw_avail = *bottle_bw; 1179 } 1180 if (*on_queue > bw_avail) { 1181 /* 1182 * No room for anything else don't allow anything else to be 1183 * "added to the fire". 1184 */ 1185 int seg_inflight, seg_onqueue, my_portion; 1186 1187 net->partial_bytes_acked = 0; 1188 /* how much are we over queue size? */ 1189 incr = *on_queue - bw_avail; 1190 if (stcb->asoc.seen_a_sack_this_pkt) { 1191 /* 1192 * undo any cwnd adjustment that the sack might have 1193 * made 1194 */ 1195 net->cwnd = net->prev_cwnd; 1196 } 1197 /* Now how much of that is mine? */ 1198 seg_inflight = net->flight_size / net->mtu; 1199 seg_onqueue = *on_queue / net->mtu; 1200 my_portion = (incr * seg_inflight) / seg_onqueue; 1201 1202 /* Have I made an adjustment already */ 1203 if (net->cwnd > net->flight_size) { 1204 /* 1205 * for this flight I made an adjustment we need to 1206 * decrease the portion by a share our previous 1207 * adjustment. 1208 */ 1209 int diff_adj; 1210 1211 diff_adj = net->cwnd - net->flight_size; 1212 if (diff_adj > my_portion) 1213 my_portion = 0; 1214 else 1215 my_portion -= diff_adj; 1216 } 1217 /* 1218 * back down to the previous cwnd (assume we have had a sack 1219 * before this packet). minus what ever portion of the 1220 * overage is my fault. 1221 */ 1222 net->cwnd -= my_portion; 1223 1224 /* we will NOT back down more than 1 MTU */ 1225 if (net->cwnd <= net->mtu) { 1226 net->cwnd = net->mtu; 1227 } 1228 /* force into CA */ 1229 net->ssthresh = net->cwnd - 1; 1230 } else { 1231 /* 1232 * Take 1/4 of the space left or max burst up .. whichever 1233 * is less. 
1234 */ 1235 incr = (bw_avail - *on_queue) >> 2; 1236 if ((stcb->asoc.max_burst > 0) && 1237 (stcb->asoc.max_burst * net->mtu < incr)) { 1238 incr = stcb->asoc.max_burst * net->mtu; 1239 } 1240 net->cwnd += incr; 1241 } 1242 if (net->cwnd > bw_avail) { 1243 /* We can't exceed the pipe size */ 1244 net->cwnd = bw_avail; 1245 } 1246 if (net->cwnd < net->mtu) { 1247 /* We always have 1 MTU */ 1248 net->cwnd = net->mtu; 1249 } 1250 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1251 if (net->cwnd - old_cwnd != 0) { 1252 /* log only changes */ 1253 SDT_PROBE5(sctp, cwnd, net, pd, 1254 stcb->asoc.my_vtag, 1255 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1256 net, 1257 old_cwnd, net->cwnd); 1258 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1259 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 1260 SCTP_CWND_LOG_FROM_SAT); 1261 } 1262 } 1263 } 1264 1265 static void 1266 sctp_cwnd_update_after_output(struct sctp_tcb *stcb, 1267 struct sctp_nets *net, int burst_limit) 1268 { 1269 int old_cwnd = net->cwnd; 1270 1271 if (net->ssthresh < net->cwnd) 1272 net->ssthresh = net->cwnd; 1273 if (burst_limit) { 1274 net->cwnd = (net->flight_size + (burst_limit * net->mtu)); 1275 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1276 SDT_PROBE5(sctp, cwnd, net, bl, 1277 stcb->asoc.my_vtag, 1278 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1279 net, 1280 old_cwnd, net->cwnd); 1281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1282 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST); 1283 } 1284 } 1285 } 1286 1287 static void 1288 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb, 1289 struct sctp_association *asoc, 1290 int accum_moved, int reneged_all, int will_exit) 1291 { 1292 /* Passing a zero argument in last disables the rtcc algorithm */ 1293 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0); 1294 } 1295 1296 static void 1297 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 1298 int in_window, int num_pkt_lost) 1299 { 1300 /* Passing a zero argument in last disables the rtcc algorithm */ 1301 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0); 1302 } 1303 1304 /* Here starts the RTCCVAR type CC invented by RRS which 1305 * is a slight mod to RFC2581. We reuse a common routine or 1306 * two since these algorithms are so close and need to 1307 * remain the same. 1308 */ 1309 static void 1310 sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 1311 int in_window, int num_pkt_lost) 1312 { 1313 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1); 1314 } 1315 1316 static 1317 void 1318 sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net, 1319 struct sctp_tmit_chunk *tp1) 1320 { 1321 net->cc_mod.rtcc.bw_bytes += tp1->send_size; 1322 } 1323 1324 static void 1325 sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED, 1326 struct sctp_nets *net) 1327 { 1328 if (net->cc_mod.rtcc.tls_needs_set > 0) { 1329 /* We had a bw measurment going on */ 1330 struct timeval ltls; 1331 1332 SCTP_GETPTIME_TIMEVAL(<ls); 1333 timevalsub(<ls, &net->cc_mod.rtcc.tls); 1334 net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec; 1335 } 1336 } 1337 1338 static void 1339 sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb, 1340 struct sctp_nets *net) 1341 { 1342 uint64_t vtag, probepoint; 1343 1344 if (net->cc_mod.rtcc.lbw) { 1345 /* Clear the old bw.. 
we went to 0 in-flight */ 1346 vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | 1347 (stcb->rport); 1348 probepoint = (((uint64_t)net->cwnd) << 32); 1349 /* Probe point 8 */ 1350 probepoint |= ((8 << 16) | 0); 1351 SDT_PROBE5(sctp, cwnd, net, rttvar, 1352 vtag, 1353 ((net->cc_mod.rtcc.lbw << 32) | 0), 1354 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 1355 net->flight_size, 1356 probepoint); 1357 net->cc_mod.rtcc.lbw_rtt = 0; 1358 net->cc_mod.rtcc.cwnd_at_bw_set = 0; 1359 net->cc_mod.rtcc.lbw = 0; 1360 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0; 1361 net->cc_mod.rtcc.vol_reduce = 0; 1362 net->cc_mod.rtcc.bw_tot_time = 0; 1363 net->cc_mod.rtcc.bw_bytes = 0; 1364 net->cc_mod.rtcc.tls_needs_set = 0; 1365 if (net->cc_mod.rtcc.steady_step) { 1366 net->cc_mod.rtcc.vol_reduce = 0; 1367 net->cc_mod.rtcc.step_cnt = 0; 1368 net->cc_mod.rtcc.last_step_state = 0; 1369 } 1370 if (net->cc_mod.rtcc.ret_from_eq) { 1371 /* less aggressive one - reset cwnd too */ 1372 uint32_t cwnd_in_mtu, cwnd; 1373 1374 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd); 1375 if (cwnd_in_mtu == 0) { 1376 /* 1377 * Using 0 means that the value of RFC 4960 1378 * is used. 1379 */ 1380 cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 1381 } else { 1382 /* 1383 * We take the minimum of the burst limit 1384 * and the initial congestion window. 1385 */ 1386 if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst)) 1387 cwnd_in_mtu = stcb->asoc.max_burst; 1388 cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu; 1389 } 1390 if (net->cwnd > cwnd) { 1391 /* 1392 * Only set if we are not a timeout (i.e. 1393 * down to 1 mtu) 1394 */ 1395 net->cwnd = cwnd; 1396 } 1397 } 1398 } 1399 } 1400 1401 static void 1402 sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb, 1403 struct sctp_nets *net) 1404 { 1405 uint64_t vtag, probepoint; 1406 1407 sctp_set_initial_cc_param(stcb, net); 1408 stcb->asoc.use_precise_time = 1; 1409 probepoint = (((uint64_t)net->cwnd) << 32); 1410 probepoint |= ((9 << 16) | 0); 1411 vtag = (net->rtt << 32) | 1412 (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | 1413 (stcb->rport); 1414 SDT_PROBE5(sctp, cwnd, net, rttvar, 1415 vtag, 1416 0, 1417 0, 1418 0, 1419 probepoint); 1420 net->cc_mod.rtcc.lbw_rtt = 0; 1421 net->cc_mod.rtcc.cwnd_at_bw_set = 0; 1422 net->cc_mod.rtcc.vol_reduce = 0; 1423 net->cc_mod.rtcc.lbw = 0; 1424 net->cc_mod.rtcc.vol_reduce = 0; 1425 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0; 1426 net->cc_mod.rtcc.bw_tot_time = 0; 1427 net->cc_mod.rtcc.bw_bytes = 0; 1428 net->cc_mod.rtcc.tls_needs_set = 0; 1429 net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret); 1430 net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step); 1431 net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn); 1432 net->cc_mod.rtcc.step_cnt = 0; 1433 net->cc_mod.rtcc.last_step_state = 0; 1434 1435 } 1436 1437 static int 1438 sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget, 1439 struct sctp_cc_option *cc_opt) 1440 { 1441 struct sctp_nets *net; 1442 1443 if (setorget == 1) { 1444 /* a set */ 1445 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1446 if ((cc_opt->aid_value.assoc_value != 0) && 1447 (cc_opt->aid_value.assoc_value != 1)) { 1448 return (EINVAL); 1449 } 1450 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1451 net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value; 1452 } 1453 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1454 if ((cc_opt->aid_value.assoc_value != 0) && 1455 
(cc_opt->aid_value.assoc_value != 1)) { 1456 return (EINVAL); 1457 } 1458 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1459 net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value; 1460 } 1461 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) { 1462 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1463 net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value; 1464 } 1465 } else { 1466 return (EINVAL); 1467 } 1468 } else { 1469 /* a get */ 1470 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1471 net = TAILQ_FIRST(&stcb->asoc.nets); 1472 if (net == NULL) { 1473 return (EFAULT); 1474 } 1475 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq; 1476 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1477 net = TAILQ_FIRST(&stcb->asoc.nets); 1478 if (net == NULL) { 1479 return (EFAULT); 1480 } 1481 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn; 1482 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) { 1483 net = TAILQ_FIRST(&stcb->asoc.nets); 1484 if (net == NULL) { 1485 return (EFAULT); 1486 } 1487 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step; 1488 } else { 1489 return (EINVAL); 1490 } 1491 } 1492 return (0); 1493 } 1494 1495 static void 1496 sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED, 1497 struct sctp_nets *net) 1498 { 1499 if (net->cc_mod.rtcc.tls_needs_set == 0) { 1500 SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls); 1501 net->cc_mod.rtcc.tls_needs_set = 2; 1502 } 1503 } 1504 1505 static void 1506 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb, 1507 struct sctp_association *asoc, 1508 int accum_moved, int reneged_all, int will_exit) 1509 { 1510 /* Passing a one argument at the last enables the rtcc algorithm */ 1511 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1); 1512 } 1513 1514 static void 1515 sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED, 1516 struct sctp_nets *net, 1517 struct timeval *now SCTP_UNUSED) 1518 { 1519 net->cc_mod.rtcc.rtt_set_this_sack = 1; 1520 } 1521 1522 /* Here starts Sally Floyds HS-TCP */ 1523 1524 struct sctp_hs_raise_drop { 1525 int32_t cwnd; 1526 int8_t increase; 1527 int8_t drop_percent; 1528 }; 1529 1530 #define SCTP_HS_TABLE_SIZE 73 1531 1532 static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 1533 {38, 1, 50}, /* 0 */ 1534 {118, 2, 44}, /* 1 */ 1535 {221, 3, 41}, /* 2 */ 1536 {347, 4, 38}, /* 3 */ 1537 {495, 5, 37}, /* 4 */ 1538 {663, 6, 35}, /* 5 */ 1539 {851, 7, 34}, /* 6 */ 1540 {1058, 8, 33}, /* 7 */ 1541 {1284, 9, 32}, /* 8 */ 1542 {1529, 10, 31}, /* 9 */ 1543 {1793, 11, 30}, /* 10 */ 1544 {2076, 12, 29}, /* 11 */ 1545 {2378, 13, 28}, /* 12 */ 1546 {2699, 14, 28}, /* 13 */ 1547 {3039, 15, 27}, /* 14 */ 1548 {3399, 16, 27}, /* 15 */ 1549 {3778, 17, 26}, /* 16 */ 1550 {4177, 18, 26}, /* 17 */ 1551 {4596, 19, 25}, /* 18 */ 1552 {5036, 20, 25}, /* 19 */ 1553 {5497, 21, 24}, /* 20 */ 1554 {5979, 22, 24}, /* 21 */ 1555 {6483, 23, 23}, /* 22 */ 1556 {7009, 24, 23}, /* 23 */ 1557 {7558, 25, 22}, /* 24 */ 1558 {8130, 26, 22}, /* 25 */ 1559 {8726, 27, 22}, /* 26 */ 1560 {9346, 28, 21}, /* 27 */ 1561 {9991, 29, 21}, /* 28 */ 1562 {10661, 30, 21}, /* 29 */ 1563 {11358, 31, 20}, /* 30 */ 1564 {12082, 32, 20}, /* 31 */ 1565 {12834, 33, 20}, /* 32 */ 1566 {13614, 34, 19}, /* 33 */ 1567 {14424, 35, 19}, /* 34 */ 1568 {15265, 36, 19}, /* 35 */ 1569 {16137, 37, 19}, /* 36 */ 1570 {17042, 38, 18}, /* 37 */ 1571 {17981, 39, 18}, /* 38 */ 1572 {18955, 40, 18}, /* 39 */ 1573 {19965, 41, 
17}, /* 40 */ 1574 {21013, 42, 17}, /* 41 */ 1575 {22101, 43, 17}, /* 42 */ 1576 {23230, 44, 17}, /* 43 */ 1577 {24402, 45, 16}, /* 44 */ 1578 {25618, 46, 16}, /* 45 */ 1579 {26881, 47, 16}, /* 46 */ 1580 {28193, 48, 16}, /* 47 */ 1581 {29557, 49, 15}, /* 48 */ 1582 {30975, 50, 15}, /* 49 */ 1583 {32450, 51, 15}, /* 50 */ 1584 {33986, 52, 15}, /* 51 */ 1585 {35586, 53, 14}, /* 52 */ 1586 {37253, 54, 14}, /* 53 */ 1587 {38992, 55, 14}, /* 54 */ 1588 {40808, 56, 14}, /* 55 */ 1589 {42707, 57, 13}, /* 56 */ 1590 {44694, 58, 13}, /* 57 */ 1591 {46776, 59, 13}, /* 58 */ 1592 {48961, 60, 13}, /* 59 */ 1593 {51258, 61, 13}, /* 60 */ 1594 {53677, 62, 12}, /* 61 */ 1595 {56230, 63, 12}, /* 62 */ 1596 {58932, 64, 12}, /* 63 */ 1597 {61799, 65, 12}, /* 64 */ 1598 {64851, 66, 11}, /* 65 */ 1599 {68113, 67, 11}, /* 66 */ 1600 {71617, 68, 11}, /* 67 */ 1601 {75401, 69, 10}, /* 68 */ 1602 {79517, 70, 10}, /* 69 */ 1603 {84035, 71, 10}, /* 70 */ 1604 {89053, 72, 10}, /* 71 */ 1605 {94717, 73, 9} /* 72 */ 1606 }; 1607 1608 static void 1609 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 1610 { 1611 int cur_val, i, indx, incr; 1612 int old_cwnd = net->cwnd; 1613 1614 cur_val = net->cwnd >> 10; 1615 indx = SCTP_HS_TABLE_SIZE - 1; 1616 1617 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1618 /* normal mode */ 1619 if (net->net_ack > net->mtu) { 1620 net->cwnd += net->mtu; 1621 } else { 1622 net->cwnd += net->net_ack; 1623 } 1624 } else { 1625 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 1626 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 1627 indx = i; 1628 break; 1629 } 1630 } 1631 net->last_hs_used = indx; 1632 incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10); 1633 net->cwnd += incr; 1634 } 1635 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1636 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1637 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS); 1638 } 1639 } 1640 1641 static void 1642 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 1643 { 1644 int cur_val, i, indx; 1645 int old_cwnd = net->cwnd; 1646 1647 cur_val = net->cwnd >> 10; 1648 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1649 /* normal mode */ 1650 net->ssthresh = net->cwnd / 2; 1651 if (net->ssthresh < (net->mtu * 2)) { 1652 net->ssthresh = 2 * net->mtu; 1653 } 1654 net->cwnd = net->ssthresh; 1655 } else { 1656 /* drop by the proper amount */ 1657 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 1658 (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent); 1659 net->cwnd = net->ssthresh; 1660 /* now where are we */ 1661 indx = net->last_hs_used; 1662 cur_val = net->cwnd >> 10; 1663 /* reset where we are in the table */ 1664 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1665 /* feel out of hs */ 1666 net->last_hs_used = 0; 1667 } else { 1668 for (i = indx; i >= 1; i--) { 1669 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 1670 break; 1671 } 1672 } 1673 net->last_hs_used = indx; 1674 } 1675 } 1676 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1678 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 1679 } 1680 } 1681 1682 static void 1683 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb, 1684 struct sctp_association *asoc) 1685 { 1686 struct sctp_nets *net; 1687 1688 /* 1689 * CMT fast recovery code. Need to debug. 
((sctp_cmt_on_off > 0) && 1690 * (net->fast_retran_loss_recovery == 0))) 1691 */ 1692 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1693 if ((asoc->fast_retran_loss_recovery == 0) || 1694 (asoc->sctp_cmt_on_off > 0)) { 1695 /* out of a RFC2582 Fast recovery window? */ 1696 if (net->net_ack > 0) { 1697 /* 1698 * per section 7.2.3, are there any 1699 * destinations that had a fast retransmit 1700 * to them. If so what we need to do is 1701 * adjust ssthresh and cwnd. 1702 */ 1703 struct sctp_tmit_chunk *lchk; 1704 1705 sctp_hs_cwnd_decrease(stcb, net); 1706 1707 lchk = TAILQ_FIRST(&asoc->send_queue); 1708 1709 net->partial_bytes_acked = 0; 1710 /* Turn on fast recovery window */ 1711 asoc->fast_retran_loss_recovery = 1; 1712 if (lchk == NULL) { 1713 /* Mark end of the window */ 1714 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 1715 } else { 1716 asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1; 1717 } 1718 1719 /* 1720 * CMT fast recovery -- per destination 1721 * recovery variable. 1722 */ 1723 net->fast_retran_loss_recovery = 1; 1724 1725 if (lchk == NULL) { 1726 /* Mark end of the window */ 1727 net->fast_recovery_tsn = asoc->sending_seq - 1; 1728 } else { 1729 net->fast_recovery_tsn = lchk->rec.data.tsn - 1; 1730 } 1731 1732 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 1733 stcb->sctp_ep, stcb, net, 1734 SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2); 1735 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 1736 stcb->sctp_ep, stcb, net); 1737 } 1738 } else if (net->net_ack > 0) { 1739 /* 1740 * Mark a peg that we WOULD have done a cwnd 1741 * reduction but RFC2582 prevented this action. 1742 */ 1743 SCTP_STAT_INCR(sctps_fastretransinrtt); 1744 } 1745 } 1746 } 1747 1748 static void 1749 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, 1750 struct sctp_association *asoc, 1751 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit) 1752 { 1753 struct sctp_nets *net; 1754 1755 /******************************/ 1756 /* update cwnd and Early FR */ 1757 /******************************/ 1758 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1759 #ifdef JANA_CMT_FAST_RECOVERY 1760 /* 1761 * CMT fast recovery code. Need to debug. 1762 */ 1763 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 1764 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) || 1765 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) { 1766 net->will_exit_fast_recovery = 1; 1767 } 1768 } 1769 #endif 1770 /* if nothing was acked on this destination skip it */ 1771 if (net->net_ack == 0) { 1772 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1773 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 1774 } 1775 continue; 1776 } 1777 #ifdef JANA_CMT_FAST_RECOVERY 1778 /* 1779 * CMT fast recovery code 1780 */ 1781 /* 1782 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery 1783 * && net->will_exit_fast_recovery == 0) { @@@ Do something 1784 * } else if (sctp_cmt_on_off == 0 && 1785 * asoc->fast_retran_loss_recovery && will_exit == 0) { 1786 */ 1787 #endif 1788 1789 if (asoc->fast_retran_loss_recovery && 1790 (will_exit == 0) && 1791 (asoc->sctp_cmt_on_off == 0)) { 1792 /* 1793 * If we are in loss recovery we skip any cwnd 1794 * update 1795 */ 1796 return; 1797 } 1798 /* 1799 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 1800 * moved. 
1801 */ 1802 if (accum_moved || 1803 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 1804 /* If the cumulative ack moved we can proceed */ 1805 if (net->cwnd <= net->ssthresh) { 1806 /* We are in slow start */ 1807 if (net->flight_size + net->net_ack >= net->cwnd) { 1808 sctp_hs_cwnd_increase(stcb, net); 1809 } else { 1810 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1811 sctp_log_cwnd(stcb, net, net->net_ack, 1812 SCTP_CWND_LOG_NOADV_SS); 1813 } 1814 } 1815 } else { 1816 /* We are in congestion avoidance */ 1817 net->partial_bytes_acked += net->net_ack; 1818 if ((net->flight_size + net->net_ack >= net->cwnd) && 1819 (net->partial_bytes_acked >= net->cwnd)) { 1820 net->partial_bytes_acked -= net->cwnd; 1821 net->cwnd += net->mtu; 1822 sctp_enforce_cwnd_limit(asoc, net); 1823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1824 sctp_log_cwnd(stcb, net, net->mtu, 1825 SCTP_CWND_LOG_FROM_CA); 1826 } 1827 } else { 1828 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1829 sctp_log_cwnd(stcb, net, net->net_ack, 1830 SCTP_CWND_LOG_NOADV_CA); 1831 } 1832 } 1833 } 1834 } else { 1835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1836 sctp_log_cwnd(stcb, net, net->mtu, 1837 SCTP_CWND_LOG_NO_CUMACK); 1838 } 1839 } 1840 } 1841 } 1842 1843 /* 1844 * H-TCP congestion control. The algorithm is detailed in: 1845 * R.N.Shorten, D.J.Leith: 1846 * "H-TCP: TCP for high-speed and long-distance networks" 1847 * Proc. PFLDnet, Argonne, 2004. 1848 * http://www.hamilton.ie/net/htcp3.pdf 1849 */ 1850 1851 static int use_rtt_scaling = 1; 1852 static int use_bandwidth_switch = 1; 1853 1854 static inline int 1855 between(uint32_t seq1, uint32_t seq2, uint32_t seq3) 1856 { 1857 return (seq3 - seq2 >= seq1 - seq2); 1858 } 1859 1860 static inline uint32_t 1861 htcp_cong_time(struct htcp *ca) 1862 { 1863 return (sctp_get_tick_count() - ca->last_cong); 1864 } 1865 1866 static inline uint32_t 1867 htcp_ccount(struct htcp *ca) 1868 { 1869 return (ca->minRTT == 0 ? 
static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

	/* keep track of the minimum RTT seen so far; minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + sctp_msecs_to_ticks(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}

static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}

	net->cc_mod.htcp_ca.bytecount += net->net_ack;
	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
	    (net->cc_mod.htcp_ca.minRTT > 0)) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}
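
/*
 * Note on the throughput sample above: cur_Bi is bytecount scaled to whole
 * MTUs and divided by the elapsed ticks (times hz), so it approximates the
 * achieved rate in packets per second.  Bi is then smoothed as 3/4 old
 * value plus 1/4 new sample, and maxB records its maximum; the
 * bandwidth-switch test in htcp_beta_update() compares the current and
 * previous maxima to detect a throughput change of more than about 20%.
 */
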
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}

	if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t)hz) {
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}

	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		/* clamping ratio to interval [0.5,10]<<3 */
		scale = min(max(scale, 1U << 2), 10U << 3);
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}

	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
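
/*
 * A note on the fixed-point scheme used throughout: alpha and beta carry a
 * << 7 scaling, so a value of 128 means 1.0, and beta = minRTT / maxRTT is
 * computed as (minRTT << 7) / maxRTT.  The alpha update implements the
 * H-TCP increase function 2 * (1 - beta) * f(delta), where f grows
 * quadratically with the time since the last congestion event once that
 * time exceeds one second, optionally rescaled by the RTT.
 */
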
/*
 * After we have the rtt data to calculate beta, we'd still prefer to wait
 * one rtt before we adjust our beta, to ensure we are working from
 * consistent data.
 *
 * This function should be called when we hit a congestion event, since only
 * at that point do we really have a real sense of maxRTT (the queues en
 * route were getting just too full now).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/*
	 * add slowly fading memory for maxRTT to accommodate routing
	 * changes etc.
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
}

static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *	return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(net);

		/*
		 * In dangerous area, increase slowly. In theory this is
		 * net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
			htcp_alpha_update(&net->cc_mod.htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	}
}
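
/*
 * Worked example of the congestion-avoidance step above (values are
 * illustrative only): with alpha == 256 (i.e. 2.0 after the >> 7) and a
 * cwnd of 20 MTUs, the test fires once partial_bytes_acked reaches 10
 * MTUs' worth of data, so the window grows by roughly alpha/128 MTUs per
 * round trip -- the fixed-point equivalent of cwnd += alpha / cwnd per ack.
 */
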
#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return (net->ssthresh);
}
#endif

static void
htcp_init(struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * We take the max of two MTUs and SCTP_INITIAL_CWND, then limit
	 * this to four MTUs of sending, as in RFC 4960.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination, skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery, we skip any cwnd
			 * update.
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
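
/*
 * On a fast retransmit, H-TCP (below) behaves like the base algorithm
 * except that the new ssthresh comes from htcp_recalc_ssthresh(), i.e.
 * roughly beta * cwnd with beta in [BETA_MIN, BETA_MAX] rather than the
 * fixed one-half used by the base algorithm, and never less than two MTUs.
 */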
static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of an RFC2582 fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per section 7.2.3: are there any
				 * destinations that had a fast retransmit
				 * sent to them? If so, we need to adjust
				 * ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				sctp_enforce_cwnd_limit(asoc, net);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction, but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
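
/*
 * Timeout handling for H-TCP (below): ssthresh is recalculated from the
 * current beta, and cwnd collapses to a single MTU, matching the RFC 4960
 * response to a T3-rtx expiration.
 */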
static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		sctp_enforce_cwnd_limit(&stcb->asoc, net);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
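
/*
 * Dispatch table for the pluggable congestion control modules.  The entries
 * are, in order: the RFC 4960 default, HighSpeed-TCP, H-TCP, and the RTCC
 * variant; an association's module is chosen by indexing this array (the
 * default comes from a sysctl, and it can be switched per association,
 * e.g. via the SCTP_PLUGGABLE_CC socket option).
 */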
const struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
		.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
	}
};