1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_input.h> 44 #include <netinet/sctp_indata.h> 45 #include <netinet/sctp_uio.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_auth.h> 48 #include <netinet/sctp_asconf.h> 49 #include <netinet/sctp_dtrace_declare.h> 50 51 #define SHIFT_MPTCP_MULTI_N 40 52 #define SHIFT_MPTCP_MULTI_Z 16 53 #define SHIFT_MPTCP_MULTI 8 54 55 static void 56 sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net) 57 { 58 if ((assoc->max_cwnd > 0) && 59 (net->cwnd > assoc->max_cwnd) && 60 (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { 61 net->cwnd = assoc->max_cwnd; 62 if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { 63 net->cwnd = net->mtu - sizeof(struct sctphdr); 64 } 65 } 66 } 67 68 static void 69 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) 70 { 71 struct sctp_association *assoc; 72 uint32_t cwnd_in_mtu; 73 74 assoc = &stcb->asoc; 75 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd); 76 if (cwnd_in_mtu == 0) { 77 /* Using 0 means that the value of RFC 4960 is used. */ 78 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 79 } else { 80 /* 81 * We take the minimum of the burst limit and the initial 82 * congestion window. 83 */ 84 if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst)) 85 cwnd_in_mtu = assoc->max_burst; 86 net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu; 87 } 88 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) || 89 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) { 90 /* In case of resource pooling initialize appropriately */ 91 net->cwnd /= assoc->numnets; 92 if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { 93 net->cwnd = net->mtu - sizeof(struct sctphdr); 94 } 95 } 96 sctp_enforce_cwnd_limit(assoc, net); 97 net->ssthresh = assoc->peers_rwnd; 98 SDT_PROBE(sctp, cwnd, net, init, 99 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, 100 0, net->cwnd); 101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 102 (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 103 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION); 104 } 105 } 106 107 static void 108 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb, 109 struct sctp_association *asoc) 110 { 111 struct sctp_nets *net; 112 uint32_t t_ssthresh, t_cwnd; 113 uint64_t t_ucwnd_sbw; 114 115 /* MT FIXME: Don't compute this over and over again */ 116 t_ssthresh = 0; 117 t_cwnd = 0; 118 t_ucwnd_sbw = 0; 119 if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) || 120 (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) { 121 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 122 t_ssthresh += net->ssthresh; 123 t_cwnd += net->cwnd; 124 if (net->lastsa > 0) { 125 t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) net->lastsa; 126 } 127 } 128 if (t_ucwnd_sbw == 0) { 129 t_ucwnd_sbw = 1; 130 } 131 } 132 /*- 133 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) && 134 * (net->fast_retran_loss_recovery == 0))) 135 */ 136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 137 if ((asoc->fast_retran_loss_recovery == 0) || 138 (asoc->sctp_cmt_on_off > 0)) { 139 /* out of a RFC2582 Fast recovery window? */ 140 if (net->net_ack > 0) { 141 /* 142 * per section 7.2.3, are there any 143 * destinations that had a fast retransmit 144 * to them. If so what we need to do is 145 * adjust ssthresh and cwnd. 146 */ 147 struct sctp_tmit_chunk *lchk; 148 int old_cwnd = net->cwnd; 149 150 if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) || 151 (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) { 152 if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) { 153 net->ssthresh = (uint32_t) (((uint64_t) 4 * 154 (uint64_t) net->mtu * 155 (uint64_t) net->ssthresh) / 156 (uint64_t) t_ssthresh); 157 158 } 159 if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) { 160 uint32_t srtt; 161 162 srtt = net->lastsa; 163 /* 164 * lastsa>>3; we don't need 165 * to devide ... 166 */ 167 if (srtt == 0) { 168 srtt = 1; 169 } 170 /* 171 * Short Version => Equal to 172 * Contel Version MBe 173 */ 174 net->ssthresh = (uint32_t) (((uint64_t) 4 * 175 (uint64_t) net->mtu * 176 (uint64_t) net->cwnd) / 177 ((uint64_t) srtt * 178 t_ucwnd_sbw)); 179 /* INCREASE FACTOR */ ; 180 } 181 if ((net->cwnd > t_cwnd / 2) && 182 (net->ssthresh < net->cwnd - t_cwnd / 2)) { 183 net->ssthresh = net->cwnd - t_cwnd / 2; 184 } 185 if (net->ssthresh < net->mtu) { 186 net->ssthresh = net->mtu; 187 } 188 } else { 189 net->ssthresh = net->cwnd / 2; 190 if (net->ssthresh < (net->mtu * 2)) { 191 net->ssthresh = 2 * net->mtu; 192 } 193 } 194 net->cwnd = net->ssthresh; 195 sctp_enforce_cwnd_limit(asoc, net); 196 SDT_PROBE(sctp, cwnd, net, fr, 197 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, 198 old_cwnd, net->cwnd); 199 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 200 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 201 SCTP_CWND_LOG_FROM_FR); 202 } 203 lchk = TAILQ_FIRST(&asoc->send_queue); 204 205 net->partial_bytes_acked = 0; 206 /* Turn on fast recovery window */ 207 asoc->fast_retran_loss_recovery = 1; 208 if (lchk == NULL) { 209 /* Mark end of the window */ 210 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 211 } else { 212 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 213 } 214 215 /* 216 * CMT fast recovery -- per destination 217 * recovery variable. 218 */ 219 net->fast_retran_loss_recovery = 1; 220 221 if (lchk == NULL) { 222 /* Mark end of the window */ 223 net->fast_recovery_tsn = asoc->sending_seq - 1; 224 } else { 225 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 226 } 227 228 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 229 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 230 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 231 stcb->sctp_ep, stcb, net); 232 } 233 } else if (net->net_ack > 0) { 234 /* 235 * Mark a peg that we WOULD have done a cwnd 236 * reduction but RFC2582 prevented this action. 237 */ 238 SCTP_STAT_INCR(sctps_fastretransinrtt); 239 } 240 } 241 } 242 243 /* Defines for instantaneous bw decisions */ 244 #define SCTP_INST_LOOSING 1 /* Loosing to other flows */ 245 #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */ 246 #define SCTP_INST_GAINING 3 /* Gaining, step down possible */ 247 248 249 static int 250 cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, 251 uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind) 252 { 253 uint64_t oth, probepoint; 254 255 probepoint = (((uint64_t) net->cwnd) << 32); 256 if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) { 257 /* 258 * rtt increased we don't update bw.. so we don't update the 259 * rtt either. 260 */ 261 /* Probe point 5 */ 262 probepoint |= ((5 << 16) | 1); 263 SDT_PROBE(sctp, cwnd, net, rttvar, 264 vtag, 265 ((net->cc_mod.rtcc.lbw << 32) | nbw), 266 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 267 net->flight_size, 268 probepoint); 269 if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) { 270 if (net->cc_mod.rtcc.last_step_state == 5) 271 net->cc_mod.rtcc.step_cnt++; 272 else 273 net->cc_mod.rtcc.step_cnt = 1; 274 net->cc_mod.rtcc.last_step_state = 5; 275 if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) || 276 ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) && 277 ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) { 278 /* Try a step down */ 279 oth = net->cc_mod.rtcc.vol_reduce; 280 oth <<= 16; 281 oth |= net->cc_mod.rtcc.step_cnt; 282 oth <<= 16; 283 oth |= net->cc_mod.rtcc.last_step_state; 284 SDT_PROBE(sctp, cwnd, net, rttstep, 285 vtag, 286 ((net->cc_mod.rtcc.lbw << 32) | nbw), 287 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 288 oth, 289 probepoint); 290 if (net->cwnd > (4 * net->mtu)) { 291 net->cwnd -= net->mtu; 292 net->cc_mod.rtcc.vol_reduce++; 293 } else { 294 net->cc_mod.rtcc.step_cnt = 0; 295 } 296 } 297 } 298 return (1); 299 } 300 if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) { 301 /* 302 * rtt decreased, there could be more room. we update both 303 * the bw and the rtt here to lock this in as a good step 304 * down. 305 */ 306 /* Probe point 6 */ 307 probepoint |= ((6 << 16) | 0); 308 SDT_PROBE(sctp, cwnd, net, rttvar, 309 vtag, 310 ((net->cc_mod.rtcc.lbw << 32) | nbw), 311 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 312 net->flight_size, 313 probepoint); 314 if (net->cc_mod.rtcc.steady_step) { 315 oth = net->cc_mod.rtcc.vol_reduce; 316 oth <<= 16; 317 oth |= net->cc_mod.rtcc.step_cnt; 318 oth <<= 16; 319 oth |= net->cc_mod.rtcc.last_step_state; 320 SDT_PROBE(sctp, cwnd, net, rttstep, 321 vtag, 322 ((net->cc_mod.rtcc.lbw << 32) | nbw), 323 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 324 oth, 325 probepoint); 326 if ((net->cc_mod.rtcc.last_step_state == 5) && 327 (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) { 328 /* Step down worked */ 329 net->cc_mod.rtcc.step_cnt = 0; 330 return (1); 331 } else { 332 net->cc_mod.rtcc.last_step_state = 6; 333 net->cc_mod.rtcc.step_cnt = 0; 334 } 335 } 336 net->cc_mod.rtcc.lbw = nbw; 337 net->cc_mod.rtcc.lbw_rtt = net->rtt; 338 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd; 339 if (inst_ind == SCTP_INST_GAINING) 340 return (1); 341 else if (inst_ind == SCTP_INST_NEUTRAL) 342 return (1); 343 else 344 return (0); 345 } 346 /* 347 * Ok bw and rtt remained the same .. no update to any 348 */ 349 /* Probe point 7 */ 350 probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq); 351 SDT_PROBE(sctp, cwnd, net, rttvar, 352 vtag, 353 ((net->cc_mod.rtcc.lbw << 32) | nbw), 354 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 355 net->flight_size, 356 probepoint); 357 if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) { 358 if (net->cc_mod.rtcc.last_step_state == 5) 359 net->cc_mod.rtcc.step_cnt++; 360 else 361 net->cc_mod.rtcc.step_cnt = 1; 362 net->cc_mod.rtcc.last_step_state = 5; 363 if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) || 364 ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) && 365 ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) { 366 /* Try a step down */ 367 if (net->cwnd > (4 * net->mtu)) { 368 net->cwnd -= net->mtu; 369 net->cc_mod.rtcc.vol_reduce++; 370 return (1); 371 } else { 372 net->cc_mod.rtcc.step_cnt = 0; 373 } 374 } 375 } 376 if (inst_ind == SCTP_INST_GAINING) 377 return (1); 378 else if (inst_ind == SCTP_INST_NEUTRAL) 379 return (1); 380 else 381 return ((int)net->cc_mod.rtcc.ret_from_eq); 382 } 383 384 static int 385 cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset, 386 uint64_t vtag, uint8_t inst_ind) 387 { 388 uint64_t oth, probepoint; 389 390 /* Bandwidth decreased. */ 391 probepoint = (((uint64_t) net->cwnd) << 32); 392 if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) { 393 /* rtt increased */ 394 /* Did we add more */ 395 if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) && 396 (inst_ind != SCTP_INST_LOOSING)) { 397 /* We caused it maybe.. back off? */ 398 /* PROBE POINT 1 */ 399 probepoint |= ((1 << 16) | 1); 400 SDT_PROBE(sctp, cwnd, net, rttvar, 401 vtag, 402 ((net->cc_mod.rtcc.lbw << 32) | nbw), 403 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 404 net->flight_size, 405 probepoint); 406 if (net->cc_mod.rtcc.ret_from_eq) { 407 /* 408 * Switch over to CA if we are less 409 * aggressive 410 */ 411 net->ssthresh = net->cwnd - 1; 412 net->partial_bytes_acked = 0; 413 } 414 return (1); 415 } 416 /* Probe point 2 */ 417 probepoint |= ((2 << 16) | 0); 418 SDT_PROBE(sctp, cwnd, net, rttvar, 419 vtag, 420 ((net->cc_mod.rtcc.lbw << 32) | nbw), 421 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 422 net->flight_size, 423 probepoint); 424 /* Someone else - fight for more? */ 425 if (net->cc_mod.rtcc.steady_step) { 426 oth = net->cc_mod.rtcc.vol_reduce; 427 oth <<= 16; 428 oth |= net->cc_mod.rtcc.step_cnt; 429 oth <<= 16; 430 oth |= net->cc_mod.rtcc.last_step_state; 431 SDT_PROBE(sctp, cwnd, net, rttstep, 432 vtag, 433 ((net->cc_mod.rtcc.lbw << 32) | nbw), 434 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 435 oth, 436 probepoint); 437 /* 438 * Did we voluntarily give up some? if so take one 439 * back please 440 */ 441 if ((net->cc_mod.rtcc.vol_reduce) && 442 (inst_ind != SCTP_INST_GAINING)) { 443 net->cwnd += net->mtu; 444 sctp_enforce_cwnd_limit(&stcb->asoc, net); 445 net->cc_mod.rtcc.vol_reduce--; 446 } 447 net->cc_mod.rtcc.last_step_state = 2; 448 net->cc_mod.rtcc.step_cnt = 0; 449 } 450 goto out_decision; 451 } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) { 452 /* bw & rtt decreased */ 453 /* Probe point 3 */ 454 probepoint |= ((3 << 16) | 0); 455 SDT_PROBE(sctp, cwnd, net, rttvar, 456 vtag, 457 ((net->cc_mod.rtcc.lbw << 32) | nbw), 458 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 459 net->flight_size, 460 probepoint); 461 if (net->cc_mod.rtcc.steady_step) { 462 oth = net->cc_mod.rtcc.vol_reduce; 463 oth <<= 16; 464 oth |= net->cc_mod.rtcc.step_cnt; 465 oth <<= 16; 466 oth |= net->cc_mod.rtcc.last_step_state; 467 SDT_PROBE(sctp, cwnd, net, rttstep, 468 vtag, 469 ((net->cc_mod.rtcc.lbw << 32) | nbw), 470 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 471 oth, 472 probepoint); 473 if ((net->cc_mod.rtcc.vol_reduce) && 474 (inst_ind != SCTP_INST_GAINING)) { 475 net->cwnd += net->mtu; 476 sctp_enforce_cwnd_limit(&stcb->asoc, net); 477 net->cc_mod.rtcc.vol_reduce--; 478 } 479 net->cc_mod.rtcc.last_step_state = 3; 480 net->cc_mod.rtcc.step_cnt = 0; 481 } 482 goto out_decision; 483 } 484 /* The bw decreased but rtt stayed the same */ 485 /* Probe point 4 */ 486 probepoint |= ((4 << 16) | 0); 487 SDT_PROBE(sctp, cwnd, net, rttvar, 488 vtag, 489 ((net->cc_mod.rtcc.lbw << 32) | nbw), 490 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 491 net->flight_size, 492 probepoint); 493 if (net->cc_mod.rtcc.steady_step) { 494 oth = net->cc_mod.rtcc.vol_reduce; 495 oth <<= 16; 496 oth |= net->cc_mod.rtcc.step_cnt; 497 oth <<= 16; 498 oth |= net->cc_mod.rtcc.last_step_state; 499 SDT_PROBE(sctp, cwnd, net, rttstep, 500 vtag, 501 ((net->cc_mod.rtcc.lbw << 32) | nbw), 502 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 503 oth, 504 probepoint); 505 if ((net->cc_mod.rtcc.vol_reduce) && 506 (inst_ind != SCTP_INST_GAINING)) { 507 net->cwnd += net->mtu; 508 sctp_enforce_cwnd_limit(&stcb->asoc, net); 509 net->cc_mod.rtcc.vol_reduce--; 510 } 511 net->cc_mod.rtcc.last_step_state = 4; 512 net->cc_mod.rtcc.step_cnt = 0; 513 } 514 out_decision: 515 net->cc_mod.rtcc.lbw = nbw; 516 net->cc_mod.rtcc.lbw_rtt = net->rtt; 517 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd; 518 if (inst_ind == SCTP_INST_GAINING) { 519 return (1); 520 } else { 521 return (0); 522 } 523 } 524 525 static int 526 cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag) 527 { 528 uint64_t oth, probepoint; 529 530 /* 531 * BW increased, so update and return 0, since all actions in our 532 * table say to do the normal CC update. Note that we pay no 533 * attention to the inst_ind since our overall sum is increasing. 534 */ 535 /* PROBE POINT 0 */ 536 probepoint = (((uint64_t) net->cwnd) << 32); 537 SDT_PROBE(sctp, cwnd, net, rttvar, 538 vtag, 539 ((net->cc_mod.rtcc.lbw << 32) | nbw), 540 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 541 net->flight_size, 542 probepoint); 543 if (net->cc_mod.rtcc.steady_step) { 544 oth = net->cc_mod.rtcc.vol_reduce; 545 oth <<= 16; 546 oth |= net->cc_mod.rtcc.step_cnt; 547 oth <<= 16; 548 oth |= net->cc_mod.rtcc.last_step_state; 549 SDT_PROBE(sctp, cwnd, net, rttstep, 550 vtag, 551 ((net->cc_mod.rtcc.lbw << 32) | nbw), 552 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 553 oth, 554 probepoint); 555 net->cc_mod.rtcc.last_step_state = 0; 556 net->cc_mod.rtcc.step_cnt = 0; 557 net->cc_mod.rtcc.vol_reduce = 0; 558 } 559 net->cc_mod.rtcc.lbw = nbw; 560 net->cc_mod.rtcc.lbw_rtt = net->rtt; 561 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd; 562 return (0); 563 } 564 565 /* RTCC Algoritm to limit growth of cwnd, return 566 * true if you want to NOT allow cwnd growth 567 */ 568 static int 569 cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) 570 { 571 uint64_t bw_offset, rtt_offset; 572 uint64_t probepoint, rtt, vtag; 573 uint64_t bytes_for_this_rtt, inst_bw; 574 uint64_t div, inst_off; 575 int bw_shift; 576 uint8_t inst_ind; 577 int ret; 578 579 /*- 580 * Here we need to see if we want 581 * to limit cwnd growth due to increase 582 * in overall rtt but no increase in bw. 583 * We use the following table to figure 584 * out what we should do. When we return 585 * 0, cc update goes on as planned. If we 586 * return 1, then no cc update happens and cwnd 587 * stays where it is at. 588 * ---------------------------------- 589 * BW | RTT | Action 590 * ********************************* 591 * INC | INC | return 0 592 * ---------------------------------- 593 * INC | SAME | return 0 594 * ---------------------------------- 595 * INC | DECR | return 0 596 * ---------------------------------- 597 * SAME | INC | return 1 598 * ---------------------------------- 599 * SAME | SAME | return 1 600 * ---------------------------------- 601 * SAME | DECR | return 0 602 * ---------------------------------- 603 * DECR | INC | return 0 or 1 based on if we caused. 604 * ---------------------------------- 605 * DECR | SAME | return 0 606 * ---------------------------------- 607 * DECR | DECR | return 0 608 * ---------------------------------- 609 * 610 * We are a bit fuzz on what an increase or 611 * decrease is. For BW it is the same if 612 * it did not change within 1/64th. For 613 * RTT it stayed the same if it did not 614 * change within 1/32nd 615 */ 616 bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw); 617 rtt = stcb->asoc.my_vtag; 618 vtag = (rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport); 619 probepoint = (((uint64_t) net->cwnd) << 32); 620 rtt = net->rtt; 621 if (net->cc_mod.rtcc.rtt_set_this_sack) { 622 net->cc_mod.rtcc.rtt_set_this_sack = 0; 623 bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc; 624 net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes; 625 if (net->rtt) { 626 div = net->rtt / 1000; 627 if (div) { 628 inst_bw = bytes_for_this_rtt / div; 629 inst_off = inst_bw >> bw_shift; 630 if (inst_bw > nbw) 631 inst_ind = SCTP_INST_GAINING; 632 else if ((inst_bw + inst_off) < nbw) 633 inst_ind = SCTP_INST_LOOSING; 634 else 635 inst_ind = SCTP_INST_NEUTRAL; 636 probepoint |= ((0xb << 16) | inst_ind); 637 } else { 638 inst_ind = net->cc_mod.rtcc.last_inst_ind; 639 inst_bw = bytes_for_this_rtt / (uint64_t) (net->rtt); 640 /* Can't determine do not change */ 641 probepoint |= ((0xc << 16) | inst_ind); 642 } 643 } else { 644 inst_ind = net->cc_mod.rtcc.last_inst_ind; 645 inst_bw = bytes_for_this_rtt; 646 /* Can't determine do not change */ 647 probepoint |= ((0xd << 16) | inst_ind); 648 } 649 SDT_PROBE(sctp, cwnd, net, rttvar, 650 vtag, 651 ((nbw << 32) | inst_bw), 652 ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt), 653 net->flight_size, 654 probepoint); 655 } else { 656 /* No rtt measurement, use last one */ 657 inst_ind = net->cc_mod.rtcc.last_inst_ind; 658 } 659 bw_offset = net->cc_mod.rtcc.lbw >> bw_shift; 660 if (nbw > net->cc_mod.rtcc.lbw + bw_offset) { 661 ret = cc_bw_increase(stcb, net, nbw, vtag); 662 goto out; 663 } 664 rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt); 665 if (nbw < net->cc_mod.rtcc.lbw - bw_offset) { 666 ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind); 667 goto out; 668 } 669 /* 670 * If we reach here then we are in a situation where the bw stayed 671 * the same. 672 */ 673 ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind); 674 out: 675 net->cc_mod.rtcc.last_inst_ind = inst_ind; 676 return (ret); 677 } 678 679 static void 680 sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, 681 struct sctp_association *asoc, 682 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc) 683 { 684 struct sctp_nets *net; 685 int old_cwnd; 686 uint32_t t_ssthresh, t_cwnd, incr; 687 uint64_t t_ucwnd_sbw; 688 uint64_t t_path_mptcp; 689 uint64_t mptcp_like_alpha; 690 uint32_t srtt; 691 uint64_t max_path; 692 693 /* MT FIXME: Don't compute this over and over again */ 694 t_ssthresh = 0; 695 t_cwnd = 0; 696 t_ucwnd_sbw = 0; 697 t_path_mptcp = 0; 698 mptcp_like_alpha = 1; 699 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) || 700 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) || 701 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) { 702 max_path = 0; 703 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 704 t_ssthresh += net->ssthresh; 705 t_cwnd += net->cwnd; 706 /* lastsa>>3; we don't need to devide ... */ 707 srtt = net->lastsa; 708 if (srtt > 0) { 709 uint64_t tmp; 710 711 t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) srtt; 712 t_path_mptcp += (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_Z) / 713 (((uint64_t) net->mtu) * (uint64_t) srtt); 714 tmp = (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_N) / 715 ((uint64_t) net->mtu * (uint64_t) (srtt * srtt)); 716 if (tmp > max_path) { 717 max_path = tmp; 718 } 719 } 720 } 721 if (t_path_mptcp > 0) { 722 mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp); 723 } else { 724 mptcp_like_alpha = 1; 725 } 726 } 727 if (t_ssthresh == 0) { 728 t_ssthresh = 1; 729 } 730 if (t_ucwnd_sbw == 0) { 731 t_ucwnd_sbw = 1; 732 } 733 /******************************/ 734 /* update cwnd and Early FR */ 735 /******************************/ 736 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 737 738 #ifdef JANA_CMT_FAST_RECOVERY 739 /* 740 * CMT fast recovery code. Need to debug. 741 */ 742 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 743 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) || 744 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) { 745 net->will_exit_fast_recovery = 1; 746 } 747 } 748 #endif 749 /* if nothing was acked on this destination skip it */ 750 if (net->net_ack == 0) { 751 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 752 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 753 } 754 continue; 755 } 756 #ifdef JANA_CMT_FAST_RECOVERY 757 /* 758 * CMT fast recovery code 759 */ 760 /* 761 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery 762 * && net->will_exit_fast_recovery == 0) { @@@ Do something 763 * } else if (sctp_cmt_on_off == 0 && 764 * asoc->fast_retran_loss_recovery && will_exit == 0) { 765 */ 766 #endif 767 768 if (asoc->fast_retran_loss_recovery && 769 (will_exit == 0) && 770 (asoc->sctp_cmt_on_off == 0)) { 771 /* 772 * If we are in loss recovery we skip any cwnd 773 * update 774 */ 775 return; 776 } 777 /* 778 * Did any measurements go on for this network? 779 */ 780 if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) { 781 uint64_t nbw; 782 783 /* 784 * At this point our bw_bytes has been updated by 785 * incoming sack information. 786 * 787 * But our bw may not yet be set. 788 * 789 */ 790 if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) { 791 nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000); 792 } else { 793 nbw = net->cc_mod.rtcc.bw_bytes; 794 } 795 if (net->cc_mod.rtcc.lbw) { 796 if (cc_bw_limit(stcb, net, nbw)) { 797 /* Hold here, no update */ 798 continue; 799 } 800 } else { 801 uint64_t vtag, probepoint; 802 803 probepoint = (((uint64_t) net->cwnd) << 32); 804 probepoint |= ((0xa << 16) | 0); 805 vtag = (net->rtt << 32) | 806 (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | 807 (stcb->rport); 808 809 SDT_PROBE(sctp, cwnd, net, rttvar, 810 vtag, 811 nbw, 812 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 813 net->flight_size, 814 probepoint); 815 net->cc_mod.rtcc.lbw = nbw; 816 net->cc_mod.rtcc.lbw_rtt = net->rtt; 817 if (net->cc_mod.rtcc.rtt_set_this_sack) { 818 net->cc_mod.rtcc.rtt_set_this_sack = 0; 819 net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes; 820 } 821 } 822 } 823 /* 824 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 825 * moved. 826 */ 827 if (accum_moved || 828 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 829 /* If the cumulative ack moved we can proceed */ 830 if (net->cwnd <= net->ssthresh) { 831 /* We are in slow start */ 832 if (net->flight_size + net->net_ack >= net->cwnd) { 833 uint32_t limit; 834 835 old_cwnd = net->cwnd; 836 switch (asoc->sctp_cmt_on_off) { 837 case SCTP_CMT_RPV1: 838 limit = (uint32_t) (((uint64_t) net->mtu * 839 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) * 840 (uint64_t) net->ssthresh) / 841 (uint64_t) t_ssthresh); 842 incr = (uint32_t) (((uint64_t) net->net_ack * 843 (uint64_t) net->ssthresh) / 844 (uint64_t) t_ssthresh); 845 if (incr > limit) { 846 incr = limit; 847 } 848 if (incr == 0) { 849 incr = 1; 850 } 851 break; 852 case SCTP_CMT_RPV2: 853 /* 854 * lastsa>>3; we don't need 855 * to divide ... 856 */ 857 srtt = net->lastsa; 858 if (srtt == 0) { 859 srtt = 1; 860 } 861 limit = (uint32_t) (((uint64_t) net->mtu * 862 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) * 863 (uint64_t) net->cwnd) / 864 ((uint64_t) srtt * t_ucwnd_sbw)); 865 /* INCREASE FACTOR */ 866 incr = (uint32_t) (((uint64_t) net->net_ack * 867 (uint64_t) net->cwnd) / 868 ((uint64_t) srtt * t_ucwnd_sbw)); 869 /* INCREASE FACTOR */ 870 if (incr > limit) { 871 incr = limit; 872 } 873 if (incr == 0) { 874 incr = 1; 875 } 876 break; 877 case SCTP_CMT_MPTCP: 878 limit = (uint32_t) (((uint64_t) net->mtu * 879 mptcp_like_alpha * 880 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >> 881 SHIFT_MPTCP_MULTI); 882 incr = (uint32_t) (((uint64_t) net->net_ack * 883 mptcp_like_alpha) >> 884 SHIFT_MPTCP_MULTI); 885 if (incr > limit) { 886 incr = limit; 887 } 888 if (incr > net->net_ack) { 889 incr = net->net_ack; 890 } 891 if (incr > net->mtu) { 892 incr = net->mtu; 893 } 894 break; 895 default: 896 incr = net->net_ack; 897 if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) { 898 incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable); 899 } 900 break; 901 } 902 net->cwnd += incr; 903 sctp_enforce_cwnd_limit(asoc, net); 904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 905 sctp_log_cwnd(stcb, net, incr, 906 SCTP_CWND_LOG_FROM_SS); 907 } 908 SDT_PROBE(sctp, cwnd, net, ack, 909 stcb->asoc.my_vtag, 910 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 911 net, 912 old_cwnd, net->cwnd); 913 } else { 914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 915 sctp_log_cwnd(stcb, net, net->net_ack, 916 SCTP_CWND_LOG_NOADV_SS); 917 } 918 } 919 } else { 920 /* We are in congestion avoidance */ 921 /* 922 * Add to pba 923 */ 924 net->partial_bytes_acked += net->net_ack; 925 926 if ((net->flight_size + net->net_ack >= net->cwnd) && 927 (net->partial_bytes_acked >= net->cwnd)) { 928 net->partial_bytes_acked -= net->cwnd; 929 old_cwnd = net->cwnd; 930 switch (asoc->sctp_cmt_on_off) { 931 case SCTP_CMT_RPV1: 932 incr = (uint32_t) (((uint64_t) net->mtu * 933 (uint64_t) net->ssthresh) / 934 (uint64_t) t_ssthresh); 935 if (incr == 0) { 936 incr = 1; 937 } 938 break; 939 case SCTP_CMT_RPV2: 940 /* 941 * lastsa>>3; we don't need 942 * to divide ... 943 */ 944 srtt = net->lastsa; 945 if (srtt == 0) { 946 srtt = 1; 947 } 948 incr = (uint32_t) ((uint64_t) net->mtu * 949 (uint64_t) net->cwnd / 950 ((uint64_t) srtt * 951 t_ucwnd_sbw)); 952 /* INCREASE FACTOR */ 953 if (incr == 0) { 954 incr = 1; 955 } 956 break; 957 case SCTP_CMT_MPTCP: 958 incr = (uint32_t) ((mptcp_like_alpha * 959 (uint64_t) net->cwnd) >> 960 SHIFT_MPTCP_MULTI); 961 if (incr > net->mtu) { 962 incr = net->mtu; 963 } 964 break; 965 default: 966 incr = net->mtu; 967 break; 968 } 969 net->cwnd += incr; 970 sctp_enforce_cwnd_limit(asoc, net); 971 SDT_PROBE(sctp, cwnd, net, ack, 972 stcb->asoc.my_vtag, 973 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 974 net, 975 old_cwnd, net->cwnd); 976 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 977 sctp_log_cwnd(stcb, net, net->mtu, 978 SCTP_CWND_LOG_FROM_CA); 979 } 980 } else { 981 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 982 sctp_log_cwnd(stcb, net, net->net_ack, 983 SCTP_CWND_LOG_NOADV_CA); 984 } 985 } 986 } 987 } else { 988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 989 sctp_log_cwnd(stcb, net, net->mtu, 990 SCTP_CWND_LOG_NO_CUMACK); 991 } 992 } 993 } 994 } 995 996 static void 997 sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net) 998 { 999 int old_cwnd; 1000 1001 old_cwnd = net->cwnd; 1002 net->cwnd = net->mtu; 1003 SDT_PROBE(sctp, cwnd, net, ack, 1004 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, 1005 old_cwnd, net->cwnd); 1006 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n", 1007 (void *)net, net->cwnd); 1008 } 1009 1010 1011 static void 1012 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) 1013 { 1014 int old_cwnd = net->cwnd; 1015 uint32_t t_ssthresh, t_cwnd; 1016 uint64_t t_ucwnd_sbw; 1017 1018 /* MT FIXME: Don't compute this over and over again */ 1019 t_ssthresh = 0; 1020 t_cwnd = 0; 1021 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) || 1022 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) { 1023 struct sctp_nets *lnet; 1024 uint32_t srtt; 1025 1026 t_ucwnd_sbw = 0; 1027 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1028 t_ssthresh += lnet->ssthresh; 1029 t_cwnd += lnet->cwnd; 1030 srtt = lnet->lastsa; 1031 /* lastsa>>3; we don't need to divide ... */ 1032 if (srtt > 0) { 1033 t_ucwnd_sbw += (uint64_t) lnet->cwnd / (uint64_t) srtt; 1034 } 1035 } 1036 if (t_ssthresh < 1) { 1037 t_ssthresh = 1; 1038 } 1039 if (t_ucwnd_sbw < 1) { 1040 t_ucwnd_sbw = 1; 1041 } 1042 if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) { 1043 net->ssthresh = (uint32_t) (((uint64_t) 4 * 1044 (uint64_t) net->mtu * 1045 (uint64_t) net->ssthresh) / 1046 (uint64_t) t_ssthresh); 1047 } else { 1048 uint64_t cc_delta; 1049 1050 srtt = net->lastsa; 1051 /* lastsa>>3; we don't need to divide ... */ 1052 if (srtt == 0) { 1053 srtt = 1; 1054 } 1055 cc_delta = t_ucwnd_sbw * (uint64_t) srtt / 2; 1056 if (cc_delta < t_cwnd) { 1057 net->ssthresh = (uint32_t) ((uint64_t) t_cwnd - cc_delta); 1058 } else { 1059 net->ssthresh = net->mtu; 1060 } 1061 } 1062 if ((net->cwnd > t_cwnd / 2) && 1063 (net->ssthresh < net->cwnd - t_cwnd / 2)) { 1064 net->ssthresh = net->cwnd - t_cwnd / 2; 1065 } 1066 if (net->ssthresh < net->mtu) { 1067 net->ssthresh = net->mtu; 1068 } 1069 } else { 1070 net->ssthresh = max(net->cwnd / 2, 4 * net->mtu); 1071 } 1072 net->cwnd = net->mtu; 1073 net->partial_bytes_acked = 0; 1074 SDT_PROBE(sctp, cwnd, net, to, 1075 stcb->asoc.my_vtag, 1076 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1077 net, 1078 old_cwnd, net->cwnd); 1079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1080 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX); 1081 } 1082 } 1083 1084 static void 1085 sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net, 1086 int in_window, int num_pkt_lost, int use_rtcc) 1087 { 1088 int old_cwnd = net->cwnd; 1089 1090 if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) { 1091 /* Data center Congestion Control */ 1092 if (in_window == 0) { 1093 /* 1094 * Go to CA with the cwnd at the point we sent the 1095 * TSN that was marked with a CE. 1096 */ 1097 if (net->ecn_prev_cwnd < net->cwnd) { 1098 /* Restore to prev cwnd */ 1099 net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost); 1100 } else { 1101 /* Just cut in 1/2 */ 1102 net->cwnd /= 2; 1103 } 1104 /* Drop to CA */ 1105 net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu); 1106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1107 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 1108 } 1109 } else { 1110 /* 1111 * Further tuning down required over the drastic 1112 * orginal cut 1113 */ 1114 net->ssthresh -= (net->mtu * num_pkt_lost); 1115 net->cwnd -= (net->mtu * num_pkt_lost); 1116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1117 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 1118 } 1119 } 1120 SCTP_STAT_INCR(sctps_ecnereducedcwnd); 1121 } else { 1122 if (in_window == 0) { 1123 SCTP_STAT_INCR(sctps_ecnereducedcwnd); 1124 net->ssthresh = net->cwnd / 2; 1125 if (net->ssthresh < net->mtu) { 1126 net->ssthresh = net->mtu; 1127 /* 1128 * here back off the timer as well, to slow 1129 * us down 1130 */ 1131 net->RTO <<= 1; 1132 } 1133 net->cwnd = net->ssthresh; 1134 SDT_PROBE(sctp, cwnd, net, ecn, 1135 stcb->asoc.my_vtag, 1136 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1137 net, 1138 old_cwnd, net->cwnd); 1139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1140 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 1141 } 1142 } 1143 } 1144 1145 } 1146 1147 static void 1148 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb, 1149 struct sctp_nets *net, struct sctp_pktdrop_chunk *cp, 1150 uint32_t * bottle_bw, uint32_t * on_queue) 1151 { 1152 uint32_t bw_avail; 1153 unsigned int incr; 1154 int old_cwnd = net->cwnd; 1155 1156 /* get bottle neck bw */ 1157 *bottle_bw = ntohl(cp->bottle_bw); 1158 /* and whats on queue */ 1159 *on_queue = ntohl(cp->current_onq); 1160 /* 1161 * adjust the on-queue if our flight is more it could be that the 1162 * router has not yet gotten data "in-flight" to it 1163 */ 1164 if (*on_queue < net->flight_size) { 1165 *on_queue = net->flight_size; 1166 } 1167 /* rtt is measured in micro seconds, bottle_bw in bytes per second */ 1168 bw_avail = (uint32_t) (((uint64_t) (*bottle_bw) * net->rtt) / (uint64_t) 1000000); 1169 if (bw_avail > *bottle_bw) { 1170 /* 1171 * Cap the growth to no more than the bottle neck. This can 1172 * happen as RTT slides up due to queues. It also means if 1173 * you have more than a 1 second RTT with a empty queue you 1174 * will be limited to the bottle_bw per second no matter if 1175 * other points have 1/2 the RTT and you could get more 1176 * out... 1177 */ 1178 bw_avail = *bottle_bw; 1179 } 1180 if (*on_queue > bw_avail) { 1181 /* 1182 * No room for anything else don't allow anything else to be 1183 * "added to the fire". 1184 */ 1185 int seg_inflight, seg_onqueue, my_portion; 1186 1187 net->partial_bytes_acked = 0; 1188 /* how much are we over queue size? */ 1189 incr = *on_queue - bw_avail; 1190 if (stcb->asoc.seen_a_sack_this_pkt) { 1191 /* 1192 * undo any cwnd adjustment that the sack might have 1193 * made 1194 */ 1195 net->cwnd = net->prev_cwnd; 1196 } 1197 /* Now how much of that is mine? */ 1198 seg_inflight = net->flight_size / net->mtu; 1199 seg_onqueue = *on_queue / net->mtu; 1200 my_portion = (incr * seg_inflight) / seg_onqueue; 1201 1202 /* Have I made an adjustment already */ 1203 if (net->cwnd > net->flight_size) { 1204 /* 1205 * for this flight I made an adjustment we need to 1206 * decrease the portion by a share our previous 1207 * adjustment. 1208 */ 1209 int diff_adj; 1210 1211 diff_adj = net->cwnd - net->flight_size; 1212 if (diff_adj > my_portion) 1213 my_portion = 0; 1214 else 1215 my_portion -= diff_adj; 1216 } 1217 /* 1218 * back down to the previous cwnd (assume we have had a sack 1219 * before this packet). minus what ever portion of the 1220 * overage is my fault. 1221 */ 1222 net->cwnd -= my_portion; 1223 1224 /* we will NOT back down more than 1 MTU */ 1225 if (net->cwnd <= net->mtu) { 1226 net->cwnd = net->mtu; 1227 } 1228 /* force into CA */ 1229 net->ssthresh = net->cwnd - 1; 1230 } else { 1231 /* 1232 * Take 1/4 of the space left or max burst up .. whichever 1233 * is less. 1234 */ 1235 incr = (bw_avail - *on_queue) >> 2; 1236 if ((stcb->asoc.max_burst > 0) && 1237 (stcb->asoc.max_burst * net->mtu < incr)) { 1238 incr = stcb->asoc.max_burst * net->mtu; 1239 } 1240 net->cwnd += incr; 1241 } 1242 if (net->cwnd > bw_avail) { 1243 /* We can't exceed the pipe size */ 1244 net->cwnd = bw_avail; 1245 } 1246 if (net->cwnd < net->mtu) { 1247 /* We always have 1 MTU */ 1248 net->cwnd = net->mtu; 1249 } 1250 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1251 if (net->cwnd - old_cwnd != 0) { 1252 /* log only changes */ 1253 SDT_PROBE(sctp, cwnd, net, pd, 1254 stcb->asoc.my_vtag, 1255 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1256 net, 1257 old_cwnd, net->cwnd); 1258 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1259 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 1260 SCTP_CWND_LOG_FROM_SAT); 1261 } 1262 } 1263 } 1264 1265 static void 1266 sctp_cwnd_update_after_output(struct sctp_tcb *stcb, 1267 struct sctp_nets *net, int burst_limit) 1268 { 1269 int old_cwnd = net->cwnd; 1270 1271 if (net->ssthresh < net->cwnd) 1272 net->ssthresh = net->cwnd; 1273 if (burst_limit) { 1274 net->cwnd = (net->flight_size + (burst_limit * net->mtu)); 1275 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1276 SDT_PROBE(sctp, cwnd, net, bl, 1277 stcb->asoc.my_vtag, 1278 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 1279 net, 1280 old_cwnd, net->cwnd); 1281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1282 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST); 1283 } 1284 } 1285 } 1286 1287 static void 1288 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb, 1289 struct sctp_association *asoc, 1290 int accum_moved, int reneged_all, int will_exit) 1291 { 1292 /* Passing a zero argument in last disables the rtcc algoritm */ 1293 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0); 1294 } 1295 1296 static void 1297 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 1298 int in_window, int num_pkt_lost) 1299 { 1300 /* Passing a zero argument in last disables the rtcc algoritm */ 1301 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0); 1302 } 1303 1304 /* Here starts the RTCCVAR type CC invented by RRS which 1305 * is a slight mod to RFC2581. We reuse a common routine or 1306 * two since these algoritms are so close and need to 1307 * remain the same. 1308 */ 1309 static void 1310 sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 1311 int in_window, int num_pkt_lost) 1312 { 1313 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1); 1314 } 1315 1316 1317 static 1318 void 1319 sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net, 1320 struct sctp_tmit_chunk *tp1) 1321 { 1322 net->cc_mod.rtcc.bw_bytes += tp1->send_size; 1323 } 1324 1325 static void 1326 sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED, 1327 struct sctp_nets *net) 1328 { 1329 if (net->cc_mod.rtcc.tls_needs_set > 0) { 1330 /* We had a bw measurment going on */ 1331 struct timeval ltls; 1332 1333 SCTP_GETPTIME_TIMEVAL(<ls); 1334 timevalsub(<ls, &net->cc_mod.rtcc.tls); 1335 net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec; 1336 } 1337 } 1338 1339 static void 1340 sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb, 1341 struct sctp_nets *net) 1342 { 1343 uint64_t vtag, probepoint; 1344 1345 if (net->cc_mod.rtcc.lbw) { 1346 /* Clear the old bw.. we went to 0 in-flight */ 1347 vtag = (net->rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | 1348 (stcb->rport); 1349 probepoint = (((uint64_t) net->cwnd) << 32); 1350 /* Probe point 8 */ 1351 probepoint |= ((8 << 16) | 0); 1352 SDT_PROBE(sctp, cwnd, net, rttvar, 1353 vtag, 1354 ((net->cc_mod.rtcc.lbw << 32) | 0), 1355 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt), 1356 net->flight_size, 1357 probepoint); 1358 net->cc_mod.rtcc.lbw_rtt = 0; 1359 net->cc_mod.rtcc.cwnd_at_bw_set = 0; 1360 net->cc_mod.rtcc.lbw = 0; 1361 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0; 1362 net->cc_mod.rtcc.vol_reduce = 0; 1363 net->cc_mod.rtcc.bw_tot_time = 0; 1364 net->cc_mod.rtcc.bw_bytes = 0; 1365 net->cc_mod.rtcc.tls_needs_set = 0; 1366 if (net->cc_mod.rtcc.steady_step) { 1367 net->cc_mod.rtcc.vol_reduce = 0; 1368 net->cc_mod.rtcc.step_cnt = 0; 1369 net->cc_mod.rtcc.last_step_state = 0; 1370 } 1371 if (net->cc_mod.rtcc.ret_from_eq) { 1372 /* less aggressive one - reset cwnd too */ 1373 uint32_t cwnd_in_mtu, cwnd; 1374 1375 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd); 1376 if (cwnd_in_mtu == 0) { 1377 /* 1378 * Using 0 means that the value of RFC 4960 1379 * is used. 1380 */ 1381 cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 1382 } else { 1383 /* 1384 * We take the minimum of the burst limit 1385 * and the initial congestion window. 1386 */ 1387 if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst)) 1388 cwnd_in_mtu = stcb->asoc.max_burst; 1389 cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu; 1390 } 1391 if (net->cwnd > cwnd) { 1392 /* 1393 * Only set if we are not a timeout (i.e. 1394 * down to 1 mtu) 1395 */ 1396 net->cwnd = cwnd; 1397 } 1398 } 1399 } 1400 } 1401 1402 static void 1403 sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb, 1404 struct sctp_nets *net) 1405 { 1406 uint64_t vtag, probepoint; 1407 1408 sctp_set_initial_cc_param(stcb, net); 1409 stcb->asoc.use_precise_time = 1; 1410 probepoint = (((uint64_t) net->cwnd) << 32); 1411 probepoint |= ((9 << 16) | 0); 1412 vtag = (net->rtt << 32) | 1413 (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | 1414 (stcb->rport); 1415 SDT_PROBE(sctp, cwnd, net, rttvar, 1416 vtag, 1417 0, 1418 0, 1419 0, 1420 probepoint); 1421 net->cc_mod.rtcc.lbw_rtt = 0; 1422 net->cc_mod.rtcc.cwnd_at_bw_set = 0; 1423 net->cc_mod.rtcc.vol_reduce = 0; 1424 net->cc_mod.rtcc.lbw = 0; 1425 net->cc_mod.rtcc.vol_reduce = 0; 1426 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0; 1427 net->cc_mod.rtcc.bw_tot_time = 0; 1428 net->cc_mod.rtcc.bw_bytes = 0; 1429 net->cc_mod.rtcc.tls_needs_set = 0; 1430 net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret); 1431 net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step); 1432 net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn); 1433 net->cc_mod.rtcc.step_cnt = 0; 1434 net->cc_mod.rtcc.last_step_state = 0; 1435 1436 1437 } 1438 1439 static int 1440 sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget, 1441 struct sctp_cc_option *cc_opt) 1442 { 1443 struct sctp_nets *net; 1444 1445 if (setorget == 1) { 1446 /* a set */ 1447 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1448 if ((cc_opt->aid_value.assoc_value != 0) && 1449 (cc_opt->aid_value.assoc_value != 1)) { 1450 return (EINVAL); 1451 } 1452 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1453 net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value; 1454 } 1455 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1456 if ((cc_opt->aid_value.assoc_value != 0) && 1457 (cc_opt->aid_value.assoc_value != 1)) { 1458 return (EINVAL); 1459 } 1460 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1461 net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value; 1462 } 1463 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) { 1464 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1465 net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value; 1466 } 1467 } else { 1468 return (EINVAL); 1469 } 1470 } else { 1471 /* a get */ 1472 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { 1473 net = TAILQ_FIRST(&stcb->asoc.nets); 1474 if (net == NULL) { 1475 return (EFAULT); 1476 } 1477 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq; 1478 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) { 1479 net = TAILQ_FIRST(&stcb->asoc.nets); 1480 if (net == NULL) { 1481 return (EFAULT); 1482 } 1483 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn; 1484 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) { 1485 net = TAILQ_FIRST(&stcb->asoc.nets); 1486 if (net == NULL) { 1487 return (EFAULT); 1488 } 1489 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step; 1490 } else { 1491 return (EINVAL); 1492 } 1493 } 1494 return (0); 1495 } 1496 1497 static void 1498 sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED, 1499 struct sctp_nets *net) 1500 { 1501 if (net->cc_mod.rtcc.tls_needs_set == 0) { 1502 SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls); 1503 net->cc_mod.rtcc.tls_needs_set = 2; 1504 } 1505 } 1506 1507 static void 1508 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb, 1509 struct sctp_association *asoc, 1510 int accum_moved, int reneged_all, int will_exit) 1511 { 1512 /* Passing a one argument at the last enables the rtcc algoritm */ 1513 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1); 1514 } 1515 1516 static void 1517 sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED, 1518 struct sctp_nets *net, 1519 struct timeval *now SCTP_UNUSED) 1520 { 1521 net->cc_mod.rtcc.rtt_set_this_sack = 1; 1522 } 1523 1524 /* Here starts Sally Floyds HS-TCP */ 1525 1526 struct sctp_hs_raise_drop { 1527 int32_t cwnd; 1528 int32_t increase; 1529 int32_t drop_percent; 1530 }; 1531 1532 #define SCTP_HS_TABLE_SIZE 73 1533 1534 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 1535 {38, 1, 50}, /* 0 */ 1536 {118, 2, 44}, /* 1 */ 1537 {221, 3, 41}, /* 2 */ 1538 {347, 4, 38}, /* 3 */ 1539 {495, 5, 37}, /* 4 */ 1540 {663, 6, 35}, /* 5 */ 1541 {851, 7, 34}, /* 6 */ 1542 {1058, 8, 33}, /* 7 */ 1543 {1284, 9, 32}, /* 8 */ 1544 {1529, 10, 31}, /* 9 */ 1545 {1793, 11, 30}, /* 10 */ 1546 {2076, 12, 29}, /* 11 */ 1547 {2378, 13, 28}, /* 12 */ 1548 {2699, 14, 28}, /* 13 */ 1549 {3039, 15, 27}, /* 14 */ 1550 {3399, 16, 27}, /* 15 */ 1551 {3778, 17, 26}, /* 16 */ 1552 {4177, 18, 26}, /* 17 */ 1553 {4596, 19, 25}, /* 18 */ 1554 {5036, 20, 25}, /* 19 */ 1555 {5497, 21, 24}, /* 20 */ 1556 {5979, 22, 24}, /* 21 */ 1557 {6483, 23, 23}, /* 22 */ 1558 {7009, 24, 23}, /* 23 */ 1559 {7558, 25, 22}, /* 24 */ 1560 {8130, 26, 22}, /* 25 */ 1561 {8726, 27, 22}, /* 26 */ 1562 {9346, 28, 21}, /* 27 */ 1563 {9991, 29, 21}, /* 28 */ 1564 {10661, 30, 21}, /* 29 */ 1565 {11358, 31, 20}, /* 30 */ 1566 {12082, 32, 20}, /* 31 */ 1567 {12834, 33, 20}, /* 32 */ 1568 {13614, 34, 19}, /* 33 */ 1569 {14424, 35, 19}, /* 34 */ 1570 {15265, 36, 19}, /* 35 */ 1571 {16137, 37, 19}, /* 36 */ 1572 {17042, 38, 18}, /* 37 */ 1573 {17981, 39, 18}, /* 38 */ 1574 {18955, 40, 18}, /* 39 */ 1575 {19965, 41, 17}, /* 40 */ 1576 {21013, 42, 17}, /* 41 */ 1577 {22101, 43, 17}, /* 42 */ 1578 {23230, 44, 17}, /* 43 */ 1579 {24402, 45, 16}, /* 44 */ 1580 {25618, 46, 16}, /* 45 */ 1581 {26881, 47, 16}, /* 46 */ 1582 {28193, 48, 16}, /* 47 */ 1583 {29557, 49, 15}, /* 48 */ 1584 {30975, 50, 15}, /* 49 */ 1585 {32450, 51, 15}, /* 50 */ 1586 {33986, 52, 15}, /* 51 */ 1587 {35586, 53, 14}, /* 52 */ 1588 {37253, 54, 14}, /* 53 */ 1589 {38992, 55, 14}, /* 54 */ 1590 {40808, 56, 14}, /* 55 */ 1591 {42707, 57, 13}, /* 56 */ 1592 {44694, 58, 13}, /* 57 */ 1593 {46776, 59, 13}, /* 58 */ 1594 {48961, 60, 13}, /* 59 */ 1595 {51258, 61, 13}, /* 60 */ 1596 {53677, 62, 12}, /* 61 */ 1597 {56230, 63, 12}, /* 62 */ 1598 {58932, 64, 12}, /* 63 */ 1599 {61799, 65, 12}, /* 64 */ 1600 {64851, 66, 11}, /* 65 */ 1601 {68113, 67, 11}, /* 66 */ 1602 {71617, 68, 11}, /* 67 */ 1603 {75401, 69, 10}, /* 68 */ 1604 {79517, 70, 10}, /* 69 */ 1605 {84035, 71, 10}, /* 70 */ 1606 {89053, 72, 10}, /* 71 */ 1607 {94717, 73, 9} /* 72 */ 1608 }; 1609 1610 static void 1611 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 1612 { 1613 int cur_val, i, indx, incr; 1614 int old_cwnd = net->cwnd; 1615 1616 cur_val = net->cwnd >> 10; 1617 indx = SCTP_HS_TABLE_SIZE - 1; 1618 1619 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1620 /* normal mode */ 1621 if (net->net_ack > net->mtu) { 1622 net->cwnd += net->mtu; 1623 } else { 1624 net->cwnd += net->net_ack; 1625 } 1626 } else { 1627 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 1628 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 1629 indx = i; 1630 break; 1631 } 1632 } 1633 net->last_hs_used = indx; 1634 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 1635 net->cwnd += incr; 1636 } 1637 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1638 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1639 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS); 1640 } 1641 } 1642 1643 static void 1644 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 1645 { 1646 int cur_val, i, indx; 1647 int old_cwnd = net->cwnd; 1648 1649 cur_val = net->cwnd >> 10; 1650 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1651 /* normal mode */ 1652 net->ssthresh = net->cwnd / 2; 1653 if (net->ssthresh < (net->mtu * 2)) { 1654 net->ssthresh = 2 * net->mtu; 1655 } 1656 net->cwnd = net->ssthresh; 1657 } else { 1658 /* drop by the proper amount */ 1659 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 1660 sctp_cwnd_adjust[net->last_hs_used].drop_percent); 1661 net->cwnd = net->ssthresh; 1662 /* now where are we */ 1663 indx = net->last_hs_used; 1664 cur_val = net->cwnd >> 10; 1665 /* reset where we are in the table */ 1666 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 1667 /* feel out of hs */ 1668 net->last_hs_used = 0; 1669 } else { 1670 for (i = indx; i >= 1; i--) { 1671 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 1672 break; 1673 } 1674 } 1675 net->last_hs_used = indx; 1676 } 1677 } 1678 sctp_enforce_cwnd_limit(&stcb->asoc, net); 1679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1680 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 1681 } 1682 } 1683 1684 static void 1685 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb, 1686 struct sctp_association *asoc) 1687 { 1688 struct sctp_nets *net; 1689 1690 /* 1691 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) && 1692 * (net->fast_retran_loss_recovery == 0))) 1693 */ 1694 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1695 if ((asoc->fast_retran_loss_recovery == 0) || 1696 (asoc->sctp_cmt_on_off > 0)) { 1697 /* out of a RFC2582 Fast recovery window? */ 1698 if (net->net_ack > 0) { 1699 /* 1700 * per section 7.2.3, are there any 1701 * destinations that had a fast retransmit 1702 * to them. If so what we need to do is 1703 * adjust ssthresh and cwnd. 1704 */ 1705 struct sctp_tmit_chunk *lchk; 1706 1707 sctp_hs_cwnd_decrease(stcb, net); 1708 1709 lchk = TAILQ_FIRST(&asoc->send_queue); 1710 1711 net->partial_bytes_acked = 0; 1712 /* Turn on fast recovery window */ 1713 asoc->fast_retran_loss_recovery = 1; 1714 if (lchk == NULL) { 1715 /* Mark end of the window */ 1716 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 1717 } else { 1718 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 1719 } 1720 1721 /* 1722 * CMT fast recovery -- per destination 1723 * recovery variable. 1724 */ 1725 net->fast_retran_loss_recovery = 1; 1726 1727 if (lchk == NULL) { 1728 /* Mark end of the window */ 1729 net->fast_recovery_tsn = asoc->sending_seq - 1; 1730 } else { 1731 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 1732 } 1733 1734 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 1735 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 1736 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 1737 stcb->sctp_ep, stcb, net); 1738 } 1739 } else if (net->net_ack > 0) { 1740 /* 1741 * Mark a peg that we WOULD have done a cwnd 1742 * reduction but RFC2582 prevented this action. 1743 */ 1744 SCTP_STAT_INCR(sctps_fastretransinrtt); 1745 } 1746 } 1747 } 1748 1749 static void 1750 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, 1751 struct sctp_association *asoc, 1752 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit) 1753 { 1754 struct sctp_nets *net; 1755 1756 /******************************/ 1757 /* update cwnd and Early FR */ 1758 /******************************/ 1759 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1760 1761 #ifdef JANA_CMT_FAST_RECOVERY 1762 /* 1763 * CMT fast recovery code. Need to debug. 1764 */ 1765 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 1766 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) || 1767 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) { 1768 net->will_exit_fast_recovery = 1; 1769 } 1770 } 1771 #endif 1772 /* if nothing was acked on this destination skip it */ 1773 if (net->net_ack == 0) { 1774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1775 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 1776 } 1777 continue; 1778 } 1779 #ifdef JANA_CMT_FAST_RECOVERY 1780 /* 1781 * CMT fast recovery code 1782 */ 1783 /* 1784 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery 1785 * && net->will_exit_fast_recovery == 0) { @@@ Do something 1786 * } else if (sctp_cmt_on_off == 0 && 1787 * asoc->fast_retran_loss_recovery && will_exit == 0) { 1788 */ 1789 #endif 1790 1791 if (asoc->fast_retran_loss_recovery && 1792 (will_exit == 0) && 1793 (asoc->sctp_cmt_on_off == 0)) { 1794 /* 1795 * If we are in loss recovery we skip any cwnd 1796 * update 1797 */ 1798 return; 1799 } 1800 /* 1801 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 1802 * moved. 1803 */ 1804 if (accum_moved || 1805 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 1806 /* If the cumulative ack moved we can proceed */ 1807 if (net->cwnd <= net->ssthresh) { 1808 /* We are in slow start */ 1809 if (net->flight_size + net->net_ack >= net->cwnd) { 1810 sctp_hs_cwnd_increase(stcb, net); 1811 } else { 1812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1813 sctp_log_cwnd(stcb, net, net->net_ack, 1814 SCTP_CWND_LOG_NOADV_SS); 1815 } 1816 } 1817 } else { 1818 /* We are in congestion avoidance */ 1819 net->partial_bytes_acked += net->net_ack; 1820 if ((net->flight_size + net->net_ack >= net->cwnd) && 1821 (net->partial_bytes_acked >= net->cwnd)) { 1822 net->partial_bytes_acked -= net->cwnd; 1823 net->cwnd += net->mtu; 1824 sctp_enforce_cwnd_limit(asoc, net); 1825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1826 sctp_log_cwnd(stcb, net, net->mtu, 1827 SCTP_CWND_LOG_FROM_CA); 1828 } 1829 } else { 1830 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1831 sctp_log_cwnd(stcb, net, net->net_ack, 1832 SCTP_CWND_LOG_NOADV_CA); 1833 } 1834 } 1835 } 1836 } else { 1837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1838 sctp_log_cwnd(stcb, net, net->mtu, 1839 SCTP_CWND_LOG_NO_CUMACK); 1840 } 1841 } 1842 } 1843 } 1844 1845 1846 /* 1847 * H-TCP congestion control. The algorithm is detailed in: 1848 * R.N.Shorten, D.J.Leith: 1849 * "H-TCP: TCP for high-speed and long-distance networks" 1850 * Proc. PFLDnet, Argonne, 2004. 1851 * http://www.hamilton.ie/net/htcp3.pdf 1852 */ 1853 1854 1855 static int use_rtt_scaling = 1; 1856 static int use_bandwidth_switch = 1; 1857 1858 static inline int 1859 between(uint32_t seq1, uint32_t seq2, uint32_t seq3) 1860 { 1861 return (seq3 - seq2 >= seq1 - seq2); 1862 } 1863 1864 static inline uint32_t 1865 htcp_cong_time(struct htcp *ca) 1866 { 1867 return (sctp_get_tick_count() - ca->last_cong); 1868 } 1869 1870 static inline uint32_t 1871 htcp_ccount(struct htcp *ca) 1872 { 1873 return (htcp_cong_time(ca) / ca->minRTT); 1874 } 1875 1876 static inline void 1877 htcp_reset(struct htcp *ca) 1878 { 1879 ca->undo_last_cong = ca->last_cong; 1880 ca->undo_maxRTT = ca->maxRTT; 1881 ca->undo_old_maxB = ca->old_maxB; 1882 ca->last_cong = sctp_get_tick_count(); 1883 } 1884 1885 #ifdef SCTP_NOT_USED 1886 1887 static uint32_t 1888 htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net) 1889 { 1890 net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong; 1891 net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT; 1892 net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB; 1893 return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu)); 1894 } 1895 1896 #endif 1897 1898 static inline void 1899 measure_rtt(struct sctp_nets *net) 1900 { 1901 uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT; 1902 1903 /* keep track of minimum RTT seen so far, minRTT is zero at first */ 1904 if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT) 1905 net->cc_mod.htcp_ca.minRTT = srtt; 1906 1907 /* max RTT */ 1908 if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) { 1909 if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT) 1910 net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT; 1911 if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20)) 1912 net->cc_mod.htcp_ca.maxRTT = srtt; 1913 } 1914 } 1915 1916 static void 1917 measure_achieved_throughput(struct sctp_nets *net) 1918 { 1919 uint32_t now = sctp_get_tick_count(); 1920 1921 if (net->fast_retran_ip == 0) 1922 net->cc_mod.htcp_ca.bytes_acked = net->net_ack; 1923 1924 if (!use_bandwidth_switch) 1925 return; 1926 1927 /* achieved throughput calculations */ 1928 /* JRS - not 100% sure of this statement */ 1929 if (net->fast_retran_ip == 1) { 1930 net->cc_mod.htcp_ca.bytecount = 0; 1931 net->cc_mod.htcp_ca.lasttime = now; 1932 return; 1933 } 1934 net->cc_mod.htcp_ca.bytecount += net->net_ack; 1935 if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) && 1936 (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) && 1937 (net->cc_mod.htcp_ca.minRTT > 0)) { 1938 uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime); 1939 1940 if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) { 1941 /* just after backoff */ 1942 net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi; 1943 } else { 1944 net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4; 1945 if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB) 1946 net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi; 1947 if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB) 1948 net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB; 1949 } 1950 net->cc_mod.htcp_ca.bytecount = 0; 1951 net->cc_mod.htcp_ca.lasttime = now; 1952 } 1953 } 1954 1955 static inline void 1956 htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT) 1957 { 1958 if (use_bandwidth_switch) { 1959 uint32_t maxB = ca->maxB; 1960 uint32_t old_maxB = ca->old_maxB; 1961 1962 ca->old_maxB = ca->maxB; 1963 1964 if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) { 1965 ca->beta = BETA_MIN; 1966 ca->modeswitch = 0; 1967 return; 1968 } 1969 } 1970 if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) { 1971 ca->beta = (minRTT << 7) / maxRTT; 1972 if (ca->beta < BETA_MIN) 1973 ca->beta = BETA_MIN; 1974 else if (ca->beta > BETA_MAX) 1975 ca->beta = BETA_MAX; 1976 } else { 1977 ca->beta = BETA_MIN; 1978 ca->modeswitch = 1; 1979 } 1980 } 1981 1982 static inline void 1983 htcp_alpha_update(struct htcp *ca) 1984 { 1985 uint32_t minRTT = ca->minRTT; 1986 uint32_t factor = 1; 1987 uint32_t diff = htcp_cong_time(ca); 1988 1989 if (diff > (uint32_t) hz) { 1990 diff -= hz; 1991 factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz; 1992 } 1993 if (use_rtt_scaling && minRTT) { 1994 uint32_t scale = (hz << 3) / (10 * minRTT); 1995 1996 scale = min(max(scale, 1U << 2), 10U << 3); /* clamping ratio to 1997 * interval [0.5,10]<<3 */ 1998 factor = (factor << 3) / scale; 1999 if (!factor) 2000 factor = 1; 2001 } 2002 ca->alpha = 2 * factor * ((1 << 7) - ca->beta); 2003 if (!ca->alpha) 2004 ca->alpha = ALPHA_BASE; 2005 } 2006 2007 /* After we have the rtt data to calculate beta, we'd still prefer to wait one 2008 * rtt before we adjust our beta to ensure we are working from a consistent 2009 * data. 2010 * 2011 * This function should be called when we hit a congestion event since only at 2012 * that point do we really have a real sense of maxRTT (the queues en route 2013 * were getting just too full now). 2014 */ 2015 static void 2016 htcp_param_update(struct sctp_nets *net) 2017 { 2018 uint32_t minRTT = net->cc_mod.htcp_ca.minRTT; 2019 uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT; 2020 2021 htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT); 2022 htcp_alpha_update(&net->cc_mod.htcp_ca); 2023 2024 /* 2025 * add slowly fading memory for maxRTT to accommodate routing 2026 * changes etc 2027 */ 2028 if (minRTT > 0 && maxRTT > minRTT) 2029 net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100; 2030 } 2031 2032 static uint32_t 2033 htcp_recalc_ssthresh(struct sctp_nets *net) 2034 { 2035 htcp_param_update(net); 2036 return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu)); 2037 } 2038 2039 static void 2040 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) 2041 { 2042 /*- 2043 * How to handle these functions? 2044 * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question. 2045 * return; 2046 */ 2047 if (net->cwnd <= net->ssthresh) { 2048 /* We are in slow start */ 2049 if (net->flight_size + net->net_ack >= net->cwnd) { 2050 if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) { 2051 net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)); 2052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2053 sctp_log_cwnd(stcb, net, net->mtu, 2054 SCTP_CWND_LOG_FROM_SS); 2055 } 2056 } else { 2057 net->cwnd += net->net_ack; 2058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2059 sctp_log_cwnd(stcb, net, net->net_ack, 2060 SCTP_CWND_LOG_FROM_SS); 2061 } 2062 } 2063 sctp_enforce_cwnd_limit(&stcb->asoc, net); 2064 } else { 2065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2066 sctp_log_cwnd(stcb, net, net->net_ack, 2067 SCTP_CWND_LOG_NOADV_SS); 2068 } 2069 } 2070 } else { 2071 measure_rtt(net); 2072 2073 /* 2074 * In dangerous area, increase slowly. In theory this is 2075 * net->cwnd += alpha / net->cwnd 2076 */ 2077 /* What is snd_cwnd_cnt?? */ 2078 if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) { 2079 /*- 2080 * Does SCTP have a cwnd clamp? 2081 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS). 2082 */ 2083 net->cwnd += net->mtu; 2084 net->partial_bytes_acked = 0; 2085 sctp_enforce_cwnd_limit(&stcb->asoc, net); 2086 htcp_alpha_update(&net->cc_mod.htcp_ca); 2087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2088 sctp_log_cwnd(stcb, net, net->mtu, 2089 SCTP_CWND_LOG_FROM_CA); 2090 } 2091 } else { 2092 net->partial_bytes_acked += net->net_ack; 2093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2094 sctp_log_cwnd(stcb, net, net->net_ack, 2095 SCTP_CWND_LOG_NOADV_CA); 2096 } 2097 } 2098 2099 net->cc_mod.htcp_ca.bytes_acked = net->mtu; 2100 } 2101 } 2102 2103 #ifdef SCTP_NOT_USED 2104 /* Lower bound on congestion window. */ 2105 static uint32_t 2106 htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net) 2107 { 2108 return (net->ssthresh); 2109 } 2110 2111 #endif 2112 2113 static void 2114 htcp_init(struct sctp_nets *net) 2115 { 2116 memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp)); 2117 net->cc_mod.htcp_ca.alpha = ALPHA_BASE; 2118 net->cc_mod.htcp_ca.beta = BETA_MIN; 2119 net->cc_mod.htcp_ca.bytes_acked = net->mtu; 2120 net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count(); 2121 } 2122 2123 static void 2124 sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) 2125 { 2126 /* 2127 * We take the max of the burst limit times a MTU or the 2128 * INITIAL_CWND. We then limit this to 4 MTU's of sending. 2129 */ 2130 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 2131 net->ssthresh = stcb->asoc.peers_rwnd; 2132 sctp_enforce_cwnd_limit(&stcb->asoc, net); 2133 htcp_init(net); 2134 2135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 2136 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION); 2137 } 2138 } 2139 2140 static void 2141 sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb, 2142 struct sctp_association *asoc, 2143 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit) 2144 { 2145 struct sctp_nets *net; 2146 2147 /******************************/ 2148 /* update cwnd and Early FR */ 2149 /******************************/ 2150 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 2151 2152 #ifdef JANA_CMT_FAST_RECOVERY 2153 /* 2154 * CMT fast recovery code. Need to debug. 2155 */ 2156 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 2157 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) || 2158 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) { 2159 net->will_exit_fast_recovery = 1; 2160 } 2161 } 2162 #endif 2163 /* if nothing was acked on this destination skip it */ 2164 if (net->net_ack == 0) { 2165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2166 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 2167 } 2168 continue; 2169 } 2170 #ifdef JANA_CMT_FAST_RECOVERY 2171 /* 2172 * CMT fast recovery code 2173 */ 2174 /* 2175 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery 2176 * && net->will_exit_fast_recovery == 0) { @@@ Do something 2177 * } else if (sctp_cmt_on_off == 0 && 2178 * asoc->fast_retran_loss_recovery && will_exit == 0) { 2179 */ 2180 #endif 2181 2182 if (asoc->fast_retran_loss_recovery && 2183 will_exit == 0 && 2184 (asoc->sctp_cmt_on_off == 0)) { 2185 /* 2186 * If we are in loss recovery we skip any cwnd 2187 * update 2188 */ 2189 return; 2190 } 2191 /* 2192 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 2193 * moved. 2194 */ 2195 if (accum_moved || 2196 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 2197 htcp_cong_avoid(stcb, net); 2198 measure_achieved_throughput(net); 2199 } else { 2200 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2201 sctp_log_cwnd(stcb, net, net->mtu, 2202 SCTP_CWND_LOG_NO_CUMACK); 2203 } 2204 } 2205 } 2206 } 2207 2208 static void 2209 sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb, 2210 struct sctp_association *asoc) 2211 { 2212 struct sctp_nets *net; 2213 2214 /* 2215 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) && 2216 * (net->fast_retran_loss_recovery == 0))) 2217 */ 2218 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 2219 if ((asoc->fast_retran_loss_recovery == 0) || 2220 (asoc->sctp_cmt_on_off > 0)) { 2221 /* out of a RFC2582 Fast recovery window? */ 2222 if (net->net_ack > 0) { 2223 /* 2224 * per section 7.2.3, are there any 2225 * destinations that had a fast retransmit 2226 * to them. If so what we need to do is 2227 * adjust ssthresh and cwnd. 2228 */ 2229 struct sctp_tmit_chunk *lchk; 2230 int old_cwnd = net->cwnd; 2231 2232 /* JRS - reset as if state were changed */ 2233 htcp_reset(&net->cc_mod.htcp_ca); 2234 net->ssthresh = htcp_recalc_ssthresh(net); 2235 net->cwnd = net->ssthresh; 2236 sctp_enforce_cwnd_limit(asoc, net); 2237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2238 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 2239 SCTP_CWND_LOG_FROM_FR); 2240 } 2241 lchk = TAILQ_FIRST(&asoc->send_queue); 2242 2243 net->partial_bytes_acked = 0; 2244 /* Turn on fast recovery window */ 2245 asoc->fast_retran_loss_recovery = 1; 2246 if (lchk == NULL) { 2247 /* Mark end of the window */ 2248 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 2249 } else { 2250 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 2251 } 2252 2253 /* 2254 * CMT fast recovery -- per destination 2255 * recovery variable. 2256 */ 2257 net->fast_retran_loss_recovery = 1; 2258 2259 if (lchk == NULL) { 2260 /* Mark end of the window */ 2261 net->fast_recovery_tsn = asoc->sending_seq - 1; 2262 } else { 2263 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 2264 } 2265 2266 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 2267 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 2268 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 2269 stcb->sctp_ep, stcb, net); 2270 } 2271 } else if (net->net_ack > 0) { 2272 /* 2273 * Mark a peg that we WOULD have done a cwnd 2274 * reduction but RFC2582 prevented this action. 2275 */ 2276 SCTP_STAT_INCR(sctps_fastretransinrtt); 2277 } 2278 } 2279 } 2280 2281 static void 2282 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb, 2283 struct sctp_nets *net) 2284 { 2285 int old_cwnd = net->cwnd; 2286 2287 /* JRS - reset as if the state were being changed to timeout */ 2288 htcp_reset(&net->cc_mod.htcp_ca); 2289 net->ssthresh = htcp_recalc_ssthresh(net); 2290 net->cwnd = net->mtu; 2291 net->partial_bytes_acked = 0; 2292 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2293 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX); 2294 } 2295 } 2296 2297 static void 2298 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, 2299 struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED) 2300 { 2301 int old_cwnd; 2302 2303 old_cwnd = net->cwnd; 2304 2305 /* JRS - reset hctp as if state changed */ 2306 if (in_window == 0) { 2307 htcp_reset(&net->cc_mod.htcp_ca); 2308 SCTP_STAT_INCR(sctps_ecnereducedcwnd); 2309 net->ssthresh = htcp_recalc_ssthresh(net); 2310 if (net->ssthresh < net->mtu) { 2311 net->ssthresh = net->mtu; 2312 /* here back off the timer as well, to slow us down */ 2313 net->RTO <<= 1; 2314 } 2315 net->cwnd = net->ssthresh; 2316 sctp_enforce_cwnd_limit(&stcb->asoc, net); 2317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 2318 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 2319 } 2320 } 2321 } 2322 2323 struct sctp_cc_functions sctp_cc_functions[] = { 2324 { 2325 .sctp_set_initial_cc_param = sctp_set_initial_cc_param, 2326 .sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack, 2327 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common, 2328 .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr, 2329 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout, 2330 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo, 2331 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped, 2332 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output, 2333 }, 2334 { 2335 .sctp_set_initial_cc_param = sctp_set_initial_cc_param, 2336 .sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack, 2337 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common, 2338 .sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr, 2339 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout, 2340 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo, 2341 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped, 2342 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output, 2343 }, 2344 { 2345 .sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param, 2346 .sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack, 2347 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common, 2348 .sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr, 2349 .sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout, 2350 .sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo, 2351 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped, 2352 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output, 2353 }, 2354 { 2355 .sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param, 2356 .sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack, 2357 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common, 2358 .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr, 2359 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout, 2360 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo, 2361 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped, 2362 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output, 2363 .sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted, 2364 .sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged, 2365 .sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins, 2366 .sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack, 2367 .sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option, 2368 .sctp_rtt_calculated = sctp_rtt_rtcc_calculated 2369 } 2370 }; 2371