/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_dtrace_declare.h>
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *assoc;
    uint32_t cwnd_in_mtu;

    assoc = &stcb->asoc;
    cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
    if (cwnd_in_mtu == 0) {
        /* Using 0 means that the value of RFC 4960 is used. */
        net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    } else {
        /*
         * We take the minimum of the burst limit and the initial
         * congestion window.
         */
        if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
            cwnd_in_mtu = assoc->max_burst;
        net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
    }
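    /*
     * Worked example (illustrative numbers, not normative): with a
     * 1500-byte MTU the RFC 4960 branch gives
     * min(4 * 1500, max(2 * 1500, SCTP_INITIAL_CWND)) bytes, while
     * sctp_initial_cwnd = 10 combined with max_burst = 4 is clamped
     * to the burst limit, i.e.
     * cwnd = (1500 - sizeof(struct sctphdr)) * 4.
     */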
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        /* In case of resource pooling, initialize appropriately */
        net->cwnd /= assoc->numnets;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
    net->ssthresh = assoc->peers_rwnd;

    SDT_PROBE(sctp, cwnd, net, init,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        0, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
        (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            if (net->lastsa > 0) {
                t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) net->lastsa;
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
    }
    /*-
     * CMT fast recovery code. Needs debugging. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per RFC 4960 Section 7.2.3: were there any
                 * destinations that had a fast retransmit sent
                 * to them? If so, we need to adjust ssthresh
                 * and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
                    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
                        net->ssthresh = (uint32_t) (((uint64_t) 4 *
                            (uint64_t) net->mtu *
                            (uint64_t) net->ssthresh) /
                            (uint64_t) t_ssthresh);
                    }
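                    /*
                     * Illustrative numbers for the RPV1 scaling
                     * above: each destination gets its share of
                     * 4 MTUs, weighted by ssthresh. With two paths
                     * of ssthresh 30000 and 10000 (t_ssthresh =
                     * 40000) and a 1500-byte MTU, the first path
                     * gets 4 * 1500 * 30000 / 40000 = 4500 bytes.
                     */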
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
                        uint32_t srtt;

                        srtt = net->lastsa;
                        /*
                         * lastsa >> 3; we don't need to divide ...
                         */
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        /*
                         * Short Version => Equal to
                         * Contel Version MBe
                         */
                        net->ssthresh = (uint32_t) (((uint64_t) 4 *
                            (uint64_t) net->mtu *
                            (uint64_t) net->cwnd) /
                            ((uint64_t) srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                    }
                    if ((net->cwnd > t_cwnd / 2) &&
                        (net->ssthresh < net->cwnd - t_cwnd / 2)) {
                        net->ssthresh = net->cwnd - t_cwnd / 2;
                    }
                    if (net->ssthresh < net->mtu) {
                        net->ssthresh = net->mtu;
                    }
                } else {
                    net->ssthresh = net->cwnd / 2;
                    if (net->ssthresh < (net->mtu * 2)) {
                        net->ssthresh = 2 * net->mtu;
                    }
                }
                net->cwnd = net->ssthresh;
                SDT_PROBE(sctp, cwnd, net, fr,
                    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
                    old_cwnd, net->cwnd);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on the fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1	/* Losing to other flows */
#define SCTP_INST_NEUTRAL 2	/* Neutral, no indication */
#define SCTP_INST_GAINING 3	/* Gaining, step down possible */
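/*
 * Note on the SDT probes in the RTCC code below (descriptive, derived
 * from the code itself): "probepoint" packs the current cwnd into the
 * upper 32 bits, a probe point number into bits 16..31, and an
 * indicator flag into the low 16 bits, so a single uint64_t records
 * where and why a probe fired.
 */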
static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    probepoint = (((uint64_t) net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /*
         * The rtt increased. We don't update the bw, so we don't
         * update the rtt either.
         */
        /* Probe point 5 */
        probepoint |= ((5 << 16) | 1);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
            if (net->cc_mod.rtcc.last_step_state == 5)
                net->cc_mod.rtcc.step_cnt++;
            else
                net->cc_mod.rtcc.step_cnt = 1;
            net->cc_mod.rtcc.last_step_state = 5;
            if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
                ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
                ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
                /* Try a step down */
                oth = net->cc_mod.rtcc.vol_reduce;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.step_cnt;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.last_step_state;
                SDT_PROBE(sctp, cwnd, net, rttstep,
                    vtag,
                    ((net->cc_mod.rtcc.lbw << 32) | nbw),
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    oth,
                    probepoint);
                if (net->cwnd > (4 * net->mtu)) {
                    net->cwnd -= net->mtu;
                    net->cc_mod.rtcc.vol_reduce++;
                } else {
                    net->cc_mod.rtcc.step_cnt = 0;
                }
            }
        }
        return (1);
    }
    if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /*
         * The rtt decreased, so there could be more room. We update
         * both the bw and the rtt here to lock this in as a good
         * step down.
         */
        /* Probe point 6 */
        probepoint |= ((6 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.last_step_state == 5) &&
                (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
                /* The step down worked */
                net->cc_mod.rtcc.step_cnt = 0;
                return (1);
            } else {
                net->cc_mod.rtcc.last_step_state = 6;
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
        net->cc_mod.rtcc.lbw = nbw;
        net->cc_mod.rtcc.lbw_rtt = net->rtt;
        net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
        if (inst_ind == SCTP_INST_GAINING)
            return (1);
        else if (inst_ind == SCTP_INST_NEUTRAL)
            return (1);
        else
            return (0);
    }
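    /*
     * Sketch of the step-down probing in this function (descriptive):
     * while in state 5 each SACK bumps step_cnt, and every
     * steady_step SACKs we shave one MTU off cwnd as a voluntary
     * reduction. If the rtt later falls while step_cnt is still past
     * steady_step, the branch above treats the step down as having
     * worked and resets the counter.
     */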
    /*
     * OK, bw and rtt remained the same; no update to anything.
     */
    /* Probe point 7 */
    probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);

    if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
        if (net->cc_mod.rtcc.last_step_state == 5)
            net->cc_mod.rtcc.step_cnt++;
        else
            net->cc_mod.rtcc.step_cnt = 1;
        net->cc_mod.rtcc.last_step_state = 5;
        if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
            ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
            ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
            /* Try a step down */
            if (net->cwnd > (4 * net->mtu)) {
                net->cwnd -= net->mtu;
                net->cc_mod.rtcc.vol_reduce++;
                return (1);
            } else {
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
    }
    if (inst_ind == SCTP_INST_GAINING)
        return (1);
    else if (inst_ind == SCTP_INST_NEUTRAL)
        return (1);
    else
        return ((int)net->cc_mod.rtcc.ret_from_eq);
}
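/*
 * Descriptive summary of the branches below: the bandwidth dropped,
 * and what the rtt did tells us why. If the rtt rose while we had
 * grown cwnd past the point where the baseline was set, we likely
 * caused it ourselves and back off; if the rtt rose without us adding
 * data, someone else is competing; if the rtt fell or held, the path
 * itself slowed down.
 */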
static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    /* Bandwidth decreased. */
    probepoint = (((uint64_t) net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /* rtt increased */
        /* Did we add more? */
        if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
            (inst_ind != SCTP_INST_LOOSING)) {
            /* Maybe we caused it; back off? */
            /* PROBE POINT 1 */
            probepoint |= ((1 << 16) | 1);
            SDT_PROBE(sctp, cwnd, net, rttvar,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                net->flight_size,
                probepoint);

            if (net->cc_mod.rtcc.ret_from_eq) {
                /*
                 * Switch over to CA if we are less aggressive
                 */
                net->ssthresh = net->cwnd - 1;
                net->partial_bytes_acked = 0;
            }
            return (1);
        }
        /* Probe point 2 */
        probepoint |= ((2 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);

        /* Someone else - fight for more? */
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            /*
             * Did we voluntarily give up some? If so, take one
             * back.
             */
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 2;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /* bw & rtt decreased */
        /* Probe point 3 */
        probepoint |= ((3 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 3;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    }
    /* The bw decreased but rtt stayed the same */
    /* Probe point 4 */
    probepoint |= ((4 << 16) | 0);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        if ((net->cc_mod.rtcc.vol_reduce) &&
            (inst_ind != SCTP_INST_GAINING)) {
            net->cwnd += net->mtu;
            net->cc_mod.rtcc.vol_reduce--;
        }
        net->cc_mod.rtcc.last_step_state = 4;
        net->cc_mod.rtcc.step_cnt = 0;
    }
out_decision:
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    if (inst_ind == SCTP_INST_GAINING) {
        return (1);
    } else {
        return (0);
    }
}
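/*
 * Descriptive note: every path through out_decision above locks in
 * the new bandwidth/rtt baseline; the caller's cwnd update is then
 * vetoed (return 1) only when the instantaneous indicator still says
 * we are gaining.
 */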
static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
    uint64_t oth, probepoint;

    /*
     * BW increased, so update and return 0, since all actions in our
     * table say to do the normal CC update. Note that we pay no
     * attention to the inst_ind since our overall sum is increasing.
     */
    /* PROBE POINT 0 */
    probepoint = (((uint64_t) net->cwnd) << 32);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        net->cc_mod.rtcc.last_step_state = 0;
        net->cc_mod.rtcc.step_cnt = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
    }
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    return (0);
}

/* RTCC algorithm to limit growth of cwnd. Return
 * true if you want to NOT allow cwnd growth.
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
    uint64_t bw_offset, rtt_offset, rtt, vtag, probepoint;
    uint64_t bytes_for_this_rtt, inst_bw;
    uint64_t div, inst_off;
    int bw_shift;
    uint8_t inst_ind;
    int ret;

    /*-
     * Here we need to see if we want
     * to limit cwnd growth due to an increase
     * in the overall rtt but no increase in bw.
     * We use the following table to figure
     * out what we should do. When we return
     * 0, the cc update goes on as planned. If we
     * return 1, then no cc update happens and cwnd
     * stays where it is at.
     * ----------------------------------
     *   BW  |  RTT  | Action
     * *********************************
     *   INC |  INC  | return 0
     * ----------------------------------
     *   INC |  SAME | return 0
     * ----------------------------------
     *   INC |  DECR | return 0
     * ----------------------------------
     *  SAME |  INC  | return 1
     * ----------------------------------
     *  SAME |  SAME | return 1
     * ----------------------------------
     *  SAME |  DECR | return 0
     * ----------------------------------
     *  DECR |  INC  | return 0 or 1 based on if we caused it.
     * ----------------------------------
     *  DECR |  SAME | return 0
     * ----------------------------------
     *  DECR |  DECR | return 0
     * ----------------------------------
     *
     * We are a bit fuzzy about what counts as an increase or
     * decrease. For BW it is the same if it did not change within
     * 1/64th. For RTT it stayed the same if it did not change
     * within 1/32nd.
     */
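    /*
     * Illustrative numbers (assuming shifts of 6 for bw and 5 for
     * rtt, matching the 1/64th and 1/32nd in the comment above): with
     * lbw = 64000 the bw must move by more than 64000 >> 6 = 1000 to
     * count as a change, and with lbw_rtt = 32000 the rtt must move
     * by more than 32000 >> 5 = 1000.
     */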
    bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    rtt = stcb->asoc.my_vtag;
    vtag = (rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
    probepoint = (((uint64_t) net->cwnd) << 32);
    rtt = net->rtt;
    if (net->cc_mod.rtcc.rtt_set_this_sack) {
        net->cc_mod.rtcc.rtt_set_this_sack = 0;
        bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
        if (net->rtt) {
            div = net->rtt / 1000;
            if (div) {
                inst_bw = bytes_for_this_rtt / div;
                inst_off = inst_bw >> bw_shift;
                if (inst_bw > nbw)
                    inst_ind = SCTP_INST_GAINING;
                else if ((inst_bw + inst_off) < nbw)
                    inst_ind = SCTP_INST_LOOSING;
                else
                    inst_ind = SCTP_INST_NEUTRAL;
                probepoint |= ((0xb << 16) | inst_ind);
            } else {
                inst_bw = bytes_for_this_rtt / (uint64_t) (net->rtt);
                /* Can't determine, do not change */
                inst_ind = net->cc_mod.rtcc.last_inst_ind;
                probepoint |= ((0xc << 16) | inst_ind);
            }
        } else {
            inst_bw = bytes_for_this_rtt;
            /* Can't determine, do not change */
            inst_ind = net->cc_mod.rtcc.last_inst_ind;
            probepoint |= ((0xd << 16) | inst_ind);
        }
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((nbw << 32) | inst_bw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
            net->flight_size,
            probepoint);
    } else {
        /* No rtt measurement, use the last one */
        inst_ind = net->cc_mod.rtcc.last_inst_ind;
    }
    bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
        ret = cc_bw_increase(stcb, net, nbw, vtag);
        goto out;
    }
    rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
        ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
        goto out;
    }
    /*
     * If we reach here then we are in a situation where the bw stayed
     * the same.
     */
    ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
    net->cc_mod.rtcc.last_inst_ind = inst_ind;
    return (ret);
}
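/*
 * Descriptive note: cc_bw_limit() compares two estimates in the same
 * unit, bytes per millisecond: nbw, averaged over the whole
 * measurement window by the caller, and inst_bw, computed above from
 * the bytes acked during the most recent rtt (net->rtt is in usec,
 * hence the divide by 1000).
 */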
static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
    struct sctp_nets *net;
    int old_cwnd;
    uint32_t t_ssthresh, t_cwnd, incr;
    uint64_t t_ucwnd_sbw;
    uint64_t t_path_mptcp;
    uint64_t mptcp_like_alpha;
    uint32_t srtt;
    uint64_t max_path;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    t_path_mptcp = 0;
    mptcp_like_alpha = 1;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
        max_path = 0;
        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            /* lastsa >> 3; we don't need to divide ... */
            srtt = net->lastsa;
            if (srtt > 0) {
                uint64_t tmp;

                t_ucwnd_sbw += (uint64_t) net->cwnd / (uint64_t) srtt;
                t_path_mptcp += (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
                    (((uint64_t) net->mtu) * (uint64_t) srtt);
                tmp = (((uint64_t) net->cwnd) << SHIFT_MPTCP_MULTI_N) /
                    ((uint64_t) net->mtu * (uint64_t) (srtt * srtt));
                if (tmp > max_path) {
                    max_path = tmp;
                }
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
        if (t_path_mptcp > 0) {
            mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
        } else {
            mptcp_like_alpha = 1;
        }
    }
    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Needs debugging.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination, skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd update
             */
            return;
        }
        /*
         * Did any measurements go on for this network?
         */
        if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
            uint64_t nbw;

            /*
             * At this point our bw_bytes has been updated by
             * incoming sack information.
             *
             * But our bw may not yet be set.
             */
            if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
                nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
            } else {
                nbw = net->cc_mod.rtcc.bw_bytes;
            }
            if (net->cc_mod.rtcc.lbw) {
                if (cc_bw_limit(stcb, net, nbw)) {
                    /* Hold here, no update */
                    continue;
                }
            } else {
                uint64_t vtag, probepoint;

                probepoint = (((uint64_t) net->cwnd) << 32);
                probepoint |= ((0xa << 16) | 0);
                vtag = (net->rtt << 32) |
                    (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
                    (stcb->rport);

                SDT_PROBE(sctp, cwnd, net, rttvar,
                    vtag,
                    nbw,
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    net->flight_size,
                    probepoint);
                net->cc_mod.rtcc.lbw = nbw;
                net->cc_mod.rtcc.lbw_rtt = net->rtt;
                if (net->cc_mod.rtcc.rtt_set_this_sack) {
                    net->cc_mod.rtcc.rtt_set_this_sack = 0;
                    net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
                }
            }
        }
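        /*
         * Descriptive note: the else branch above seeds lbw with the
         * first usable measurement; only once that baseline exists
         * can cc_bw_limit() veto cwnd growth on later SACKs.
         */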
807 */ 808 if (accum_moved || 809 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) { 810 /* If the cumulative ack moved we can proceed */ 811 if (net->cwnd <= net->ssthresh) { 812 /* We are in slow start */ 813 if (net->flight_size + net->net_ack >= net->cwnd) { 814 uint32_t limit; 815 816 old_cwnd = net->cwnd; 817 switch (asoc->sctp_cmt_on_off) { 818 case SCTP_CMT_RPV1: 819 limit = (uint32_t) (((uint64_t) net->mtu * 820 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) * 821 (uint64_t) net->ssthresh) / 822 (uint64_t) t_ssthresh); 823 incr = (uint32_t) (((uint64_t) net->net_ack * 824 (uint64_t) net->ssthresh) / 825 (uint64_t) t_ssthresh); 826 if (incr > limit) { 827 incr = limit; 828 } 829 if (incr == 0) { 830 incr = 1; 831 } 832 break; 833 case SCTP_CMT_RPV2: 834 /* 835 * lastsa>>3; we don't need 836 * to divide ... 837 */ 838 srtt = net->lastsa; 839 if (srtt == 0) { 840 srtt = 1; 841 } 842 limit = (uint32_t) (((uint64_t) net->mtu * 843 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) * 844 (uint64_t) net->cwnd) / 845 ((uint64_t) srtt * t_ucwnd_sbw)); 846 /* INCREASE FACTOR */ 847 incr = (uint32_t) (((uint64_t) net->net_ack * 848 (uint64_t) net->cwnd) / 849 ((uint64_t) srtt * t_ucwnd_sbw)); 850 /* INCREASE FACTOR */ 851 if (incr > limit) { 852 incr = limit; 853 } 854 if (incr == 0) { 855 incr = 1; 856 } 857 break; 858 case SCTP_CMT_MPTCP: 859 limit = (uint32_t) (((uint64_t) net->mtu * 860 mptcp_like_alpha * 861 (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >> 862 SHIFT_MPTCP_MULTI); 863 incr = (uint32_t) (((uint64_t) net->net_ack * 864 mptcp_like_alpha) >> 865 SHIFT_MPTCP_MULTI); 866 if (incr > limit) { 867 incr = limit; 868 } 869 if (incr > net->net_ack) { 870 incr = net->net_ack; 871 } 872 if (incr > net->mtu) { 873 incr = net->mtu; 874 } 875 break; 876 default: 877 incr = net->net_ack; 878 if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) { 879 incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable); 880 } 881 break; 882 } 883 net->cwnd += incr; 884 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 885 sctp_log_cwnd(stcb, net, incr, 886 SCTP_CWND_LOG_FROM_SS); 887 } 888 SDT_PROBE(sctp, cwnd, net, ack, 889 stcb->asoc.my_vtag, 890 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 891 net, 892 old_cwnd, net->cwnd); 893 } else { 894 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 895 sctp_log_cwnd(stcb, net, net->net_ack, 896 SCTP_CWND_LOG_NOADV_SS); 897 } 898 } 899 } else { 900 /* We are in congestion avoidance */ 901 /* 902 * Add to pba 903 */ 904 net->partial_bytes_acked += net->net_ack; 905 906 if ((net->flight_size + net->net_ack >= net->cwnd) && 907 (net->partial_bytes_acked >= net->cwnd)) { 908 net->partial_bytes_acked -= net->cwnd; 909 old_cwnd = net->cwnd; 910 switch (asoc->sctp_cmt_on_off) { 911 case SCTP_CMT_RPV1: 912 incr = (uint32_t) (((uint64_t) net->mtu * 913 (uint64_t) net->ssthresh) / 914 (uint64_t) t_ssthresh); 915 if (incr == 0) { 916 incr = 1; 917 } 918 break; 919 case SCTP_CMT_RPV2: 920 /* 921 * lastsa>>3; we don't need 922 * to divide ... 
923 */ 924 srtt = net->lastsa; 925 if (srtt == 0) { 926 srtt = 1; 927 } 928 incr = (uint32_t) ((uint64_t) net->mtu * 929 (uint64_t) net->cwnd / 930 ((uint64_t) srtt * 931 t_ucwnd_sbw)); 932 /* INCREASE FACTOR */ 933 if (incr == 0) { 934 incr = 1; 935 } 936 break; 937 case SCTP_CMT_MPTCP: 938 incr = (uint32_t) ((mptcp_like_alpha * 939 (uint64_t) net->cwnd) >> 940 SHIFT_MPTCP_MULTI); 941 if (incr > net->mtu) { 942 incr = net->mtu; 943 } 944 break; 945 default: 946 incr = net->mtu; 947 break; 948 } 949 net->cwnd += incr; 950 SDT_PROBE(sctp, cwnd, net, ack, 951 stcb->asoc.my_vtag, 952 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), 953 net, 954 old_cwnd, net->cwnd); 955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 956 sctp_log_cwnd(stcb, net, net->mtu, 957 SCTP_CWND_LOG_FROM_CA); 958 } 959 } else { 960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 961 sctp_log_cwnd(stcb, net, net->net_ack, 962 SCTP_CWND_LOG_NOADV_CA); 963 } 964 } 965 } 966 } else { 967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 968 sctp_log_cwnd(stcb, net, net->mtu, 969 SCTP_CWND_LOG_NO_CUMACK); 970 } 971 } 972 } 973 } 974 975 static void 976 sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net) 977 { 978 int old_cwnd; 979 980 old_cwnd = net->cwnd; 981 net->cwnd = net->mtu; 982 SDT_PROBE(sctp, cwnd, net, ack, 983 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, 984 old_cwnd, net->cwnd); 985 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n", 986 net, net->cwnd); 987 } 988 989 990 static void 991 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) 992 { 993 int old_cwnd = net->cwnd; 994 uint32_t t_ssthresh, t_cwnd; 995 uint64_t t_ucwnd_sbw; 996 997 /* MT FIXME: Don't compute this over and over again */ 998 t_ssthresh = 0; 999 t_cwnd = 0; 1000 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) || 1001 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) { 1002 struct sctp_nets *lnet; 1003 uint32_t srtt; 1004 1005 t_ucwnd_sbw = 0; 1006 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1007 t_ssthresh += lnet->ssthresh; 1008 t_cwnd += lnet->cwnd; 1009 srtt = lnet->lastsa; 1010 /* lastsa>>3; we don't need to divide ... */ 1011 if (srtt > 0) { 1012 t_ucwnd_sbw += (uint64_t) lnet->cwnd / (uint64_t) srtt; 1013 } 1014 } 1015 if (t_ucwnd_sbw < 1) { 1016 t_ucwnd_sbw = 1; 1017 } 1018 if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) { 1019 net->ssthresh = (uint32_t) (((uint64_t) 4 * 1020 (uint64_t) net->mtu * 1021 (uint64_t) net->ssthresh) / 1022 (uint64_t) t_ssthresh); 1023 } else { 1024 uint64_t cc_delta; 1025 1026 srtt = net->lastsa; 1027 /* lastsa>>3; we don't need to divide ... 
static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        struct sctp_nets *lnet;
        uint32_t srtt;

        t_ucwnd_sbw = 0;
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += lnet->ssthresh;
            t_cwnd += lnet->cwnd;
            srtt = lnet->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt > 0) {
                t_ucwnd_sbw += (uint64_t) lnet->cwnd / (uint64_t) srtt;
            }
        }
        if (t_ucwnd_sbw < 1) {
            t_ucwnd_sbw = 1;
        }
        if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
            net->ssthresh = (uint32_t) (((uint64_t) 4 *
                (uint64_t) net->mtu *
                (uint64_t) net->ssthresh) /
                (uint64_t) t_ssthresh);
        } else {
            uint64_t cc_delta;

            srtt = net->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt == 0) {
                srtt = 1;
            }
            cc_delta = t_ucwnd_sbw * (uint64_t) srtt / 2;
            if (cc_delta < t_cwnd) {
                net->ssthresh = (uint32_t) ((uint64_t) t_cwnd - cc_delta);
            } else {
                net->ssthresh = net->mtu;
            }
        }
        if ((net->cwnd > t_cwnd / 2) &&
            (net->ssthresh < net->cwnd - t_cwnd / 2)) {
            net->ssthresh = net->cwnd - t_cwnd / 2;
        }
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
        }
    } else {
        net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
    }
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    SDT_PROBE(sctp, cwnd, net, to,
        stcb->asoc.my_vtag,
        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
        net,
        old_cwnd, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
    int old_cwnd = net->cwnd;

    if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
        /* Data center congestion control */
        if (in_window == 0) {
            /*
             * Go to CA with the cwnd at the point we sent the TSN
             * that was marked with a CE.
             */
            if (net->ecn_prev_cwnd < net->cwnd) {
                /* Restore to the previous cwnd */
                net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
            } else {
                /* Just cut in half */
                net->cwnd /= 2;
            }
            /* Drop to CA */
            net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        } else {
            /*
             * Further tuning down required over the drastic
             * original cut
             */
            net->ssthresh -= (net->mtu * num_pkt_lost);
            net->cwnd -= (net->mtu * num_pkt_lost);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
    } else {
        if (in_window == 0) {
            SCTP_STAT_INCR(sctps_ecnereducedcwnd);
            net->ssthresh = net->cwnd / 2;
            if (net->ssthresh < net->mtu) {
                net->ssthresh = net->mtu;
                /*
                 * here back off the timer as well, to slow us down
                 */
                net->RTO <<= 1;
            }
            net->cwnd = net->ssthresh;
            SDT_PROBE(sctp, cwnd, net, ecn,
                stcb->asoc.my_vtag,
                ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                net,
                old_cwnd, net->cwnd);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
    }
}
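/*
 * Descriptive note on the DCCC/ECN branch above: outside the window
 * (in_window == 0) we fall back toward the cwnd we had when the
 * CE-marked TSN was sent; inside the window we keep trimming one MTU
 * per newly reported loss instead of halving again.
 */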
static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
    uint32_t bw_avail;
    int rtt;
    unsigned int incr;
    int old_cwnd = net->cwnd;

    /* need the real RTT in ms for this calculation */
    rtt = net->rtt / 1000;
    /* get the bottleneck bw */
    *bottle_bw = ntohl(cp->bottle_bw);
    /* and what's on the queue */
    *on_queue = ntohl(cp->current_onq);
    /*
     * Adjust the on-queue value if our flight is more; it could be
     * that the router has not yet gotten data "in-flight" to it.
     */
    if (*on_queue < net->flight_size)
        *on_queue = net->flight_size;
    /* calculate the available space */
    bw_avail = (*bottle_bw * rtt) / 1000;
    if (bw_avail > *bottle_bw) {
        /*
         * Cap the growth to no more than the bottleneck. This can
         * happen as RTT slides up due to queues. It also means that
         * if you have more than a 1 second RTT with an empty queue
         * you will be limited to the bottle_bw per second no matter
         * if other points have 1/2 the RTT and you could get more
         * out...
         */
        bw_avail = *bottle_bw;
    }
    if (*on_queue > bw_avail) {
        /*
         * No room for anything else; don't allow anything else to
         * be "added to the fire".
         */
        int seg_inflight, seg_onqueue, my_portion;

        net->partial_bytes_acked = 0;

        /* how much are we over the queue size? */
        incr = *on_queue - bw_avail;
        if (stcb->asoc.seen_a_sack_this_pkt) {
            /*
             * undo any cwnd adjustment that the sack might have
             * made
             */
            net->cwnd = net->prev_cwnd;
        }
        /* Now how much of that is mine? */
        seg_inflight = net->flight_size / net->mtu;
        seg_onqueue = *on_queue / net->mtu;
        my_portion = (incr * seg_inflight) / seg_onqueue;

        /* Have I made an adjustment already? */
        if (net->cwnd > net->flight_size) {
            /*
             * For this flight we made an adjustment; we need to
             * decrease the portion by a share of our previous
             * adjustment.
             */
            int diff_adj;

            diff_adj = net->cwnd - net->flight_size;
            if (diff_adj > my_portion)
                my_portion = 0;
            else
                my_portion -= diff_adj;
        }
        /*
         * Back down to the previous cwnd (assume we have had a sack
         * before this packet), minus whatever portion of the
         * overage is my fault.
         */
        net->cwnd -= my_portion;

        /* we will NOT back down more than 1 MTU */
        if (net->cwnd <= net->mtu) {
            net->cwnd = net->mtu;
        }
        /* force into CA */
        net->ssthresh = net->cwnd - 1;
    } else {
        /*
         * Take 1/4 of the space left or max burst, whichever is
         * less.
         */
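        /*
         * Illustrative numbers for the growth case below: with
         * bw_avail = 100000 bytes and *on_queue = 60000, the spare
         * room is 40000 bytes and we take a quarter of it,
         * incr = 10000, still subject to the max_burst cap.
         */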
        incr = (bw_avail - *on_queue) >> 2;
        if ((stcb->asoc.max_burst > 0) &&
            (stcb->asoc.max_burst * net->mtu < incr)) {
            incr = stcb->asoc.max_burst * net->mtu;
        }
        net->cwnd += incr;
    }
    if (net->cwnd > bw_avail) {
        /* We can't exceed the pipe size */
        net->cwnd = bw_avail;
    }
    if (net->cwnd < net->mtu) {
        /* We always have 1 MTU */
        net->cwnd = net->mtu;
    }
    if (net->cwnd - old_cwnd != 0) {
        /* log only changes */
        SDT_PROBE(sctp, cwnd, net, pd,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                SCTP_CWND_LOG_FROM_SAT);
        }
    }
}

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
    int old_cwnd = net->cwnd;

    if (net->ssthresh < net->cwnd)
        net->ssthresh = net->cwnd;
    if (burst_limit) {
        net->cwnd = (net->flight_size + (burst_limit * net->mtu));
        SDT_PROBE(sctp, cwnd, net, bl,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
        }
    }
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a zero as the last argument disables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    /* Passing a zero as the last argument disables the rtcc algorithm */
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/* Here starts the RTCCVAR type CC invented by RRS, which
 * is a slight mod to RFC 2581. We reuse a common routine or
 * two since these algorithms are so close and need to
 * remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set > 0) {
        /* We had a bw measurement going on */
        struct timeval ltls;

        SCTP_GETPTIME_TIMEVAL(&ltls);
        timevalsub(&ltls, &net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
    }
}
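/*
 * Descriptive note: new_tot_time computed above is the elapsed
 * wall-clock time (in usec) of the current measurement window;
 * together with bw_bytes it yields the nbw that the SACK handler
 * feeds to cc_bw_limit().
 */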
static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    if (net->cc_mod.rtcc.lbw) {
        /* Clear the old bw; we went to 0 in-flight */
        vtag = (net->rtt << 32) | (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
            (stcb->rport);
        probepoint = (((uint64_t) net->cwnd) << 32);
        /* Probe point 8 */
        probepoint |= ((8 << 16) | 0);
        SDT_PROBE(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | 0),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        net->cc_mod.rtcc.lbw_rtt = 0;
        net->cc_mod.rtcc.cwnd_at_bw_set = 0;
        net->cc_mod.rtcc.lbw = 0;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
        net->cc_mod.rtcc.bw_tot_time = 0;
        net->cc_mod.rtcc.bw_bytes = 0;
        net->cc_mod.rtcc.tls_needs_set = 0;
        if (net->cc_mod.rtcc.steady_step) {
            net->cc_mod.rtcc.vol_reduce = 0;
            net->cc_mod.rtcc.step_cnt = 0;
            net->cc_mod.rtcc.last_step_state = 0;
        }
        if (net->cc_mod.rtcc.ret_from_eq) {
            /* less aggressive one - reset cwnd too */
            uint32_t cwnd_in_mtu, cwnd;

            cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
            if (cwnd_in_mtu == 0) {
                /*
                 * Using 0 means that the value of RFC 4960
                 * is used.
                 */
                cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
            } else {
                /*
                 * We take the minimum of the burst limit
                 * and the initial congestion window.
                 */
                if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
                    cwnd_in_mtu = stcb->asoc.max_burst;
                cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
            }
            if (net->cwnd > cwnd) {
                /*
                 * Only set if we are not a timeout (i.e.
                 * down to 1 mtu)
                 */
                net->cwnd = cwnd;
            }
        }
    }
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    sctp_set_initial_cc_param(stcb, net);
    stcb->asoc.use_precise_time = 1;
    probepoint = (((uint64_t) net->cwnd) << 32);
    probepoint |= ((9 << 16) | 0);
    vtag = (net->rtt << 32) |
        (((uint32_t) (stcb->sctp_ep->sctp_lport)) << 16) |
        (stcb->rport);
    SDT_PROBE(sctp, cwnd, net, rttvar,
        vtag,
        0,
        0,
        0,
        probepoint);
    net->cc_mod.rtcc.lbw_rtt = 0;
    net->cc_mod.rtcc.cwnd_at_bw_set = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.lbw = 0;
    net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
    net->cc_mod.rtcc.bw_tot_time = 0;
    net->cc_mod.rtcc.bw_bytes = 0;
    net->cc_mod.rtcc.tls_needs_set = 0;
    net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
    net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
    net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
    net->cc_mod.rtcc.step_cnt = 0;
    net->cc_mod.rtcc.last_step_state = 0;
}
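/*
 * Usage sketch (illustrative; the option names are those handled
 * below): an application that selected this module can tune it at
 * run time through sctp_cwnd_rtcc_socket_option(), e.g. setting
 * SCTP_CC_OPT_STEADY_STEP to a non-zero value to enable the periodic
 * step-down probing.
 */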
static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
    struct sctp_nets *net;

    if (setorget == 1) {
        /* a set */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
            }
        } else {
            return (EINVAL);
        }
    } else {
        /* a get */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
        } else {
            return (EINVAL);
        }
    }
    return (0);
}

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set == 0) {
        SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.tls_needs_set = 2;
    }
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a one as the last argument enables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
    net->cc_mod.rtcc.rtt_set_this_sack = 1;
}

/* Here starts Sally Floyd's HS-TCP */

struct sctp_hs_raise_drop {
    int32_t cwnd;
    int32_t increase;
    int32_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73

struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
    {38, 1, 50},		/* 0 */
    {118, 2, 44},		/* 1 */
    {221, 3, 41},		/* 2 */
    {347, 4, 38},		/* 3 */
    {495, 5, 37},		/* 4 */
    {663, 6, 35},		/* 5 */
    {851, 7, 34},		/* 6 */
    {1058, 8, 33},		/* 7 */
    {1284, 9, 32},		/* 8 */
    {1529, 10, 31},		/* 9 */
    {1793, 11, 30},		/* 10 */
    {2076, 12, 29},		/* 11 */
    {2378, 13, 28},		/* 12 */
    {2699, 14, 28},		/* 13 */
    {3039, 15, 27},		/* 14 */
    {3399, 16, 27},		/* 15 */
    {3778, 17, 26},		/* 16 */
    {4177, 18, 26},		/* 17 */
    {4596, 19, 25},		/* 18 */
    {5036, 20, 25},		/* 19 */
    {5497, 21, 24},		/* 20 */
    {5979, 22, 24},		/* 21 */
    {6483, 23, 23},		/* 22 */
    {7009, 24, 23},		/* 23 */
    {7558, 25, 22},		/* 24 */
    {8130, 26, 22},		/* 25 */
    {8726, 27, 22},		/* 26 */
    {9346, 28, 21},		/* 27 */
    {9991, 29, 21},		/* 28 */
    {10661, 30, 21},		/* 29 */
    {11358, 31, 20},		/* 30 */
    {12082, 32, 20},		/* 31 */
    {12834, 33, 20},		/* 32 */
    {13614, 34, 19},		/* 33 */
    {14424, 35, 19},		/* 34 */
    {15265, 36, 19},		/* 35 */
    {16137, 37, 19},		/* 36 */
    {17042, 38, 18},		/* 37 */
    {17981, 39, 18},		/* 38 */
    {18955, 40, 18},		/* 39 */
    {19965, 41, 17},		/* 40 */
    {21013, 42, 17},		/* 41 */
    {22101, 43, 17},		/* 42 */
    {23230, 44, 17},		/* 43 */
    {24402, 45, 16},		/* 44 */
    {25618, 46, 16},		/* 45 */
    {26881, 47, 16},		/* 46 */
    {28193, 48, 16},		/* 47 */
    {29557, 49, 15},		/* 48 */
    {30975, 50, 15},		/* 49 */
    {32450, 51, 15},		/* 50 */
    {33986, 52, 15},		/* 51 */
    {35586, 53, 14},		/* 52 */
    {37253, 54, 14},		/* 53 */
    {38992, 55, 14},		/* 54 */
    {40808, 56, 14},		/* 55 */
    {42707, 57, 13},		/* 56 */
    {44694, 58, 13},		/* 57 */
    {46776, 59, 13},		/* 58 */
    {48961, 60, 13},		/* 59 */
    {51258, 61, 13},		/* 60 */
    {53677, 62, 12},		/* 61 */
    {56230, 63, 12},		/* 62 */
    {58932, 64, 12},		/* 63 */
    {61799, 65, 12},		/* 64 */
    {64851, 66, 11},		/* 65 */
    {68113, 67, 11},		/* 66 */
    {71617, 68, 11},		/* 67 */
    {75401, 69, 10},		/* 68 */
    {79517, 70, 10},		/* 69 */
    {84035, 71, 10},		/* 70 */
    {89053, 72, 10},		/* 71 */
    {94717, 73, 9}		/* 72 */
};

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx, incr;

    cur_val = net->cwnd >> 10;
    indx = SCTP_HS_TABLE_SIZE - 1;
#ifdef SCTP_DEBUG
    printf("HS CC called.\n");
#endif
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        if (net->net_ack > net->mtu) {
            net->cwnd += net->mtu;
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
            }
        } else {
            net->cwnd += net->net_ack;
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
            }
        }
    } else {
        for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
            if (cur_val < sctp_cwnd_adjust[i].cwnd) {
                indx = i;
                break;
            }
        }
        net->last_hs_used = indx;
        incr = ((sctp_cwnd_adjust[indx].increase) << 10);
        net->cwnd += incr;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
        }
    }
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        net->ssthresh = net->cwnd / 2;
        if (net->ssthresh < (net->mtu * 2)) {
            net->ssthresh = 2 * net->mtu;
        }
        net->cwnd = net->ssthresh;
    } else {
        /* drop by the proper amount */
        net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
            sctp_cwnd_adjust[net->last_hs_used].drop_percent);
        net->cwnd = net->ssthresh;
        /* now where are we? */
        indx = net->last_hs_used;
        cur_val = net->cwnd >> 10;
        /* reset where we are in the table */
        if (cur_val < sctp_cwnd_adjust[0].cwnd) {
            /* fell out of HS */
            net->last_hs_used = 0;
        } else {
            for (i = indx; i >= 1; i--) {
                if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
                    break;
                }
            }
            net->last_hs_used = indx;
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
    }
}
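/*
 * Worked example for the table above (illustrative): a cwnd of 6 MB
 * gives cur_val = 6291456 >> 10 = 6144, which first drops below a
 * table entry at row 22 (6483), so indx = 22: grow by 23 << 10 bytes
 * per adjustment and, on loss, drop by 23 percent.
 */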
static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Needs debugging. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of an RFC 2582 fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per RFC 4960 Section 7.2.3: were there any
                 * destinations that had a fast retransmit sent
                 * to them? If so, we need to adjust ssthresh
                 * and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;

                sctp_hs_cwnd_decrease(stcb, net);

                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on the fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC 2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}
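/*
 * Descriptive note: the SACK handler below mirrors
 * sctp_cwnd_update_after_sack_common() but swaps the slow-start
 * growth for the table-driven sctp_hs_cwnd_increase() and keeps
 * plain one-MTU growth in congestion avoidance.
 */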
static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
    struct sctp_nets *net;

    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Needs debugging.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination, skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd update
             */
            return;
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    sctp_hs_cwnd_increase(stcb, net);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                net->partial_bytes_acked += net->net_ack;
                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    net->cwnd += net->mtu;
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return seq3 - seq2 >= seq1 - seq2;
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
    return sctp_get_tick_count() - ca->last_cong;
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
    return htcp_cong_time(ca) / ca->minRTT;
}

static inline void
htcp_reset(struct htcp *ca)
{
    ca->undo_last_cong = ca->last_cong;
    ca->undo_maxRTT = ca->maxRTT;
    ca->undo_old_maxB = ca->old_maxB;
    ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
    net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
    net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
    return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu);
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
    uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

    /* keep track of the minimum RTT seen so far; minRTT is zero at first */
    if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
        net->cc_mod.htcp_ca.minRTT = srtt;

    /* max RTT */
    if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
        if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
            net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
        if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
            net->cc_mod.htcp_ca.maxRTT = srtt;
    }
}
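/*
 * Descriptive note: the minRTT/maxRTT pair collected above drives the
 * adaptive backoff: htcp_beta_update() below steers beta toward
 * minRTT/maxRTT (scaled by 128 and clamped to [BETA_MIN, BETA_MAX]),
 * so a nearly empty queue backs off less than a full one.
 */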

static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}
	net->cc_mod.htcp_ca.bytecount += net->net_ack;

	/* GCC "?:" extension: alpha >> 7 if it is nonzero, else 1. */
	if (net->cc_mod.htcp_ca.bytecount >= net->cwnd - ((net->cc_mod.htcp_ca.alpha >> 7 ? : 1) * net->mtu)
	    && now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT
	    && net->cc_mod.htcp_ca.minRTT > 0) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			/* Smooth Bi: 3/4 old value plus 1/4 new sample. */
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}

static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		/*
		 * If the measured bandwidth moved by more than 20% in
		 * either direction, fall back to BETA_MIN and re-probe.
		 */
		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}
	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t) hz) {
		diff -= hz;
		/* H-TCP increase: 1 + 10*delta + (delta/2)^2, delta in seconds */
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}
	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
								 * interval [0.5,10]<<3 */
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}
	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
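
/*
 * Fixed-point worked example for the two updates above (illustrative
 * numbers): with minRTT == 30 ticks and maxRTT == 40 ticks,
 * htcp_beta_update() yields beta = (30 << 7) / 40 = 96, i.e.
 * 96 / 128 = 0.75, subject to the [BETA_MIN, BETA_MAX] clamp.
 * htcp_alpha_update() with factor == 1 then gives
 * alpha = 2 * 1 * (128 - 96) = 64, i.e. 64 / 128 = 0.5 MTU of additive
 * increase per RTT. The << 7 shifts are this module's 1/128
 * fixed-point convention.
 */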

/* After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from consistent
 * data.
 *
 * This function should be called when we hit a congestion event, since only
 * at that point do we have a real sense of maxRTT (the queues en route
 * were getting just too full).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/*
	 * add slowly fading memory for maxRTT to accommodate routing
	 * changes etc
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	/* Multiplicative backoff: cwnd * beta / 128, floored at 2 MTU. */
	return max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
}

static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *		return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(net);

		/*
		 * In dangerous area, increase slowly. In theory this is
		 * net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			htcp_alpha_update(&net->cc_mod.htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	}
}

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return net->ssthresh;
}

#endif

static void
htcp_init(struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}
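
/*
 * The initial window set below follows RFC 4960, Section 7.2.1:
 * min(4 * MTU, max(2 * MTU, initial cwnd)). Worked example with a
 * 1500-octet MTU (and assuming SCTP_INITIAL_CWND carries the RFC's
 * 4380-octet value): min(6000, max(3000, 4380)) = 4380 octets,
 * i.e. roughly 3 MTU.
 */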

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Start with the larger of 2 MTU and SCTP_INITIAL_CWND, but
	 * never more than 4 MTU of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* If nothing was acked on this destination, skip it. */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update.
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}
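
/*
 * In sctp_htcp_cwnd_update_after_fr() below, the window is cut
 * multiplicatively via htcp_recalc_ssthresh(). Worked example
 * (illustrative numbers): cwnd == 20 * mtu and beta == 96 give
 * ssthresh = ((20 * 96) >> 7) * mtu = 15 * mtu, a 0.75 backoff
 * (floored at 2 * mtu); cwnd is then set to ssthresh.
 */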

static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per Section 7.2.3: check whether any
				 * destinations had a fast retransmit to
				 * them. If so, adjust their ssthresh and
				 * cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Record that we WOULD have done a cwnd reduction
			 * here, but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}

struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
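	/*
	 * The entries below override only the hooks that differ from the
	 * RFC 4960 default above: HS-TCP swaps in its own SACK and FR
	 * handlers, H-TCP replaces most hooks, and the RTCC variant adds
	 * the extra rate-tracking callbacks. Indexing is assumed to follow
	 * the SCTP_CC_* constants (SCTP_CC_RFC2581, SCTP_CC_HSTCP,
	 * SCTP_CC_HTCP, SCTP_CC_RTCC) from <netinet/sctp.h>.
	 */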
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
		.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
	}
};
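
/*
 * Usage sketch (user space, illustrative only; assumes the
 * SCTP_PLUGGABLE_CC socket option, the SCTP_CC_* constants, and
 * struct sctp_assoc_value from <netinet/sctp.h>): select the H-TCP
 * module above for a socket's future associations.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <string.h>
 *
 *	static int
 *	use_htcp(int sd)
 *	{
 *		struct sctp_assoc_value av;
 *
 *		memset(&av, 0, sizeof(av));
 *		av.assoc_value = SCTP_CC_HTCP;
 *		return (setsockopt(sd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC,
 *		    &av, sizeof(av)));
 *	}
 */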