/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology, made
 * possible in part by a grant from the Cisco University Research Program Fund
 * at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the CUBIC congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
 * Originally released as part of the NewTCP research project at Swinburne
 * University of Technology's Centre for Advanced Internet Architectures,
 * Melbourne, Australia, which was made possible in part by a grant from the
 * Cisco University Research Program Fund at Community Foundation Silicon
 * Valley.
 * More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_cubic.h>
#include <netinet/cc/cc_module.h>

static void	cubic_ack_received(struct cc_var *ccv, uint16_t type);
static void	cubic_cb_destroy(struct cc_var *ccv);
static int	cubic_cb_init(struct cc_var *ccv);
static void	cubic_cong_signal(struct cc_var *ccv, uint32_t type);
static void	cubic_conn_init(struct cc_var *ccv);
static int	cubic_mod_init(void);
static void	cubic_post_recovery(struct cc_var *ccv);
static void	cubic_record_rtt(struct cc_var *ccv);
static void	cubic_ssthresh_update(struct cc_var *ccv);
static void	cubic_after_idle(struct cc_var *ccv);

struct cubic {
	/* CUBIC K in fixed point form with CUBIC_SHIFT worth of precision. */
	int64_t		K;
	/* Sum of RTT samples across an epoch in ticks. */
	int64_t		sum_rtt_ticks;
	/* cwnd at the most recent congestion event. */
	unsigned long	max_cwnd;
	/* cwnd at the previous congestion event. */
	unsigned long	prev_max_cwnd;
	/* Various flags. */
	uint32_t	flags;
#define	CUBICFLAG_CONG_EVENT	0x00000001	/* congestion experienced */
#define	CUBICFLAG_IN_SLOWSTART	0x00000002	/* in slow start */
#define	CUBICFLAG_IN_APPLIMIT	0x00000004	/* application limited */
	/* Minimum observed RTT in ticks. */
	int		min_rtt_ticks;
	/* Mean observed RTT between congestion epochs. */
	int		mean_rtt_ticks;
	/* ACKs since last congestion event. */
	int		epoch_ack_count;
	/* Time of last congestion event in ticks. */
	int		t_last_cong;
};

static MALLOC_DEFINE(M_CUBIC, "cubic data",
    "Per connection data required for the CUBIC congestion control algorithm");

struct cc_algo cubic_cc_algo = {
	.name = "cubic",
	.ack_received = cubic_ack_received,
	.cb_destroy = cubic_cb_destroy,
	.cb_init = cubic_cb_init,
	.cong_signal = cubic_cong_signal,
	.conn_init = cubic_conn_init,
	.mod_init = cubic_mod_init,
	.post_recovery = cubic_post_recovery,
	.after_idle = cubic_after_idle,
};
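/*
 * For exposition only: a floating-point sketch of the window equations from
 * the I-D which the fixed-point helpers cubic_k(), cubic_cwnd() and tf_cwnd()
 * in cc_cubic.h implement. The constant values and all "example_" names below
 * are illustrative assumptions drawn from draft-rhee-tcpm-cubic-02; they are
 * not part of this module and would only compile in userland.
 */
#if 0
#include <math.h>

#define	EXAMPLE_CUBIC_C		0.4	/* cubic scaling constant */
#define	EXAMPLE_CUBIC_BETA	0.8	/* multiplicative decrease factor */

/* K: time in seconds for the cubic curve to return to wmax segments. */
static double
example_cubic_k(double wmax)
{

	return (cbrt(wmax * (1.0 - EXAMPLE_CUBIC_BETA) / EXAMPLE_CUBIC_C));
}

/* W_cubic(t) = C * (t - K)^3 + wmax, with t in seconds since congestion. */
static double
example_cubic_cwnd(double t, double wmax, double k)
{

	return (EXAMPLE_CUBIC_C * pow(t - k, 3.0) + wmax);
}

/*
 * E.g. for wmax = 100 segments: K = cbrt(100 * 0.2 / 0.4) ~= 3.68s, so the
 * window regrows to 100 segments roughly 3.7 seconds after backing off,
 * growing concavely before K and convexly after it.
 */
#endif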
static void
cubic_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct cubic *cubic_data;
	unsigned long w_tf, w_cubic_next;
	int ticks_since_cong;

	cubic_data = ccv->cc_data;
	cubic_record_rtt(ccv);

	/*
	 * For a regular ACK, when we're not in congestion or fast recovery
	 * and we're cwnd limited, always recalculate cwnd.
	 */
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED)) {
		/* Use the logic in NewReno ack_received() for slow start. */
		if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) {
			cubic_data->flags |= CUBICFLAG_IN_SLOWSTART;
			newreno_cc_algo.ack_received(ccv, type);
		} else {
			if (cubic_data->flags & (CUBICFLAG_IN_SLOWSTART |
			    CUBICFLAG_IN_APPLIMIT)) {
				cubic_data->flags &= ~(CUBICFLAG_IN_SLOWSTART |
				    CUBICFLAG_IN_APPLIMIT);
				cubic_data->t_last_cong = ticks;
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
			if ((ticks_since_cong =
			    ticks - cubic_data->t_last_cong) < 0) {
				/*
				 * The ticks counter wrapped; clamp the
				 * interval and drag t_last_cong along.
				 */
				ticks_since_cong = INT_MAX;
				cubic_data->t_last_cong = ticks - INT_MAX;
			}
			/*
			 * The mean RTT is used to best reflect the equations in
			 * the I-D. Using min_rtt in the tf_cwnd calculation
			 * causes w_tf to grow much faster than it should if the
			 * RTT is dominated by network buffering rather than
			 * propagation delay.
			 */
			w_tf = tf_cwnd(ticks_since_cong,
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, t_maxseg));

			w_cubic_next = cubic_cwnd(ticks_since_cong +
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, t_maxseg), cubic_data->K);

			ccv->flags &= ~CCF_ABC_SENTAWND;

			if (w_cubic_next < w_tf) {
				/*
				 * TCP-friendly region: follow the
				 * TCP-friendly cwnd growth.
				 */
				if (CCV(ccv, snd_cwnd) < w_tf)
					CCV(ccv, snd_cwnd) = ulmin(w_tf,
					    INT_MAX);
			} else if (CCV(ccv, snd_cwnd) < w_cubic_next) {
				/*
				 * Concave or convex region: follow CUBIC
				 * cwnd growth.
				 * Only update snd_cwnd if it doesn't shrink.
				 */
				CCV(ccv, snd_cwnd) = ulmin(w_cubic_next,
				    INT_MAX);
			}

			/*
			 * If we're not in slow start and we're probing for a
			 * new cwnd limit at the start of a connection
			 * (happens when the hostcache has a relevant entry),
			 * keep updating our current estimate of max_cwnd.
			 */
			if (((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) &&
			    cubic_data->max_cwnd < CCV(ccv, snd_cwnd)) {
				cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
		}
	} else if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    !(ccv->flags & CCF_CWND_LIMITED)) {
		cubic_data->flags |= CUBICFLAG_IN_APPLIMIT;
	}
}

/*
 * This is a CUBIC-specific implementation of after_idle.
 *   - Reset cwnd by calling the NewReno implementation of after_idle.
 *   - Reset t_last_cong.
 */
static void
cubic_after_idle(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	cubic_data->max_cwnd = ulmax(cubic_data->max_cwnd, CCV(ccv, snd_cwnd));
	cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg));

	newreno_cc_algo.after_idle(ccv);
	cubic_data->t_last_cong = ticks;
}
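/*
 * An illustrative note on the t_last_cong reset above (the numbers are made
 * up): ticks_since_cong in cubic_ack_received() is measured from t_last_cong,
 * so an idle period would otherwise count as time elapsed on the cubic curve.
 * With wmax = 100 segments (K ~= 3.68s, per the sketch earlier in this file),
 * a 60 second idle gap would land deep in the convex region, i.e.
 * W_cubic(60) = 0.4 * (60 - 3.68)^3 + 100 ~= 71k segments, and cwnd would
 * explode on the first ACK after idle. Restarting the epoch keeps growth
 * anchored to when transmission actually resumed.
 */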
static void
cubic_cb_destroy(struct cc_var *ccv)
{
	free(ccv->cc_data, M_CUBIC);
}

static int
cubic_cb_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO);

	if (cubic_data == NULL)
		return (ENOMEM);

	/* Init some key variables with sensible defaults. */
	cubic_data->t_last_cong = ticks;
	cubic_data->min_rtt_ticks = TCPTV_SRTTBASE;
	cubic_data->mean_rtt_ticks = 1;

	ccv->cc_data = cubic_data;

	return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				cubic_ssthresh_update(ccv);
				cubic_data->flags |= CUBICFLAG_CONG_EVENT;
				cubic_data->t_last_cong = ticks;
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			cubic_ssthresh_update(ccv);
			cubic_data->flags |= CUBICFLAG_CONG_EVENT;
			cubic_data->t_last_cong = ticks;
			cubic_data->K = cubic_k(cubic_data->max_cwnd /
			    CCV(ccv, t_maxseg));
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 * This will put CUBIC firmly into the concave / TCP-friendly
		 * region, for a slower ramp-up after two consecutive RTOs.
		 */
		if (CCV(ccv, t_rxtshift) >= 2) {
			cubic_data->flags |= CUBICFLAG_CONG_EVENT;
			cubic_data->t_last_cong = ticks;
			cubic_data->max_cwnd = CCV(ccv, snd_cwnd_prev);
			cubic_data->K = cubic_k(cubic_data->max_cwnd /
			    CCV(ccv, t_maxseg));
		}
		break;
	}
}

static void
cubic_conn_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * Ensure we have a sane initial value for max_cwnd recorded. Without
	 * it, bad things happen when entries from the TCP hostcache get used.
	 */
	cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
}

static int
cubic_mod_init(void)
{
	return (0);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
cubic_post_recovery(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int pipe;

	cubic_data = ccv->cc_data;
	pipe = 0;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in
		 * the NewReno RFC. Otherwise, use the CUBIC method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_rfc6675_pipe)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions, as per RFC 6582.
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			/* Update cwnd based on beta and adjusted max_cwnd. */
			CCV(ccv, snd_cwnd) = max(((uint64_t)cubic_data->max_cwnd *
			    CUBIC_BETA) >> CUBIC_SHIFT,
			    2 * CCV(ccv, t_maxseg));
	}

	/* Calculate the average RTT between congestion epochs. */
	if (cubic_data->epoch_ack_count > 0 &&
	    cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) {
		cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
		    cubic_data->epoch_ack_count);
	}

	cubic_data->epoch_ack_count = 0;
	cubic_data->sum_rtt_ticks = 0;
}
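/*
 * Illustrative numbers for the deflation in cubic_post_recovery() (made up,
 * not from the I-D): with ssthresh = 20 * MSS and only pipe = 10 * MSS still
 * in flight when recovery ends, cwnd becomes max(10 * MSS, MSS) + MSS =
 * 11 * MSS rather than jumping straight to ssthresh, so at most one extra
 * segment is released per ACK while cwnd grows back toward ssthresh.
 */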
/*
 * Record the min RTT and sum samples for the epoch average RTT calculation.
 */
static void
cubic_record_rtt(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int t_srtt_ticks;

	/* Ignore srtt until a min number of samples have been taken. */
	if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) {
		cubic_data = ccv->cc_data;
		t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE;

		/*
		 * Record the current SRTT as our minrtt if it's the smallest
		 * we've seen or minrtt is currently equal to its initialised
		 * value.
		 *
		 * XXXLAS: Should there be some hysteresis for minrtt?
		 */
		if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) {
			cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);

			/*
			 * If the connection is within its first congestion
			 * epoch, ensure we prime mean_rtt_ticks with a
			 * reasonable value until the epoch average RTT is
			 * calculated in cubic_post_recovery().
			 */
			if (cubic_data->min_rtt_ticks >
			    cubic_data->mean_rtt_ticks)
				cubic_data->mean_rtt_ticks =
				    cubic_data->min_rtt_ticks;
		}

		/* Sum samples for epoch average RTT calculation. */
		cubic_data->sum_rtt_ticks += t_srtt_ticks;
		cubic_data->epoch_ack_count++;
	}
}

/*
 * Update ssthresh in the event of congestion.
 */
static void
cubic_ssthresh_update(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	uint32_t ssthresh;
	uint32_t cwnd;

	cubic_data = ccv->cc_data;
	cwnd = CCV(ccv, snd_cwnd);

	/* Fast convergence heuristic. */
	if (cwnd < cubic_data->max_cwnd) {
		cwnd = ((uint64_t)cwnd * CUBIC_FC_FACTOR) >> CUBIC_SHIFT;
	}
	cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
	cubic_data->max_cwnd = cwnd;

	/*
	 * On the first congestion event, set ssthresh to cwnd * 0.5
	 * and reduce max_cwnd to cwnd * beta. This aligns the cubic concave
	 * region appropriately. On subsequent congestion events, set
	 * ssthresh to cwnd * beta.
	 */
	if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
		ssthresh = cwnd >> 1;
		cubic_data->max_cwnd = ((uint64_t)cwnd *
		    CUBIC_BETA) >> CUBIC_SHIFT;
	} else {
		ssthresh = ((uint64_t)cwnd *
		    CUBIC_BETA) >> CUBIC_SHIFT;
	}
	CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * CCV(ccv, t_maxseg));
}

DECLARE_CC_MODULE(cubic, &cubic_cc_algo);
MODULE_VERSION(cubic, 1);
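/*
 * Usage sketch: once this module is available (built as cc_cubic.ko or
 * compiled into the kernel), it can be selected from userland via the
 * mod_cc(4) framework, e.g.:
 *
 *	kldload cc_cubic
 *	sysctl net.inet.tcp.cc.available	# should now list "cubic"
 *	sysctl net.inet.tcp.cc.algorithm=cubic
 *
 * Per-connection selection is also possible via the TCP_CONGESTION socket
 * option where supported.
 */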