/*-
 * Copyright (c) 2007-2008
 *	Swinburne University of Technology, Melbourne, Australia
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart and
 * James Healy, made possible in part by a grant from the Cisco University
 * Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the H-TCP congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-leith-tcp-htcp-06.txt" by Leith and
 * Shorten. Originally released as part of the NewTCP research project at
 * Swinburne University of Technology's Centre for Advanced Internet
 * Architectures, Melbourne, Australia, which was made possible in part by a
 * grant from the Cisco University Research Program Fund at Community
 * Foundation Silicon Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/cc.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#include <netinet/cc/cc_module.h>

/* Fixed point math shifts. */
#define HTCP_SHIFT              8
#define HTCP_ALPHA_INC_SHIFT    4

#define HTCP_INIT_ALPHA         1
#define HTCP_DELTA_L            hz      /* 1 sec in ticks. */
#define HTCP_MINBETA            128     /* 0.5 << HTCP_SHIFT. */
#define HTCP_MAXBETA            204     /* ~0.8 << HTCP_SHIFT. */
#define HTCP_MINROWE            26      /* ~0.1 << HTCP_SHIFT. */
#define HTCP_MAXROWE            512     /* 2 << HTCP_SHIFT. */
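
/*
 * Worked example of the fixed point representation above (illustrative only,
 * not part of the original algorithm description): with HTCP_SHIFT = 8, a
 * real value x is stored as the integer x * 2^8, so HTCP_MINBETA = 128
 * represents 0.5 (128 / 256), HTCP_MAXBETA = 204 represents ~0.8 (204 / 256
 * = 0.797) and HTCP_MAXROWE = 512 represents 2.0. A multiplicative decrease
 * such as cwnd * beta is therefore computed as (cwnd * beta) >> HTCP_SHIFT,
 * e.g. with beta = HTCP_MINBETA, (cwnd * 128) >> 8 == cwnd / 2.
 */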

/* RTT_ref (ms) used in the calculation of alpha if RTT scaling is enabled. */
#define HTCP_RTT_REF            100

/* Don't trust SRTT until this many samples have been taken. */
#define HTCP_MIN_RTT_SAMPLES    8

/*
 * HTCP_CALC_ALPHA performs a fixed point math calculation to determine the
 * value of alpha, based on the function defined in the HTCP spec.
 *
 * i.e. 1 + 10(delta - delta_l) + ((delta - delta_l) / 2) ^ 2
 *
 * "diff" is passed in to the macro as "delta - delta_l" and is expected to
 * be in units of ticks.
 *
 * The joyousness of fixed point maths means our function implementation
 * looks a little funky...
 *
 * In order to maintain some precision in the calculations, a fixed point
 * shift HTCP_ALPHA_INC_SHIFT is used to ensure the integer divisions don't
 * truncate the results too badly.
 *
 * The "16" value is the "1" term in the alpha function shifted up by
 * HTCP_ALPHA_INC_SHIFT.
 *
 * The "160" value is the "10" multiplier in the alpha function multiplied by
 * 2^HTCP_ALPHA_INC_SHIFT.
 *
 * Specifying these as constants reduces the computations required. After
 * up-shifting all the terms in the function and performing the required
 * calculations, we down-shift the final result by HTCP_ALPHA_INC_SHIFT to
 * ensure it is back in the correct range.
 *
 * The "hz" terms are required as kernels can be configured to run with
 * different tick timers, which we have to adjust for in the alpha
 * calculation (which was originally defined in terms of seconds).
 *
 * We also have to be careful to constrain the value of diff such that it
 * won't overflow whilst performing the calculation. The middle term, i.e.
 * (160 * diff) / hz, is the limiting factor in the calculation, so we must
 * constrain diff to be less than the max size of an int divided by the
 * constant 160, i.e. diff < INT_MAX / 160.
 *
 * NB: Changing HTCP_ALPHA_INC_SHIFT will require you to MANUALLY update the
 * constants used in this function!
 */
#define HTCP_CALC_ALPHA(diff) \
((\
        (16) + \
        ((160 * (diff)) / hz) + \
        (((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
) >> HTCP_ALPHA_INC_SHIFT)
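
/*
 * Worked example of the macro above (illustrative only; hz = 1000, the
 * default kern.hz, is assumed): for diff = 2000 ticks (2 seconds past
 * delta_l) the spec function gives 1 + 10 * 2 + (2 / 2)^2 = 22. The fixed
 * point version computes
 * (16 + (160 * 2000) / 1000 + ((2000 / 1000) * ((2000 << 4) / 4000))) >> 4
 * = (16 + 320 + 2 * 8) >> 4 = 352 >> 4 = 22, matching the continuous
 * calculation.
 */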

static void     htcp_ack_received(struct cc_var *ccv, uint16_t type);
static void     htcp_cb_destroy(struct cc_var *ccv);
static int      htcp_cb_init(struct cc_var *ccv);
static void     htcp_cong_signal(struct cc_var *ccv, uint32_t type);
static int      htcp_mod_init(void);
static void     htcp_post_recovery(struct cc_var *ccv);
static void     htcp_recalc_alpha(struct cc_var *ccv);
static void     htcp_recalc_beta(struct cc_var *ccv);
static void     htcp_record_rtt(struct cc_var *ccv);
static void     htcp_ssthresh_update(struct cc_var *ccv);

struct htcp {
        /* cwnd before entering cong recovery. */
        unsigned long   prev_cwnd;
        /* cwnd additive increase parameter. */
        int             alpha;
        /* cwnd multiplicative decrease parameter. */
        int             beta;
        /* Largest rtt seen for the flow. */
        int             maxrtt;
        /* Shortest rtt seen for the flow. */
        int             minrtt;
        /* Time of last congestion event in ticks. */
        int             t_last_cong;
};

static int htcp_rtt_ref;
/*
 * The maximum number of ticks the value of diff can reach in
 * htcp_recalc_alpha() before alpha will stop increasing due to overflow.
 * See comment above HTCP_CALC_ALPHA for more info.
 */
static int htcp_max_diff = INT_MAX / ((1 << HTCP_ALPHA_INC_SHIFT) * 10);

/* Per-netstack vars. */
static VNET_DEFINE(u_int, htcp_adaptive_backoff) = 0;
static VNET_DEFINE(u_int, htcp_rtt_scaling) = 0;
#define V_htcp_adaptive_backoff VNET(htcp_adaptive_backoff)
#define V_htcp_rtt_scaling      VNET(htcp_rtt_scaling)

static MALLOC_DEFINE(M_HTCP, "htcp data",
    "Per connection data required for the HTCP congestion control algorithm");

struct cc_algo htcp_cc_algo = {
        .name = "htcp",
        .ack_received = htcp_ack_received,
        .cb_destroy = htcp_cb_destroy,
        .cb_init = htcp_cb_init,
        .cong_signal = htcp_cong_signal,
        .mod_init = htcp_mod_init,
        .post_recovery = htcp_post_recovery,
};

static void
htcp_ack_received(struct cc_var *ccv, uint16_t type)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;
        htcp_record_rtt(ccv);

        /*
         * Regular ACK and we're not in cong/fast recovery and we're cwnd
         * limited and we're either not doing ABC or are slow starting or are
         * doing ABC and we've sent a cwnd's worth of bytes.
         */
        if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
            (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
            CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
            (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
                htcp_recalc_beta(ccv);
                htcp_recalc_alpha(ccv);
                /*
                 * Use the logic in NewReno ack_received() for slow start and
                 * for the first HTCP_DELTA_L ticks after either the flow
                 * starts or a congestion event (when alpha equals 1).
                 */
                if (htcp_data->alpha == 1 ||
                    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh))
                        newreno_cc_algo.ack_received(ccv, type);
                else {
                        if (V_tcp_do_rfc3465) {
                                /* Increment cwnd by alpha segments. */
                                CCV(ccv, snd_cwnd) += htcp_data->alpha *
                                    CCV(ccv, t_maxseg);
                                ccv->flags &= ~CCF_ABC_SENTAWND;
                        } else
                                /*
                                 * Increment cwnd by alpha/cwnd segments to
                                 * approximate an increase of alpha segments
                                 * per RTT.
                                 */
                                CCV(ccv, snd_cwnd) += (((htcp_data->alpha <<
                                    HTCP_SHIFT) / (CCV(ccv, snd_cwnd) /
                                    CCV(ccv, t_maxseg))) * CCV(ccv, t_maxseg))
                                    >> HTCP_SHIFT;
                }
        }
}
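
/*
 * Worked example of the non-ABC increment above (illustrative only; the
 * figures and the one-ACK-per-segment assumption are not from the original
 * sources): with alpha = 22 and snd_cwnd equal to 100 segments, each ACK
 * adds (((22 << 8) / 100) * t_maxseg) >> 8, roughly 0.22 * t_maxseg.
 * Assuming one ACK per segment, the ~100 ACKs received during one RTT grow
 * cwnd by approximately alpha (22) segments, which the RFC 3465 branch
 * achieves directly by adding alpha segments once per cwnd's worth of
 * acked bytes.
 */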

static void
htcp_cb_destroy(struct cc_var *ccv)
{

        if (ccv->cc_data != NULL)
                free(ccv->cc_data, M_HTCP);
}

static int
htcp_cb_init(struct cc_var *ccv)
{
        struct htcp *htcp_data;

        htcp_data = malloc(sizeof(struct htcp), M_HTCP, M_NOWAIT);

        if (htcp_data == NULL)
                return (ENOMEM);

        /* Init some key variables with sensible defaults. */
        htcp_data->alpha = HTCP_INIT_ALPHA;
        htcp_data->beta = HTCP_MINBETA;
        htcp_data->maxrtt = TCPTV_SRTTBASE;
        htcp_data->minrtt = TCPTV_SRTTBASE;
        htcp_data->prev_cwnd = 0;
        htcp_data->t_last_cong = ticks;

        ccv->cc_data = htcp_data;

        return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
htcp_cong_signal(struct cc_var *ccv, uint32_t type)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;

        switch (type) {
        case CC_NDUPACK:
                if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
                        if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
                                /*
                                 * Apply hysteresis to maxrtt to ensure
                                 * reductions in the RTT are reflected in our
                                 * measurements.
                                 */
                                htcp_data->maxrtt = (htcp_data->minrtt +
                                    (htcp_data->maxrtt - htcp_data->minrtt) *
                                    95) / 100;
                                htcp_ssthresh_update(ccv);
                                htcp_data->t_last_cong = ticks;
                                htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
                        }
                        ENTER_RECOVERY(CCV(ccv, t_flags));
                }
                break;

        case CC_ECN:
                if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
                        /*
                         * Apply hysteresis to maxrtt to ensure reductions in
                         * the RTT are reflected in our measurements.
                         */
                        htcp_data->maxrtt = (htcp_data->minrtt +
                            (htcp_data->maxrtt - htcp_data->minrtt) * 95) /
                            100;
                        htcp_ssthresh_update(ccv);
                        CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
                        htcp_data->t_last_cong = ticks;
                        htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
                        ENTER_CONGRECOVERY(CCV(ccv, t_flags));
                }
                break;

        case CC_RTO:
                /*
                 * Grab the current time and record it so we know when the
                 * most recent congestion event was. Only record it when the
                 * timeout has fired more than once, as there is a reasonable
                 * chance the first one is a false alarm and may not indicate
                 * congestion.
                 */
                if (CCV(ccv, t_rxtshift) >= 2)
                        htcp_data->t_last_cong = ticks;
                break;
        }
}

static int
htcp_mod_init(void)
{

        htcp_cc_algo.after_idle = newreno_cc_algo.after_idle;

        /*
         * HTCP_RTT_REF is defined in ms, and t_srtt in the tcpcb is stored
         * in units of TCP_RTT_SCALE*hz. Scale HTCP_RTT_REF to be in the same
         * units as t_srtt.
         */
        htcp_rtt_ref = (HTCP_RTT_REF * TCP_RTT_SCALE * hz) / 1000;

        return (0);
}
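
/*
 * Worked example of the scaling above (illustrative only; hz = 1000 and the
 * usual TCP_RTT_SCALE value of 32 are assumed): htcp_rtt_ref =
 * (100 * 32 * 1000) / 1000 = 3200. A connection with a smoothed RTT of
 * 100 ms has t_srtt of approximately 100 ticks * 32 = 3200, so the RTT
 * scaling factor computed in htcp_recalc_alpha(),
 * (t_srtt << HTCP_SHIFT) / htcp_rtt_ref, is ~256, i.e. 1.0 in fixed point,
 * leaving alpha unchanged for a flow at the reference RTT.
 */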

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
htcp_post_recovery(struct cc_var *ccv)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;

        if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
                /*
                 * If inflight data is less than ssthresh, set cwnd
                 * conservatively to avoid a burst of data, as suggested in
                 * the NewReno RFC. Otherwise, use the HTCP method.
                 *
                 * XXXLAS: Find a way to do this without needing curack
                 */
                if (SEQ_GT(ccv->curack + CCV(ccv, snd_ssthresh),
                    CCV(ccv, snd_max)))
                        CCV(ccv, snd_cwnd) = CCV(ccv, snd_max) -
                            ccv->curack + CCV(ccv, t_maxseg);
                else
                        CCV(ccv, snd_cwnd) = max(1, ((htcp_data->beta *
                            htcp_data->prev_cwnd / CCV(ccv, t_maxseg))
                            >> HTCP_SHIFT)) * CCV(ccv, t_maxseg);
        }
}

static void
htcp_recalc_alpha(struct cc_var *ccv)
{
        struct htcp *htcp_data;
        int alpha, diff, now;

        htcp_data = ccv->cc_data;
        now = ticks;

        /*
         * If ticks has wrapped around (will happen approximately once every
         * 49 days on a machine with the default kern.hz=1000) and a flow
         * straddles the wrap point, our alpha calcs will be completely
         * wrong. We cut our losses and restart alpha from scratch by setting
         * t_last_cong = now - HTCP_DELTA_L.
         *
         * This does not deflate our cwnd at all. It simply slows the rate
         * cwnd is growing by until alpha regains the value it held prior to
         * taking this drastic measure.
         */
        if (now < htcp_data->t_last_cong)
                htcp_data->t_last_cong = now - HTCP_DELTA_L;

        diff = now - htcp_data->t_last_cong - HTCP_DELTA_L;

        /* Cap alpha if the value of diff would overflow HTCP_CALC_ALPHA(). */
        if (diff < htcp_max_diff) {
                /*
                 * If it has been more than HTCP_DELTA_L ticks since
                 * congestion, increase alpha according to the function
                 * defined in the spec.
                 */
                if (diff > 0) {
                        alpha = HTCP_CALC_ALPHA(diff);

                        /*
                         * Adaptive backoff fairness adjustment:
                         * 2 * (1 - beta) * alpha_raw
                         */
                        if (V_htcp_adaptive_backoff)
                                alpha = max(1, (2 * ((1 << HTCP_SHIFT) -
                                    htcp_data->beta) * alpha) >> HTCP_SHIFT);

                        /*
                         * RTT scaling: (RTT / RTT_ref) * alpha
                         * alpha will be the raw value from HTCP_CALC_ALPHA()
                         * if adaptive backoff is off, or the adjusted value
                         * if adaptive backoff is on.
                         */
                        if (V_htcp_rtt_scaling)
                                alpha = max(1, (min(max(HTCP_MINROWE,
                                    (CCV(ccv, t_srtt) << HTCP_SHIFT) /
                                    htcp_rtt_ref), HTCP_MAXROWE) * alpha)
                                    >> HTCP_SHIFT);

                } else
                        alpha = 1;

                htcp_data->alpha = alpha;
        }
}

static void
htcp_recalc_beta(struct cc_var *ccv)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;

        /*
         * TCPTV_SRTTBASE is the initialised value of each connection's SRTT,
         * so we only calc beta if the connection's SRTT has been changed
         * from its initial value. beta is bounded to ensure it is always
         * between HTCP_MINBETA and HTCP_MAXBETA.
         */
        if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
            htcp_data->maxrtt != TCPTV_SRTTBASE)
                htcp_data->beta = min(max(HTCP_MINBETA,
                    (htcp_data->minrtt << HTCP_SHIFT) / htcp_data->maxrtt),
                    HTCP_MAXBETA);
        else
                htcp_data->beta = HTCP_MINBETA;
}
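
/*
 * Worked example of the adaptive beta above (illustrative only; the RTT
 * figures are assumptions): beta is minrtt / maxrtt in fixed point, so only
 * the ratio of the two matters and the t_srtt units cancel. For minrtt
 * equivalent to 50 ms and maxrtt to 100 ms, (minrtt << 8) / maxrtt = 128,
 * i.e. beta = 0.5, the standard backoff. For minrtt of 90 ms and maxrtt of
 * 100 ms the raw value would be ~230, which is clamped to HTCP_MAXBETA
 * (204), i.e. a gentler backoff of ~0.8 for a flow whose queueing delay is
 * small relative to its base RTT.
 */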

/*
 * Record the minimum and maximum RTT seen for the connection. These are used
 * in the calculation of beta if adaptive backoff is enabled.
 */
static void
htcp_record_rtt(struct cc_var *ccv)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;

        /* XXXLAS: Should there be some hysteresis for minrtt? */

        /*
         * Record the current SRTT as our minrtt if it's the smallest we've
         * seen or minrtt is currently equal to its initialised value. Ignore
         * SRTT until a min number of samples have been taken.
         */
        if ((CCV(ccv, t_srtt) < htcp_data->minrtt ||
            htcp_data->minrtt == TCPTV_SRTTBASE) &&
            (CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES))
                htcp_data->minrtt = CCV(ccv, t_srtt);

        /*
         * Record the current SRTT as our maxrtt if it's the largest we've
         * seen. Ignore SRTT until a min number of samples have been taken.
         */
        if (CCV(ccv, t_srtt) > htcp_data->maxrtt &&
            CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES)
                htcp_data->maxrtt = CCV(ccv, t_srtt);
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
htcp_ssthresh_update(struct cc_var *ccv)
{
        struct htcp *htcp_data;

        htcp_data = ccv->cc_data;

        /*
         * On the first congestion event, set ssthresh to cwnd * 0.5; on
         * subsequent congestion events, set it to cwnd * beta.
         */
        if (CCV(ccv, snd_ssthresh) == TCP_MAXWIN << TCP_MAX_WINSHIFT)
                CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * HTCP_MINBETA)
                    >> HTCP_SHIFT;
        else {
                htcp_recalc_beta(ccv);
                CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) *
                    htcp_data->beta) >> HTCP_SHIFT;
        }
}


SYSCTL_DECL(_net_inet_tcp_cc_htcp);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, htcp, CTLFLAG_RW, NULL,
    "H-TCP related settings");
SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, adaptive_backoff,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_adaptive_backoff), 0,
    "enable H-TCP adaptive backoff");
SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, rtt_scaling,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_rtt_scaling), 0,
    "enable H-TCP RTT scaling");

DECLARE_CC_MODULE(htcp, &htcp_cc_algo);
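
/*
 * Usage note (illustrative, not part of the original sources): once built
 * and loaded (e.g. "kldload cc_htcp"), the algorithm can be selected with
 * the standard modular congestion control sysctl, e.g.
 * "sysctl net.inet.tcp.cc.algorithm=htcp", and tuned via the per-vnet knobs
 * declared above, net.inet.tcp.cc.htcp.adaptive_backoff and
 * net.inet.tcp.cc.htcp.rtt_scaling.
 */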