/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 *   On each ACK, update our model of the network path:
 *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *      min_rtt = windowed_min(rtt, 10 seconds)
 *   pacing_rate = pacing_gain * bottleneck_bandwidth
 *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * since pacing is integral to the BBR design and implementation.
 * BBR without pacing would not function properly, and may incur unnecessarily
 * high packet loss rates.
 */
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)
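
/* To make the scaling above concrete (illustrative arithmetic, assuming a
 * 1500-byte packet): one rate unit is 2^-24 pkt/usec = 10^6/2^24 pkt/sec
 * ~= 0.0596 pkt/sec, i.e. ~= 0.0596 * 1500 * 8 ~= 715 bit/sec. At the top of
 * the u32 range, 2^32 units = 2^32/2^24 = 256 pkt/usec = 256 Mpkt/sec, which
 * at 1500 bytes/pkt is roughly 3 Tbit/sec, matching the bounds quoted above.
 */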

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut cwnd to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
	struct skb_mstamp cycle_mstamp;  /* time of this cycle phase start */
	u32     mode:3,		     /* current bbr_mode in state machine */
		prev_ca_state:3,     /* CA state on previous ACK */
		packet_conservation:1,  /* use packet conservation? */
		restore_cwnd:1,	     /* decided to revert cwnd to old value */
		round_start:1,	     /* start of packet-timed tx->ack round? */
		tso_segs_goal:7,     /* segments we want in each skb we send */
		idle_restart:1,	     /* restarting after idle? */
		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
		unused:5,
		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
		lt_rtt_cnt:7,	     /* round trips in long-term interval */
		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
	u32	pacing_gain:10,	/* current gain for setting pacing rate */
		cwnd_gain:10,	/* current gain for setting cwnd */
		full_bw_cnt:3,	/* number of rounds without large bw gains */
		cycle_idx:3,	/* current index in pacing_gain cycle array */
		unused_b:6;
	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
	u32	full_bw;	/* recent bw, to estimate if pipe is full */
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
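
/* For intuition on the fixed-point gains above (illustrative arithmetic):
 * with BBR_UNIT = 256, bbr_high_gain = 256 * 2885 / 1000 + 1 = 739, i.e.
 * 739/256 ~= 2.886, just above 2/ln(2) ~= 2.885, so the rounding never drops
 * the gain below the analytic value. Likewise bbr_drain_gain =
 * 256 * 1000 / 2885 = 88, i.e. 88/256 ~= 0.344, slightly below 1/2.885, so
 * DRAIN paces a touch slower than the exact reciprocal and still drains the
 * STARTUP queue in about one round.
 */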

/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* probe for more available bw */
	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
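
/* Reading the LT thresholds above in plain units (illustrative): with
 * BBR_UNIT = 256, bbr_lt_loss_thresh = 50 corresponds to 50/256 ~= 19.5%
 * lost/delivered, bbr_lt_bw_ratio = 256/8 = 32 corresponds to a 12.5%
 * relative bw difference, and bbr_lt_bw_diff = 4000/8 = 500 bytes/sec is the
 * 4 Kbit/sec absolute-difference cutoff expressed in bytes per second.
 */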

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_cnt >= bbr_full_bw_cnt;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
	rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
	rate *= gain;
	rate >>= BBR_SCALE;
	rate *= USEC_PER_SEC;
	return rate >> BW_SCALE;
}
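
/* Unit check for bbr_rate_bytes_per_sec() (illustrative numbers): a bw of
 * ~16777 units is ~0.001 pkt/usec, i.e. 1000 pkt/sec. With a 1500-byte MTU
 * and gain = BBR_UNIT, the multiplies convert packets to bytes and usec to
 * seconds while the two shifts remove the gain and BW_SCALE factors, giving
 * ~1.5 MByte/sec (~12 Mbit/sec), as expected for 1000 full-sized packets
 * per second.
 */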

/* Pace using current bw estimate and a gain factor. In order to help drive the
 * network toward lower queues while maintaining high utilization and low
 * latency, the average pacing rate aims to be slightly (~1%) lower than the
 * estimated bandwidth. This is an important aspect of the design. In this
 * implementation this slightly lower pacing rate is achieved implicitly by not
 * including link-layer headers in the packet size used for the pacing rate.
 */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u64 rate = bw;

	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = rate;
}

/* Return count of segments we want in the skbs we send, or 0 for default. */
static u32 bbr_tso_segs_goal(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->tso_segs_goal;
}

static void bbr_set_tso_segs_goal(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 min_segs;

	min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
	bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
				 0x7FU);
}

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (event == CA_EVENT_TX_START && tp->app_limited) {
		bbr->idle_restart = 1;
		/* Avoid pointless buffer overflows: pace at est. bw if we don't
		 * need more speed (we're restarting from idle and app-limited).
		 */
		if (bbr->mode == BBR_PROBE_BW)
			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
	}
}

/* Find target cwnd. Right-size the cwnd based on min RTT and the
 * estimated bottleneck bandwidth:
 *
 * cwnd = bw * min_rtt * gain = BDP * gain
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 *
 * To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd;
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using TCP
	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply a gain to the given value, then remove the BW_SCALE shift. */
	cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	/* Allow enough full-sized skbs in flight to utilize end systems. */
	cwnd += 3 * bbr->tso_segs_goal;

	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
	cwnd = (cwnd + 1) & ~1U;

	return cwnd;
}
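
/* Rough BDP sanity check for bbr_target_cwnd() (illustrative numbers): a
 * 100 Mbit/sec path with 1500-byte packets delivers ~8333 pkt/sec, i.e.
 * ~0.0083 pkt/usec. With min_rtt = 10 ms the BDP is ~83 packets, so with the
 * steady-state cwnd_gain of 2 the target is ~167 packets, plus
 * 3 * tso_segs_goal for the end-host skbs budgeted above, rounded up to an
 * even number.
 */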

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
	u32 cwnd = tp->snd_cwnd;

	/* An ACK for P pkts should release at most 2*P packets. We do this
	 * in two steps. First, here we deduct the number of lost packets.
	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
	 */
	if (rs->losses > 0)
		cwnd = max_t(s32, cwnd - rs->losses, 1);

	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
		/* Starting 1st round of Recovery, so do packet conservation. */
		bbr->packet_conservation = 1;
		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
		cwnd = tcp_packets_in_flight(tp) + acked;
	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
		/* Exiting loss recovery; restore cwnd saved before recovery. */
		bbr->restore_cwnd = 1;
		bbr->packet_conservation = 0;
	}
	bbr->prev_ca_state = state;

	if (bbr->restore_cwnd) {
		/* Restore cwnd after exiting loss recovery or PROBE_RTT. */
		cwnd = max(cwnd, bbr->prior_cwnd);
		bbr->restore_cwnd = 0;
	}

	if (bbr->packet_conservation) {
		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
		return true;	/* yes, using packet conservation */
	}
	*new_cwnd = cwnd;
	return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
			 u32 acked, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd = 0, target_cwnd = 0;

	if (!acked)
		return;

	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
		goto done;

	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
	target_cwnd = bbr_target_cwnd(sk, bw, gain);
	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
		cwnd = min(cwnd + acked, target_cwnd);
	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
		cwnd = cwnd + acked;
	cwnd = max(cwnd, bbr_cwnd_min_target);

done:
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
				    const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool is_full_length =
		skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
		bbr->min_rtt_us;
	u32 inflight, bw;

	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
	 * use the pipe without increasing the queue.
	 */
	if (bbr->pacing_gain == BBR_UNIT)
		return is_full_length;		/* just use wall clock time */

	inflight = rs->prior_in_flight;  /* what was in-flight before ACK? */
	bw = bbr_max_bw(sk);

	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
	 * small (e.g. on a LAN). We do not persist if packets are lost, since
	 * a path with small buffers may not hold that much.
	 */
	if (bbr->pacing_gain > BBR_UNIT)
		return is_full_length &&
			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
			 inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));

	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
	 * probing didn't find more bw. If inflight falls to match BDP then we
	 * estimate queue is drained; persisting would underutilize the pipe.
	 */
	return is_full_length ||
		inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
	bbr->cycle_mstamp = tp->delivered_mstamp;
	bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
				   const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
	    bbr_is_next_cycle_phase(sk, rs))
		bbr_advance_cycle_phase(sk);
}

static void bbr_reset_startup_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_STARTUP;
	bbr->pacing_gain = bbr_high_gain;
	bbr->cwnd_gain	 = bbr_high_gain;
}

static void bbr_reset_probe_bw_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_PROBE_BW;
	bbr->pacing_gain = BBR_UNIT;
	bbr->cwnd_gain = bbr_cwnd_gain;
	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
}

static void bbr_reset_mode(struct sock *sk)
{
	if (!bbr_full_bw_reached(sk))
		bbr_reset_startup_mode(sk);
	else
		bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
	bbr->lt_last_delivered = tp->delivered;
	bbr->lt_last_lost = tp->lost;
	bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_bw = 0;
	bbr->lt_use_bw = 0;
	bbr->lt_is_sampling = false;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 diff;

	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
		/* Is new bw close to the lt_bw from the previous interval? */
		diff = abs(bw - bbr->lt_bw);
		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
		     bbr_lt_bw_diff)) {
			/* All criteria are met; estimate we're policed. */
			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
			bbr->lt_use_bw = 1;
			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
			bbr->lt_rtt_cnt = 0;
			return;
		}
	}
	bbr->lt_bw = bw;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 lost, delivered;
	u64 bw;
	s32 t;

	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
		}
		return;
	}

	/* Wait for the first loss before sampling, to let the policer exhaust
	 * its tokens and estimate the steady-state rate allowed by the policer.
	 * Starting samples earlier includes bursts that over-estimate the bw.
	 */
	if (!bbr->lt_is_sampling) {
		if (!rs->losses)
			return;
		bbr_reset_lt_bw_sampling_interval(sk);
		bbr->lt_is_sampling = true;
	}

	/* To avoid underestimates, reset sampling if we run out of data. */
	if (rs->is_app_limited) {
		bbr_reset_lt_bw_sampling(sk);
		return;
	}

	if (bbr->round_start)
		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
		return;		/* sampling interval needs to be longer */
	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
		return;
	}

	/* End sampling interval when a packet is lost, so we estimate the
	 * policer tokens were exhausted. Stopping the sampling before the
	 * tokens are exhausted under-estimates the policed rate.
	 */
	if (!rs->losses)
		return;

	/* Calculate packets lost and delivered in sampling interval. */
	lost = tp->lost - bbr->lt_last_lost;
	delivered = tp->delivered - bbr->lt_last_delivered;
	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
		return;

	/* Find average delivery rate in this sampling interval. */
	t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
	if (t < 1)
		return;		/* interval is less than one jiffy, so wait */
	t = jiffies_to_usecs(t);
	/* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
	if (t < 1) {
		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
		return;
	}
	bw = (u64)delivered * BW_UNIT;
	do_div(bw, t);
	bbr_lt_bw_interval_done(sk, bw);
}

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->round_start = 0;
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return; /* Not a valid observation */

	/* See if we've reached the next RTT */
	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
		bbr->next_rtt_delivered = tp->delivered;
		bbr->rtt_cnt++;
		bbr->round_start = 1;
		bbr->packet_conservation = 0;
	}

	bbr_lt_bw_sampling(sk, rs);

	/* Divide delivered by the interval to find a (lower bound) bottleneck
	 * bandwidth sample. Delivered is in packets and interval_us in uS and
	 * ratio will be <<1 for most connections. So delivered is first scaled.
	 */
	bw = (u64)rs->delivered * BW_UNIT;
	do_div(bw, rs->interval_us);

	/* If this sample is application-limited, it is likely to have a very
	 * low delivered count that represents application behavior rather than
	 * the available network rate. Such a sample could drag down estimated
	 * bw, causing needless slow-down. Thus, to continue to send at the
	 * last measured network rate, we filter out app-limited samples unless
	 * they describe the path bw at least as well as our bw model.
	 *
	 * So the goal during app-limited phase is to proceed with the best
	 * network rate no matter how long. We automatically leave this
	 * phase when app writes faster than the network can deliver :)
	 */
	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
		/* Incorporate new sample into our max bw filter. */
		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
	}
}
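
/* Worked bandwidth sample (illustrative numbers): if rs->delivered = 30
 * packets over rs->interval_us = 25000 usec, then bw = 30 * 2^24 / 25000
 * ~= 20133 units, i.e. 0.0012 pkt/usec = 1200 pkt/sec, which at 1500 bytes
 * per packet is ~14.4 Mbit/sec. The max filter then keeps the largest such
 * sample seen over the last bbr_bw_rtts (10) packet-timed rounds.
 */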

/* Estimate when the pipe is full, using the change in delivery rate: BBR
 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 * higher rwin, 3: we get higher delivery rate samples. Or transient
 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 */
static void bbr_check_full_bw_reached(struct sock *sk,
				      const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw_thresh;

	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
		return;

	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
	if (bbr_max_bw(sk) >= bw_thresh) {
		bbr->full_bw = bbr_max_bw(sk);
		bbr->full_bw_cnt = 0;
		return;
	}
	++bbr->full_bw_cnt;
}
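
/* Numeric illustration of the full-pipe check above (made-up sample values):
 * if bbr->full_bw is 8000 units then bw_thresh = 8000 * 320 / 256 = 10000,
 * i.e. the max filter must show at least 25% growth to reset the count. If
 * growth stays below that for 3 consecutive non-app-limited round starts,
 * full_bw_cnt reaches bbr_full_bw_cnt and STARTUP is judged to have filled
 * the pipe.
 */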

/* If pipe is probably full, drain the queue and then enter steady-state. */
static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
		bbr->mode = BBR_DRAIN;	/* drain queue we created */
		bbr->pacing_gain = bbr_drain_gain;	/* pace slow to drain */
		bbr->cwnd_gain = bbr_high_gain;	/* maintain cwnd */
	}	/* fall through to check if in-flight is already small: */
	if (bbr->mode == BBR_DRAIN &&
	    tcp_packets_in_flight(tcp_sk(sk)) <=
	    bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain its queue in the bottleneck. We pick up
 * these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool filter_expired;

	/* Track min RTT seen in the min_rtt_win_sec filter window: */
	filter_expired = after(tcp_time_stamp,
			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
	if (rs->rtt_us >= 0 &&
	    (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
		bbr->min_rtt_us = rs->rtt_us;
		bbr->min_rtt_stamp = tcp_time_stamp;
	}

	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
		bbr->pacing_gain = BBR_UNIT;
		bbr->cwnd_gain = BBR_UNIT;
		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
		bbr->probe_rtt_done_stamp = 0;
	}

	if (bbr->mode == BBR_PROBE_RTT) {
		/* Ignore low rate samples during this mode. */
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
		/* Maintain min packets in flight for max(200 ms, 1 round). */
		if (!bbr->probe_rtt_done_stamp &&
		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
			bbr->probe_rtt_done_stamp = tcp_time_stamp +
				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
			bbr->probe_rtt_round_done = 0;
			bbr->next_rtt_delivered = tp->delivered;
		} else if (bbr->probe_rtt_done_stamp) {
			if (bbr->round_start)
				bbr->probe_rtt_round_done = 1;
			if (bbr->probe_rtt_round_done &&
			    after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
				bbr->min_rtt_stamp = tcp_time_stamp;
				bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
				bbr_reset_mode(sk);
			}
		}
	}
	bbr->idle_restart = 0;
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
	bbr_update_bw(sk, rs);
	bbr_update_cycle_phase(sk, rs);
	bbr_check_full_bw_reached(sk, rs);
	bbr_check_drain(sk, rs);
	bbr_update_min_rtt(sk, rs);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw;

	bbr_update_model(sk, rs);

	bw = bbr_bw(sk);
	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
	bbr_set_tso_segs_goal(sk);
	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->prior_cwnd = 0;
	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
	bbr->rtt_cnt = 0;
	bbr->next_rtt_delivered = 0;
	bbr->prev_ca_state = TCP_CA_Open;
	bbr->packet_conservation = 0;

	bbr->probe_rtt_done_stamp = 0;
	bbr->probe_rtt_round_done = 0;
	bbr->min_rtt_us = tcp_min_rtt(tp);
	bbr->min_rtt_stamp = tcp_time_stamp;

	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */

	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
	bw = (u64)tp->snd_cwnd * BW_UNIT;
	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
	sk->sk_pacing_rate = 0;		/* force an update of sk_pacing_rate */
	bbr_set_pacing_rate(sk, bw, bbr_high_gain);

	bbr->restore_cwnd = 0;
	bbr->round_start = 0;
	bbr->idle_restart = 0;
	bbr->full_bw = 0;
	bbr->full_bw_cnt = 0;
	bbr->cycle_mstamp.v64 = 0;
	bbr->cycle_idx = 0;
	bbr_reset_lt_bw_sampling(sk);
	bbr_reset_startup_mode(sk);
}
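
/* Illustration of the initial pacing rate set up in bbr_init() (made-up
 * numbers): with an initial cwnd of 10 packets and an srtt of 20 ms, bw is
 * 10 pkts / 20000 usec = 500 pkt/sec, i.e. ~750 KByte/sec at a 1500-byte
 * MTU. Scaling by bbr_high_gain (~2.89x) starts pacing at roughly
 * 2.2 MByte/sec until the first ACKs provide real delivery rate samples.
 */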

static u32 bbr_sndbuf_expand(struct sock *sk)
{
	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
	return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
	return tcp_sk(sk)->snd_cwnd;
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
	bbr_save_cwnd(sk);
	return TCP_INFINITE_SSTHRESH;	 /* BBR does not use ssthresh */
}

static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct bbr *bbr = inet_csk_ca(sk);
		u64 bw = bbr_bw(sk);

		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
		memset(&info->bbr, 0, sizeof(info->bbr));
		info->bbr.bbr_bw_lo		= (u32)bw;
		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
		*attr = INET_DIAG_BBRINFO;
		return sizeof(info->bbr);
	}
	return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		struct rate_sample rs = { .losses = 1 };

		bbr->prev_ca_state = TCP_CA_Loss;
		bbr->full_bw = 0;
		bbr->round_start = 1;	/* treat RTO like end of a round */
		bbr_lt_bw_sampling(sk, &rs);
	}
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "bbr",
	.owner		= THIS_MODULE,
	.init		= bbr_init,
	.cong_control	= bbr_main,
	.sndbuf_expand	= bbr_sndbuf_expand,
	.undo_cwnd	= bbr_undo_cwnd,
	.cwnd_event	= bbr_cwnd_event,
	.ssthresh	= bbr_ssthresh,
	.tso_segs_goal	= bbr_tso_segs_goal,
	.get_info	= bbr_get_info,
	.set_state	= bbr_set_state,
};

static int __init bbr_register(void)
{
	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");