xref: /linux/net/ipv4/tcp_bbr.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /* Bottleneck Bandwidth and RTT (BBR) congestion control
2  *
3  * BBR congestion control computes the sending rate based on the delivery
4  * rate (throughput) estimated from ACKs. In a nutshell:
5  *
6  *   On each ACK, update our model of the network path:
7  *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
8  *      min_rtt = windowed_min(rtt, 10 seconds)
9  *   pacing_rate = pacing_gain * bottleneck_bandwidth
10  *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
11  *
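 *   A worked example with illustrative numbers: on a path with a bottleneck
 *   bandwidth of ~8,333 pkts/sec (~100 Mbit/s with 1500-byte packets) and
 *   min_rtt = 40 ms, the PROBE_BW cruise phases (pacing_gain = 1.0) pace at
 *   ~8,333 pkts/sec, and cwnd = max(2 * 8333 * 0.040, 4) ~= 667 packets,
 *   i.e. cwnd_gain = 2 times the ~333-packet bandwidth-delay product.
 *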
12  * The core algorithm does not react directly to packet losses or delays,
13  * although BBR may adjust the size of the next send per ACK when loss is
14  * observed, or adjust the sending rate if it estimates there is a
15  * traffic policer, in order to keep the drop rate reasonable.
16  *
17  * Here is a state transition diagram for BBR:
18  *
19  *             |
20  *             V
21  *    +---> STARTUP  ----+
22  *    |        |         |
23  *    |        V         |
24  *    |      DRAIN   ----+
25  *    |        |         |
26  *    |        V         |
27  *    +---> PROBE_BW ----+
28  *    |      ^    |      |
29  *    |      |    |      |
30  *    |      +----+      |
31  *    |                  |
32  *    +---- PROBE_RTT <--+
33  *
34  * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
35  * When it estimates the pipe is full, it enters DRAIN to drain the queue.
36  * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
37  * A long-lived BBR flow spends the vast majority of its time remaining
38  * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
39  * in a fair manner, with a small, bounded queue. *If* a flow has been
40  * continuously sending for the entire min_rtt window, and hasn't seen an RTT
41  * sample that matches or decreases its min_rtt estimate for 10 seconds, then
42  * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
43  * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
44  * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
45  * otherwise we enter STARTUP to try to fill the pipe.
46  *
47  * BBR is described in detail in:
48  *   "BBR: Congestion-Based Congestion Control",
49  *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
50  *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
51  *
52  * There is a public e-mail list for discussing BBR development and testing:
53  *   https://groups.google.com/forum/#!forum/bbr-dev
54  *
55  * NOTE: BBR may be used with the fq qdisc ("man tc-fq") with pacing enabled;
56  * otherwise the TCP stack falls back to internal pacing, using one
57  * high-resolution timer per TCP socket, which may use more resources.
58  */
59 #include <linux/module.h>
60 #include <net/tcp.h>
61 #include <linux/inet_diag.h>
62 #include <linux/inet.h>
63 #include <linux/random.h>
64 #include <linux/win_minmax.h>
65 
66 /* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
67  * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
68  * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
69  * Since the minimum window is >=4 packets, the lower bound isn't
70  * an issue. The upper bound isn't an issue with existing technologies.
71  */
72 #define BW_SCALE 24
73 #define BW_UNIT (1 << BW_SCALE)
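
/* A rough worked check of the unit above (illustrative arithmetic only):
 * one unit is 1/2^24 pkt/usec = 10^6/2^24 pkts/sec ~= 0.0596 pkts/sec, which
 * at 1500 bytes/packet is ~0.0596 * 1500 * 8 ~= 715 bits/sec. A u32 thus
 * spans up to 2^32/2^24 = 256 pkts/usec = 256 Mpkts/sec, i.e. roughly
 * 3 Tbit/sec at 1500 bytes per packet.
 */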
74 
75 #define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
76 #define BBR_UNIT (1 << BBR_SCALE)
77 
78 /* BBR has the following modes for deciding how fast to send: */
79 enum bbr_mode {
80 	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
81 	BBR_DRAIN,	/* drain any queue created during startup */
82 	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
83 	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
84 };
85 
86 /* BBR congestion control block */
87 struct bbr {
88 	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
89 	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
90 	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
91 	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
92 	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
93 	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
94 	u64	cycle_mstamp;	     /* time of this cycle phase start */
95 	u32     mode:3,		     /* current bbr_mode in state machine */
96 		prev_ca_state:3,     /* CA state on previous ACK */
97 		packet_conservation:1,  /* use packet conservation? */
98 		round_start:1,	     /* start of packet-timed tx->ack round? */
99 		idle_restart:1,	     /* restarting after idle? */
100 		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
101 		unused:13,
102 		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
103 		lt_rtt_cnt:7,	     /* round trips in long-term interval */
104 		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
105 	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
106 	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
107 	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
108 	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
109 	u32	pacing_gain:10,	/* current gain for setting pacing rate */
110 		cwnd_gain:10,	/* current gain for setting cwnd */
111 		full_bw_reached:1,   /* reached full bw in Startup? */
112 		full_bw_cnt:2,	/* number of rounds without large bw gains */
113 		cycle_idx:3,	/* current index in pacing_gain cycle array */
114 		has_seen_rtt:1, /* have we seen an RTT sample yet? */
115 		unused_b:5;
116 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
117 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
118 };
119 
120 #define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */
121 
122 /* Window length of bw filter (in rounds): */
123 static const int bbr_bw_rtts = CYCLE_LEN + 2;
124 /* Window length of min_rtt filter (in sec): */
125 static const u32 bbr_min_rtt_win_sec = 10;
126 /* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
127 static const u32 bbr_probe_rtt_mode_ms = 200;
128 /* Skip TSO below the following bandwidth (bits/sec): */
129 static const int bbr_min_tso_rate = 1200000;
130 
131 /* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
132  * that will allow a smoothly increasing pacing rate that will double each RTT
133  * and send the same number of packets per RTT that an un-paced, slow-starting
134  * Reno or CUBIC flow would:
135  */
136 static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
137 /* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
138  * the queue created in BBR_STARTUP in a single round:
139  */
140 static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
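/* With BBR_UNIT = 256, the two constants above work out to (illustrative
 * arithmetic, rounded down by integer division):
 *   bbr_high_gain  = 256 * 2885 / 1000 + 1 = 739  ~= 2.887 * BBR_UNIT
 *   bbr_drain_gain = 256 * 1000 / 2885     = 88   ~= 0.344 * BBR_UNIT
 * i.e. roughly 2/ln(2) ~= 2.885 and its reciprocal ln(2)/2 ~= 0.347.
 */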
141 /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
142 static const int bbr_cwnd_gain  = BBR_UNIT * 2;
143 /* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
144 static const int bbr_pacing_gain[] = {
145 	BBR_UNIT * 5 / 4,	/* probe for more available bw */
146 	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
147 	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
148 	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
149 };
150 /* Randomize the starting gain cycling phase over N phases: */
151 static const u32 bbr_cycle_rand = 7;
152 
153 /* Try to keep at least this many packets in flight, if things go smoothly. For
154  * smooth functioning, a sliding window protocol ACKing every other packet
155  * needs at least 4 packets in flight:
156  */
157 static const u32 bbr_cwnd_min_target = 4;
158 
159 /* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
160 /* If bw has increased significantly (1.25x), there may be more bw available: */
161 static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
162 /* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
163 static const u32 bbr_full_bw_cnt = 3;
164 
165 /* "long-term" ("LT") bandwidth estimator parameters... */
166 /* The minimum number of rounds in an LT bw sampling interval: */
167 static const u32 bbr_lt_intvl_min_rtts = 4;
168 /* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
169 static const u32 bbr_lt_loss_thresh = 50;
170 /* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
171 static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
172 /* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
173 static const u32 bbr_lt_bw_diff = 4000 / 8;
174 /* If we estimate we're policed, use lt_bw for this many round trips: */
175 static const u32 bbr_lt_bw_max_rtts = 48;
176 
177 static void bbr_check_probe_rtt_done(struct sock *sk);
178 
179 /* Do we estimate that STARTUP filled the pipe? */
180 static bool bbr_full_bw_reached(const struct sock *sk)
181 {
182 	const struct bbr *bbr = inet_csk_ca(sk);
183 
184 	return bbr->full_bw_reached;
185 }
186 
187 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
188 static u32 bbr_max_bw(const struct sock *sk)
189 {
190 	struct bbr *bbr = inet_csk_ca(sk);
191 
192 	return minmax_get(&bbr->bw);
193 }
194 
195 /* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
196 static u32 bbr_bw(const struct sock *sk)
197 {
198 	struct bbr *bbr = inet_csk_ca(sk);
199 
200 	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
201 }
202 
203 /* Return rate in bytes per second, optionally with a gain.
204  * The order here is chosen carefully to avoid overflow of u64. This should
205  * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
206  */
207 static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
208 {
209 	unsigned int mss = tcp_sk(sk)->mss_cache;
210 
211 	if (!tcp_needs_internal_pacing(sk))
212 		mss = tcp_mss_to_mtu(sk, mss);
213 	rate *= mss;
214 	rate *= gain;
215 	rate >>= BBR_SCALE;
216 	rate *= USEC_PER_SEC;
217 	return rate >> BW_SCALE;
218 }
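
/* Unit bookkeeping for the conversion above (a rough sketch, assuming
 * 1500-byte packets where a concrete size matters):
 *   rate                               pkts/usec << BW_SCALE
 *   rate *= mss                        bytes/usec << BW_SCALE
 *   rate *= gain; rate >>= BBR_SCALE   bytes/usec << BW_SCALE (gain applied)
 *   rate *= USEC_PER_SEC               bytes/sec << BW_SCALE
 *   rate >>= BW_SCALE                  bytes/sec
 * At the stated limits (~2.9 Tbit/sec, gain 2.89) the largest intermediate
 * value, just before the final shift, is about 2.9e12/8 * 2.89 * 2^24 ~=
 * 1.76e19, which still fits below 2^64 ~= 1.84e19.
 */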
219 
220 /* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
221 static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
222 {
223 	u64 rate = bw;
224 
225 	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
226 	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
227 	return rate;
228 }
229 
230 /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
231 static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
232 {
233 	struct tcp_sock *tp = tcp_sk(sk);
234 	struct bbr *bbr = inet_csk_ca(sk);
235 	u64 bw;
236 	u32 rtt_us;
237 
238 	if (tp->srtt_us) {		/* any RTT sample yet? */
239 		rtt_us = max(tp->srtt_us >> 3, 1U);
240 		bbr->has_seen_rtt = 1;
241 	} else {			 /* no RTT sample yet */
242 		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
243 	}
244 	bw = (u64)tp->snd_cwnd * BW_UNIT;
245 	do_div(bw, rtt_us);
246 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
247 }
248 
249 /* Pace using current bw estimate and a gain factor. In order to help drive the
250  * network toward lower queues while maintaining high utilization and low
251  * latency, the average pacing rate aims to be slightly (~1%) lower than the
252  * estimated bandwidth. This is an important aspect of the design. In this
253  * implementation this slightly lower pacing rate is achieved implicitly by not
254  * including link-layer headers in the packet size used for the pacing rate.
255  */
256 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
257 {
258 	struct tcp_sock *tp = tcp_sk(sk);
259 	struct bbr *bbr = inet_csk_ca(sk);
260 	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
261 
262 	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
263 		bbr_init_pacing_rate_from_rtt(sk);
264 	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
265 		sk->sk_pacing_rate = rate;
266 }
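
/* A rough check of the ~1% margin mentioned above: the pacing rate is
 * computed from the TCP/IP packet size (e.g. 1500 bytes), while each packet
 * on the bottleneck link also carries link-layer framing (e.g. a 14-byte
 * Ethernet header), so packets are paced out roughly 14/1514 ~= 0.9% more
 * slowly than the bottleneck forwards them. (Framing sizes here are
 * illustrative; the exact margin depends on the link layer.)
 */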
267 
268 /* override sysctl_tcp_min_tso_segs */
269 static u32 bbr_min_tso_segs(struct sock *sk)
270 {
271 	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
272 }
273 
274 static u32 bbr_tso_segs_goal(struct sock *sk)
275 {
276 	struct tcp_sock *tp = tcp_sk(sk);
277 	u32 segs, bytes;
278 
279 	/* Similar to tcp_tso_autosize(), but ignoring the
280 	 * driver-provided sk_gso_max_size.
281 	 */
282 	bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
283 		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
284 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
285 
286 	return min(segs, 0x7FU);
287 }
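
/* Two illustrative data points for the TSO sizing above, assuming a
 * 1448-byte MSS and the default sk_pacing_shift of 10: at a pacing rate of
 * 100,000 bytes/sec (~0.8 Mbit/sec, below bbr_min_tso_rate), bytes =
 * 100000 >> 10 = 97 < MSS, so we fall back to bbr_min_tso_segs() = 1
 * segment. At ~1 Gbit/sec (125,000,000 bytes/sec), bytes = 122070 and
 * segs ~= 122070 / 1448 ~= 84, comfortably under the 0x7F cap.
 */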
288 
289 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
290 static void bbr_save_cwnd(struct sock *sk)
291 {
292 	struct tcp_sock *tp = tcp_sk(sk);
293 	struct bbr *bbr = inet_csk_ca(sk);
294 
295 	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
296 		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
297 	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
298 		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
299 }
300 
301 static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
302 {
303 	struct tcp_sock *tp = tcp_sk(sk);
304 	struct bbr *bbr = inet_csk_ca(sk);
305 
306 	if (event == CA_EVENT_TX_START && tp->app_limited) {
307 		bbr->idle_restart = 1;
308 		/* Avoid pointless buffer overflows: pace at est. bw if we don't
309 		 * need more speed (we're restarting from idle and app-limited).
310 		 */
311 		if (bbr->mode == BBR_PROBE_BW)
312 			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
313 		else if (bbr->mode == BBR_PROBE_RTT)
314 			bbr_check_probe_rtt_done(sk);
315 	}
316 }
317 
318 /* Find target cwnd. Right-size the cwnd based on min RTT and the
319  * estimated bottleneck bandwidth:
320  *
321  * cwnd = bw * min_rtt * gain = BDP * gain
322  *
323  * The key factor, gain, controls the amount of queue. While a small gain
324  * builds a smaller queue, it becomes more vulnerable to noise in RTT
325  * measurements (e.g., delayed ACKs or other ACK compression effects). This
326  * noise may cause BBR to under-estimate the rate.
327  *
328  * To achieve full performance in high-speed paths, we budget enough cwnd to
329  * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
330  *   - one skb in sending host Qdisc,
331  *   - one skb in sending host TSO/GSO engine
332  *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
333  * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
334  * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
335  * which allows 2 outstanding 2-packet sequences, to try to keep pipe
336  * full even with ACK-every-other-packet delayed ACKs.
337  */
338 static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
339 {
340 	struct bbr *bbr = inet_csk_ca(sk);
341 	u32 cwnd;
342 	u64 w;
343 
344 	/* If we've never had a valid RTT sample, cap cwnd at the initial
345 	 * default. This should only happen when the connection is not using TCP
346 	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
347 	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
348 	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
349 	 */
350 	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
351 		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/
352 
353 	w = (u64)bw * bbr->min_rtt_us;
354 
355 	/* Apply a gain to the given value, then remove the BW_SCALE shift. */
356 	cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
357 
358 	/* Allow enough full-sized skbs in flight to utilize end systems. */
359 	cwnd += 3 * bbr_tso_segs_goal(sk);
360 
361 	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
362 	cwnd = (cwnd + 1) & ~1U;
363 
364 	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
365 	if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
366 		cwnd += 2;
367 
368 	return cwnd;
369 }
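
/* A worked example of the sizing above (illustrative numbers): with
 * bw ~= 8,333 pkts/sec (~100 Mbit/sec at 1500 bytes), min_rtt = 40 ms and
 * gain = BBR_UNIT, the BDP term rounds up to 334 packets. Adding
 * 3 * bbr_tso_segs_goal() (e.g. 3 * 8 = 24 packets at this rate) and
 * rounding up to an even value gives a target cwnd of roughly 358 packets.
 */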
370 
371 /* An optimization in BBR to reduce losses: On the first round of recovery, we
372  * follow the packet conservation principle: send P packets per P packets acked.
373  * After that, we slow-start and send at most 2*P packets per P packets acked.
374  * After recovery finishes, or upon undo, we restore the cwnd we had when
375  * recovery started (capped by the target cwnd based on estimated BDP).
376  *
377  * TODO(ycheng/ncardwell): implement a rate-based approach.
378  */
379 static bool bbr_set_cwnd_to_recover_or_restore(
380 	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
381 {
382 	struct tcp_sock *tp = tcp_sk(sk);
383 	struct bbr *bbr = inet_csk_ca(sk);
384 	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
385 	u32 cwnd = tp->snd_cwnd;
386 
387 	/* An ACK for P pkts should release at most 2*P packets. We do this
388 	 * in two steps. First, here we deduct the number of lost packets.
389 	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
390 	 */
391 	if (rs->losses > 0)
392 		cwnd = max_t(s32, cwnd - rs->losses, 1);
393 
394 	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
395 		/* Starting 1st round of Recovery, so do packet conservation. */
396 		bbr->packet_conservation = 1;
397 		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
398 		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
399 		cwnd = tcp_packets_in_flight(tp) + acked;
400 	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
401 		/* Exiting loss recovery; restore cwnd saved before recovery. */
402 		cwnd = max(cwnd, bbr->prior_cwnd);
403 		bbr->packet_conservation = 0;
404 	}
405 	bbr->prev_ca_state = state;
406 
407 	if (bbr->packet_conservation) {
408 		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
409 		return true;	/* yes, using packet conservation */
410 	}
411 	*new_cwnd = cwnd;
412 	return false;
413 }
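
/* An illustrative scenario for the logic above: suppose an ACK newly ACKs
 * 2 packets (acked = 2) and marks 1 packet lost just as we enter
 * TCP_CA_Recovery. cwnd is then set to tcp_packets_in_flight(tp) + 2, and
 * on each later ACK in this first round cwnd tracks inflight + acked, so we
 * roughly send P packets per P packets ACKed (packet conservation). Once
 * the round ends (packet_conservation is cleared in bbr_update_bw()),
 * bbr_set_cwnd() may again grow cwnd by "acked" per ACK, i.e. at most
 * doubling inflight per round while still in recovery.
 */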
414 
415 /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
416  * has drawn us down below target), or snap down to target if we're above it.
417  */
418 static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
419 			 u32 acked, u32 bw, int gain)
420 {
421 	struct tcp_sock *tp = tcp_sk(sk);
422 	struct bbr *bbr = inet_csk_ca(sk);
423 	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
424 
425 	if (!acked)
426 		goto done;  /* no packet fully ACKed; just apply caps */
427 
428 	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
429 		goto done;
430 
431 	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
432 	target_cwnd = bbr_target_cwnd(sk, bw, gain);
433 	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
434 		cwnd = min(cwnd + acked, target_cwnd);
435 	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
436 		cwnd = cwnd + acked;
437 	cwnd = max(cwnd, bbr_cwnd_min_target);
438 
439 done:
440 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
441 	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
442 		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
443 }
444 
445 /* End cycle phase if it's time and/or we hit the phase's in-flight target. */
446 static bool bbr_is_next_cycle_phase(struct sock *sk,
447 				    const struct rate_sample *rs)
448 {
449 	struct tcp_sock *tp = tcp_sk(sk);
450 	struct bbr *bbr = inet_csk_ca(sk);
451 	bool is_full_length =
452 		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
453 		bbr->min_rtt_us;
454 	u32 inflight, bw;
455 
456 	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
457 	 * use the pipe without increasing the queue.
458 	 */
459 	if (bbr->pacing_gain == BBR_UNIT)
460 		return is_full_length;		/* just use wall clock time */
461 
462 	inflight = rs->prior_in_flight;  /* what was in-flight before ACK? */
463 	bw = bbr_max_bw(sk);
464 
465 	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
466 	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
467 	 * small (e.g. on a LAN). We do not persist if packets are lost, since
468 	 * a path with small buffers may not hold that much.
469 	 */
470 	if (bbr->pacing_gain > BBR_UNIT)
471 		return is_full_length &&
472 			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
473 			 inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
474 
475 	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
476 	 * probing didn't find more bw. If inflight falls to match BDP then we
477 	 * estimate queue is drained; persisting would underutilize the pipe.
478 	 */
479 	return is_full_length ||
480 		inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
481 }
482 
483 static void bbr_advance_cycle_phase(struct sock *sk)
484 {
485 	struct tcp_sock *tp = tcp_sk(sk);
486 	struct bbr *bbr = inet_csk_ca(sk);
487 
488 	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
489 	bbr->cycle_mstamp = tp->delivered_mstamp;
490 	bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
491 					    bbr_pacing_gain[bbr->cycle_idx];
492 }
493 
494 /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
495 static void bbr_update_cycle_phase(struct sock *sk,
496 				   const struct rate_sample *rs)
497 {
498 	struct bbr *bbr = inet_csk_ca(sk);
499 
500 	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
501 		bbr_advance_cycle_phase(sk);
502 }
503 
504 static void bbr_reset_startup_mode(struct sock *sk)
505 {
506 	struct bbr *bbr = inet_csk_ca(sk);
507 
508 	bbr->mode = BBR_STARTUP;
509 	bbr->pacing_gain = bbr_high_gain;
510 	bbr->cwnd_gain	 = bbr_high_gain;
511 }
512 
513 static void bbr_reset_probe_bw_mode(struct sock *sk)
514 {
515 	struct bbr *bbr = inet_csk_ca(sk);
516 
517 	bbr->mode = BBR_PROBE_BW;
518 	bbr->pacing_gain = BBR_UNIT;
519 	bbr->cwnd_gain = bbr_cwnd_gain;
520 	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
521 	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
522 }
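
/* A note on the randomized start above: prandom_u32_max(bbr_cycle_rand)
 * yields a value in [0, 6], so cycle_idx is first set to a value in [1, 7];
 * bbr_advance_cycle_phase() then advances it (mod CYCLE_LEN), so PROBE_BW
 * starts in one of phases {2..7, 0} of bbr_pacing_gain[]. Flows thus begin
 * at staggered points in the cycle, and never start in phase 1, the 3/4
 * "drain" phase.
 */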
523 
524 static void bbr_reset_mode(struct sock *sk)
525 {
526 	if (!bbr_full_bw_reached(sk))
527 		bbr_reset_startup_mode(sk);
528 	else
529 		bbr_reset_probe_bw_mode(sk);
530 }
531 
532 /* Start a new long-term sampling interval. */
533 static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
534 {
535 	struct tcp_sock *tp = tcp_sk(sk);
536 	struct bbr *bbr = inet_csk_ca(sk);
537 
538 	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
539 	bbr->lt_last_delivered = tp->delivered;
540 	bbr->lt_last_lost = tp->lost;
541 	bbr->lt_rtt_cnt = 0;
542 }
543 
544 /* Completely reset long-term bandwidth sampling. */
545 static void bbr_reset_lt_bw_sampling(struct sock *sk)
546 {
547 	struct bbr *bbr = inet_csk_ca(sk);
548 
549 	bbr->lt_bw = 0;
550 	bbr->lt_use_bw = 0;
551 	bbr->lt_is_sampling = false;
552 	bbr_reset_lt_bw_sampling_interval(sk);
553 }
554 
555 /* Long-term bw sampling interval is done. Estimate whether we're policed. */
556 static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
557 {
558 	struct bbr *bbr = inet_csk_ca(sk);
559 	u32 diff;
560 
561 	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
562 		/* Is new bw close to the lt_bw from the previous interval? */
563 		diff = abs(bw - bbr->lt_bw);
564 		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
565 		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
566 		     bbr_lt_bw_diff)) {
567 			/* All criteria are met; estimate we're policed. */
568 			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
569 			bbr->lt_use_bw = 1;
570 			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
571 			bbr->lt_rtt_cnt = 0;
572 			return;
573 		}
574 	}
575 	bbr->lt_bw = bw;
576 	bbr_reset_lt_bw_sampling_interval(sk);
577 }
578 
579 /* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
580  * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
581  * explicitly models their policed rate, to reduce unnecessary losses. We
582  * estimate that we're policed if we see 2 consecutive sampling intervals with
583  * consistent throughput and high packet loss. If we think we're being policed,
584  * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
585  */
586 static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
587 {
588 	struct tcp_sock *tp = tcp_sk(sk);
589 	struct bbr *bbr = inet_csk_ca(sk);
590 	u32 lost, delivered;
591 	u64 bw;
592 	u32 t;
593 
594 	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
595 		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
596 		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
597 			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
598 			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
599 		}
600 		return;
601 	}
602 
603 	/* Wait for the first loss before sampling, to let the policer exhaust
604 	 * its tokens and estimate the steady-state rate allowed by the policer.
605 	 * Starting samples earlier includes bursts that over-estimate the bw.
606 	 */
607 	if (!bbr->lt_is_sampling) {
608 		if (!rs->losses)
609 			return;
610 		bbr_reset_lt_bw_sampling_interval(sk);
611 		bbr->lt_is_sampling = true;
612 	}
613 
614 	/* To avoid underestimates, reset sampling if we run out of data. */
615 	if (rs->is_app_limited) {
616 		bbr_reset_lt_bw_sampling(sk);
617 		return;
618 	}
619 
620 	if (bbr->round_start)
621 		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
622 	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
623 		return;		/* sampling interval needs to be longer */
624 	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
625 		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
626 		return;
627 	}
628 
629 	/* End sampling interval when a packet is lost, so we estimate the
630 	 * policer tokens were exhausted. Stopping the sampling before the
631 	 * tokens are exhausted under-estimates the policed rate.
632 	 */
633 	if (!rs->losses)
634 		return;
635 
636 	/* Calculate packets lost and delivered in sampling interval. */
637 	lost = tp->lost - bbr->lt_last_lost;
638 	delivered = tp->delivered - bbr->lt_last_delivered;
639 	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
640 	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
641 		return;
642 
643 	/* Find average delivery rate in this sampling interval. */
644 	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
645 	if ((s32)t < 1)
646 		return;		/* interval is less than one ms, so wait */
647 	/* Check that we can multiply without overflowing. */
648 	if (t >= ~0U / USEC_PER_MSEC) {
649 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
650 		return;
651 	}
652 	t *= USEC_PER_MSEC;
653 	bw = (u64)delivered * BW_UNIT;
654 	do_div(bw, t);
655 	bbr_lt_bw_interval_done(sk, bw);
656 }
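
/* A worked summary of the thresholds used above (illustrative numbers): an
 * interval of 4..16 round trips counts as "lossy" when
 * (lost << BBR_SCALE) >= bbr_lt_loss_thresh * delivered, i.e. when
 * lost/delivered >= 50/256 ~= 19.5%. Two consecutive lossy intervals whose
 * delivery rates agree within 1/8 (or within 4 Kbit/sec) are taken as
 * evidence of a policer; lt_bw is then the average of the two rates and is
 * used for up to bbr_lt_bw_max_rtts = 48 round trips.
 */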
657 
658 /* Estimate the bandwidth based on how fast packets are delivered */
659 static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
660 {
661 	struct tcp_sock *tp = tcp_sk(sk);
662 	struct bbr *bbr = inet_csk_ca(sk);
663 	u64 bw;
664 
665 	bbr->round_start = 0;
666 	if (rs->delivered < 0 || rs->interval_us <= 0)
667 		return; /* Not a valid observation */
668 
669 	/* See if we've reached the next RTT */
670 	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
671 		bbr->next_rtt_delivered = tp->delivered;
672 		bbr->rtt_cnt++;
673 		bbr->round_start = 1;
674 		bbr->packet_conservation = 0;
675 	}
676 
677 	bbr_lt_bw_sampling(sk, rs);
678 
679 	/* Divide delivered by the interval to find a (lower bound) bottleneck
680 	 * bandwidth sample. Delivered is in packets and interval_us in uS and
681 	 * ratio will be <<1 for most connections. So delivered is first scaled.
682 	 */
683 	bw = (u64)rs->delivered * BW_UNIT;
684 	do_div(bw, rs->interval_us);
685 
686 	/* If this sample is application-limited, it is likely to have a very
687 	 * low delivered count that represents application behavior rather than
688 	 * the available network rate. Such a sample could drag down estimated
689 	 * bw, causing needless slow-down. Thus, to continue to send at the
690 	 * last measured network rate, we filter out app-limited samples unless
691 	 * they describe the path bw at least as well as our bw model.
692 	 *
693 	 * So the goal during an app-limited phase is to keep using the best
694 	 * network rate estimate, however long the phase lasts. We automatically
695 	 * leave this phase when the app writes faster than the network can deliver :)
696 	 */
697 	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
698 		/* Incorporate new sample into our max bw filter. */
699 		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
700 	}
701 }
702 
703 /* Estimate when the pipe is full, using the change in delivery rate: BBR
704  * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
705  * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
706  * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
707  * higher rwin, 3: we get higher delivery rate samples. Or transient
708  * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
709  * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
710  */
711 static void bbr_check_full_bw_reached(struct sock *sk,
712 				      const struct rate_sample *rs)
713 {
714 	struct bbr *bbr = inet_csk_ca(sk);
715 	u32 bw_thresh;
716 
717 	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
718 		return;
719 
720 	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
721 	if (bbr_max_bw(sk) >= bw_thresh) {
722 		bbr->full_bw = bbr_max_bw(sk);
723 		bbr->full_bw_cnt = 0;
724 		return;
725 	}
726 	++bbr->full_bw_cnt;
727 	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
728 }
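
/* A worked example of the test above (illustrative numbers): if full_bw was
 * last recorded as 1000 (in bw filter units), the growth threshold is
 * 1000 * 5/4 = 1250. A non-app-limited round that ends with
 * bbr_max_bw() >= 1250 refreshes full_bw and restarts the count; three
 * consecutive such rounds ending below 1250 make BBR estimate that STARTUP
 * has filled the pipe and set full_bw_reached.
 */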
729 
730 /* If pipe is probably full, drain the queue and then enter steady-state. */
731 static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
732 {
733 	struct bbr *bbr = inet_csk_ca(sk);
734 
735 	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
736 		bbr->mode = BBR_DRAIN;	/* drain queue we created */
737 		bbr->pacing_gain = bbr_drain_gain;	/* pace slow to drain */
738 		bbr->cwnd_gain = bbr_high_gain;	/* maintain cwnd */
739 		tcp_sk(sk)->snd_ssthresh =
740 				bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
741 	}	/* fall through to check if in-flight is already small: */
742 	if (bbr->mode == BBR_DRAIN &&
743 	    tcp_packets_in_flight(tcp_sk(sk)) <=
744 	    bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
745 		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
746 }
747 
748 static void bbr_check_probe_rtt_done(struct sock *sk)
749 {
750 	struct tcp_sock *tp = tcp_sk(sk);
751 	struct bbr *bbr = inet_csk_ca(sk);
752 
753 	if (!(bbr->probe_rtt_done_stamp &&
754 	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
755 		return;
756 
757 	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
758 	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
759 	bbr_reset_mode(sk);
760 }
761 
762 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
763  * periodically drain the bottleneck queue, to converge to measure the true
764  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
765  * small (reducing queuing delay and packet loss) and achieve fairness among
766  * BBR flows.
767  *
768  * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
769  * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
770  * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
771  * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
772  * re-enter the previous mode. BBR uses 200ms to approximately bound the
773  * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
774  *
775  * Note that flows need only pay 2% if they are busy sending over the last 10
776  * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
777  * natural silences or low-rate periods within 10 seconds where the rate is low
778  * enough for long enough to drain its queue in the bottleneck. We pick up
779  * these min RTT measurements opportunistically with our min_rtt filter. :-)
780  */
781 static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
782 {
783 	struct tcp_sock *tp = tcp_sk(sk);
784 	struct bbr *bbr = inet_csk_ca(sk);
785 	bool filter_expired;
786 
787 	/* Track min RTT seen in the min_rtt_win_sec filter window: */
788 	filter_expired = after(tcp_jiffies32,
789 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
790 	if (rs->rtt_us >= 0 &&
791 	    (rs->rtt_us <= bbr->min_rtt_us ||
792 	     (filter_expired && !rs->is_ack_delayed))) {
793 		bbr->min_rtt_us = rs->rtt_us;
794 		bbr->min_rtt_stamp = tcp_jiffies32;
795 	}
796 
797 	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
798 	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
799 		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
800 		bbr->pacing_gain = BBR_UNIT;
801 		bbr->cwnd_gain = BBR_UNIT;
802 		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
803 		bbr->probe_rtt_done_stamp = 0;
804 	}
805 
806 	if (bbr->mode == BBR_PROBE_RTT) {
807 		/* Ignore low rate samples during this mode. */
808 		tp->app_limited =
809 			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
810 		/* Maintain min packets in flight for max(200 ms, 1 round). */
811 		if (!bbr->probe_rtt_done_stamp &&
812 		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
813 			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
814 				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
815 			bbr->probe_rtt_round_done = 0;
816 			bbr->next_rtt_delivered = tp->delivered;
817 		} else if (bbr->probe_rtt_done_stamp) {
818 			if (bbr->round_start)
819 				bbr->probe_rtt_round_done = 1;
820 			if (bbr->probe_rtt_round_done)
821 				bbr_check_probe_rtt_done(sk);
822 		}
823 	}
824 	/* Restart after idle ends only once we process a new S/ACK for data */
825 	if (rs->delivered > 0)
826 		bbr->idle_restart = 0;
827 }
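
/* A rough check of the ~2% figure above (illustrative): a continuously busy
 * flow enters PROBE_RTT about once per 10-second min_rtt window and holds
 * cwnd at 4 packets for max(200 ms, 1 round trip). With round trips well
 * under 200 ms that is roughly 200 ms of near-idle time per 10,000 ms, i.e.
 * about a 2% throughput penalty; with a 400 ms round trip the bound is
 * closer to 400/10000 = 4%.
 */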
828 
829 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
830 {
831 	bbr_update_bw(sk, rs);
832 	bbr_update_cycle_phase(sk, rs);
833 	bbr_check_full_bw_reached(sk, rs);
834 	bbr_check_drain(sk, rs);
835 	bbr_update_min_rtt(sk, rs);
836 }
837 
838 static void bbr_main(struct sock *sk, const struct rate_sample *rs)
839 {
840 	struct bbr *bbr = inet_csk_ca(sk);
841 	u32 bw;
842 
843 	bbr_update_model(sk, rs);
844 
845 	bw = bbr_bw(sk);
846 	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
847 	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
848 }
849 
850 static void bbr_init(struct sock *sk)
851 {
852 	struct tcp_sock *tp = tcp_sk(sk);
853 	struct bbr *bbr = inet_csk_ca(sk);
854 
855 	bbr->prior_cwnd = 0;
856 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
857 	bbr->rtt_cnt = 0;
858 	bbr->next_rtt_delivered = 0;
859 	bbr->prev_ca_state = TCP_CA_Open;
860 	bbr->packet_conservation = 0;
861 
862 	bbr->probe_rtt_done_stamp = 0;
863 	bbr->probe_rtt_round_done = 0;
864 	bbr->min_rtt_us = tcp_min_rtt(tp);
865 	bbr->min_rtt_stamp = tcp_jiffies32;
866 
867 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
868 
869 	bbr->has_seen_rtt = 0;
870 	bbr_init_pacing_rate_from_rtt(sk);
871 
872 	bbr->round_start = 0;
873 	bbr->idle_restart = 0;
874 	bbr->full_bw_reached = 0;
875 	bbr->full_bw = 0;
876 	bbr->full_bw_cnt = 0;
877 	bbr->cycle_mstamp = 0;
878 	bbr->cycle_idx = 0;
879 	bbr_reset_lt_bw_sampling(sk);
880 	bbr_reset_startup_mode(sk);
881 
882 	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
883 }
884 
885 static u32 bbr_sndbuf_expand(struct sock *sk)
886 {
887 	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
888 	return 3;
889 }
890 
891 /* In theory BBR does not need to undo the cwnd since it does not
892  * always reduce cwnd on losses (see bbr_main()). Keep it for now.
893  */
894 static u32 bbr_undo_cwnd(struct sock *sk)
895 {
896 	struct bbr *bbr = inet_csk_ca(sk);
897 
898 	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
899 	bbr->full_bw_cnt = 0;
900 	bbr_reset_lt_bw_sampling(sk);
901 	return tcp_sk(sk)->snd_cwnd;
902 }
903 
904 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
905 static u32 bbr_ssthresh(struct sock *sk)
906 {
907 	bbr_save_cwnd(sk);
908 	return tcp_sk(sk)->snd_ssthresh;
909 }
910 
911 static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
912 			   union tcp_cc_info *info)
913 {
914 	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
915 	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
916 		struct tcp_sock *tp = tcp_sk(sk);
917 		struct bbr *bbr = inet_csk_ca(sk);
918 		u64 bw = bbr_bw(sk);
919 
920 		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
921 		memset(&info->bbr, 0, sizeof(info->bbr));
922 		info->bbr.bbr_bw_lo		= (u32)bw;
923 		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
924 		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
925 		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
926 		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
927 		*attr = INET_DIAG_BBRINFO;
928 		return sizeof(info->bbr);
929 	}
930 	return 0;
931 }
932 
933 static void bbr_set_state(struct sock *sk, u8 new_state)
934 {
935 	struct bbr *bbr = inet_csk_ca(sk);
936 
937 	if (new_state == TCP_CA_Loss) {
938 		struct rate_sample rs = { .losses = 1 };
939 
940 		bbr->prev_ca_state = TCP_CA_Loss;
941 		bbr->full_bw = 0;
942 		bbr->round_start = 1;	/* treat RTO like end of a round */
943 		bbr_lt_bw_sampling(sk, &rs);
944 	}
945 }
946 
947 static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
948 	.flags		= TCP_CONG_NON_RESTRICTED,
949 	.name		= "bbr",
950 	.owner		= THIS_MODULE,
951 	.init		= bbr_init,
952 	.cong_control	= bbr_main,
953 	.sndbuf_expand	= bbr_sndbuf_expand,
954 	.undo_cwnd	= bbr_undo_cwnd,
955 	.cwnd_event	= bbr_cwnd_event,
956 	.ssthresh	= bbr_ssthresh,
957 	.min_tso_segs	= bbr_min_tso_segs,
958 	.get_info	= bbr_get_info,
959 	.set_state	= bbr_set_state,
960 };
961 
962 static int __init bbr_register(void)
963 {
964 	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
965 	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
966 }
967 
968 static void __exit bbr_unregister(void)
969 {
970 	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
971 }
972 
973 module_init(bbr_register);
974 module_exit(bbr_unregister);
975 
976 MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
977 MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
978 MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
979 MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
980 MODULE_LICENSE("Dual BSD/GPL");
981 MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
982