xref: /linux/net/ipv4/tcp_bbr.c (revision 232aa8ec3ed979d4716891540c03a806ecab0c37)
10f8782eaSNeal Cardwell /* Bottleneck Bandwidth and RTT (BBR) congestion control
20f8782eaSNeal Cardwell  *
30f8782eaSNeal Cardwell  * BBR congestion control computes the sending rate based on the delivery
40f8782eaSNeal Cardwell  * rate (throughput) estimated from ACKs. In a nutshell:
50f8782eaSNeal Cardwell  *
60f8782eaSNeal Cardwell  *   On each ACK, update our model of the network path:
70f8782eaSNeal Cardwell  *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
80f8782eaSNeal Cardwell  *      min_rtt = windowed_min(rtt, 10 seconds)
90f8782eaSNeal Cardwell  *   pacing_rate = pacing_gain * bottleneck_bandwidth
100f8782eaSNeal Cardwell  *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
110f8782eaSNeal Cardwell  *
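 * For example (illustrative numbers): with an estimated bottleneck_bandwidth
 * of 10 Mbit/sec, a min_rtt of 40 ms, and cwnd_gain = 2, the cwnd target is
 * roughly 2 * 10 Mbit/sec * 40 ms / (1500 * 8 bits) ~= 67 packets.
 *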
120f8782eaSNeal Cardwell  * The core algorithm does not react directly to packet losses or delays,
130f8782eaSNeal Cardwell  * although BBR may adjust the size of the next send per ACK when loss is
140f8782eaSNeal Cardwell  * observed, or adjust the sending rate if it estimates there is a
150f8782eaSNeal Cardwell  * traffic policer, in order to keep the drop rate reasonable.
160f8782eaSNeal Cardwell  *
179b9375b5SNeal Cardwell  * Here is a state transition diagram for BBR:
189b9375b5SNeal Cardwell  *
199b9375b5SNeal Cardwell  *             |
209b9375b5SNeal Cardwell  *             V
219b9375b5SNeal Cardwell  *    +---> STARTUP  ----+
229b9375b5SNeal Cardwell  *    |        |         |
239b9375b5SNeal Cardwell  *    |        V         |
249b9375b5SNeal Cardwell  *    |      DRAIN   ----+
259b9375b5SNeal Cardwell  *    |        |         |
269b9375b5SNeal Cardwell  *    |        V         |
279b9375b5SNeal Cardwell  *    +---> PROBE_BW ----+
289b9375b5SNeal Cardwell  *    |      ^    |      |
299b9375b5SNeal Cardwell  *    |      |    |      |
309b9375b5SNeal Cardwell  *    |      +----+      |
319b9375b5SNeal Cardwell  *    |                  |
329b9375b5SNeal Cardwell  *    +---- PROBE_RTT <--+
339b9375b5SNeal Cardwell  *
349b9375b5SNeal Cardwell  * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
359b9375b5SNeal Cardwell  * When it estimates the pipe is full, it enters DRAIN to drain the queue.
369b9375b5SNeal Cardwell  * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
379b9375b5SNeal Cardwell  * A long-lived BBR flow spends the vast majority of its time remaining
389b9375b5SNeal Cardwell  * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
399b9375b5SNeal Cardwell  * in a fair manner, with a small, bounded queue. *If* a flow has been
409b9375b5SNeal Cardwell  * continuously sending for the entire min_rtt window, and hasn't seen an RTT
419b9375b5SNeal Cardwell  * sample that matches or decreases its min_rtt estimate for 10 seconds, then
429b9375b5SNeal Cardwell  * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
439b9375b5SNeal Cardwell  * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
449b9375b5SNeal Cardwell  * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
459b9375b5SNeal Cardwell  * otherwise we enter STARTUP to try to fill the pipe.
469b9375b5SNeal Cardwell  *
470f8782eaSNeal Cardwell  * BBR is described in detail in:
480f8782eaSNeal Cardwell  *   "BBR: Congestion-Based Congestion Control",
490f8782eaSNeal Cardwell  *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
500f8782eaSNeal Cardwell  *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
510f8782eaSNeal Cardwell  *
520f8782eaSNeal Cardwell  * There is a public e-mail list for discussing BBR development and testing:
530f8782eaSNeal Cardwell  *   https://groups.google.com/forum/#!forum/bbr-dev
540f8782eaSNeal Cardwell  *
55218af599SEric Dumazet  * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
56218af599SEric Dumazet  * otherwise the TCP stack falls back to internal pacing, which uses one
57218af599SEric Dumazet  * high-resolution timer per TCP socket and may consume more resources.
580f8782eaSNeal Cardwell  */
590f8782eaSNeal Cardwell #include <linux/module.h>
600f8782eaSNeal Cardwell #include <net/tcp.h>
610f8782eaSNeal Cardwell #include <linux/inet_diag.h>
620f8782eaSNeal Cardwell #include <linux/inet.h>
630f8782eaSNeal Cardwell #include <linux/random.h>
640f8782eaSNeal Cardwell #include <linux/win_minmax.h>
650f8782eaSNeal Cardwell 
660f8782eaSNeal Cardwell /* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
670f8782eaSNeal Cardwell  * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
680f8782eaSNeal Cardwell  * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
690f8782eaSNeal Cardwell  * Since the minimum window is >=4 packets, the lower bound isn't
700f8782eaSNeal Cardwell  * an issue. The upper bound isn't an issue with existing technologies.
710f8782eaSNeal Cardwell  */
720f8782eaSNeal Cardwell #define BW_SCALE 24
730f8782eaSNeal Cardwell #define BW_UNIT (1 << BW_SCALE)
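/* For example (assuming 1500-byte packets): a path delivering 1 Gbit/sec moves
 * ~0.083 pkt/usec, which is stored as ~0.083 * 2^24 ~= 1.4e6 in this unit.
 */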
740f8782eaSNeal Cardwell 
750f8782eaSNeal Cardwell #define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
760f8782eaSNeal Cardwell #define BBR_UNIT (1 << BBR_SCALE)
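/* Gains are BBR_SCALE fixed-point fractions: e.g. a gain of 1.25 is stored as
 * BBR_UNIT * 5 / 4 = 320 and applied as (value * gain) >> BBR_SCALE, so
 * (1000 * 320) >> 8 = 1250.
 */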
770f8782eaSNeal Cardwell 
780f8782eaSNeal Cardwell /* BBR has the following modes for deciding how fast to send: */
790f8782eaSNeal Cardwell enum bbr_mode {
800f8782eaSNeal Cardwell 	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
810f8782eaSNeal Cardwell 	BBR_DRAIN,	/* drain any queue created during startup */
820f8782eaSNeal Cardwell 	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
839b9375b5SNeal Cardwell 	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
840f8782eaSNeal Cardwell };
850f8782eaSNeal Cardwell 
860f8782eaSNeal Cardwell /* BBR congestion control block */
870f8782eaSNeal Cardwell struct bbr {
880f8782eaSNeal Cardwell 	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
890f8782eaSNeal Cardwell 	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
900f8782eaSNeal Cardwell 	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
910f8782eaSNeal Cardwell 	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
920f8782eaSNeal Cardwell 	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
930f8782eaSNeal Cardwell 	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
949a568de4SEric Dumazet 	u64	cycle_mstamp;	     /* time of this cycle phase start */
950f8782eaSNeal Cardwell 	u32     mode:3,		     /* current bbr_mode in state machine */
960f8782eaSNeal Cardwell 		prev_ca_state:3,     /* CA state on previous ACK */
970f8782eaSNeal Cardwell 		packet_conservation:1,  /* use packet conservation? */
980f8782eaSNeal Cardwell 		round_start:1,	     /* start of packet-timed tx->ack round? */
990f8782eaSNeal Cardwell 		idle_restart:1,	     /* restarting after idle? */
1000f8782eaSNeal Cardwell 		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
101fb998862SKevin Yang 		unused:13,
1020f8782eaSNeal Cardwell 		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
1030f8782eaSNeal Cardwell 		lt_rtt_cnt:7,	     /* round trips in long-term interval */
1040f8782eaSNeal Cardwell 		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
1050f8782eaSNeal Cardwell 	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
1060f8782eaSNeal Cardwell 	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
1070f8782eaSNeal Cardwell 	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
1080f8782eaSNeal Cardwell 	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
1090f8782eaSNeal Cardwell 	u32	pacing_gain:10,	/* current gain for setting pacing rate */
1100f8782eaSNeal Cardwell 		cwnd_gain:10,	/* current gain for setting cwnd */
111c589e69bSNeal Cardwell 		full_bw_reached:1,   /* reached full bw in Startup? */
112c589e69bSNeal Cardwell 		full_bw_cnt:2,	/* number of rounds without large bw gains */
1130f8782eaSNeal Cardwell 		cycle_idx:3,	/* current index in pacing_gain cycle array */
11432984565SNeal Cardwell 		has_seen_rtt:1, /* have we seen an RTT sample yet? */
11532984565SNeal Cardwell 		unused_b:5;
1160f8782eaSNeal Cardwell 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
1170f8782eaSNeal Cardwell 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
1180f8782eaSNeal Cardwell };
1190f8782eaSNeal Cardwell 
1200f8782eaSNeal Cardwell #define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */
1210f8782eaSNeal Cardwell 
1220f8782eaSNeal Cardwell /* Window length of bw filter (in rounds): */
1230f8782eaSNeal Cardwell static const int bbr_bw_rtts = CYCLE_LEN + 2;
1240f8782eaSNeal Cardwell /* Window length of min_rtt filter (in sec): */
1250f8782eaSNeal Cardwell static const u32 bbr_min_rtt_win_sec = 10;
1260f8782eaSNeal Cardwell /* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
1270f8782eaSNeal Cardwell static const u32 bbr_probe_rtt_mode_ms = 200;
1280f8782eaSNeal Cardwell /* Skip TSO below the following bandwidth (bits/sec): */
1290f8782eaSNeal Cardwell static const int bbr_min_tso_rate = 1200000;
1300f8782eaSNeal Cardwell 
1311106a5adSNeal Cardwell /* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
1321106a5adSNeal Cardwell  * In order to help drive the network toward lower queues and low latency while
1331106a5adSNeal Cardwell  * maintaining high utilization, the average pacing rate aims to be slightly
1341106a5adSNeal Cardwell  * lower than the estimated bandwidth. This is an important aspect of the
1351106a5adSNeal Cardwell  * design.
1361106a5adSNeal Cardwell  */
13797ec3eb3SNeal Cardwell static const int bbr_pacing_margin_percent = 1;
138ab408b6dSEric Dumazet 
1390f8782eaSNeal Cardwell /* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
1400f8782eaSNeal Cardwell  * that will allow a smoothly increasing pacing rate that will double each RTT
1410f8782eaSNeal Cardwell  * and send the same number of packets per RTT that an un-paced, slow-starting
1420f8782eaSNeal Cardwell  * Reno or CUBIC flow would:
1430f8782eaSNeal Cardwell  */
1440f8782eaSNeal Cardwell static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
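/* Note: BBR_UNIT * 2885 / 1000 + 1 = 739, i.e. ~2.887 * BBR_UNIT, just above
 * 2/ln(2) ~= 2.885, so the "smallest such gain" property survives the integer
 * truncation.
 */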
1450f8782eaSNeal Cardwell /* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
1460f8782eaSNeal Cardwell  * the queue created in BBR_STARTUP in a single round:
1470f8782eaSNeal Cardwell  */
1480f8782eaSNeal Cardwell static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
1490f8782eaSNeal Cardwell /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
1500f8782eaSNeal Cardwell static const int bbr_cwnd_gain  = BBR_UNIT * 2;
1510f8782eaSNeal Cardwell /* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
1520f8782eaSNeal Cardwell static const int bbr_pacing_gain[] = {
1530f8782eaSNeal Cardwell 	BBR_UNIT * 5 / 4,	/* probe for more available bw */
1540f8782eaSNeal Cardwell 	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
1550f8782eaSNeal Cardwell 	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
1560f8782eaSNeal Cardwell 	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
1570f8782eaSNeal Cardwell };
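/* The gains above average to exactly 1.0 over a full cycle:
 * (5/4 + 3/4 + 6 * 1) / 8 = 1, so PROBE_BW does not, on average, pace faster
 * than the estimated bandwidth.
 */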
1580f8782eaSNeal Cardwell /* Randomize the starting gain cycling phase over N phases: */
1590f8782eaSNeal Cardwell static const u32 bbr_cycle_rand = 7;
1600f8782eaSNeal Cardwell 
1610f8782eaSNeal Cardwell /* Try to keep at least this many packets in flight, if things go smoothly. For
1620f8782eaSNeal Cardwell  * smooth functioning, a sliding window protocol ACKing every other packet
1630f8782eaSNeal Cardwell  * needs at least 4 packets in flight:
1640f8782eaSNeal Cardwell  */
1650f8782eaSNeal Cardwell static const u32 bbr_cwnd_min_target = 4;
1660f8782eaSNeal Cardwell 
1670f8782eaSNeal Cardwell /* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled the pipe... */
1680f8782eaSNeal Cardwell /* If bw has increased significantly (1.25x), there may be more bw available: */
1690f8782eaSNeal Cardwell static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
1700f8782eaSNeal Cardwell /* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
1710f8782eaSNeal Cardwell static const u32 bbr_full_bw_cnt = 3;
1720f8782eaSNeal Cardwell 
1730f8782eaSNeal Cardwell /* "long-term" ("LT") bandwidth estimator parameters... */
1740f8782eaSNeal Cardwell /* The minimum number of rounds in an LT bw sampling interval: */
1750f8782eaSNeal Cardwell static const u32 bbr_lt_intvl_min_rtts = 4;
1760f8782eaSNeal Cardwell /* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
1770f8782eaSNeal Cardwell static const u32 bbr_lt_loss_thresh = 50;
1780f8782eaSNeal Cardwell /* If 2 intervals' bws differ by a ratio <= 1/8, their bw is "consistent": */
1790f8782eaSNeal Cardwell static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
1800f8782eaSNeal Cardwell /* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
1810f8782eaSNeal Cardwell static const u32 bbr_lt_bw_diff = 4000 / 8;
1820f8782eaSNeal Cardwell /* If we estimate we're policed, use lt_bw for this many round trips: */
1830f8782eaSNeal Cardwell static const u32 bbr_lt_bw_max_rtts = 48;
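/* Units note: bbr_lt_loss_thresh is a BBR_SCALE fraction (50/256 ~= 20%), and
 * bbr_lt_bw_diff is in bytes/sec (4000 bit/sec / 8 = 500 bytes/sec), matching
 * the bytes/sec value returned by bbr_rate_bytes_per_sec().
 */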
1840f8782eaSNeal Cardwell 
1855490b32dSKevin Yang static void bbr_check_probe_rtt_done(struct sock *sk);
1865490b32dSKevin Yang 
1870f8782eaSNeal Cardwell /* Do we estimate that STARTUP filled the pipe? */
1880f8782eaSNeal Cardwell static bool bbr_full_bw_reached(const struct sock *sk)
1890f8782eaSNeal Cardwell {
1900f8782eaSNeal Cardwell 	const struct bbr *bbr = inet_csk_ca(sk);
1910f8782eaSNeal Cardwell 
192c589e69bSNeal Cardwell 	return bbr->full_bw_reached;
1930f8782eaSNeal Cardwell }
1940f8782eaSNeal Cardwell 
1950f8782eaSNeal Cardwell /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
1960f8782eaSNeal Cardwell static u32 bbr_max_bw(const struct sock *sk)
1970f8782eaSNeal Cardwell {
1980f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
1990f8782eaSNeal Cardwell 
2000f8782eaSNeal Cardwell 	return minmax_get(&bbr->bw);
2010f8782eaSNeal Cardwell }
2020f8782eaSNeal Cardwell 
2030f8782eaSNeal Cardwell /* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
2040f8782eaSNeal Cardwell static u32 bbr_bw(const struct sock *sk)
2050f8782eaSNeal Cardwell {
2060f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
2070f8782eaSNeal Cardwell 
2080f8782eaSNeal Cardwell 	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
2090f8782eaSNeal Cardwell }
2100f8782eaSNeal Cardwell 
2110f8782eaSNeal Cardwell /* Return rate in bytes per second, optionally with a gain.
2120f8782eaSNeal Cardwell  * The order here is chosen carefully to avoid overflow of u64. This should
2130f8782eaSNeal Cardwell  * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
2140f8782eaSNeal Cardwell  */
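/* A rough worked bound (assuming a 1500-byte MSS): at 2.9 Tbit/sec the input
 * rate is ~4.1e9; * mss ~= 6.1e12; * high gain (739) ~= 4.5e15;
 * >> BBR_SCALE ~= 1.8e13; * 990000 ~= 1.7e19, just under 2^64 ~= 1.8e19.
 */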
2150f8782eaSNeal Cardwell static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
2160f8782eaSNeal Cardwell {
217cadefe5fSEric Dumazet 	unsigned int mss = tcp_sk(sk)->mss_cache;
218cadefe5fSEric Dumazet 
219cadefe5fSEric Dumazet 	rate *= mss;
2200f8782eaSNeal Cardwell 	rate *= gain;
2210f8782eaSNeal Cardwell 	rate >>= BBR_SCALE;
22297ec3eb3SNeal Cardwell 	rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
2230f8782eaSNeal Cardwell 	return rate >> BW_SCALE;
2240f8782eaSNeal Cardwell }
2250f8782eaSNeal Cardwell 
226f19fd62dSNeal Cardwell /* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
22776a9ebe8SEric Dumazet static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
228f19fd62dSNeal Cardwell {
229f19fd62dSNeal Cardwell 	u64 rate = bw;
230f19fd62dSNeal Cardwell 
231f19fd62dSNeal Cardwell 	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
232f19fd62dSNeal Cardwell 	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
233f19fd62dSNeal Cardwell 	return rate;
234f19fd62dSNeal Cardwell }
235f19fd62dSNeal Cardwell 
23679135b89SNeal Cardwell /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
23779135b89SNeal Cardwell static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
23879135b89SNeal Cardwell {
23979135b89SNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
24032984565SNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
24179135b89SNeal Cardwell 	u64 bw;
24279135b89SNeal Cardwell 	u32 rtt_us;
24379135b89SNeal Cardwell 
24479135b89SNeal Cardwell 	if (tp->srtt_us) {		/* any RTT sample yet? */
24579135b89SNeal Cardwell 		rtt_us = max(tp->srtt_us >> 3, 1U);
24632984565SNeal Cardwell 		bbr->has_seen_rtt = 1;
24779135b89SNeal Cardwell 	} else {			 /* no RTT sample yet */
24879135b89SNeal Cardwell 		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
24979135b89SNeal Cardwell 	}
25079135b89SNeal Cardwell 	bw = (u64)tp->snd_cwnd * BW_UNIT;
25179135b89SNeal Cardwell 	do_div(bw, rtt_us);
25279135b89SNeal Cardwell 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
25379135b89SNeal Cardwell }
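
/* For example (assuming a 1500-byte mss): with a typical initial cwnd of 10
 * and no RTT sample yet (1 ms nominal RTT), this starts pacing at roughly
 * 2.89 * 10 pkt/ms * 1500 bytes ~= 43 MB/sec (~340 Mbit/sec).
 */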
25479135b89SNeal Cardwell 
2551106a5adSNeal Cardwell /* Pace using current bw estimate and a gain factor. */
2560f8782eaSNeal Cardwell static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
2570f8782eaSNeal Cardwell {
25832984565SNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
25932984565SNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
26076a9ebe8SEric Dumazet 	unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
2610f8782eaSNeal Cardwell 
26232984565SNeal Cardwell 	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
26332984565SNeal Cardwell 		bbr_init_pacing_rate_from_rtt(sk);
2644aea287eSNeal Cardwell 	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
2650f8782eaSNeal Cardwell 		sk->sk_pacing_rate = rate;
2660f8782eaSNeal Cardwell }
2670f8782eaSNeal Cardwell 
268dcb8c9b4SEric Dumazet /* override sysctl_tcp_min_tso_segs */
269dcb8c9b4SEric Dumazet static u32 bbr_min_tso_segs(struct sock *sk)
2700f8782eaSNeal Cardwell {
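	/* sk_pacing_rate is in bytes/sec, so ">> 3" converts the bits/sec
	 * threshold: 1200000 bit/sec -> 150000 bytes/sec.
	 */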
271dcb8c9b4SEric Dumazet 	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
2720f8782eaSNeal Cardwell }
2730f8782eaSNeal Cardwell 
27471abf467SEric Dumazet static u32 bbr_tso_segs_goal(struct sock *sk)
2750f8782eaSNeal Cardwell {
2760f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
277dcb8c9b4SEric Dumazet 	u32 segs, bytes;
2780f8782eaSNeal Cardwell 
279dcb8c9b4SEric Dumazet 	/* Similar to tcp_tso_autosize(), but ignoring the
280dcb8c9b4SEric Dumazet 	 * driver-provided sk_gso_max_size.
281dcb8c9b4SEric Dumazet 	 */
28276a9ebe8SEric Dumazet 	bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
283dcb8c9b4SEric Dumazet 		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
284dcb8c9b4SEric Dumazet 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
285dcb8c9b4SEric Dumazet 
28671abf467SEric Dumazet 	return min(segs, 0x7FU);
2870f8782eaSNeal Cardwell }
2880f8782eaSNeal Cardwell 
2890f8782eaSNeal Cardwell /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
2900f8782eaSNeal Cardwell static void bbr_save_cwnd(struct sock *sk)
2910f8782eaSNeal Cardwell {
2920f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
2930f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
2940f8782eaSNeal Cardwell 
2950f8782eaSNeal Cardwell 	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
2960f8782eaSNeal Cardwell 		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
2970f8782eaSNeal Cardwell 	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
2980f8782eaSNeal Cardwell 		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
2990f8782eaSNeal Cardwell }
3000f8782eaSNeal Cardwell 
3010f8782eaSNeal Cardwell static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
3020f8782eaSNeal Cardwell {
3030f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
3040f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
3050f8782eaSNeal Cardwell 
3060f8782eaSNeal Cardwell 	if (event == CA_EVENT_TX_START && tp->app_limited) {
3070f8782eaSNeal Cardwell 		bbr->idle_restart = 1;
3080f8782eaSNeal Cardwell 		/* Avoid pointless buffer overflows: pace at est. bw if we don't
3090f8782eaSNeal Cardwell 		 * need more speed (we're restarting from idle and app-limited).
3100f8782eaSNeal Cardwell 		 */
3110f8782eaSNeal Cardwell 		if (bbr->mode == BBR_PROBE_BW)
3120f8782eaSNeal Cardwell 			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
3135490b32dSKevin Yang 		else if (bbr->mode == BBR_PROBE_RTT)
3145490b32dSKevin Yang 			bbr_check_probe_rtt_done(sk);
3150f8782eaSNeal Cardwell 	}
3160f8782eaSNeal Cardwell }
3170f8782eaSNeal Cardwell 
318*232aa8ecSPriyaranjan Jha /* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
3190f8782eaSNeal Cardwell  *
320*232aa8ecSPriyaranjan Jha  * bdp = bw * min_rtt * gain
3210f8782eaSNeal Cardwell  *
3220f8782eaSNeal Cardwell  * The key factor, gain, controls the amount of queue. While a small gain
3230f8782eaSNeal Cardwell  * builds a smaller queue, it becomes more vulnerable to noise in RTT
3240f8782eaSNeal Cardwell  * measurements (e.g., delayed ACKs or other ACK compression effects). This
3250f8782eaSNeal Cardwell  * noise may cause BBR to under-estimate the rate.
3260f8782eaSNeal Cardwell  */
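/* For example (illustrative numbers, 1500-byte packets): bw = 100 Mbit/sec is
 * ~8333 pkt/sec, so with min_rtt = 40 ms the bdp is ~333 packets; applying the
 * steady-state cwnd_gain of 2 gives a target of ~667 packets.
 */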
327*232aa8ecSPriyaranjan Jha static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
3280f8782eaSNeal Cardwell {
3290f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
330*232aa8ecSPriyaranjan Jha 	u32 bdp;
3310f8782eaSNeal Cardwell 	u64 w;
3320f8782eaSNeal Cardwell 
3330f8782eaSNeal Cardwell 	/* If we've never had a valid RTT sample, cap cwnd at the initial
3340f8782eaSNeal Cardwell 	 * default. This should only happen when the connection is not using TCP
3350f8782eaSNeal Cardwell 	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
3360f8782eaSNeal Cardwell 	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
3370f8782eaSNeal Cardwell 	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
3380f8782eaSNeal Cardwell 	 */
3390f8782eaSNeal Cardwell 	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
3400f8782eaSNeal Cardwell 		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/
3410f8782eaSNeal Cardwell 
3420f8782eaSNeal Cardwell 	w = (u64)bw * bbr->min_rtt_us;
3430f8782eaSNeal Cardwell 
3440f8782eaSNeal Cardwell 	/* Apply a gain to the given value, then remove the BW_SCALE shift. */
345*232aa8ecSPriyaranjan Jha 	bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
346*232aa8ecSPriyaranjan Jha 
347*232aa8ecSPriyaranjan Jha 	return bdp;
348*232aa8ecSPriyaranjan Jha }
349*232aa8ecSPriyaranjan Jha 
350*232aa8ecSPriyaranjan Jha /* To achieve full performance in high-speed paths, we budget enough cwnd to
351*232aa8ecSPriyaranjan Jha  * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
352*232aa8ecSPriyaranjan Jha  *   - one skb in sending host Qdisc,
353*232aa8ecSPriyaranjan Jha  *   - one skb in sending host TSO/GSO engine
354*232aa8ecSPriyaranjan Jha  *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
355*232aa8ecSPriyaranjan Jha  * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
356*232aa8ecSPriyaranjan Jha  * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
357*232aa8ecSPriyaranjan Jha  * which allows 2 outstanding 2-packet sequences, to try to keep pipe
358*232aa8ecSPriyaranjan Jha  * full even with ACK-every-other-packet delayed ACKs.
359*232aa8ecSPriyaranjan Jha  */
360*232aa8ecSPriyaranjan Jha static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
361*232aa8ecSPriyaranjan Jha {
362*232aa8ecSPriyaranjan Jha 	struct bbr *bbr = inet_csk_ca(sk);
3630f8782eaSNeal Cardwell 
3640f8782eaSNeal Cardwell 	/* Allow enough full-sized skbs in flight to utilize end systems. */
36571abf467SEric Dumazet 	cwnd += 3 * bbr_tso_segs_goal(sk);
3660f8782eaSNeal Cardwell 
3670f8782eaSNeal Cardwell 	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
3680f8782eaSNeal Cardwell 	cwnd = (cwnd + 1) & ~1U;
3690f8782eaSNeal Cardwell 
370383d4709SNeal Cardwell 	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
371383d4709SNeal Cardwell 	if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
372383d4709SNeal Cardwell 		cwnd += 2;
373383d4709SNeal Cardwell 
3740f8782eaSNeal Cardwell 	return cwnd;
3750f8782eaSNeal Cardwell }
3760f8782eaSNeal Cardwell 
377*232aa8ecSPriyaranjan Jha /* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
378*232aa8ecSPriyaranjan Jha static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
379*232aa8ecSPriyaranjan Jha {
380*232aa8ecSPriyaranjan Jha 	u32 inflight;
381*232aa8ecSPriyaranjan Jha 
382*232aa8ecSPriyaranjan Jha 	inflight = bbr_bdp(sk, bw, gain);
383*232aa8ecSPriyaranjan Jha 	inflight = bbr_quantization_budget(sk, inflight, gain);
384*232aa8ecSPriyaranjan Jha 
385*232aa8ecSPriyaranjan Jha 	return inflight;
386*232aa8ecSPriyaranjan Jha }
387*232aa8ecSPriyaranjan Jha 
388a87c83d5SNeal Cardwell /* With pacing at lower layers, there's often less data "in the network" than
389a87c83d5SNeal Cardwell  * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
390a87c83d5SNeal Cardwell  * we often have several skbs queued in the pacing layer with a pre-scheduled
391a87c83d5SNeal Cardwell  * earliest departure time (EDT). BBR adapts its pacing rate based on the
392a87c83d5SNeal Cardwell  * inflight level that it estimates has already been "baked in" by previous
393a87c83d5SNeal Cardwell  * departure time decisions. We calculate a rough estimate of the number of our
394a87c83d5SNeal Cardwell  * packets that might be in the network at the earliest departure time for the
395a87c83d5SNeal Cardwell  * next skb scheduled:
396a87c83d5SNeal Cardwell  *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
397a87c83d5SNeal Cardwell  * If we're increasing inflight, then we want to know if the transmit of the
398a87c83d5SNeal Cardwell  * EDT skb will push inflight above the target, so inflight_at_edt includes
399a87c83d5SNeal Cardwell  * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
400a87c83d5SNeal Cardwell  * then estimate if inflight will sink too low just before the EDT transmit.
401a87c83d5SNeal Cardwell  */
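/* For example (illustrative numbers): if the next skb's EDT is 2 ms in the
 * future and the estimated bw is 1 Gbit/sec (~0.083 pkt/usec with 1500-byte
 * packets), ~167 packets are presumed to have left the network by that
 * departure time and are subtracted from inflight_at_edt.
 */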
402a87c83d5SNeal Cardwell static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
403a87c83d5SNeal Cardwell {
404a87c83d5SNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
405a87c83d5SNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
406a87c83d5SNeal Cardwell 	u64 now_ns, edt_ns, interval_us;
407a87c83d5SNeal Cardwell 	u32 interval_delivered, inflight_at_edt;
408a87c83d5SNeal Cardwell 
409a87c83d5SNeal Cardwell 	now_ns = tp->tcp_clock_cache;
410a87c83d5SNeal Cardwell 	edt_ns = max(tp->tcp_wstamp_ns, now_ns);
411a87c83d5SNeal Cardwell 	interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
412a87c83d5SNeal Cardwell 	interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
413a87c83d5SNeal Cardwell 	inflight_at_edt = inflight_now;
414a87c83d5SNeal Cardwell 	if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
415a87c83d5SNeal Cardwell 		inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
416a87c83d5SNeal Cardwell 	if (interval_delivered >= inflight_at_edt)
417a87c83d5SNeal Cardwell 		return 0;
418a87c83d5SNeal Cardwell 	return inflight_at_edt - interval_delivered;
419a87c83d5SNeal Cardwell }
420a87c83d5SNeal Cardwell 
4210f8782eaSNeal Cardwell /* An optimization in BBR to reduce losses: On the first round of recovery, we
4220f8782eaSNeal Cardwell  * follow the packet conservation principle: send P packets per P packets acked.
4230f8782eaSNeal Cardwell  * After that, we slow-start and send at most 2*P packets per P packets acked.
4240f8782eaSNeal Cardwell  * After recovery finishes, or upon undo, we restore the cwnd we had when
4250f8782eaSNeal Cardwell  * recovery started (capped by the target cwnd based on estimated BDP).
4260f8782eaSNeal Cardwell  *
4270f8782eaSNeal Cardwell  * TODO(ycheng/ncardwell): implement a rate-based approach.
4280f8782eaSNeal Cardwell  */
4290f8782eaSNeal Cardwell static bool bbr_set_cwnd_to_recover_or_restore(
4300f8782eaSNeal Cardwell 	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
4310f8782eaSNeal Cardwell {
4320f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
4330f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
4340f8782eaSNeal Cardwell 	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
4350f8782eaSNeal Cardwell 	u32 cwnd = tp->snd_cwnd;
4360f8782eaSNeal Cardwell 
4370f8782eaSNeal Cardwell 	/* An ACK for P pkts should release at most 2*P packets. We do this
4380f8782eaSNeal Cardwell 	 * in two steps. First, here we deduct the number of lost packets.
4390f8782eaSNeal Cardwell 	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
4400f8782eaSNeal Cardwell 	 */
4410f8782eaSNeal Cardwell 	if (rs->losses > 0)
4420f8782eaSNeal Cardwell 		cwnd = max_t(s32, cwnd - rs->losses, 1);
4430f8782eaSNeal Cardwell 
4440f8782eaSNeal Cardwell 	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
4450f8782eaSNeal Cardwell 		/* Starting 1st round of Recovery, so do packet conservation. */
4460f8782eaSNeal Cardwell 		bbr->packet_conservation = 1;
4470f8782eaSNeal Cardwell 		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
4480f8782eaSNeal Cardwell 		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
4490f8782eaSNeal Cardwell 		cwnd = tcp_packets_in_flight(tp) + acked;
4500f8782eaSNeal Cardwell 	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
4510f8782eaSNeal Cardwell 		/* Exiting loss recovery; restore cwnd saved before recovery. */
452fb998862SKevin Yang 		cwnd = max(cwnd, bbr->prior_cwnd);
4530f8782eaSNeal Cardwell 		bbr->packet_conservation = 0;
4540f8782eaSNeal Cardwell 	}
4550f8782eaSNeal Cardwell 	bbr->prev_ca_state = state;
4560f8782eaSNeal Cardwell 
4570f8782eaSNeal Cardwell 	if (bbr->packet_conservation) {
4580f8782eaSNeal Cardwell 		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
4590f8782eaSNeal Cardwell 		return true;	/* yes, using packet conservation */
4600f8782eaSNeal Cardwell 	}
4610f8782eaSNeal Cardwell 	*new_cwnd = cwnd;
4620f8782eaSNeal Cardwell 	return false;
4630f8782eaSNeal Cardwell }
4640f8782eaSNeal Cardwell 
4650f8782eaSNeal Cardwell /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
4660f8782eaSNeal Cardwell  * has drawn us down below target), or snap down to target if we're above it.
4670f8782eaSNeal Cardwell  */
4680f8782eaSNeal Cardwell static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
4690f8782eaSNeal Cardwell 			 u32 acked, u32 bw, int gain)
4700f8782eaSNeal Cardwell {
4710f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
4720f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
4738e995bf1SKevin Yang 	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
4740f8782eaSNeal Cardwell 
4750f8782eaSNeal Cardwell 	if (!acked)
4768e995bf1SKevin Yang 		goto done;  /* no packet fully ACKed; just apply caps */
4770f8782eaSNeal Cardwell 
4780f8782eaSNeal Cardwell 	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
4790f8782eaSNeal Cardwell 		goto done;
4800f8782eaSNeal Cardwell 
4810f8782eaSNeal Cardwell 	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
482*232aa8ecSPriyaranjan Jha 	target_cwnd = bbr_bdp(sk, bw, gain);
483*232aa8ecSPriyaranjan Jha 	target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
4840f8782eaSNeal Cardwell 	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
4850f8782eaSNeal Cardwell 		cwnd = min(cwnd + acked, target_cwnd);
4860f8782eaSNeal Cardwell 	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
4870f8782eaSNeal Cardwell 		cwnd = cwnd + acked;
4880f8782eaSNeal Cardwell 	cwnd = max(cwnd, bbr_cwnd_min_target);
4890f8782eaSNeal Cardwell 
4900f8782eaSNeal Cardwell done:
4910f8782eaSNeal Cardwell 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
4920f8782eaSNeal Cardwell 	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
4930f8782eaSNeal Cardwell 		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
4940f8782eaSNeal Cardwell }
4950f8782eaSNeal Cardwell 
4960f8782eaSNeal Cardwell /* End cycle phase if it's time and/or we hit the phase's in-flight target. */
4970f8782eaSNeal Cardwell static bool bbr_is_next_cycle_phase(struct sock *sk,
4980f8782eaSNeal Cardwell 				    const struct rate_sample *rs)
4990f8782eaSNeal Cardwell {
5000f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
5010f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5020f8782eaSNeal Cardwell 	bool is_full_length =
5039a568de4SEric Dumazet 		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
5040f8782eaSNeal Cardwell 		bbr->min_rtt_us;
5050f8782eaSNeal Cardwell 	u32 inflight, bw;
5060f8782eaSNeal Cardwell 
5070f8782eaSNeal Cardwell 	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
5080f8782eaSNeal Cardwell 	 * use the pipe without increasing the queue.
5090f8782eaSNeal Cardwell 	 */
5100f8782eaSNeal Cardwell 	if (bbr->pacing_gain == BBR_UNIT)
5110f8782eaSNeal Cardwell 		return is_full_length;		/* just use wall clock time */
5120f8782eaSNeal Cardwell 
513a87c83d5SNeal Cardwell 	inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
5140f8782eaSNeal Cardwell 	bw = bbr_max_bw(sk);
5150f8782eaSNeal Cardwell 
5160f8782eaSNeal Cardwell 	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
5170f8782eaSNeal Cardwell 	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
5180f8782eaSNeal Cardwell 	 * small (e.g. on a LAN). We do not persist if packets are lost, since
5190f8782eaSNeal Cardwell 	 * a path with small buffers may not hold that much.
5200f8782eaSNeal Cardwell 	 */
5210f8782eaSNeal Cardwell 	if (bbr->pacing_gain > BBR_UNIT)
5220f8782eaSNeal Cardwell 		return is_full_length &&
5230f8782eaSNeal Cardwell 			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
524*232aa8ecSPriyaranjan Jha 			 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
5250f8782eaSNeal Cardwell 
5260f8782eaSNeal Cardwell 	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
5270f8782eaSNeal Cardwell 	 * probing didn't find more bw. If inflight falls to match BDP then we
5280f8782eaSNeal Cardwell 	 * estimate queue is drained; persisting would underutilize the pipe.
5290f8782eaSNeal Cardwell 	 */
5300f8782eaSNeal Cardwell 	return is_full_length ||
531*232aa8ecSPriyaranjan Jha 		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
5320f8782eaSNeal Cardwell }
5330f8782eaSNeal Cardwell 
5340f8782eaSNeal Cardwell static void bbr_advance_cycle_phase(struct sock *sk)
5350f8782eaSNeal Cardwell {
5360f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
5370f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5380f8782eaSNeal Cardwell 
5390f8782eaSNeal Cardwell 	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
5400f8782eaSNeal Cardwell 	bbr->cycle_mstamp = tp->delivered_mstamp;
5410f8782eaSNeal Cardwell }
5420f8782eaSNeal Cardwell 
5430f8782eaSNeal Cardwell /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
5440f8782eaSNeal Cardwell static void bbr_update_cycle_phase(struct sock *sk,
5450f8782eaSNeal Cardwell 				   const struct rate_sample *rs)
5460f8782eaSNeal Cardwell {
5470f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5480f8782eaSNeal Cardwell 
5493aff3b4bSNeal Cardwell 	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
5500f8782eaSNeal Cardwell 		bbr_advance_cycle_phase(sk);
5510f8782eaSNeal Cardwell }
5520f8782eaSNeal Cardwell 
5530f8782eaSNeal Cardwell static void bbr_reset_startup_mode(struct sock *sk)
5540f8782eaSNeal Cardwell {
5550f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5560f8782eaSNeal Cardwell 
5570f8782eaSNeal Cardwell 	bbr->mode = BBR_STARTUP;
5580f8782eaSNeal Cardwell }
5590f8782eaSNeal Cardwell 
5600f8782eaSNeal Cardwell static void bbr_reset_probe_bw_mode(struct sock *sk)
5610f8782eaSNeal Cardwell {
5620f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5630f8782eaSNeal Cardwell 
5640f8782eaSNeal Cardwell 	bbr->mode = BBR_PROBE_BW;
5650f8782eaSNeal Cardwell 	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
5660f8782eaSNeal Cardwell 	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
5670f8782eaSNeal Cardwell }
5680f8782eaSNeal Cardwell 
5690f8782eaSNeal Cardwell static void bbr_reset_mode(struct sock *sk)
5700f8782eaSNeal Cardwell {
5710f8782eaSNeal Cardwell 	if (!bbr_full_bw_reached(sk))
5720f8782eaSNeal Cardwell 		bbr_reset_startup_mode(sk);
5730f8782eaSNeal Cardwell 	else
5740f8782eaSNeal Cardwell 		bbr_reset_probe_bw_mode(sk);
5750f8782eaSNeal Cardwell }
5760f8782eaSNeal Cardwell 
5770f8782eaSNeal Cardwell /* Start a new long-term sampling interval. */
5780f8782eaSNeal Cardwell static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
5790f8782eaSNeal Cardwell {
5800f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
5810f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5820f8782eaSNeal Cardwell 
5839a568de4SEric Dumazet 	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
5840f8782eaSNeal Cardwell 	bbr->lt_last_delivered = tp->delivered;
5850f8782eaSNeal Cardwell 	bbr->lt_last_lost = tp->lost;
5860f8782eaSNeal Cardwell 	bbr->lt_rtt_cnt = 0;
5870f8782eaSNeal Cardwell }
5880f8782eaSNeal Cardwell 
5890f8782eaSNeal Cardwell /* Completely reset long-term bandwidth sampling. */
5900f8782eaSNeal Cardwell static void bbr_reset_lt_bw_sampling(struct sock *sk)
5910f8782eaSNeal Cardwell {
5920f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
5930f8782eaSNeal Cardwell 
5940f8782eaSNeal Cardwell 	bbr->lt_bw = 0;
5950f8782eaSNeal Cardwell 	bbr->lt_use_bw = 0;
5960f8782eaSNeal Cardwell 	bbr->lt_is_sampling = false;
5970f8782eaSNeal Cardwell 	bbr_reset_lt_bw_sampling_interval(sk);
5980f8782eaSNeal Cardwell }
5990f8782eaSNeal Cardwell 
6000f8782eaSNeal Cardwell /* Long-term bw sampling interval is done. Estimate whether we're policed. */
6010f8782eaSNeal Cardwell static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
6020f8782eaSNeal Cardwell {
6030f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
6040f8782eaSNeal Cardwell 	u32 diff;
6050f8782eaSNeal Cardwell 
6060f8782eaSNeal Cardwell 	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
6070f8782eaSNeal Cardwell 		/* Is new bw close to the lt_bw from the previous interval? */
6080f8782eaSNeal Cardwell 		diff = abs(bw - bbr->lt_bw);
6090f8782eaSNeal Cardwell 		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
6100f8782eaSNeal Cardwell 		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
6110f8782eaSNeal Cardwell 		     bbr_lt_bw_diff)) {
6120f8782eaSNeal Cardwell 			/* All criteria are met; estimate we're policed. */
6130f8782eaSNeal Cardwell 			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
6140f8782eaSNeal Cardwell 			bbr->lt_use_bw = 1;
6150f8782eaSNeal Cardwell 			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
6160f8782eaSNeal Cardwell 			bbr->lt_rtt_cnt = 0;
6170f8782eaSNeal Cardwell 			return;
6180f8782eaSNeal Cardwell 		}
6190f8782eaSNeal Cardwell 	}
6200f8782eaSNeal Cardwell 	bbr->lt_bw = bw;
6210f8782eaSNeal Cardwell 	bbr_reset_lt_bw_sampling_interval(sk);
6220f8782eaSNeal Cardwell }
6230f8782eaSNeal Cardwell 
6240f8782eaSNeal Cardwell /* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
6250f8782eaSNeal Cardwell  * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
6260f8782eaSNeal Cardwell  * explicitly models their policed rate, to reduce unnecessary losses. We
6270f8782eaSNeal Cardwell  * estimate that we're policed if we see 2 consecutive sampling intervals with
6280f8782eaSNeal Cardwell  * consistent throughput and high packet loss. If we think we're being policed,
6290f8782eaSNeal Cardwell  * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
6300f8782eaSNeal Cardwell  */
6310f8782eaSNeal Cardwell static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
6320f8782eaSNeal Cardwell {
6330f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
6340f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
6350f8782eaSNeal Cardwell 	u32 lost, delivered;
6360f8782eaSNeal Cardwell 	u64 bw;
6379a568de4SEric Dumazet 	u32 t;
6380f8782eaSNeal Cardwell 
6390f8782eaSNeal Cardwell 	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
6400f8782eaSNeal Cardwell 		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
6410f8782eaSNeal Cardwell 		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
6420f8782eaSNeal Cardwell 			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
6430f8782eaSNeal Cardwell 			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
6440f8782eaSNeal Cardwell 		}
6450f8782eaSNeal Cardwell 		return;
6460f8782eaSNeal Cardwell 	}
6470f8782eaSNeal Cardwell 
6480f8782eaSNeal Cardwell 	/* Wait for the first loss before sampling, to let the policer exhaust
6490f8782eaSNeal Cardwell 	 * its tokens and estimate the steady-state rate allowed by the policer.
6500f8782eaSNeal Cardwell 	 * Starting samples earlier includes bursts that over-estimate the bw.
6510f8782eaSNeal Cardwell 	 */
6520f8782eaSNeal Cardwell 	if (!bbr->lt_is_sampling) {
6530f8782eaSNeal Cardwell 		if (!rs->losses)
6540f8782eaSNeal Cardwell 			return;
6550f8782eaSNeal Cardwell 		bbr_reset_lt_bw_sampling_interval(sk);
6560f8782eaSNeal Cardwell 		bbr->lt_is_sampling = true;
6570f8782eaSNeal Cardwell 	}
6580f8782eaSNeal Cardwell 
6590f8782eaSNeal Cardwell 	/* To avoid underestimates, reset sampling if we run out of data. */
6600f8782eaSNeal Cardwell 	if (rs->is_app_limited) {
6610f8782eaSNeal Cardwell 		bbr_reset_lt_bw_sampling(sk);
6620f8782eaSNeal Cardwell 		return;
6630f8782eaSNeal Cardwell 	}
6640f8782eaSNeal Cardwell 
6650f8782eaSNeal Cardwell 	if (bbr->round_start)
6660f8782eaSNeal Cardwell 		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
6670f8782eaSNeal Cardwell 	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
6680f8782eaSNeal Cardwell 		return;		/* sampling interval needs to be longer */
6690f8782eaSNeal Cardwell 	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
6700f8782eaSNeal Cardwell 		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
6710f8782eaSNeal Cardwell 		return;
6720f8782eaSNeal Cardwell 	}
6730f8782eaSNeal Cardwell 
6740f8782eaSNeal Cardwell 	/* End sampling interval when a packet is lost, so we estimate the
6750f8782eaSNeal Cardwell 	 * policer tokens were exhausted. Stopping the sampling before the
6760f8782eaSNeal Cardwell 	 * tokens are exhausted under-estimates the policed rate.
6770f8782eaSNeal Cardwell 	 */
6780f8782eaSNeal Cardwell 	if (!rs->losses)
6790f8782eaSNeal Cardwell 		return;
6800f8782eaSNeal Cardwell 
6810f8782eaSNeal Cardwell 	/* Calculate packets lost and delivered in sampling interval. */
6820f8782eaSNeal Cardwell 	lost = tp->lost - bbr->lt_last_lost;
6830f8782eaSNeal Cardwell 	delivered = tp->delivered - bbr->lt_last_delivered;
6840f8782eaSNeal Cardwell 	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
6850f8782eaSNeal Cardwell 	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
6860f8782eaSNeal Cardwell 		return;
6870f8782eaSNeal Cardwell 
6880f8782eaSNeal Cardwell 	/* Find average delivery rate in this sampling interval. */
6899a568de4SEric Dumazet 	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
6909a568de4SEric Dumazet 	if ((s32)t < 1)
6919a568de4SEric Dumazet 		return;		/* interval is less than one ms, so wait */
6929a568de4SEric Dumazet 	/* Check if we can multiply without overflowing */
6939a568de4SEric Dumazet 	if (t >= ~0U / USEC_PER_MSEC) {
6940f8782eaSNeal Cardwell 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
6950f8782eaSNeal Cardwell 		return;
6960f8782eaSNeal Cardwell 	}
6979a568de4SEric Dumazet 	t *= USEC_PER_MSEC;
6980f8782eaSNeal Cardwell 	bw = (u64)delivered * BW_UNIT;
6990f8782eaSNeal Cardwell 	do_div(bw, t);
7000f8782eaSNeal Cardwell 	bbr_lt_bw_interval_done(sk, bw);
7010f8782eaSNeal Cardwell }
7020f8782eaSNeal Cardwell 
7030f8782eaSNeal Cardwell /* Estimate the bandwidth based on how fast packets are delivered */
7040f8782eaSNeal Cardwell static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
7050f8782eaSNeal Cardwell {
7060f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
7070f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
7080f8782eaSNeal Cardwell 	u64 bw;
7090f8782eaSNeal Cardwell 
7100f8782eaSNeal Cardwell 	bbr->round_start = 0;
7110f8782eaSNeal Cardwell 	if (rs->delivered < 0 || rs->interval_us <= 0)
7120f8782eaSNeal Cardwell 		return; /* Not a valid observation */
7130f8782eaSNeal Cardwell 
7140f8782eaSNeal Cardwell 	/* See if we've reached the next RTT */
7150f8782eaSNeal Cardwell 	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
7160f8782eaSNeal Cardwell 		bbr->next_rtt_delivered = tp->delivered;
7170f8782eaSNeal Cardwell 		bbr->rtt_cnt++;
7180f8782eaSNeal Cardwell 		bbr->round_start = 1;
7190f8782eaSNeal Cardwell 		bbr->packet_conservation = 0;
7200f8782eaSNeal Cardwell 	}
7210f8782eaSNeal Cardwell 
7220f8782eaSNeal Cardwell 	bbr_lt_bw_sampling(sk, rs);
7230f8782eaSNeal Cardwell 
7240f8782eaSNeal Cardwell 	/* Divide delivered by the interval to find a (lower bound) bottleneck
7250f8782eaSNeal Cardwell 	 * bandwidth sample. Delivered is in packets and interval_us in uS and
7260f8782eaSNeal Cardwell 	 * ratio will be <<1 for most connections. So delivered is first scaled.
7270f8782eaSNeal Cardwell 	 */
7280f8782eaSNeal Cardwell 	bw = (u64)rs->delivered * BW_UNIT;
7290f8782eaSNeal Cardwell 	do_div(bw, rs->interval_us);
7300f8782eaSNeal Cardwell 
7310f8782eaSNeal Cardwell 	/* If this sample is application-limited, it is likely to have a very
7320f8782eaSNeal Cardwell 	 * low delivered count that represents application behavior rather than
7330f8782eaSNeal Cardwell 	 * the available network rate. Such a sample could drag down estimated
7340f8782eaSNeal Cardwell 	 * bw, causing needless slow-down. Thus, to continue to send at the
7350f8782eaSNeal Cardwell 	 * last measured network rate, we filter out app-limited samples unless
7360f8782eaSNeal Cardwell 	 * they describe the path bw at least as well as our bw model.
7370f8782eaSNeal Cardwell 	 *
7380f8782eaSNeal Cardwell 	 * So the goal during the app-limited phase is to proceed with the best
7390f8782eaSNeal Cardwell 	 * network rate no matter how long. We automatically leave this
7400f8782eaSNeal Cardwell 	 * phase when the app writes faster than the network can deliver :)
7410f8782eaSNeal Cardwell 	 */
7420f8782eaSNeal Cardwell 	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
7430f8782eaSNeal Cardwell 		/* Incorporate new sample into our max bw filter. */
7440f8782eaSNeal Cardwell 		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
7450f8782eaSNeal Cardwell 	}
7460f8782eaSNeal Cardwell }
7470f8782eaSNeal Cardwell 
7480f8782eaSNeal Cardwell /* Estimate when the pipe is full, using the change in delivery rate: BBR
7490f8782eaSNeal Cardwell  * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
7500f8782eaSNeal Cardwell  * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
7510f8782eaSNeal Cardwell  * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
7520f8782eaSNeal Cardwell  * higher rwin, 3: we get higher delivery rate samples. Or transient
7530f8782eaSNeal Cardwell  * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
7540f8782eaSNeal Cardwell  * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
7550f8782eaSNeal Cardwell  */
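/* For example: if full_bw is 800, bw_thresh is 1000 (800 * 5/4); each
 * non-app-limited round in which the max bw filter stays below 1000 bumps
 * full_bw_cnt, and three such rounds in a row mark the pipe as full.
 */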
7560f8782eaSNeal Cardwell static void bbr_check_full_bw_reached(struct sock *sk,
7570f8782eaSNeal Cardwell 				      const struct rate_sample *rs)
7580f8782eaSNeal Cardwell {
7590f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
7600f8782eaSNeal Cardwell 	u32 bw_thresh;
7610f8782eaSNeal Cardwell 
7620f8782eaSNeal Cardwell 	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
7630f8782eaSNeal Cardwell 		return;
7640f8782eaSNeal Cardwell 
7650f8782eaSNeal Cardwell 	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
7660f8782eaSNeal Cardwell 	if (bbr_max_bw(sk) >= bw_thresh) {
7670f8782eaSNeal Cardwell 		bbr->full_bw = bbr_max_bw(sk);
7680f8782eaSNeal Cardwell 		bbr->full_bw_cnt = 0;
7690f8782eaSNeal Cardwell 		return;
7700f8782eaSNeal Cardwell 	}
7710f8782eaSNeal Cardwell 	++bbr->full_bw_cnt;
772c589e69bSNeal Cardwell 	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
7730f8782eaSNeal Cardwell }
7740f8782eaSNeal Cardwell 
7750f8782eaSNeal Cardwell /* If pipe is probably full, drain the queue and then enter steady-state. */
7760f8782eaSNeal Cardwell static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
7770f8782eaSNeal Cardwell {
7780f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
7790f8782eaSNeal Cardwell 
7800f8782eaSNeal Cardwell 	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
7810f8782eaSNeal Cardwell 		bbr->mode = BBR_DRAIN;	/* drain queue we created */
78253794570SYousuk Seung 		tcp_sk(sk)->snd_ssthresh =
783*232aa8ecSPriyaranjan Jha 				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
7840f8782eaSNeal Cardwell 	}	/* fall through to check if in-flight is already small: */
7850f8782eaSNeal Cardwell 	if (bbr->mode == BBR_DRAIN &&
786a87c83d5SNeal Cardwell 	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
787*232aa8ecSPriyaranjan Jha 	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
7880f8782eaSNeal Cardwell 		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
7890f8782eaSNeal Cardwell }
7900f8782eaSNeal Cardwell 
791fb998862SKevin Yang static void bbr_check_probe_rtt_done(struct sock *sk)
792fb998862SKevin Yang {
793fb998862SKevin Yang 	struct tcp_sock *tp = tcp_sk(sk);
794fb998862SKevin Yang 	struct bbr *bbr = inet_csk_ca(sk);
795fb998862SKevin Yang 
796fb998862SKevin Yang 	if (!(bbr->probe_rtt_done_stamp &&
797fb998862SKevin Yang 	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
798fb998862SKevin Yang 		return;
799fb998862SKevin Yang 
800fb998862SKevin Yang 	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
801fb998862SKevin Yang 	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
802fb998862SKevin Yang 	bbr_reset_mode(sk);
803fb998862SKevin Yang }
804fb998862SKevin Yang 
8050f8782eaSNeal Cardwell /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
8060f8782eaSNeal Cardwell  * periodically drain the bottleneck queue, to converge to measure the true
8070f8782eaSNeal Cardwell  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
8080f8782eaSNeal Cardwell  * small (reducing queuing delay and packet loss) and achieve fairness among
8090f8782eaSNeal Cardwell  * BBR flows.
8100f8782eaSNeal Cardwell  *
8110f8782eaSNeal Cardwell  * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
8120f8782eaSNeal Cardwell  * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
8130f8782eaSNeal Cardwell  * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
8140f8782eaSNeal Cardwell  * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
8150f8782eaSNeal Cardwell  * re-enter the previous mode. BBR uses 200ms to approximately bound the
8160f8782eaSNeal Cardwell  * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
8170f8782eaSNeal Cardwell  *
8180f8782eaSNeal Cardwell  * Note that flows need only pay 2% if they have been busy sending over the last 10
8190f8782eaSNeal Cardwell  * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
8200f8782eaSNeal Cardwell  * natural silences or low-rate periods within 10 seconds where the rate is low
8210f8782eaSNeal Cardwell  * enough for long enough to drain their queue at the bottleneck. We pick up
8220f8782eaSNeal Cardwell  * these min RTT measurements opportunistically with our min_rtt filter. :-)
8230f8782eaSNeal Cardwell  */
8240f8782eaSNeal Cardwell static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
8250f8782eaSNeal Cardwell {
8260f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
8270f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
8280f8782eaSNeal Cardwell 	bool filter_expired;
8290f8782eaSNeal Cardwell 
8300f8782eaSNeal Cardwell 	/* Track min RTT seen in the min_rtt_win_sec filter window: */
8312660bfa8SEric Dumazet 	filter_expired = after(tcp_jiffies32,
8320f8782eaSNeal Cardwell 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
8330f8782eaSNeal Cardwell 	if (rs->rtt_us >= 0 &&
834e4286603SYuchung Cheng 	    (rs->rtt_us <= bbr->min_rtt_us ||
835e4286603SYuchung Cheng 	     (filter_expired && !rs->is_ack_delayed))) {
8360f8782eaSNeal Cardwell 		bbr->min_rtt_us = rs->rtt_us;
8372660bfa8SEric Dumazet 		bbr->min_rtt_stamp = tcp_jiffies32;
8380f8782eaSNeal Cardwell 	}
8390f8782eaSNeal Cardwell 
8400f8782eaSNeal Cardwell 	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
8410f8782eaSNeal Cardwell 	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
8420f8782eaSNeal Cardwell 		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
8430f8782eaSNeal Cardwell 		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
8440f8782eaSNeal Cardwell 		bbr->probe_rtt_done_stamp = 0;
8450f8782eaSNeal Cardwell 	}
8460f8782eaSNeal Cardwell 
8470f8782eaSNeal Cardwell 	if (bbr->mode == BBR_PROBE_RTT) {
8480f8782eaSNeal Cardwell 		/* Ignore low rate samples during this mode. */
8490f8782eaSNeal Cardwell 		tp->app_limited =
8500f8782eaSNeal Cardwell 			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
8510f8782eaSNeal Cardwell 		/* Maintain min packets in flight for max(200 ms, 1 round). */
8520f8782eaSNeal Cardwell 		if (!bbr->probe_rtt_done_stamp &&
8530f8782eaSNeal Cardwell 		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
8542660bfa8SEric Dumazet 			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
8550f8782eaSNeal Cardwell 				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
8560f8782eaSNeal Cardwell 			bbr->probe_rtt_round_done = 0;
8570f8782eaSNeal Cardwell 			bbr->next_rtt_delivered = tp->delivered;
8580f8782eaSNeal Cardwell 		} else if (bbr->probe_rtt_done_stamp) {
8590f8782eaSNeal Cardwell 			if (bbr->round_start)
8600f8782eaSNeal Cardwell 				bbr->probe_rtt_round_done = 1;
861fb998862SKevin Yang 			if (bbr->probe_rtt_round_done)
862fb998862SKevin Yang 				bbr_check_probe_rtt_done(sk);
8630f8782eaSNeal Cardwell 		}
8640f8782eaSNeal Cardwell 	}
865e6e6a278SNeal Cardwell 	/* Restart after idle ends only once we process a new S/ACK for data */
866e6e6a278SNeal Cardwell 	if (rs->delivered > 0)
8670f8782eaSNeal Cardwell 		bbr->idle_restart = 0;
8680f8782eaSNeal Cardwell }
8690f8782eaSNeal Cardwell 
870cf33e25cSNeal Cardwell static void bbr_update_gains(struct sock *sk)
871cf33e25cSNeal Cardwell {
872cf33e25cSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
873cf33e25cSNeal Cardwell 
874cf33e25cSNeal Cardwell 	switch (bbr->mode) {
875cf33e25cSNeal Cardwell 	case BBR_STARTUP:
876cf33e25cSNeal Cardwell 		bbr->pacing_gain = bbr_high_gain;
877cf33e25cSNeal Cardwell 		bbr->cwnd_gain	 = bbr_high_gain;
878cf33e25cSNeal Cardwell 		break;
879cf33e25cSNeal Cardwell 	case BBR_DRAIN:
880cf33e25cSNeal Cardwell 		bbr->pacing_gain = bbr_drain_gain;	/* slow, to drain */
881cf33e25cSNeal Cardwell 		bbr->cwnd_gain	 = bbr_high_gain;	/* keep cwnd */
882cf33e25cSNeal Cardwell 		break;
883cf33e25cSNeal Cardwell 	case BBR_PROBE_BW:
884cf33e25cSNeal Cardwell 		bbr->pacing_gain = (bbr->lt_use_bw ?
885cf33e25cSNeal Cardwell 				    BBR_UNIT :
886cf33e25cSNeal Cardwell 				    bbr_pacing_gain[bbr->cycle_idx]);
887cf33e25cSNeal Cardwell 		bbr->cwnd_gain	 = bbr_cwnd_gain;
888cf33e25cSNeal Cardwell 		break;
889cf33e25cSNeal Cardwell 	case BBR_PROBE_RTT:
890cf33e25cSNeal Cardwell 		bbr->pacing_gain = BBR_UNIT;
891cf33e25cSNeal Cardwell 		bbr->cwnd_gain	 = BBR_UNIT;
892cf33e25cSNeal Cardwell 		break;
893cf33e25cSNeal Cardwell 	default:
894cf33e25cSNeal Cardwell 		WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
895cf33e25cSNeal Cardwell 		break;
896cf33e25cSNeal Cardwell 	}
897cf33e25cSNeal Cardwell }
898cf33e25cSNeal Cardwell 
8990f8782eaSNeal Cardwell static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
9000f8782eaSNeal Cardwell {
9010f8782eaSNeal Cardwell 	bbr_update_bw(sk, rs);
9020f8782eaSNeal Cardwell 	bbr_update_cycle_phase(sk, rs);
9030f8782eaSNeal Cardwell 	bbr_check_full_bw_reached(sk, rs);
9040f8782eaSNeal Cardwell 	bbr_check_drain(sk, rs);
9050f8782eaSNeal Cardwell 	bbr_update_min_rtt(sk, rs);
906cf33e25cSNeal Cardwell 	bbr_update_gains(sk);
9070f8782eaSNeal Cardwell }
9080f8782eaSNeal Cardwell 
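/* Main cong_control entry point, invoked for each ACK with a rate sample:
 * update the model, then derive the pacing rate and cwnd from the current
 * bandwidth estimate and gains.
 */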
9090f8782eaSNeal Cardwell static void bbr_main(struct sock *sk, const struct rate_sample *rs)
9100f8782eaSNeal Cardwell {
9110f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
9120f8782eaSNeal Cardwell 	u32 bw;
9130f8782eaSNeal Cardwell 
9140f8782eaSNeal Cardwell 	bbr_update_model(sk, rs);
9150f8782eaSNeal Cardwell 
9160f8782eaSNeal Cardwell 	bw = bbr_bw(sk);
9170f8782eaSNeal Cardwell 	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
9180f8782eaSNeal Cardwell 	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
9190f8782eaSNeal Cardwell }
9200f8782eaSNeal Cardwell 
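/* Initialize per-connection BBR state (kept in the congestion control
 * private area of the socket) when a connection starts using BBR.
 */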
9210f8782eaSNeal Cardwell static void bbr_init(struct sock *sk)
9220f8782eaSNeal Cardwell {
9230f8782eaSNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
9240f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
9250f8782eaSNeal Cardwell 
9260f8782eaSNeal Cardwell 	bbr->prior_cwnd = 0;
92753794570SYousuk Seung 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
9280f8782eaSNeal Cardwell 	bbr->rtt_cnt = 0;
9290f8782eaSNeal Cardwell 	bbr->next_rtt_delivered = 0;
9300f8782eaSNeal Cardwell 	bbr->prev_ca_state = TCP_CA_Open;
9310f8782eaSNeal Cardwell 	bbr->packet_conservation = 0;
9320f8782eaSNeal Cardwell 
9330f8782eaSNeal Cardwell 	bbr->probe_rtt_done_stamp = 0;
9340f8782eaSNeal Cardwell 	bbr->probe_rtt_round_done = 0;
9350f8782eaSNeal Cardwell 	bbr->min_rtt_us = tcp_min_rtt(tp);
9362660bfa8SEric Dumazet 	bbr->min_rtt_stamp = tcp_jiffies32;
9370f8782eaSNeal Cardwell 
9380f8782eaSNeal Cardwell 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
9390f8782eaSNeal Cardwell 
94032984565SNeal Cardwell 	bbr->has_seen_rtt = 0;
94179135b89SNeal Cardwell 	bbr_init_pacing_rate_from_rtt(sk);
9420f8782eaSNeal Cardwell 
9430f8782eaSNeal Cardwell 	bbr->round_start = 0;
9440f8782eaSNeal Cardwell 	bbr->idle_restart = 0;
945c589e69bSNeal Cardwell 	bbr->full_bw_reached = 0;
9460f8782eaSNeal Cardwell 	bbr->full_bw = 0;
9470f8782eaSNeal Cardwell 	bbr->full_bw_cnt = 0;
9489a568de4SEric Dumazet 	bbr->cycle_mstamp = 0;
9490f8782eaSNeal Cardwell 	bbr->cycle_idx = 0;
9500f8782eaSNeal Cardwell 	bbr_reset_lt_bw_sampling(sk);
9510f8782eaSNeal Cardwell 	bbr_reset_startup_mode(sk);
952218af599SEric Dumazet 
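	/* BBR requires pacing: request it from the stack, but do not override
	 * SK_PACING_FQ if the fq qdisc has already taken ownership of pacing.
	 */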
953218af599SEric Dumazet 	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
9540f8782eaSNeal Cardwell }
9550f8782eaSNeal Cardwell 
9560f8782eaSNeal Cardwell static u32 bbr_sndbuf_expand(struct sock *sk)
9570f8782eaSNeal Cardwell {
9580f8782eaSNeal Cardwell 	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
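	/* The value returned here is used by the TCP core as a multiplier on
	 * the per-cwnd memory estimate when auto-sizing the send buffer.
	 */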
9590f8782eaSNeal Cardwell 	return 3;
9600f8782eaSNeal Cardwell }
9610f8782eaSNeal Cardwell 
9620f8782eaSNeal Cardwell /* In theory BBR does not need to undo the cwnd since it does not
9630f8782eaSNeal Cardwell  * always reduce cwnd on losses (see bbr_main()). Keep it for now.
9640f8782eaSNeal Cardwell  */
9650f8782eaSNeal Cardwell static u32 bbr_undo_cwnd(struct sock *sk)
9660f8782eaSNeal Cardwell {
9672f6c498eSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
9682f6c498eSNeal Cardwell 
9692f6c498eSNeal Cardwell 	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
9702f6c498eSNeal Cardwell 	bbr->full_bw_cnt = 0;
971600647d4SNeal Cardwell 	bbr_reset_lt_bw_sampling(sk);
9720f8782eaSNeal Cardwell 	return tcp_sk(sk)->snd_cwnd;
9730f8782eaSNeal Cardwell }
9740f8782eaSNeal Cardwell 
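/* BBR pins snd_ssthresh at TCP_INFINITE_SSTHRESH (see bbr_init()), so the
 * value returned below leaves ssthresh effectively unused.
 */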
9750f8782eaSNeal Cardwell /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
9760f8782eaSNeal Cardwell static u32 bbr_ssthresh(struct sock *sk)
9770f8782eaSNeal Cardwell {
9780f8782eaSNeal Cardwell 	bbr_save_cwnd(sk);
97953794570SYousuk Seung 	return tcp_sk(sk)->snd_ssthresh;
9800f8782eaSNeal Cardwell }
9810f8782eaSNeal Cardwell 
9820f8782eaSNeal Cardwell static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
9830f8782eaSNeal Cardwell 			   union tcp_cc_info *info)
9840f8782eaSNeal Cardwell {
9850f8782eaSNeal Cardwell 	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
9860f8782eaSNeal Cardwell 	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
9870f8782eaSNeal Cardwell 		struct tcp_sock *tp = tcp_sk(sk);
9880f8782eaSNeal Cardwell 		struct bbr *bbr = inet_csk_ca(sk);
9890f8782eaSNeal Cardwell 		u64 bw = bbr_bw(sk);
9900f8782eaSNeal Cardwell 
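		/* bbr_bw() is in packets per usec, scaled by 2^BW_SCALE;
		 * convert to bytes per second for the diag interface.
		 */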
9910f8782eaSNeal Cardwell 		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
9920f8782eaSNeal Cardwell 		memset(&info->bbr, 0, sizeof(info->bbr));
9930f8782eaSNeal Cardwell 		info->bbr.bbr_bw_lo		= (u32)bw;
9940f8782eaSNeal Cardwell 		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
9950f8782eaSNeal Cardwell 		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
9960f8782eaSNeal Cardwell 		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
9970f8782eaSNeal Cardwell 		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
9980f8782eaSNeal Cardwell 		*attr = INET_DIAG_BBRINFO;
9990f8782eaSNeal Cardwell 		return sizeof(info->bbr);
10000f8782eaSNeal Cardwell 	}
10010f8782eaSNeal Cardwell 	return 0;
10020f8782eaSNeal Cardwell }
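
/* The fields exported above are available to user space via inet_diag; for
 * example, `ss -ti` typically reports them in a bbr:(...) block for sockets
 * using BBR.
 */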
10030f8782eaSNeal Cardwell 
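/* CA state-change hook: on an RTO (TCP_CA_Loss), reset full-pipe detection,
 * treat the RTO as the end of a round, and feed a synthetic lossy sample to
 * the long-term (policer) bandwidth sampler.
 */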
10040f8782eaSNeal Cardwell static void bbr_set_state(struct sock *sk, u8 new_state)
10050f8782eaSNeal Cardwell {
10060f8782eaSNeal Cardwell 	struct bbr *bbr = inet_csk_ca(sk);
10070f8782eaSNeal Cardwell 
10080f8782eaSNeal Cardwell 	if (new_state == TCP_CA_Loss) {
10090f8782eaSNeal Cardwell 		struct rate_sample rs = { .losses = 1 };
10100f8782eaSNeal Cardwell 
10110f8782eaSNeal Cardwell 		bbr->prev_ca_state = TCP_CA_Loss;
10120f8782eaSNeal Cardwell 		bbr->full_bw = 0;
10130f8782eaSNeal Cardwell 		bbr->round_start = 1;	/* treat RTO like end of a round */
10140f8782eaSNeal Cardwell 		bbr_lt_bw_sampling(sk, &rs);
10150f8782eaSNeal Cardwell 	}
10160f8782eaSNeal Cardwell }
10170f8782eaSNeal Cardwell 
10180f8782eaSNeal Cardwell static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
10190f8782eaSNeal Cardwell 	.flags		= TCP_CONG_NON_RESTRICTED,
10200f8782eaSNeal Cardwell 	.name		= "bbr",
10210f8782eaSNeal Cardwell 	.owner		= THIS_MODULE,
10220f8782eaSNeal Cardwell 	.init		= bbr_init,
10230f8782eaSNeal Cardwell 	.cong_control	= bbr_main,
10240f8782eaSNeal Cardwell 	.sndbuf_expand	= bbr_sndbuf_expand,
10250f8782eaSNeal Cardwell 	.undo_cwnd	= bbr_undo_cwnd,
10260f8782eaSNeal Cardwell 	.cwnd_event	= bbr_cwnd_event,
10270f8782eaSNeal Cardwell 	.ssthresh	= bbr_ssthresh,
1028dcb8c9b4SEric Dumazet 	.min_tso_segs	= bbr_min_tso_segs,
10290f8782eaSNeal Cardwell 	.get_info	= bbr_get_info,
10300f8782eaSNeal Cardwell 	.set_state	= bbr_set_state,
10310f8782eaSNeal Cardwell };
10320f8782eaSNeal Cardwell 
10330f8782eaSNeal Cardwell static int __init bbr_register(void)
10340f8782eaSNeal Cardwell {
10350f8782eaSNeal Cardwell 	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
10360f8782eaSNeal Cardwell 	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
10370f8782eaSNeal Cardwell }
10380f8782eaSNeal Cardwell 
10390f8782eaSNeal Cardwell static void __exit bbr_unregister(void)
10400f8782eaSNeal Cardwell {
10410f8782eaSNeal Cardwell 	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
10420f8782eaSNeal Cardwell }
10430f8782eaSNeal Cardwell 
10440f8782eaSNeal Cardwell module_init(bbr_register);
10450f8782eaSNeal Cardwell module_exit(bbr_unregister);
10460f8782eaSNeal Cardwell 
10470f8782eaSNeal Cardwell MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
10480f8782eaSNeal Cardwell MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
10490f8782eaSNeal Cardwell MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
10500f8782eaSNeal Cardwell MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
10510f8782eaSNeal Cardwell MODULE_LICENSE("Dual BSD/GPL");
10520f8782eaSNeal Cardwell MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
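
/* Typical usage (outside this file): once BBR is available, it can be
 * selected system-wide, e.g.:
 *
 *     sysctl -w net.ipv4.tcp_congestion_control=bbr
 *
 * or per socket via setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bbr", 3).
 * Since bbr_init() requests SK_PACING_NEEDED, kernels with internal TCP
 * pacing do not strictly require the fq qdisc for pacing.
 */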