109c434b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2b87d8561SStephen Hemminger /*
3b87d8561SStephen Hemminger * TCP Vegas congestion control
4b87d8561SStephen Hemminger *
5b87d8561SStephen Hemminger * This is based on the congestion detection/avoidance scheme described in
6b87d8561SStephen Hemminger * Lawrence S. Brakmo and Larry L. Peterson.
7b87d8561SStephen Hemminger * "TCP Vegas: End to end congestion avoidance on a global internet."
8b87d8561SStephen Hemminger * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
9b87d8561SStephen Hemminger * October 1995. Available from:
10b87d8561SStephen Hemminger * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
11b87d8561SStephen Hemminger *
12b87d8561SStephen Hemminger * See http://www.cs.arizona.edu/xkernel/ for their implementation.
13b87d8561SStephen Hemminger * The main aspects that distinguish this implementation from the
14b87d8561SStephen Hemminger * Arizona Vegas implementation are:
15b87d8561SStephen Hemminger * o We do not change the loss detection or recovery mechanisms of
16b87d8561SStephen Hemminger * Linux in any way. Linux already recovers from losses quite well,
17b87d8561SStephen Hemminger * using fine-grained timers, NewReno, and FACK.
18b87d8561SStephen Hemminger * o To avoid the performance penalty imposed by increasing cwnd
19b87d8561SStephen Hemminger * only every-other RTT during slow start, we increase during
20b87d8561SStephen Hemminger * every RTT during slow start, just like Reno.
21b87d8561SStephen Hemminger * o Largely to allow continuous cwnd growth during slow start,
22b87d8561SStephen Hemminger * we use the rate at which ACKs come back as the "actual"
23b87d8561SStephen Hemminger * rate, rather than the rate at which data is sent.
24b87d8561SStephen Hemminger * o To speed convergence to the right rate, we set the cwnd
25b87d8561SStephen Hemminger * to achieve the right ("actual") rate when we exit slow start.
26b87d8561SStephen Hemminger * o To filter out the noise caused by delayed ACKs, we use the
27b87d8561SStephen Hemminger * minimum RTT sample observed during the last RTT to calculate
28b87d8561SStephen Hemminger * the actual rate.
29b87d8561SStephen Hemminger * o When the sender re-starts from idle, it waits until it has
30b87d8561SStephen Hemminger * received ACKs for an entire flight of new data before making
31b87d8561SStephen Hemminger * a cwnd adjustment decision. The original Vegas implementation
32b87d8561SStephen Hemminger * assumed senders never went idle.
33b87d8561SStephen Hemminger */
34b87d8561SStephen Hemminger
35b87d8561SStephen Hemminger #include <linux/mm.h>
36b87d8561SStephen Hemminger #include <linux/module.h>
37b87d8561SStephen Hemminger #include <linux/skbuff.h>
38a8c2190eSArnaldo Carvalho de Melo #include <linux/inet_diag.h>
39b87d8561SStephen Hemminger
40b87d8561SStephen Hemminger #include <net/tcp.h>
41b87d8561SStephen Hemminger
427752237eSStephen Hemminger #include "tcp_vegas.h"
437752237eSStephen Hemminger
448d3a564dSDoug Leith static int alpha = 2;
458d3a564dSDoug Leith static int beta = 4;
468d3a564dSDoug Leith static int gamma = 1;
47b87d8561SStephen Hemminger
48b87d8561SStephen Hemminger module_param(alpha, int, 0644);
498d3a564dSDoug Leith MODULE_PARM_DESC(alpha, "lower bound of packets in network");
50b87d8561SStephen Hemminger module_param(beta, int, 0644);
518d3a564dSDoug Leith MODULE_PARM_DESC(beta, "upper bound of packets in network");
52b87d8561SStephen Hemminger module_param(gamma, int, 0644);
53b87d8561SStephen Hemminger MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
54b87d8561SStephen Hemminger
55b87d8561SStephen Hemminger /* There are several situations when we must "re-start" Vegas:
56b87d8561SStephen Hemminger *
57b87d8561SStephen Hemminger * o when a connection is established
58b87d8561SStephen Hemminger * o after an RTO
59b87d8561SStephen Hemminger * o after fast recovery
60b87d8561SStephen Hemminger * o when we send a packet and there is no outstanding
61b87d8561SStephen Hemminger * unacknowledged data (restarting an idle connection)
62b87d8561SStephen Hemminger *
63b87d8561SStephen Hemminger * In these circumstances we cannot do a Vegas calculation at the
64b87d8561SStephen Hemminger * end of the first RTT, because any calculation we do is using
65b87d8561SStephen Hemminger * stale info -- both the saved cwnd and congestion feedback are
66b87d8561SStephen Hemminger * stale.
67b87d8561SStephen Hemminger *
68b87d8561SStephen Hemminger * Instead we must wait until the completion of an RTT during
69b87d8561SStephen Hemminger * which we actually receive ACKs.
70b87d8561SStephen Hemminger */
vegas_enable(struct sock * sk)717752237eSStephen Hemminger static void vegas_enable(struct sock *sk)
72b87d8561SStephen Hemminger {
736687e988SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk);
746687e988SArnaldo Carvalho de Melo struct vegas *vegas = inet_csk_ca(sk);
75b87d8561SStephen Hemminger
76b87d8561SStephen Hemminger /* Begin taking Vegas samples next time we send something. */
77b87d8561SStephen Hemminger vegas->doing_vegas_now = 1;
78b87d8561SStephen Hemminger
79b87d8561SStephen Hemminger /* Set the beginning of the next send window. */
80b87d8561SStephen Hemminger vegas->beg_snd_nxt = tp->snd_nxt;
81b87d8561SStephen Hemminger
82b87d8561SStephen Hemminger vegas->cntRTT = 0;
83b87d8561SStephen Hemminger vegas->minRTT = 0x7fffffff;
84b87d8561SStephen Hemminger }
85b87d8561SStephen Hemminger
86b87d8561SStephen Hemminger /* Stop taking Vegas samples for now. */
vegas_disable(struct sock * sk)876687e988SArnaldo Carvalho de Melo static inline void vegas_disable(struct sock *sk)
88b87d8561SStephen Hemminger {
896687e988SArnaldo Carvalho de Melo struct vegas *vegas = inet_csk_ca(sk);
90b87d8561SStephen Hemminger
91b87d8561SStephen Hemminger vegas->doing_vegas_now = 0;
92b87d8561SStephen Hemminger }
93b87d8561SStephen Hemminger
tcp_vegas_init(struct sock * sk)947752237eSStephen Hemminger void tcp_vegas_init(struct sock *sk)
95b87d8561SStephen Hemminger {
966687e988SArnaldo Carvalho de Melo struct vegas *vegas = inet_csk_ca(sk);
97b87d8561SStephen Hemminger
98b87d8561SStephen Hemminger vegas->baseRTT = 0x7fffffff;
996687e988SArnaldo Carvalho de Melo vegas_enable(sk);
100b87d8561SStephen Hemminger }
1017752237eSStephen Hemminger EXPORT_SYMBOL_GPL(tcp_vegas_init);
102b87d8561SStephen Hemminger
103b87d8561SStephen Hemminger /* Do RTT sampling needed for Vegas.
104b87d8561SStephen Hemminger * Basically we:
105b87d8561SStephen Hemminger * o min-filter RTT samples from within an RTT to get the current
106b87d8561SStephen Hemminger * propagation delay + queuing delay (we are min-filtering to try to
107b87d8561SStephen Hemminger * avoid the effects of delayed ACKs)
108b87d8561SStephen Hemminger * o min-filter RTT samples from a much longer window (forever for now)
109b87d8561SStephen Hemminger * to find the propagation delay (baseRTT)
110b87d8561SStephen Hemminger */
tcp_vegas_pkts_acked(struct sock * sk,const struct ack_sample * sample)111756ee172SLawrence Brakmo void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample)
112b87d8561SStephen Hemminger {
1136687e988SArnaldo Carvalho de Melo struct vegas *vegas = inet_csk_ca(sk);
114164891aaSStephen Hemminger u32 vrtt;
115164891aaSStephen Hemminger
116756ee172SLawrence Brakmo if (sample->rtt_us < 0)
117b9ce204fSIlpo Järvinen return;
118b9ce204fSIlpo Järvinen
119164891aaSStephen Hemminger /* Never allow zero rtt or baseRTT */
120756ee172SLawrence Brakmo vrtt = sample->rtt_us + 1;
121b87d8561SStephen Hemminger
122b87d8561SStephen Hemminger /* Filter to find propagation delay: */
123b87d8561SStephen Hemminger if (vrtt < vegas->baseRTT)
124b87d8561SStephen Hemminger vegas->baseRTT = vrtt;
125b87d8561SStephen Hemminger
126b87d8561SStephen Hemminger /* Find the min RTT during the last RTT to find
127b87d8561SStephen Hemminger * the current prop. delay + queuing delay:
128b87d8561SStephen Hemminger */
129b87d8561SStephen Hemminger vegas->minRTT = min(vegas->minRTT, vrtt);
130b87d8561SStephen Hemminger vegas->cntRTT++;
131b87d8561SStephen Hemminger }
1327752237eSStephen Hemminger EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked);
133b87d8561SStephen Hemminger
/* Vegas measurements are only meaningful in the Open state; during
 * loss recovery the samples would be stale, so sampling is suspended.
 */
void tcp_vegas_state(struct sock *sk, u8 ca_state)
{
	if (ca_state != TCP_CA_Open)
		vegas_disable(sk);
	else
		vegas_enable(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_state);
142b87d8561SStephen Hemminger
143b87d8561SStephen Hemminger /*
144b87d8561SStephen Hemminger * If the connection is idle and we are restarting,
145b87d8561SStephen Hemminger * then we don't want to do any Vegas calculations
146b87d8561SStephen Hemminger * until we get fresh RTT samples. So when we
147b87d8561SStephen Hemminger * restart, we reset our Vegas state to a clean
148b87d8561SStephen Hemminger * slate. After we get acks for this flight of
149b87d8561SStephen Hemminger * packets, _then_ we can make Vegas calculations
150b87d8561SStephen Hemminger * again.
151b87d8561SStephen Hemminger */
tcp_vegas_cwnd_event(struct sock * sk,enum tcp_ca_event event)1527752237eSStephen Hemminger void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
153b87d8561SStephen Hemminger {
154b87d8561SStephen Hemminger if (event == CA_EVENT_CWND_RESTART ||
155b87d8561SStephen Hemminger event == CA_EVENT_TX_START)
1566687e988SArnaldo Carvalho de Melo tcp_vegas_init(sk);
157b87d8561SStephen Hemminger }
1587752237eSStephen Hemminger EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
159b87d8561SStephen Hemminger
tcp_vegas_ssthresh(struct tcp_sock * tp)160c80a5cdfSDoug Leith static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
161c80a5cdfSDoug Leith {
162*40570375SEric Dumazet return min(tp->snd_ssthresh, tcp_snd_cwnd(tp));
163c80a5cdfSDoug Leith }
164c80a5cdfSDoug Leith
/* Vegas cwnd adjustment, run on every ACK.  Once per RTT (detected by
 * the ACK passing beg_snd_nxt) it compares the cwnd we have with the
 * cwnd the measured "actual" rate would justify, and nudges cwnd so
 * that between alpha and beta extra packets sit queued in the network.
 * Whenever Vegas sampling is off or too few RTT samples were collected,
 * it falls back to Reno behaviour.
 */
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *vegas = inet_csk_ca(sk);

	if (!vegas->doing_vegas_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	if (after(ack, vegas->beg_snd_nxt)) {
		/* Do the Vegas once-per-RTT cwnd adjustment. */

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		vegas->beg_snd_nxt = tp->snd_nxt;

		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (vegas->cntRTT <= 2) {
			/* We don't have enough RTT samples to do the Vegas
			 * calculation, so we'll behave like Reno.
			 */
			tcp_reno_cong_avoid(sk, ack, acked);
		} else {
			u32 rtt, diff;
			u64 target_cwnd;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = vegas->minRTT;

			/* Calculate the cwnd we should have, if we weren't
			 * going too fast.
			 *
			 * This is:
			 *     (actual rate in segments) * baseRTT
			 *
			 * 64-bit math with do_div() to avoid overflowing
			 * cwnd * baseRTT in 32 bits.
			 */
			target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT;
			do_div(target_cwnd, rtt);

			/* Calculate the difference between the window we had,
			 * and the window we would like to have. This quantity
			 * is the "Diff" from the Arizona Vegas papers:
			 * the number of extra packets queued in the network.
			 */
			diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT;

			if (diff > gamma && tcp_in_slow_start(tp)) {
				/* Going too fast. Time to slow down
				 * and switch to congestion avoidance.
				 */

				/* Set cwnd to match the actual rate
				 * exactly:
				 *   cwnd = (actual rate) * baseRTT
				 * Then we add 1 because the integer
				 * truncation robs us of full link
				 * utilization.
				 */
				tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
							 (u32)target_cwnd + 1));
				tp->snd_ssthresh = tcp_vegas_ssthresh(tp);

			} else if (tcp_in_slow_start(tp)) {
				/* Slow start. */
				tcp_slow_start(tp, acked);
			} else {
				/* Congestion avoidance. */

				/* Figure out where we would like cwnd
				 * to be.
				 */
				if (diff > beta) {
					/* The old window was too fast, so
					 * we slow down.
					 */
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
					tp->snd_ssthresh
						= tcp_vegas_ssthresh(tp);
				} else if (diff < alpha) {
					/* We don't have enough extra packets
					 * in the network, so speed up.
					 */
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
				} else {
					/* Sending just as fast as we
					 * should be.
					 */
				}
			}

			/* Keep cwnd within sane bounds: at least 2 segments,
			 * at most the socket's clamp.
			 */
			if (tcp_snd_cwnd(tp) < 2)
				tcp_snd_cwnd_set(tp, 2);
			else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
				tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);

			tp->snd_ssthresh = tcp_current_ssthresh(sk);
		}

		/* Wipe the slate clean for the next RTT. */
		vegas->cntRTT = 0;
		vegas->minRTT = 0x7fffffff;
	}
	/* Use normal slow start */
	else if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
}
289b87d8561SStephen Hemminger
290b87d8561SStephen Hemminger /* Extract info for Tcp socket info provided via netlink. */
tcp_vegas_get_info(struct sock * sk,u32 ext,int * attr,union tcp_cc_info * info)29164f40ff5SEric Dumazet size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
29264f40ff5SEric Dumazet union tcp_cc_info *info)
293b87d8561SStephen Hemminger {
2946687e988SArnaldo Carvalho de Melo const struct vegas *ca = inet_csk_ca(sk);
295b87d8561SStephen Hemminger
29664f40ff5SEric Dumazet if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
29744797589SJulia Lawall info->vegas.tcpv_enabled = ca->doing_vegas_now;
29844797589SJulia Lawall info->vegas.tcpv_rttcnt = ca->cntRTT;
29944797589SJulia Lawall info->vegas.tcpv_rtt = ca->baseRTT;
30044797589SJulia Lawall info->vegas.tcpv_minrtt = ca->minRTT;
30164f40ff5SEric Dumazet
30264f40ff5SEric Dumazet *attr = INET_DIAG_VEGASINFO;
30364f40ff5SEric Dumazet return sizeof(struct tcpvegas_info);
304b87d8561SStephen Hemminger }
305521f1cf1SEric Dumazet return 0;
306b87d8561SStephen Hemminger }
3077752237eSStephen Hemminger EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
308b87d8561SStephen Hemminger
/* Congestion-control ops table.  Vegas reuses Reno's ssthresh and
 * undo_cwnd; only the RTT sampling and cong_avoid hooks differ.
 */
static struct tcp_congestion_ops tcp_vegas __read_mostly = {
	.init		= tcp_vegas_init,
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_vegas_cong_avoid,
	.pkts_acked	= tcp_vegas_pkts_acked,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,

	.owner		= THIS_MODULE,
	.name		= "vegas",
};
322b87d8561SStephen Hemminger
tcp_vegas_register(void)323b87d8561SStephen Hemminger static int __init tcp_vegas_register(void)
324b87d8561SStephen Hemminger {
32574975d40SAlexey Dobriyan BUILD_BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE);
326b87d8561SStephen Hemminger tcp_register_congestion_control(&tcp_vegas);
327b87d8561SStephen Hemminger return 0;
328b87d8561SStephen Hemminger }
329b87d8561SStephen Hemminger
/* Module exit: remove Vegas from the list of available CAs. */
static void __exit tcp_vegas_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_vegas);
}
334b87d8561SStephen Hemminger
335b87d8561SStephen Hemminger module_init(tcp_vegas_register);
336b87d8561SStephen Hemminger module_exit(tcp_vegas_unregister);
337b87d8561SStephen Hemminger
338b87d8561SStephen Hemminger MODULE_AUTHOR("Stephen Hemminger");
339b87d8561SStephen Hemminger MODULE_LICENSE("GPL");
340b87d8561SStephen Hemminger MODULE_DESCRIPTION("TCP Vegas");
341