// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
 *
 * - L. A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *   A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32	bw_ns_est;	/* first-stage bandwidth estimate (lightly smoothed) */
	u32	bw_est;		/* smoothed bandwidth estimate */
	u32	rtt_win_sx;	/* start (jiffies) of the current measurement window */
	u32	bk;		/* bytes acked in the current window */
	u32	snd_una;	/* snd_una at the last update, used to count acked bytes */
	u32	cumul_ack;	/* bytes acked by the last ACK */
	u32	accounted;	/* bytes already credited by dupacks */
	u32	rtt;		/* last RTT sample (jiffies) */
	u32	rtt_min;	/* minimum observed RTT */
	u8	first_ack;	/* flag set until the first ACK is seen */
	u8	reset_rtt_min;	/* reset rtt_min to the next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT	(20*HZ)	/* 20s: maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but for new passive connections we have no information about RTTmin
 * at this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This
 * value was deliberately chosen to be overly conservative: this way we
 * are sure it will be updated in a consistent way as soon as possible,
 * reasonably within the first RTT of the connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_jiffies32;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter: an exponentially weighted moving average with
 * constant coefficients (gain 1/8).
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}
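
/*
 * A worked example with hypothetical numbers: westwood_do_filter(800, 1600)
 * returns ((7 * 800) + 1600) >> 3 = 900; the estimate moves 1/8th of
 * the way toward each new sample, smoothing out transient spikes.
 */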

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty, fill it with the first bandwidth sample */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last RTT sample.
 */
static void tcp_westwood_pkts_acked(struct sock *sk,
				    const struct ack_sample *sample)
{
	struct westwood *w = inet_csk_ca(sk);

	if (sample->rtt_us > 0)
		w->rtt = usecs_to_jiffies(sample->rtt_us);
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do
 * so; in that case it calls the filter to compute a bandwidth sample.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_jiffies32 - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix the mismatch between tp->snd_una and w->snd_una for the
	 * first bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-long window has passed. Be careful: if RTT is less
	 * than 50ms we don't filter but keep 'building the sample', since
	 * estimating over very small time intervals is better avoided.
	 * On a LAN we will therefore reasonably always have
	 * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN.
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_jiffies32;
	}
}
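
/*
 * For instance (hypothetical values): with HZ = 1000 and a measured
 * rtt of 100 jiffies, westwood_filter() is fed a fresh bk/delta
 * bandwidth sample roughly every 100 ms; in between, bk merely
 * accumulates newly acked bytes.
 */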

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else
		w->rtt_min = min(w->rtt, w->rtt_min);
}

/*
 * @westwood_fast_bw
 * Called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and does not need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Evaluates cumul_ack, used to update bk, in the presence of delayed
 * or partial ACKs.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack, since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
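
/*
 * Worked example (hypothetical numbers, mss_cache = 1000 bytes): two
 * dupacks each credit one MSS (cumul_ack = 1000, accounted reaches
 * 2000); a later ACK advancing snd_una by 3000 bytes then yields
 * cumul_ack = 3000 - 2000 = 1000, so bytes already credited by the
 * dupacks are not counted twice.
 */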

/*
 * TCP Westwood
 * Here the limit is computed as bandwidth estimate * RTTmin (converted
 * to packets using mss_cache). The result is clamped to a minimum of 2,
 * so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
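
/*
 * Example (hypothetical values): bw_est = 1500 bytes/jiffy and
 * rtt_min = 50 jiffies give a pipe of 75000 bytes, i.e. 50 segments
 * with mss_cache = 1500, which becomes the new cwnd/ssthresh.
 */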

/*
 * @tcp_westwood_ack
 * Called for each incoming ACK. On the slow path the count of newly
 * acked bytes must be reconstructed via westwood_acked_count(); on the
 * fast path snd_una advanced in order, so westwood_fast_bw() can
 * credit the acked bytes directly.
 */
static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
{
	if (ack_flags & CA_ACK_SLOWPATH) {
		struct westwood *w = inet_csk_ca(sk);

		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);

		update_rtt_min(w);
		return;
	}

	westwood_fast_bw(sk);
}

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_COMPLETE_CWR:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
		break;
	case CA_EVENT_LOSS:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;
	default:
		/* don't care */
		break;
	}
}

/* Extract info for TCP socket info provided via netlink. */
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt	= 0;
		info->vegas.tcpv_rtt	= jiffies_to_usecs(ca->rtt);
		info->vegas.tcpv_minrtt	= jiffies_to_usecs(ca->rtt_min);

		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= tcp_westwood_event,
	.in_ack_event	= tcp_westwood_ack,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");