xref: /linux/net/ipv4/tcp_westwood.c (revision f61e29018a30c738e1298e1b13be956aa17ee17b)
/*
 * TCP Westwood+
 *
 *	Angelo Dell'Aera:	TCP Westwood+ support
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation... not too smoothed 8) */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* here starts a new evaluation... */
	u32    bk;               /* bytes acked during the current estimation window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes acked by the latest ack */
	u32    accounted;        /* bytes already credited via dupacks */
	u32    rtt;              /* latest RTT sample, in jiffies */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating that this is the first ack */
};


/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */
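
/* Both constants are in jiffies: HZ is the number of jiffies per second,
 * so HZ/20 corresponds to 50 ms and 20*HZ to 20 seconds.
 */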

/*
 * @tcp_westwood_init
 * This function initializes the fields used in TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct, but
 * for new passive connections we have no information about RTTmin at
 * this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value
 * was deliberately chosen to be overly conservative, since that way we
 * are sure it will be updated in a consistent way as soon as possible.
 * That will reasonably happen within the first RTT period of the
 * connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}
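
/* Note: tcp_time_stamp is the current time in jiffies in this kernel, so
 * rtt_win_sx and all the RTT fields above are expressed in jiffies.
 */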

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients:
 * filtered = (7 * old + new_sample) / 8.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (7 * a + b) >> 3;
}

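/*
 * @westwood_filter
 * Feed a new raw bandwidth sample (bk/delta, in bytes per jiffy) through
 * two cascaded low-pass stages: bw_ns_est follows the raw samples, and
 * bw_est is a further smoothed version of bw_ns_est.
 */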
static inline void westwood_filter(struct westwood *w, u32 delta)
{
	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;	/* srtt is stored <<3; convert to jiffies */
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do so.
 * If so, it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialise w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful: if RTT is less than 50 ms we don't filter but we
	 * keep 'building the sample'. This minimum limit was chosen
	 * because estimating over very small time intervals is better
	 * avoided. Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN.
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
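
/*
 * Purely illustrative example: with HZ = 1000, if bk = 64000 bytes were
 * acked over a window of delta = 100 jiffies (100 ms), the raw sample
 * bk/delta = 640 bytes per jiffy (~5.1 Mbit/s) is what gets fed into
 * westwood_filter() above.
 */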

/*
 * @westwood_fast_bw
 * It is called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	w->rtt_min = min(w->rtt, w->rtt_min);
}

/*
 * @westwood_acked_count
 * This function evaluates cumul_ack, which is used to update bk in the
 * case of delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
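
/*
 * Reading of the accounting above: each dupack credits one MSS to both
 * cumul_ack and accounted. When a later cumulative ack covers those
 * bytes, the amount already credited via dupacks is subtracted, so the
 * same data is not counted in bk twice. E.g. three dupacks credit 3*MSS;
 * a subsequent ack advancing snd_una by 4*MSS then credits only the
 * remaining 1*MSS.
 */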


/*
 * TCP Westwood
 * Here the limit is evaluated as bandwidth estimate * RTTmin (we use
 * mss_cache to obtain it in packets). The result is clamped to be at
 * least 2, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
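
/*
 * Purely illustrative example: with bw_est = 640 bytes/jiffy,
 * rtt_min = 50 jiffies and mss_cache = 1460 bytes, this returns
 * 640 * 50 / 1460 ~= 21 packets.
 */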

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		w->rtt_min = min(w->rtt, w->rtt_min);
		break;

	default:
		/* don't care */
		break;
	}
}
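
/*
 * The CA_EVENT_COMPLETE_CWR and CA_EVENT_FRTO cases above are the core
 * Westwood+ idea: after a congestion episode, ssthresh (and, for CWR,
 * also cwnd) is set from the estimated bandwidth-delay product
 * bw_est * rtt_min rather than simply being left at the halved value.
 */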


/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure:	;
	}
}


static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};
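
/*
 * Usage note: once this module is loaded (e.g. "modprobe tcp_westwood"),
 * the algorithm can be selected through the "westwood" name registered
 * above, e.g. "sysctl net.ipv4.tcp_congestion_control=westwood".
 */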

static int __init tcp_westwood_register(void)
{
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");