/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - L. A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo.
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation: not too smoothed 8) */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* left boundary (start) of the current RTT window */
	u32    bk;               /* bytes acked during the current RTT window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes newly acked by the current ACK */
	u32    accounted;        /* bytes already credited via dupacks */
	u32    rtt;              /* last RTT sample (jiffies) */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating that this is the first ack */
	u8     reset_rtt_min;    /* reset RTT min to next RTT sample */
};


/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about RTTmin at this
 * time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be too conservative, since that way we can be
 * sure it will be updated in a consistent way as soon as possible. This
 * will reasonably happen within the first RTT period of the connection
 * lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (((7 * a) + b) >> 3);
}
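
/*
 * The filter above is an exponentially weighted moving average with
 * gain 1/8: new = (7 * old + sample) / 8, i.e. each new sample moves
 * the estimate one eighth of the way toward it. For example, with
 * old = 800 and sample = 1600, new = (7 * 800 + 1600) >> 3 = 900.
 */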

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty fill it with the first sample of bandwidth */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
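
/*
 * Note on units: bk counts the bytes acked during the sampling window
 * and delta is the window length in jiffies, so the raw sample
 * bk / delta (and hence bw_ns_est and bw_est) is a bandwidth in bytes
 * per jiffy.
 */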

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;
}
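
/*
 * tp->srtt is kept left-shifted by 3 (i.e. in units of 1/8 jiffy) so
 * that the RTT estimator can work in fixed point; the >> 3 above
 * recovers the smoothed RTT in plain jiffies.
 */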

/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * so. If so, it calls the filter to evaluate bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful: if the RTT is less than 50ms we don't filter but
	 * keep 'building the sample', since estimation over such small
	 * time intervals is better avoided.
	 * Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else
		w->rtt_min = min(w->rtt, w->rtt_min);
}


/*
 * @westwood_fast_bw
 * It is called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * This function computes cumul_ack, the number of bytes to credit to
 * bk, handling delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
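
/*
 * Accounting example: a dupack does not advance snd_una, so one MSS is
 * credited immediately and remembered in 'accounted'. When a later
 * cumulative ACK finally moves snd_una, the bytes already credited for
 * the dupacks are subtracted from cumul_ack, so the same data is never
 * counted into bk twice.
 */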


/*
 * TCP Westwood
 * Here the limit is evaluated as bandwidth estimate * RTTmin; dividing
 * by mss_cache converts it from bytes to packets. Since bw_est is in
 * bytes per jiffy and rtt_min is in jiffies, the product is in bytes.
 * The result is clamped to a minimum of 2 packets, which avoids ever
 * returning 0 (or a congestion window too small to make progress).
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

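/*
 * @tcp_westwood_event
 * Congestion event dispatcher: fast acks take the cheap accounting
 * path, slow acks go through the full delayed/partial-ack accounting,
 * and when CWR completes (or on an F-RTO event) ssthresh, and for CWR
 * also cwnd, is set from the bandwidth * RTTmin estimate instead of
 * Reno's blind halving. This adaptive decrease is the heart of
 * Westwood+.
 */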
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;

	default:
		/* don't care */
		break;
	}
}


/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure:	;
	}
}


static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};
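
/*
 * Once registered, this congestion control can be selected at run
 * time, e.g. system-wide via sysctl:
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=westwood
 *
 * or per socket with setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 * "westwood", 8).
 */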

static int __init tcp_westwood_register(void)
{
	/* Our private state must fit in the socket's congestion-control area */
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");