/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
 *
 * - L. A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. A. Grieco, S. Mascolo
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in Reno.
 */
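
/*
 * In brief (orientation comment added here, not part of the original
 * header; see tcp_westwood_bw_rttmin() below): when a congestion
 * episode ends (CWR completion or FRTO), Westwood+ sets
 *
 *	ssthresh = (bw_est * rtt_min) / mss
 *
 * i.e. the estimated bandwidth-delay product expressed in packets,
 * rather than blindly halving the window as Reno does.
 */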

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimate; only lightly smoothed */
	u32    bw_est;           /* smoothed bandwidth estimate */
	u32    rtt_win_sx;       /* left edge (start) of the current estimation window */
	u32    bk;               /* bytes acked during the current window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes newly acked by the current ACK */
	u32    accounted;        /* bytes already credited via dupacks */
	u32    rtt;              /* last RTT sample (from srtt, in jiffies) */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating that this is the first ack */
	u8     reset_rtt_min;    /* reset rtt_min to the next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about RTTmin at this
 * time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be overly conservative, since that way we can
 * be sure it will be updated in a consistent way as soon as possible.
 * That will reasonably happen within the first RTT period of the
 * connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (((7 * a) + b) >> 3);
}
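
/*
 * Worked example (figures are illustrative only): westwood_do_filter()
 * is an exponentially weighted moving average with gain 1/8. With a
 * current estimate a = 800 and a new sample b = 1600 it returns
 * (7 * 800 + 1600) >> 3 = 7200 / 8 = 900, so each sample moves the
 * estimate by 1/8 of the difference between sample and estimate.
 */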

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty fill it with the first sample of bandwidth */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
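
/*
 * Units note (numbers are illustrative): bk is in bytes and delta in
 * jiffies, so the bandwidth samples and estimates are in bytes per
 * jiffy. E.g. with HZ = 1000, bk = 125000 bytes acked over
 * delta = 100 jiffies gives a sample of 1250 bytes/jiffy, which is
 * 1250 * 1000 * 8 = 10 Mbit/s.
 */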

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;	/* srtt is stored scaled by 8 */
}

/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * it. If so, it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix the mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample.
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful: if the RTT is less than 50ms we don't filter but
	 * keep 'building the sample', since bandwidth estimation over
	 * very small time intervals is unreliable and better avoided.
	 * Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN.
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
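
/*
 * Timing example (illustrative values): with HZ = 1000 and a current
 * RTT sample of 200 jiffies, westwood_update_window() runs the filter
 * roughly once every 200 ms; bk keeps accumulating acked bytes until
 * that window closes. With an RTT of 10 jiffies the window is still
 * TCP_WESTWOOD_RTT_MIN = 50 jiffies wide, never shorter.
 */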

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else {
		w->rtt_min = min(w->rtt, w->rtt_min);
	}
}

/*
 * @westwood_fast_bw
 * It is called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * This function computes cumul_ack, used to evaluate bk, in the
 * presence of delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
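
/*
 * Accounting example (figures are illustrative): with mss_cache = 1000,
 * three dupacks each credit one MSS, leaving accounted = 3000 and each
 * returning cumul_ack = 1000. If a later cumulative ACK then advances
 * snd_una by 4000 bytes, cumul_ack = 4000 > mss_cache and
 * accounted (3000) < cumul_ack, so we count only the bytes not already
 * credited: cumul_ack = 4000 - 3000 = 1000, and accounted resets to 0.
 */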

/*
 * TCP Westwood
 * Here the limit is computed as the bandwidth estimate times RTTmin,
 * divided by mss_cache to obtain it in packets. The result is clamped
 * to a minimum of 2 packets, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
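
/*
 * Worked example (assumed figures): with HZ = 1000, bw_est = 1250
 * bytes/jiffy (about 10 Mbit/s), rtt_min = 100 jiffies (100 ms) and
 * mss_cache = 1250, the limit is (1250 * 100) / 1250 = 100 packets,
 * i.e. the estimated bandwidth-delay product expressed in segments.
 */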

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;

	default:
		/* don't care */
		break;
	}
}

/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure:	;
	}
}

static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");