/* /linux/net/ipv4/tcp_cong.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb) */
/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
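
/*
 * Illustrative sketch (not part of this file): an out-of-tree module
 * would register its algorithm from its init function.  The ops below
 * reuse the exported Reno helpers, which satisfies the mandatory
 * ssthresh/cong_avoid check above; the names tcp_example and
 * tcp_example_init are hypothetical.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_init);
 */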

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* If no choice has been made yet, assign the current default:
	 * the first entry on the list whose module can be pinned.
	 */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
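		/* Moving the entry to the head of the list makes it the
		 * default: tcp_init_congestion_control() takes the first
		 * entry it can pin, and the getters read the head.
		 */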
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}
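
/*
 * Note: snprintf() returns the length that would have been written,
 * not the truncated length, so offs can run past maxlen when buf is
 * too small; callers are expected to size buf for every registered
 * name (each bounded by TCP_CA_NAME_MAX).
 */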

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;

	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change the list of non-restricted congestion control values */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	/* strsep() consumes the cursor, so keep the original
	 * allocation around for kfree().
	 */
	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
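
/*
 * Illustrative usage (assuming the standard proc interface that
 * exposes this handler):
 *
 *	# echo "reno cubic" > /proc/sys/net/ipv4/tcp_allowed_congestion_control
 *
 * Pass 1 rejects the whole write if any name is unknown, so the
 * allowed set changes atomically under tcp_cong_list_lock.
 */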

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change when asking for the current value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found; attempt to autoload the module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}
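
/*
 * Illustrative userspace entry point (not part of this file): an
 * application reaches this function through the TCP_CONGESTION
 * socket option, e.g.
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno",
 *		   strlen("reno"));
 *
 * Non-restricted algorithms may be selected by any user; anything
 * else requires CAP_NET_ADMIN (the -EPERM case above).
 */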

/* RFC2861: check whether we are limited by application or congestion
 * window.  This is the inverse of the cwnd check in tcp_tso_should_defer().
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
		return 1;
	return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
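
/*
 * Worked example with illustrative numbers: with snd_cwnd = 10 and
 * in_flight = 8, left = 2 <= tcp_max_burst(), so the sender is
 * cwnd-limited.  With in_flight = 2, left = 8; absent the GSO
 * deferral case this exceeds the burst limit, so the sender is
 * treated as application-limited and cwnd need not grow.
 */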

/*
 * Slow start is used when the congestion window is less than the slow
 * start threshold.  This implements the basic RFC2581 behaviour and
 * optionally supports:
 *	RFC3742 Limited Slow Start        - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC slow start.
	 * Increase only after a full MSS of bytes is acked:
	 * the sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L.
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if a delayed ack was detected.
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
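
/*
 * Worked example of the limited slow start branch: with
 * sysctl_tcp_max_ssthresh = 100 and snd_cwnd = 200, cnt = 50, so each
 * ACK adds 50 to snd_cwnd_cnt and the window grows by one segment per
 * four ACKs: about max_ssthresh/2 segments per RTT (assuming one ACK
 * per segment) instead of doubling.
 */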

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or 1 / w for an
 * alternative window w).
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
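
/*
 * E.g. with w == snd_cwnd == 10, the window grows by one segment for
 * roughly every ten ACKs, i.e. about one segment per RTT: the classic
 * Reno additive increase.
 */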

/*
 * TCP Reno congestion control
 * This is a special case, also used as the fallback.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Counting -
		 * increase once for each full cwnd of bytes acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
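
/* E.g. snd_cwnd == 10 gives ssthresh 5; snd_cwnd == 3 hits the floor of 2. */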

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh / 2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really Reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);