xref: /linux/net/ipv4/tcp_scalable.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /* Tom Kelly's Scalable TCP
2  *
3  * See http://www.deneholme.net/tom/scalable/
4  *
5  * John Heffner <jheffner@sc.edu>
6  */
7 
8 #include <linux/module.h>
9 #include <net/tcp.h>
10 
/* These factors derived from the recommended values in the paper:
 * .01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
15 #define TCP_SCALABLE_AI_CNT	50U
16 #define TCP_SCALABLE_MD_SCALE	3
17 
/* Per-connection private state, stored in the congestion-control area of
 * the socket (see inet_csk_ca() uses below).
 */
struct scalable {
	u32 loss_cwnd;	/* snd_cwnd snapshot taken in ssthresh(), read by undo */
};
21 
22 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
23 {
24 	struct tcp_sock *tp = tcp_sk(sk);
25 
26 	if (!tcp_is_cwnd_limited(sk))
27 		return;
28 
29 	if (tcp_in_slow_start(tp))
30 		tcp_slow_start(tp, acked);
31 	else
32 		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
33 				  1);
34 }
35 
36 static u32 tcp_scalable_ssthresh(struct sock *sk)
37 {
38 	const struct tcp_sock *tp = tcp_sk(sk);
39 	struct scalable *ca = inet_csk_ca(sk);
40 
41 	ca->loss_cwnd = tp->snd_cwnd;
42 
43 	return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
44 }
45 
46 static u32 tcp_scalable_cwnd_undo(struct sock *sk)
47 {
48 	const struct scalable *ca = inet_csk_ca(sk);
49 
50 	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
51 }
52 
/* Congestion-control ops table registered under the name "scalable";
 * only the hooks implemented in this file are filled in.
 */
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.undo_cwnd	= tcp_scalable_cwnd_undo,
	.cong_avoid	= tcp_scalable_cong_avoid,

	.owner		= THIS_MODULE,
	.name		= "scalable",
};
61 
/* Module init: register the "scalable" algorithm with the TCP stack.
 * Returns 0 on success or the error from the registration call.
 */
static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}
66 
/* Module exit: remove the "scalable" algorithm from the TCP stack. */
static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}
71 
/* Module entry/exit hookup and metadata. */
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
78