// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP HYBLA
 *
 * TCP-HYBLA Congestion control algorithm, based on:
 *   C. Caini, R. Firrincieli, "TCP-Hybla: A TCP Enhancement
 *   for Heterogeneous Networks",
 *   International Journal of Satellite Communications and Networking,
 *   September 2004
 *    Daniele Lacamera
 *    root at danielinux.net
 */

#include <linux/module.h>
#include <net/tcp.h>

/* TCP Hybla structure. */
struct hybla {
	bool  hybla_en;
	u32   snd_cwnd_cents; /* Keeps fractional cwnd increments (< 1 segment), <<7 */
	u32   rho;	      /* Rho parameter, integer part */
	u32   rho2;	      /* Rho * Rho, integer part */
	u32   rho_3ls;	      /* Rho parameter, <<3 */
	u32   rho2_7ls;	      /* Rho^2, <<7 */
	u32   minrtt_us;      /* Minimum smoothed round trip time value seen */
};
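
/*
 * Fixed-point conventions: rho is kept both as an integer (rho) and
 * left-shifted by 3 (rho_3ls, units of 1/8); rho^2 is kept as an
 * integer (rho2) and left-shifted by 7 (rho2_7ls, units of 1/128);
 * snd_cwnd_cents accumulates the fractional part of cwnd growth in
 * units of 1/128 of a segment.
 */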

/* Hybla reference round trip time (default = 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");
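
/*
 * rtt0 can be set at module load time, e.g. "modprobe tcp_hybla
 * rtt0=50", or changed later through
 * /sys/module/tcp_hybla/parameters/rtt0 (writable per the 0644 mode).
 */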

/* This is called to refresh values for hybla parameters */
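/*
 * tp->srtt_us stores the smoothed RTT left-shifted by 3 (8 * RTT in
 * usec), so dividing it by rtt0 in usec directly yields rho << 3.
 * Example with the default rtt0 = 25 ms and a 500 ms smoothed RTT:
 * srtt_us = 8 * 500000 = 4000000, rho_3ls = 4000000 / 25000 = 160,
 * rho = 160 >> 3 = 20, rho2_7ls = (160 * 160) << 1 = 51200, rho2 = 400.
 * The max_t() clamp keeps rho_3ls >= 8 (i.e. rho >= 1), so Hybla never
 * grows more slowly than plain Reno.
 */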
static inline void hybla_recalc_param(struct sock *sk)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho_3ls = max_t(u32,
			    tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
			    8U);
	ca->rho = ca->rho_3ls >> 3;
	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
	ca->rho2 = ca->rho2_7ls >> 7;
}

static void hybla_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho = 0;
	ca->rho2 = 0;
	ca->rho_3ls = 0;
	ca->rho2_7ls = 0;
	ca->snd_cwnd_cents = 0;
	ca->hybla_en = true;
	tcp_snd_cwnd_set(tp, 2);
	tp->snd_cwnd_clamp = 65535;

	/* 1st Rho measurement based on initial srtt */
	hybla_recalc_param(sk);

	/* set minimum rtt as this is the 1st ever seen */
	ca->minrtt_us = tp->srtt_us;
	tcp_snd_cwnd_set(tp, ca->rho);
}

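/*
 * Hybla's cwnd update is applied only while the connection is in the
 * TCP_CA_Open state; in any other state hybla_cong_avoid() falls back
 * to standard Reno behaviour via the hybla_en flag set here.
 */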
static void hybla_state(struct sock *sk, u8 ca_state)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->hybla_en = (ca_state == TCP_CA_Open);
}

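/*
 * fractions[i] holds 2^(i/8) in <<7 fixed point, truncated: e.g.
 * 2^(1/8) * 128 = 139.58 -> 139. It supplies the fractional part of
 * 2^rho for the slow start increment below.
 */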
static inline u32 hybla_fraction(u32 odds)
{
	static const u32 fractions[] = {
		128, 139, 152, 165, 181, 197, 215, 234,
	};

	return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}

/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *     o Recalc Hybla parameters if min rtt has changed
 *     o Give cwnd a new value based on the model proposed
 *     o remember increments < 1
 */
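/*
 * Worked example with rho = 4 (increments are kept <<7, i.e. in units
 * of 1/128 segment): in slow start the increment is
 * (1 << 4) * 128 - 128 = 1920, so cwnd grows by 1920 >> 7 = 15 =
 * 2^4 - 1 segments per ACK. In congestion avoidance with cwnd = 64 the
 * increment is (16 << 7) / 64 = 32, i.e. 32/128 of a segment per ACK,
 * which accumulates to rho^2 = 16 extra segments per window. The
 * min(rho, 16U) below only caps the shift so that 1 << rho cannot
 * overflow for pathologically large rho values.
 */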
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);
	u32 increment, odd, rho_fractions;
	int is_slowstart = 0;

	/* Recalculate rho only if this srtt is the lowest */
	if (tp->srtt_us < ca->minrtt_us) {
		hybla_recalc_param(sk);
		ca->minrtt_us = tp->srtt_us;
	}

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (!ca->hybla_en) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	if (ca->rho == 0)
		hybla_recalc_param(sk);

	rho_fractions = ca->rho_3ls - (ca->rho << 3);

	if (tcp_in_slow_start(tp)) {
		/*
		 * slow start
		 *      INC = 2^RHO - 1
		 * This is done by splitting the rho parameter
		 * into 2 parts: an integer part and a fraction part.
		 * Increment<<7 is estimated by doing:
		 *	       [2^(int+fract)]<<7
		 * that is equal to:
		 *	       (2^int)	*  [(2^fract) <<7]
		 * 2^int is computed directly as 1<<int,
		 * while we use hybla_fraction() to
		 * calculate 2^fract in a <<7 value.
		 */
		is_slowstart = 1;
		increment = ((1 << min(ca->rho, 16U)) *
			hybla_fraction(rho_fractions)) - 128;
	} else {
		/*
		 * congestion avoidance
		 * INC = RHO^2 / W
		 * as long as increment is estimated as (rho^2 << 7)/window
		 * it already is <<7 and we can easily count its fractions.
		 */
		increment = ca->rho2_7ls / tcp_snd_cwnd(tp);
		if (increment < 128)
			tp->snd_cwnd_cnt++;
	}

	odd = increment % 128;
	tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));
	ca->snd_cwnd_cents += odd;

	/* check when fraction goes >= 128 and increase cwnd by 1. */
	while (ca->snd_cwnd_cents >= 128) {
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
		ca->snd_cwnd_cents -= 128;
		tp->snd_cwnd_cnt = 0;
	}
	/* check when cwnd has not been incremented for a while */
	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
		tp->snd_cwnd_cnt = 0;
	}
	/* clamp down slowstart cwnd to ssthresh value. */
	if (is_slowstart)
		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));

	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}

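/*
 * Only cwnd growth is Hybla-specific: the ssthresh and undo hooks
 * reuse the standard Reno helpers, so the multiplicative decrease on
 * loss is unchanged.
 */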
static struct tcp_congestion_ops tcp_hybla __read_mostly = {
	.init		= hybla_init,
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= hybla_cong_avoid,
	.set_state	= hybla_state,

	.owner		= THIS_MODULE,
	.name		= "hybla"
};

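/*
 * Congestion control modules keep their per-connection state in the
 * ICSK_CA_PRIV_SIZE bytes reserved inside inet_connection_sock; the
 * BUILD_BUG_ON() below checks at compile time that struct hybla fits.
 */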
static int __init hybla_register(void)
{
	BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");
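
/*
 * Usage: once the module is loaded, Hybla can be selected system-wide
 * with "sysctl net.ipv4.tcp_congestion_control=hybla", or per socket
 * with setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "hybla", 5).
 */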