xref: /linux/net/ipv4/tcp_nv.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP NV: TCP with Congestion Avoidance
 *
 * TCP-NV is a successor of TCP-Vegas that has been developed to
 * deal with the issues that occur in modern networks.
 * Like TCP-Vegas, TCP-NV supports true congestion avoidance,
 * the ability to detect congestion before packet losses occur.
 * When congestion (queue buildup) starts to occur, TCP-NV
 * predicts what the cwnd size should be for the current
 * throughput and it reduces the cwnd proportionally to
 * the difference between the current cwnd and the predicted cwnd.
 *
 * NV is only recommended for traffic within a data center, and when
 * all the flows are NV (at least those within the data center). This
 * is due to the inherent unfairness between flows using losses to
 * detect congestion (congestion control) and those that use queue
 * buildup to detect congestion (congestion avoidance).
 *
 * Note: High NIC coalescence values may lower the performance of NV
 * due to the increased noise in RTT values. In particular, we have
 * seen issues with rx-frames values greater than 8.
 *
 * TODO:
 * 1) Add mechanism to deal with reverse congestion.
 */
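/* Illustrative example (made-up numbers, not measurements): if the peak
 * rate observed over recent RTTs corresponds to an ideal window of about
 * 100 packets but cwnd has grown to 130, NV treats the extra ~30 packets
 * as queue buildup and walks cwnd back toward the ideal window (plus a
 * small pad) instead of waiting for packet loss.
 */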

#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>

/* TCP NV parameters
 *
 * nv_pad		Max number of queued packets allowed in network
 * nv_pad_buffer	Do not grow cwnd if this close to nv_pad
 * nv_reset_period	How often (in seconds) to reset min_rtt
 * nv_min_cwnd		Don't decrease cwnd below this if there are no losses
 * nv_cong_dec_mult	Decrease cwnd by X% (30%) of congestion when detected
 * nv_ssthresh_factor	On congestion set ssthresh to this * <desired cwnd> / 8
 * nv_rtt_factor	RTT averaging factor
 * nv_loss_dec_factor	Decrease cwnd to this (80%) when losses occur
 * nv_dec_eval_min_calls	Wait this many RTT measurements before dec cwnd
 * nv_inc_eval_min_calls	Wait this many RTT measurements before inc cwnd
 * nv_ssthresh_eval_min_calls	Wait this many RTT measurements before stopping
 *				slow-start due to congestion
 * nv_stop_rtt_cnt	Only grow cwnd for this many RTTs after non-congestion
 * nv_rtt_min_cnt	Wait this many RTTs before making a congestion decision
 * nv_cwnd_growth_rate_neg
 * nv_cwnd_growth_rate_pos
 *	How quickly to double the growth rate (not the rate itself) of cwnd
 *	when not congested. One value (nv_cwnd_growth_rate_neg) for when
 *	rate < 1 pkt/RTT (after losses). The other (nv_cwnd_growth_rate_pos)
 *	otherwise.
 */
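/* Quick reference for the fixed-point defaults below:
 *	nv_cong_dec_mult   = 30 * 128 / 100 = 38 -> 38/128  ~ 30%
 *	nv_rtt_factor      = 128             -> 128/256 = 1/2 new + 1/2 old
 *	nv_loss_dec_factor = 819             -> 819/1024 ~ 80%
 *	nv_ssthresh_factor = 8               -> 8/8 = 1x the desired cwnd
 */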

static int nv_pad __read_mostly = 10;
static int nv_pad_buffer __read_mostly = 2;
static int nv_reset_period __read_mostly = 5; /* in seconds */
static int nv_min_cwnd __read_mostly = 2;
static int nv_cong_dec_mult __read_mostly = 30 * 128 / 100; /* = 30% */
static int nv_ssthresh_factor __read_mostly = 8; /* = 1 */
static int nv_rtt_factor __read_mostly = 128; /* = 1/2*old + 1/2*new */
static int nv_loss_dec_factor __read_mostly = 819; /* => 80% */
static int nv_cwnd_growth_rate_neg __read_mostly = 8;
static int nv_cwnd_growth_rate_pos __read_mostly; /* 0 => fixed like Reno */
static int nv_dec_eval_min_calls __read_mostly = 60;
static int nv_inc_eval_min_calls __read_mostly = 20;
static int nv_ssthresh_eval_min_calls __read_mostly = 30;
static int nv_stop_rtt_cnt __read_mostly = 10;
static int nv_rtt_min_cnt __read_mostly = 2;

module_param(nv_pad, int, 0644);
MODULE_PARM_DESC(nv_pad, "max queued packets allowed in network");
module_param(nv_reset_period, int, 0644);
MODULE_PARM_DESC(nv_reset_period, "nv_min_rtt reset period (secs)");
module_param(nv_min_cwnd, int, 0644);
MODULE_PARM_DESC(nv_min_cwnd, "NV will not decrease cwnd below this value"
		 " without losses");
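/* A minimal usage sketch (user space, illustrative): once the module is
 * loaded, NV can be selected per socket with
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "nv", 2);
 *
 * or system wide via net.ipv4.tcp_congestion_control. Because the module
 * parameters above are 0644, they can also be adjusted at run time, e.g.
 * (assuming the module is named tcp_nv):
 *
 *	echo 6 > /sys/module/tcp_nv/parameters/nv_pad
 */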

/* TCP NV Parameters */
struct tcpnv {
	unsigned long nv_min_rtt_reset_jiffies;  /* when to switch to
						  * nv_min_rtt_new */
	s8  cwnd_growth_factor;	/* Current cwnd growth factor,
				 * < 0 => less than 1 packet/RTT */
	u8  available8;
	u16 available16;
	u8  nv_allow_cwnd_growth:1, /* whether cwnd can grow */
		nv_reset:1,	    /* whether to reset values */
		nv_catchup:1;	    /* whether we are growing because
				     * of temporary cwnd decrease */
	u8  nv_eval_call_cnt;	/* call count since last eval */
	u8  nv_min_cwnd;	/* nv won't make a ca decision if cwnd is
				 * smaller than this. It may grow to handle
				 * TSO, LRO and interrupt coalescence because
				 * with these a small cwnd cannot saturate
				 * the link. Note that this is different from
				 * the file local nv_min_cwnd */
	u8  nv_rtt_cnt;		/* RTTs without making ca decision */
	u32 nv_last_rtt;	/* last rtt */
	u32 nv_min_rtt;		/* active min rtt. Used to determine slope */
	u32 nv_min_rtt_new;	/* min rtt for future use */
	u32 nv_base_rtt;        /* If non-zero it represents the threshold for
				 * congestion */
	u32 nv_lower_bound_rtt; /* Used in conjunction with nv_base_rtt. It is
				 * set to 80% of nv_base_rtt. It helps reduce
				 * unfairness between flows */
	u32 nv_rtt_max_rate;	/* max rate seen during current RTT */
	u32 nv_rtt_start_seq;	/* current RTT ends when packet arrives
				 * acking beyond nv_rtt_start_seq */
	u32 nv_last_snd_una;	/* Previous value of tp->snd_una. It is
				 * used to determine bytes acked since last
				 * call to tcpnv_acked */
	u32 nv_no_cong_cnt;	/* Consecutive no congestion decisions */
};

#define NV_INIT_RTT	  U32_MAX
#define NV_MIN_CWND	  4
#define NV_MIN_CWND_GROW  2
#define NV_TSO_CWND_BOUND 80

static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	ca->nv_reset = 0;
	ca->nv_no_cong_cnt = 0;
	ca->nv_rtt_cnt = 0;
	ca->nv_last_rtt = 0;
	ca->nv_rtt_max_rate = 0;
	ca->nv_rtt_start_seq = tp->snd_una;
	ca->nv_eval_call_cnt = 0;
	ca->nv_last_snd_una = tp->snd_una;
}

static void tcpnv_init(struct sock *sk)
{
	struct tcpnv *ca = inet_csk_ca(sk);
	int base_rtt;

	tcpnv_reset(ca, sk);

	/* See if base_rtt is available from the sock_ops bpf program.
	 * It is meant to be used in environments, such as communication
	 * within a datacenter, where we have reasonable estimates of
	 * RTTs.
	 */
	base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT, 0, NULL);
	if (base_rtt > 0) {
		ca->nv_base_rtt = base_rtt;
		ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */
	} else {
		ca->nv_base_rtt = 0;
		ca->nv_lower_bound_rtt = 0;
	}

	ca->nv_allow_cwnd_growth = 1;
	ca->nv_min_rtt_reset_jiffies = jiffies + 2 * HZ;
	ca->nv_min_rtt = NV_INIT_RTT;
	ca->nv_min_rtt_new = NV_INIT_RTT;
	ca->nv_min_cwnd = NV_MIN_CWND;
	ca->nv_catchup = 0;
	ca->cwnd_growth_factor = 0;
}

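/* The BPF hook above can be served by a cgroup sock_ops program. A rough,
 * hypothetical sketch (the program name and the 80 usec value are made up;
 * see the in-tree BPF samples for complete, attachable programs):
 *
 *	SEC("sockops")
 *	int nv_base_rtt_prog(struct bpf_sock_ops *skops)
 *	{
 *		int rv = -1;
 *
 *		if (skops->op == BPF_SOCK_OPS_BASE_RTT)
 *			rv = 80;	// base RTT in usec for this fabric
 *
 *		skops->reply = rv;
 *		return 1;
 *	}
 */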
/* If provided, apply upper (base_rtt) and lower (lower_bound_rtt)
 * bounds to RTT.
 */
inline u32 nv_get_bounded_rtt(struct tcpnv *ca, u32 val)
{
	if (ca->nv_lower_bound_rtt > 0 && val < ca->nv_lower_bound_rtt)
		return ca->nv_lower_bound_rtt;
	else if (ca->nv_base_rtt > 0 && val > ca->nv_base_rtt)
		return ca->nv_base_rtt;
	else
		return val;
}

static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcpnv *ca = inet_csk_ca(sk);
	u32 cnt;

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* Only grow cwnd if NV has not detected congestion */
	if (!ca->nv_allow_cwnd_growth)
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
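	/* cwnd_growth_factor selects the additive-increase step: the cnt
	 * passed to tcp_cong_avoid_ai() is the number of acked packets
	 * needed to grow cwnd by one. Roughly (illustrative): factor 0
	 * gives cnt = cwnd, i.e. Reno's ~1 packet/RTT; factor 1 gives
	 * cnt = cwnd/2, ~2 packets/RTT; factor -1 gives cnt = 2*cwnd,
	 * ~1 packet per 2 RTTs. The max(4U, ...) below caps growth at
	 * about cwnd/4 per RTT.
	 */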
	if (ca->cwnd_growth_factor < 0) {
		cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor;
		tcp_cong_avoid_ai(tp, cnt, acked);
	} else {
		cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor);
		tcp_cong_avoid_ai(tp, cnt, acked);
	}
}

static u32 tcpnv_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max((tcp_snd_cwnd(tp) * nv_loss_dec_factor) >> 10, 2U);
}

static void tcpnv_state(struct sock *sk, u8 new_state)
{
	struct tcpnv *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Open && ca->nv_reset) {
		tcpnv_reset(ca, sk);
	} else if (new_state == TCP_CA_Loss || new_state == TCP_CA_CWR ||
		new_state == TCP_CA_Recovery) {
		ca->nv_reset = 1;
		ca->nv_allow_cwnd_growth = 0;
		if (new_state == TCP_CA_Loss) {
			/* Reset cwnd growth factor to Reno value */
			if (ca->cwnd_growth_factor > 0)
				ca->cwnd_growth_factor = 0;
			/* Decrease growth rate if allowed */
			if (nv_cwnd_growth_rate_neg > 0 &&
			    ca->cwnd_growth_factor > -8)
				ca->cwnd_growth_factor--;
		}
	}
}

/* Do congestion avoidance calculations for TCP-NV
 */
static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcpnv *ca = inet_csk_ca(sk);
	unsigned long now = jiffies;
	u64 rate64;
	u32 rate, max_win, cwnd_by_slope;
	u32 avg_rtt;
	u32 bytes_acked = 0;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* If not in TCP_CA_Open or TCP_CA_Disorder states, skip. */
	if (icsk->icsk_ca_state != TCP_CA_Open &&
	    icsk->icsk_ca_state != TCP_CA_Disorder)
		return;

	/* Stop cwnd growth if we were in catch up mode */
	if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) {
		ca->nv_catchup = 0;
		ca->nv_allow_cwnd_growth = 0;
	}

	bytes_acked = tp->snd_una - ca->nv_last_snd_una;
	ca->nv_last_snd_una = tp->snd_una;

	if (sample->in_flight == 0)
		return;

	/* Calculate moving average of RTT */
	if (nv_rtt_factor > 0) {
		if (ca->nv_last_rtt > 0) {
			avg_rtt = (((u64)sample->rtt_us) * nv_rtt_factor +
				   ((u64)ca->nv_last_rtt)
				   * (256 - nv_rtt_factor)) >> 8;
		} else {
			avg_rtt = sample->rtt_us;
			ca->nv_min_rtt = avg_rtt << 1;
		}
		ca->nv_last_rtt = avg_rtt;
	} else {
		avg_rtt = sample->rtt_us;
	}

	/* rate in 100's bits per second */
	rate64 = ((u64)sample->in_flight) * 80000;
	do_div(rate64, avg_rtt ?: 1);
	rate = (u32)rate64;
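	/* Worked example (illustrative numbers): in_flight = 145000 bytes
	 * and avg_rtt = 100 usec give rate = 145000 * 80000 / 100 =
	 * 116,000,000, i.e. 11.6 Gbit/s expressed in units of 100 bit/s
	 * (bytes * 8e6 / usec, divided by 100).
	 */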

	/* Remember the maximum rate seen during this RTT
	 * Note: It may be more than one RTT. This function should be
	 *       called at least nv_dec_eval_min_calls times.
	 */
	if (ca->nv_rtt_max_rate < rate)
		ca->nv_rtt_max_rate = rate;

	/* We have valid information, increment counter */
	if (ca->nv_eval_call_cnt < 255)
		ca->nv_eval_call_cnt++;

	/* Apply bounds to rtt. Only used to update min_rtt */
	avg_rtt = nv_get_bounded_rtt(ca, avg_rtt);

	/* update min rtt if necessary */
	if (avg_rtt < ca->nv_min_rtt)
		ca->nv_min_rtt = avg_rtt;

	/* update future min_rtt if necessary */
	if (avg_rtt < ca->nv_min_rtt_new)
		ca->nv_min_rtt_new = avg_rtt;

	/* nv_min_rtt is updated with the minimum (possibly averaged) rtt
	 * seen in the last nv_reset_period seconds (i.e. a warm reset).
	 * This new nv_min_rtt will continue to be updated and be used
	 * for another nv_reset_period seconds, when it will be updated
	 * again.
	 * In practice we introduce some randomness, so the actual period used
	 * is chosen randomly from the range:
	 *   [nv_reset_period*3/4, nv_reset_period*5/4)
	 */
	if (time_after_eq(now, ca->nv_min_rtt_reset_jiffies)) {
		unsigned char rand;

		ca->nv_min_rtt = ca->nv_min_rtt_new;
		ca->nv_min_rtt_new = NV_INIT_RTT;
		get_random_bytes(&rand, 1);
		ca->nv_min_rtt_reset_jiffies =
			now + ((nv_reset_period * (384 + rand) * HZ) >> 9);
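		/* rand is in [0, 255], so (384 + rand) / 512 spans
		 * [0.75, 1.25), which yields the randomized reset period
		 * described in the comment above.
		 */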
		/* Every so often we decrease ca->nv_min_cwnd in case previous
		 * value is no longer accurate.
		 */
		ca->nv_min_cwnd = max(ca->nv_min_cwnd / 2, NV_MIN_CWND);
	}

	/* Once per RTT check if we need to do congestion avoidance */
	if (before(ca->nv_rtt_start_seq, tp->snd_una)) {
		ca->nv_rtt_start_seq = tp->snd_nxt;
		if (ca->nv_rtt_cnt < 0xff)
			/* Increase counter for RTTs without CA decision */
			ca->nv_rtt_cnt++;

		/* If this function is only called once within an RTT
		 * the cwnd is probably too small (in some cases due to
		 * tso, lro or interrupt coalescence), so we increase
		 * ca->nv_min_cwnd.
		 */
		if (ca->nv_eval_call_cnt == 1 &&
		    bytes_acked >= (ca->nv_min_cwnd - 1) * tp->mss_cache &&
		    ca->nv_min_cwnd < (NV_TSO_CWND_BOUND + 1)) {
			ca->nv_min_cwnd = min(ca->nv_min_cwnd
					      + NV_MIN_CWND_GROW,
					      NV_TSO_CWND_BOUND + 1);
			ca->nv_rtt_start_seq = tp->snd_nxt +
				ca->nv_min_cwnd * tp->mss_cache;
			ca->nv_eval_call_cnt = 0;
			ca->nv_allow_cwnd_growth = 1;
			return;
		}

		/* Find the ideal cwnd for current rate from slope
		 * slope = 80000.0 * mss / nv_min_rtt
		 * cwnd_by_slope = nv_rtt_max_rate / slope
		 */
		cwnd_by_slope = (u32)
			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
				  80000ULL * tp->mss_cache);
		max_win = cwnd_by_slope + nv_pad;
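		/* Continuing the illustrative numbers from above:
		 * nv_rtt_max_rate = 116,000,000 (11.6 Gbit/s),
		 * nv_min_rtt = 100 usec and mss_cache = 1450 give
		 * cwnd_by_slope = 116000000 * 100 / (80000 * 1450) = 100
		 * packets, so max_win = 100 + nv_pad = 110.
		 */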

		/* If cwnd > max_win, decrease cwnd
		 * if cwnd < max_win, grow cwnd
		 * else leave the same
		 */
		if (tcp_snd_cwnd(tp) > max_win) {
			/* there is congestion, check that it is ok
			 * to make a CA decision
			 * 1. We should have at least nv_dec_eval_min_calls
			 *    data points before making a CA decision
			 * 2. We only make a congestion decision after
			 *    nv_rtt_min_cnt RTTs
			 */
			if (ca->nv_rtt_cnt < nv_rtt_min_cnt) {
				return;
			} else if (tp->snd_ssthresh == TCP_INFINITE_SSTHRESH) {
				if (ca->nv_eval_call_cnt <
				    nv_ssthresh_eval_min_calls)
					return;
				/* otherwise we will decrease cwnd */
			} else if (ca->nv_eval_call_cnt <
				   nv_dec_eval_min_calls) {
				if (ca->nv_allow_cwnd_growth &&
				    ca->nv_rtt_cnt > nv_stop_rtt_cnt)
					ca->nv_allow_cwnd_growth = 0;
				return;
			}

			/* We have enough data to determine we are congested */
			ca->nv_allow_cwnd_growth = 0;
			tp->snd_ssthresh =
				(nv_ssthresh_factor * max_win) >> 3;
			if (tcp_snd_cwnd(tp) - max_win > 2) {
				/* gap > 2, we do exponential cwnd decrease */
				int dec;

				dec = max(2U, ((tcp_snd_cwnd(tp) - max_win) *
					       nv_cong_dec_mult) >> 7);
				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec);
			} else if (nv_cong_dec_mult > 0) {
				tcp_snd_cwnd_set(tp, max_win);
			}
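			/* Illustrative numbers: with cwnd = 130 and
			 * max_win = 110, ssthresh becomes 8 * 110 / 8 = 110
			 * and dec = max(2, (20 * 38) >> 7) = 5, so cwnd
			 * drops to 125 now and keeps shrinking by ~30% of
			 * the remaining gap on each later evaluation.
			 */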
			if (ca->cwnd_growth_factor > 0)
				ca->cwnd_growth_factor = 0;
			ca->nv_no_cong_cnt = 0;
		} else if (tcp_snd_cwnd(tp) <= max_win - nv_pad_buffer) {
			/* There is no congestion, grow cwnd if allowed */
			if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
				return;

			ca->nv_allow_cwnd_growth = 1;
			ca->nv_no_cong_cnt++;
			if (ca->cwnd_growth_factor < 0 &&
			    nv_cwnd_growth_rate_neg > 0 &&
			    ca->nv_no_cong_cnt > nv_cwnd_growth_rate_neg) {
				ca->cwnd_growth_factor++;
				ca->nv_no_cong_cnt = 0;
			} else if (ca->cwnd_growth_factor >= 0 &&
				   nv_cwnd_growth_rate_pos > 0 &&
				   ca->nv_no_cong_cnt >
				   nv_cwnd_growth_rate_pos) {
				ca->cwnd_growth_factor++;
				ca->nv_no_cong_cnt = 0;
			}
		} else {
			/* cwnd is in-between, so do nothing */
			return;
		}

		/* update state */
		ca->nv_eval_call_cnt = 0;
		ca->nv_rtt_cnt = 0;
		ca->nv_rtt_max_rate = 0;

		/* Don't want to make cwnd < nv_min_cwnd
		 * (it wasn't before; if it is now, it is because NV
		 *  decreased it).
		 */
		if (tcp_snd_cwnd(tp) < nv_min_cwnd)
			tcp_snd_cwnd_set(tp, nv_min_cwnd);
	}
}

/* Extract info for TCP socket info provided via netlink */
static size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
			     union tcp_cc_info *info)
{
	const struct tcpnv *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt = ca->nv_rtt_cnt;
		info->vegas.tcpv_rtt = ca->nv_last_rtt;
		info->vegas.tcpv_minrtt = ca->nv_min_rtt;

		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcpnv __read_mostly = {
	.init		= tcpnv_init,
	.ssthresh	= tcpnv_recalc_ssthresh,
	.cong_avoid	= tcpnv_cong_avoid,
	.set_state	= tcpnv_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.pkts_acked     = tcpnv_acked,
	.get_info	= tcpnv_get_info,

	.owner		= THIS_MODULE,
	.name		= "nv",
};

static int __init tcpnv_register(void)
{
	BUILD_BUG_ON(sizeof(struct tcpnv) > ICSK_CA_PRIV_SIZE);

	return tcp_register_congestion_control(&tcpnv);
}

static void __exit tcpnv_unregister(void)
{
	tcp_unregister_congestion_control(&tcpnv);
}

module_init(tcpnv_register);
module_exit(tcpnv_unregister);

MODULE_AUTHOR("Lawrence Brakmo");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP NV");
MODULE_VERSION("1.0");