xref: /linux/net/dccp/ccids/ccid3.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  *  Copyright (c) 2007   The University of Aberdeen, Scotland, UK
3  *  Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
4  *  Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
5  *
6  *  An implementation of the DCCP protocol
7  *
8  *  This code has been developed by the University of Waikato WAND
9  *  research group. For further information please see http://www.wand.net.nz/
10  *
11  *  This code also uses code from Lulea University, rereleased as GPL by its
12  *  authors:
13  *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
14  *
15  *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
16  *  and to make it work as a loadable module in the DCCP stack written by
17  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
18  *
19  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
20  *
21  *  This program is free software; you can redistribute it and/or modify
22  *  it under the terms of the GNU General Public License as published by
23  *  the Free Software Foundation; either version 2 of the License, or
24  *  (at your option) any later version.
25  *
26  *  This program is distributed in the hope that it will be useful,
27  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
28  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
29  *  GNU General Public License for more details.
30  *
31  *  You should have received a copy of the GNU General Public License
32  *  along with this program; if not, write to the Free Software
33  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35 #include "../dccp.h"
36 #include "ccid3.h"
37 
38 #include <asm/unaligned.h>
39 
40 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
41 static bool ccid3_debug;
42 #define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
43 #else
44 #define ccid3_pr_debug(format, a...)
45 #endif
46 
47 /*
48  *	Transmitter Half-Connection Routines
49  */
50 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
51 static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
52 {
53 	static const char *const ccid3_state_names[] = {
54 	[TFRC_SSTATE_NO_SENT]  = "NO_SENT",
55 	[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
56 	[TFRC_SSTATE_FBACK]    = "FBACK",
57 	};
58 
59 	return ccid3_state_names[state];
60 }
61 #endif
62 
63 static void ccid3_hc_tx_set_state(struct sock *sk,
64 				  enum ccid3_hc_tx_states state)
65 {
66 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
67 	enum ccid3_hc_tx_states oldstate = hc->tx_state;
68 
69 	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
70 		       dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
71 		       ccid3_tx_state_name(state));
72 	WARN_ON(state == oldstate);
73 	hc->tx_state = state;
74 }
75 
76 /*
77  * Compute the initial sending rate X_init in the manner of RFC 3390:
78  *
79  *	X_init  =  min(4 * s, max(2 * s, 4380 bytes)) / RTT
80  *
81  * Note that RFC 3390 uses MSS, RFC 4342 refers to RFC 3390, and rfc3448bis
82  * (rev-02) clarifies the use of RFC 3390 with regard to the above formula.
83  * For consistency with other parts of the code, X_init is scaled by 2^6.
84  */
85 static inline u64 rfc3390_initial_rate(struct sock *sk)
86 {
87 	const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
88 	const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
89 
90 	return scaled_div(w_init << 6, hc->tx_rtt);
91 }
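/*
 * Numerical illustration: with s = 1460 bytes and RTT = 100 ms, w_init =
 * clamp(4380, 2 * 1460, 4 * 1460) = 4380 bytes, giving X_init = 4380/0.1 s
 * = 43800 bytes/sec, stored as 43800 << 6 = 2803200 (this assumes that
 * scaled_div() converts the microsecond RTT into a per-second rate, as it
 * is used throughout this file).
 */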
92 
93 /**
94  * ccid3_update_send_interval  -  Calculate new t_ipi = s / X_inst
95  * This respects the granularity of X_inst (64 * bytes/second).
96  */
97 static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
98 {
99 	hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
100 
101 	DCCP_BUG_ON(hc->tx_t_ipi == 0);
102 	ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
103 		       hc->tx_s, (unsigned int)(hc->tx_x >> 6));
104 }
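/*
 * Numerical illustration: with s = 1460 bytes and X = 43800 bytes/sec
 * (stored as 43800 << 6), t_ipi = s/X = 1460/43800 sec =~ 33333 usec, i.e.
 * about 30 packets per second. The 2^6 scaling of s and X cancels out,
 * assuming scaled_div32() applies the same microsecond-to-second scaling
 * as scaled_div() above.
 */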
105 
106 static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
107 {
108 	u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
109 
110 	return delta / hc->tx_rtt;
111 }
112 
113 /**
114  * ccid3_hc_tx_update_x  -  Update allowed sending rate X
115  * @stamp: most recent time if available - can be left NULL.
116  *
117  * This function tracks draft rfc3448bis, check there for latest details.
118  *
119  * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
120  *       fine-grained resolution of sending rates. This requires scaling by 2^6
121  *       throughout the code. Only X_calc is unscaled (in bytes/second).
122  *
123  */
124 static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
125 {
126 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
127 	__u64 min_rate = 2 * hc->tx_x_recv;
128 	const __u64 old_x = hc->tx_x;
129 	ktime_t now = stamp ? *stamp : ktime_get_real();
130 
131 	/*
132 	 * Handle IDLE periods: do not reduce below RFC3390 initial sending rate
133 	 * when idling [RFC 4342, 5.1]. Definition of idling is from rfc3448bis:
134 	 * a sender is idle if it has not sent anything over a 2-RTT-period.
135 	 * For consistency with X and X_recv, min_rate is also scaled by 2^6.
136 	 */
137 	if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) {
138 		min_rate = rfc3390_initial_rate(sk);
139 		min_rate = max(min_rate, 2 * hc->tx_x_recv);
140 	}
141 
142 	if (hc->tx_p > 0) {
143 
144 		hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate);
145 		hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
146 
147 	} else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) {
148 
149 		hc->tx_x = min(2 * hc->tx_x, min_rate);
150 		hc->tx_x = max(hc->tx_x,
151 			       scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt));
152 		hc->tx_t_ld = now;
153 	}
154 
155 	if (hc->tx_x != old_x) {
156 		ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
157 			       "X_recv=%u\n", (unsigned int)(old_x >> 6),
158 			       (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
159 			       (unsigned int)(hc->tx_x_recv >> 6));
160 
161 		ccid3_update_send_interval(hc);
162 	}
163 }
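/*
 * Illustration of the two branches above: with loss (p > 0), X_calc = 20000
 * bytes/sec and X_recv = 15000 bytes/sec (min_rate = 2 * X_recv = 30000),
 * the new X = min(20000, 30000) = 20000 bytes/sec; the s/t_mbi floor
 * (1460/64 =~ 23 bytes/sec, one packet per 64 seconds) only matters when
 * X_calc collapses. Without loss, X at most doubles once per RTT, capped
 * by min_rate and floored at one packet per RTT (s/RTT).
 */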
164 
165 /**
166  *	ccid3_hc_tx_update_s - Track the mean packet size `s'
167  *	@len: DCCP packet payload size in bytes
168  *
169  *	cf. RFC 4342, 5.3 and  RFC 3448, 4.1
170  */
171 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
172 {
173 	const u16 old_s = hc->tx_s;
174 
175 	hc->tx_s = tfrc_ewma(hc->tx_s, len, 9);
176 
177 	if (hc->tx_s != old_s)
178 		ccid3_update_send_interval(hc);
179 }
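/*
 * Example: assuming tfrc_ewma(avg, val, 9) computes (9 * avg + val) / 10
 * as in the TFRC library, an existing s = 1000 bytes and a new 500-byte
 * payload yield s = (9000 + 500)/10 = 950 bytes, so a single packet moves
 * the estimate by at most 10%.
 */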
180 
181 /*
182  *	Update Window Counter using the algorithm from [RFC 4342, 8.1].
183  *	As elsewhere, RTT > 0 is guaranteed since dccp_sample_rtt() is used.
184  */
185 static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc,
186 						ktime_t now)
187 {
188 	u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count),
189 	    quarter_rtts = (4 * delta) / hc->tx_rtt;
190 
191 	if (quarter_rtts > 0) {
192 		hc->tx_t_last_win_count = now;
193 		hc->tx_last_win_count  += min(quarter_rtts, 5U);
194 		hc->tx_last_win_count  &= 0xF;		/* mod 16 */
195 	}
196 }
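/*
 * Example: with RTT = 100 ms and 130 ms since the last update,
 * quarter_rtts = (4 * 130)/100 = 5, so the counter advances by min(5, 5)
 * = 5 and wraps modulo 16; gaps shorter than RTT/4 leave both the counter
 * and its timestamp unchanged.
 */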
197 
198 static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
199 {
200 	struct sock *sk = (struct sock *)data;
201 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
202 	unsigned long t_nfb = USEC_PER_SEC / 5;
203 
204 	bh_lock_sock(sk);
205 	if (sock_owned_by_user(sk)) {
206 		/* Try again later. */
207 		/* XXX: set some sensible MIB */
208 		goto restart_timer;
209 	}
210 
211 	ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
212 		       ccid3_tx_state_name(hc->tx_state));
213 
214 	/* Ignore and do not restart after leaving the established state */
215 	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
216 		goto out;
217 
218 	/* Reset feedback state to "no feedback received" */
219 	if (hc->tx_state == TFRC_SSTATE_FBACK)
220 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
221 
222 	/*
223 	 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
224 	 * RTO is 0 if and only if no feedback has been received yet.
225 	 */
226 	if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
227 
228 		/* halve send rate directly */
229 		hc->tx_x = max(hc->tx_x / 2,
230 			       (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
231 		ccid3_update_send_interval(hc);
232 	} else {
233 		/*
234 		 *  Modify the cached value of X_recv
235 		 *
236 		 *  If (X_calc > 2 * X_recv)
237 		 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
238 		 *  Else
239 		 *    X_recv = X_calc / 4;
240 		 *
241 		 *  Note that X_recv is scaled by 2^6 while X_calc is not
242 		 */
243 		if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
244 			hc->tx_x_recv =
245 				max(hc->tx_x_recv / 2,
246 				    (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI));
247 		else {
248 			hc->tx_x_recv = hc->tx_x_calc;
249 			hc->tx_x_recv <<= 4;
250 		}
251 		ccid3_hc_tx_update_x(sk, NULL);
252 	}
253 	ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n",
254 			(unsigned long long)hc->tx_x);
255 
256 	/*
257 	 * Set new timeout for the nofeedback timer.
258 	 * See comments in packet_recv() regarding the value of t_RTO.
259 	 */
260 	if (unlikely(hc->tx_t_rto == 0))	/* no feedback received yet */
261 		t_nfb = TFRC_INITIAL_TIMEOUT;
262 	else
263 		t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
264 
265 restart_timer:
266 	sk_reset_timer(sk, &hc->tx_no_feedback_timer,
267 			   jiffies + usecs_to_jiffies(t_nfb));
268 out:
269 	bh_unlock_sock(sk);
270 	sock_put(sk);
271 }
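/*
 * Illustration of the X_recv backoff above: with X_calc = 10000 bytes/sec
 * and a cached X_recv of 8000 bytes/sec, X_calc > 2 * X_recv does not hold
 * (10000 < 16000), so X_recv becomes X_calc/4 = 2500 bytes/sec before X is
 * recomputed; had X_recv been 4000 bytes/sec, it would instead have been
 * halved to 2000 bytes/sec (never dropping below s/(2 * t_mbi)).
 */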
272 
273 /**
274  * ccid3_hc_tx_send_packet  -  Delay-based dequeueing of TX packets
275  * @skb: next packet candidate to send on @sk
276  *
277  * This function uses the convention of ccid_packet_dequeue_eval() and
278  * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
279  */
280 static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
281 {
282 	struct dccp_sock *dp = dccp_sk(sk);
283 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
284 	ktime_t now = ktime_get_real();
285 	s64 delay;
286 
287 	/*
288 	 * This function is called only for Data and DataAck packets. Sending
289 	 * zero-sized Data(Ack)s is theoretically possible, but for congestion
290 	 * control this case is pathological - ignore it.
291 	 */
292 	if (unlikely(skb->len == 0))
293 		return -EBADMSG;
294 
295 	if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
296 		sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
297 			       usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
298 		hc->tx_last_win_count	= 0;
299 		hc->tx_t_last_win_count = now;
300 
301 		/* Set t_0 for initial packet */
302 		hc->tx_t_nom = now;
303 
304 		hc->tx_s = skb->len;
305 
306 		/*
307 		 * Use initial RTT sample when available: recommended by erratum
308 		 * to RFC 4342. This implements the initialisation procedure of
309 		 * draft rfc3448bis, section 4.2. Remember, X is scaled by 2^6.
310 		 */
311 		if (dp->dccps_syn_rtt) {
312 			ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
313 			hc->tx_rtt  = dp->dccps_syn_rtt;
314 			hc->tx_x    = rfc3390_initial_rate(sk);
315 			hc->tx_t_ld = now;
316 		} else {
317 			/*
318 			 * Sender does not have RTT sample:
319 			 * - set fallback RTT (RFC 4340, 3.4) since an RTT value
320 			 *   is needed in several parts (e.g. window counter);
321 			 * - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
322 			 */
323 			hc->tx_rtt = DCCP_FALLBACK_RTT;
324 			hc->tx_x   = hc->tx_s;
325 			hc->tx_x <<= 6;
326 		}
327 		ccid3_update_send_interval(hc);
328 
329 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
330 
331 	} else {
332 		delay = ktime_us_delta(hc->tx_t_nom, now);
333 		ccid3_pr_debug("delay=%ld\n", (long)delay);
334 		/*
335 		 *	Scheduling of packet transmissions (RFC 5348, 8.3)
336 		 *
337 		 * if (t_now > t_nom - delta)
338 		 *       // send the packet now
339 		 * else
340 		 *       // send the packet in (t_nom - t_now) milliseconds.
341 		 */
342 		if (delay >= TFRC_T_DELTA)
343 			return (u32)delay / USEC_PER_MSEC;
344 
345 		ccid3_hc_tx_update_win_count(hc, now);
346 	}
347 
348 	/* prepare to send now (add options etc.) */
349 	dp->dccps_hc_tx_insert_options = 1;
350 	DCCP_SKB_CB(skb)->dccpd_ccval  = hc->tx_last_win_count;
351 
352 	/* set the nominal send time for the following packet */
353 	hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
354 	return CCID_PACKET_SEND_AT_ONCE;
355 }
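/*
 * Example for the send-time check above: if t_nom lies 25 ms in the future
 * and assuming TFRC_T_DELTA is one millisecond, the function returns
 * 25000/USEC_PER_MSEC = 25, telling the dequeueing logic to retry in
 * 25 ms; a delay below t_delta means "send at once".
 */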
356 
357 static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
358 {
359 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
360 
361 	ccid3_hc_tx_update_s(hc, len);
362 
363 	if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
364 		DCCP_CRIT("packet history - out of memory!");
365 }
366 
367 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
368 {
369 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
370 	struct tfrc_tx_hist_entry *acked;
371 	ktime_t now;
372 	unsigned long t_nfb;
373 	u32 r_sample;
374 
375 	/* we are only interested in ACKs */
376 	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
377 	      DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
378 		return;
379 	/*
380 	 * Locate the acknowledged packet in the TX history.
381 	 *
382 	 * Returning "entry not found" here can for instance happen when
383 	 *  - the host has not sent out anything (e.g. a passive server),
384 	 *  - the Ack is outdated (packet with higher Ack number was received),
385 	 *  - it is a bogus Ack (for a packet not sent on this connection).
386 	 */
387 	acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
388 	if (acked == NULL)
389 		return;
390 	/* For the sake of RTT sampling, ignore/remove all older entries */
391 	tfrc_tx_hist_purge(&acked->next);
392 
393 	/* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
394 	now	  = ktime_get_real();
395 	r_sample  = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
396 	hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
397 
398 	/*
399 	 * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
400 	 */
401 	if (hc->tx_state == TFRC_SSTATE_NO_FBACK) {
402 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
403 
404 		if (hc->tx_t_rto == 0) {
405 			/*
406 			 * Initial feedback packet: Larger Initial Windows (4.2)
407 			 */
408 			hc->tx_x    = rfc3390_initial_rate(sk);
409 			hc->tx_t_ld = now;
410 
411 			ccid3_update_send_interval(hc);
412 
413 			goto done_computing_x;
414 		} else if (hc->tx_p == 0) {
415 			/*
416 			 * First feedback after nofeedback timer expiry (4.3)
417 			 */
418 			goto done_computing_x;
419 		}
420 	}
421 
422 	/* Update sending rate (step 4 of [RFC 3448, 4.3]) */
423 	if (hc->tx_p > 0)
424 		hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p);
425 	ccid3_hc_tx_update_x(sk, &now);
426 
427 done_computing_x:
428 	ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, "
429 			       "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
430 			       dccp_role(sk), sk, hc->tx_rtt, r_sample,
431 			       hc->tx_s, hc->tx_p, hc->tx_x_calc,
432 			       (unsigned int)(hc->tx_x_recv >> 6),
433 			       (unsigned int)(hc->tx_x >> 6));
434 
435 	/* unschedule no feedback timer */
436 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
437 
438 	/*
439 	 * As we have calculated new values for t_ipi, delta and t_nom, it is
440 	 * possible that we can now send a packet, so wake up dccp_wait_for_ccid
441 	 */
442 	sk->sk_write_space(sk);
443 
444 	/*
445 	 * Update timeout interval for the nofeedback timer. In order to control
446 	 * rate halving on networks with very low RTTs (<= 1 ms), use per-route
447 	 * tunable RTAX_RTO_MIN value as the lower bound.
448 	 */
449 	hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
450 				  USEC_PER_SEC/HZ * tcp_rto_min(sk));
451 	/*
452 	 * Schedule no feedback timer to expire in
453 	 * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
454 	 */
455 	t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
456 
457 	ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
458 		       "expire in %lu jiffies (%luus)\n",
459 		       dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb);
460 
461 	sk_reset_timer(sk, &hc->tx_no_feedback_timer,
462 			   jiffies + usecs_to_jiffies(t_nfb));
463 }
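/*
 * Example for the timer arithmetic above: with RTT = 100 ms (and a default
 * RTO_MIN of 200 ms below that), t_RTO = 4 * RTT = 400 ms; with
 * t_ipi = 33 ms the nofeedback timer is re-armed for max(400, 2 * 33)
 * = 400 ms, so roughly four RTTs pass before the sender reacts to missing
 * feedback.
 */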
464 
465 static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
466 				     u8 option, u8 *optval, u8 optlen)
467 {
468 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
469 	__be32 opt_val;
470 
471 	switch (option) {
472 	case TFRC_OPT_RECEIVE_RATE:
473 	case TFRC_OPT_LOSS_EVENT_RATE:
474 		/* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
475 		if (packet_type == DCCP_PKT_DATA)
476 			break;
477 		if (unlikely(optlen != 4)) {
478 			DCCP_WARN("%s(%p), invalid len %d for %u\n",
479 				  dccp_role(sk), sk, optlen, option);
480 			return -EINVAL;
481 		}
482 		opt_val = ntohl(get_unaligned((__be32 *)optval));
483 
484 		if (option == TFRC_OPT_RECEIVE_RATE) {
485 			/* Receive Rate is kept in units of 64 bytes/second */
486 			hc->tx_x_recv = opt_val;
487 			hc->tx_x_recv <<= 6;
488 
489 			ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
490 				       dccp_role(sk), sk, opt_val);
491 		} else {
492 			/* Update the fixpoint Loss Event Rate fraction */
493 			hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
494 
495 			ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
496 				       dccp_role(sk), sk, opt_val);
497 		}
498 	}
499 	return 0;
500 }
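/*
 * Example: a Receive Rate option carrying 50000 is stored as 50000 << 6
 * = 3200000, i.e. 50000 bytes/sec in the 2^6 fixpoint used for X/X_recv;
 * a Loss Event Rate option carrying 100 (one loss event per 100 packets)
 * becomes p = 10000, assuming tfrc_invert_loss_event_rate() maps 1/100
 * onto the 0..1E6 fixpoint scale expected by tfrc_calc_x().
 */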
501 
502 static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
503 {
504 	struct ccid3_hc_tx_sock *hc = ccid_priv(ccid);
505 
506 	hc->tx_state = TFRC_SSTATE_NO_SENT;
507 	hc->tx_hist  = NULL;
508 	setup_timer(&hc->tx_no_feedback_timer,
509 			ccid3_hc_tx_no_feedback_timer, (unsigned long)sk);
510 	return 0;
511 }
512 
513 static void ccid3_hc_tx_exit(struct sock *sk)
514 {
515 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
516 
517 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
518 	tfrc_tx_hist_purge(&hc->tx_hist);
519 }
520 
521 static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
522 {
523 	info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
524 	info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
525 }
526 
527 static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
528 				  u32 __user *optval, int __user *optlen)
529 {
530 	const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
531 	struct tfrc_tx_info tfrc;
532 	const void *val;
533 
534 	switch (optname) {
535 	case DCCP_SOCKOPT_CCID_TX_INFO:
536 		if (len < sizeof(tfrc))
537 			return -EINVAL;
538 		memset(&tfrc, 0, sizeof(tfrc));
539 		tfrc.tfrctx_x	   = hc->tx_x;
540 		tfrc.tfrctx_x_recv = hc->tx_x_recv;
541 		tfrc.tfrctx_x_calc = hc->tx_x_calc;
542 		tfrc.tfrctx_rtt	   = hc->tx_rtt;
543 		tfrc.tfrctx_p	   = hc->tx_p;
544 		tfrc.tfrctx_rto	   = hc->tx_t_rto;
545 		tfrc.tfrctx_ipi	   = hc->tx_t_ipi;
546 		len = sizeof(tfrc);
547 		val = &tfrc;
548 		break;
549 	default:
550 		return -ENOPROTOOPT;
551 	}
552 
553 	if (put_user(len, optlen) || copy_to_user(optval, val, len))
554 		return -EFAULT;
555 
556 	return 0;
557 }
558 
559 /*
560  *	Receiver Half-Connection Routines
561  */
562 
563 /* CCID3 feedback types */
564 enum ccid3_fback_type {
565 	CCID3_FBACK_NONE = 0,
566 	CCID3_FBACK_INITIAL,
567 	CCID3_FBACK_PERIODIC,
568 	CCID3_FBACK_PARAM_CHANGE
569 };
570 
571 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
572 static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
573 {
574 	static const char *const ccid3_rx_state_names[] = {
575 	[TFRC_RSTATE_NO_DATA] = "NO_DATA",
576 	[TFRC_RSTATE_DATA]    = "DATA",
577 	};
578 
579 	return ccid3_rx_state_names[state];
580 }
581 #endif
582 
583 static void ccid3_hc_rx_set_state(struct sock *sk,
584 				  enum ccid3_hc_rx_states state)
585 {
586 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
587 	enum ccid3_hc_rx_states oldstate = hc->rx_state;
588 
589 	ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
590 		       dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
591 		       ccid3_rx_state_name(state));
592 	WARN_ON(state == oldstate);
593 	hc->rx_state = state;
594 }
595 
596 static void ccid3_hc_rx_send_feedback(struct sock *sk,
597 				      const struct sk_buff *skb,
598 				      enum ccid3_fback_type fbtype)
599 {
600 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
601 	struct dccp_sock *dp = dccp_sk(sk);
602 	ktime_t now = ktime_get_real();
603 	s64 delta = 0;
604 
605 	switch (fbtype) {
606 	case CCID3_FBACK_INITIAL:
607 		hc->rx_x_recv = 0;
608 		hc->rx_pinv   = ~0U;   /* see RFC 4342, 8.5 */
609 		break;
610 	case CCID3_FBACK_PARAM_CHANGE:
611 		/*
612 		 * When parameters change (new loss or p > p_prev), we do not
613 		 * have a reliable estimate for R_m of [RFC 3448, 6.2] and so
614 		 * need to  reuse the previous value of X_recv. However, when
615 		 * X_recv was 0 (due to early loss), this would kill X down to
616 		 * s/t_mbi (i.e. one packet in 64 seconds).
617 		 * To avoid such drastic reduction, we approximate X_recv as
618 		 * the number of bytes since last feedback.
619 		 * This is a safe fallback, since X is bounded above by X_calc.
620 		 */
621 		if (hc->rx_x_recv > 0)
622 			break;
623 		/* fall through */
624 	case CCID3_FBACK_PERIODIC:
625 		delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
626 		if (delta <= 0)
627 			DCCP_BUG("delta (%ld) <= 0", (long)delta);
628 		else
629 			hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
630 		break;
631 	default:
632 		return;
633 	}
634 
635 	ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
636 		       hc->rx_x_recv, hc->rx_pinv);
637 
638 	hc->rx_tstamp_last_feedback = now;
639 	hc->rx_last_counter	    = dccp_hdr(skb)->dccph_ccval;
640 	hc->rx_bytes_recv	    = 0;
641 
642 	dp->dccps_hc_rx_insert_options = 1;
643 	dccp_send_ack(sk);
644 }
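/*
 * Example: if 14600 bytes arrived during the 100 ms since the last
 * feedback, the periodic branch reports X_recv = 14600 bytes / 0.1 s
 * = 146000 bytes/sec (scaled_div32() again turning the microsecond delta
 * into a per-second rate), after which counter, timestamp and byte count
 * are reset for the next feedback interval.
 */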
645 
646 static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
647 {
648 	const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
649 	__be32 x_recv, pinv;
650 
651 	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
652 		return 0;
653 
654 	if (dccp_packet_without_ack(skb))
655 		return 0;
656 
657 	x_recv = htonl(hc->rx_x_recv);
658 	pinv   = htonl(hc->rx_pinv);
659 
660 	if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
661 			       &pinv, sizeof(pinv)) ||
662 	    dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
663 			       &x_recv, sizeof(x_recv)))
664 		return -1;
665 
666 	return 0;
667 }
668 
669 /**
670  * ccid3_first_li  -  Implements [RFC 5348, 6.3.1]
671  *
672  * Determine the length of the first loss interval via inverse lookup.
673  * Assume that X_recv can be computed by the throughput equation
674  *		    s
675  *	X_recv = --------
676  *		 R * fval
677  * Find some p such that f(p) = fval; return 1/p (scaled).
678  */
679 static u32 ccid3_first_li(struct sock *sk)
680 {
681 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
682 	u32 x_recv, p, delta;
683 	u64 fval;
684 
685 	if (hc->rx_rtt == 0) {
686 		DCCP_WARN("No RTT estimate available, using fallback RTT\n");
687 		hc->rx_rtt = DCCP_FALLBACK_RTT;
688 	}
689 
690 	delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
691 	x_recv = scaled_div32(hc->rx_bytes_recv, delta);
692 	if (x_recv == 0) {		/* would also trigger divide-by-zero */
693 		DCCP_WARN("X_recv==0\n");
694 		if (hc->rx_x_recv == 0) {
695 			DCCP_BUG("stored value of X_recv is zero");
696 			return ~0U;
697 		}
698 		x_recv = hc->rx_x_recv;
699 	}
700 
701 	fval = scaled_div(hc->rx_s, hc->rx_rtt);
702 	fval = scaled_div32(fval, x_recv);
703 	p = tfrc_calc_x_reverse_lookup(fval);
704 
705 	ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
706 		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
707 
708 	return p == 0 ? ~0U : scaled_div(1, p);
709 }
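/*
 * Example: with s = 1460 bytes, R = 100 ms and a measured X_recv = 29200
 * bytes/sec, fval = s/(R * X_recv) = 0.5 (500000 in the 1E6 fixpoint
 * assumed here); the reverse lookup then yields the p solving f(p) = fval
 * in the TCP throughput equation, and the first loss interval is reported
 * as its inverse 1/p, so a higher measured rate implies a longer interval.
 */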
710 
711 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
712 {
713 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
714 	enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE;
715 	const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
716 	const bool is_data_packet = dccp_data_packet(skb);
717 
718 	if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) {
719 		if (is_data_packet) {
720 			const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
721 			do_feedback = CCID3_FBACK_INITIAL;
722 			ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
723 			hc->rx_s = payload;
724 			/*
725 			 * Not necessary to update rx_bytes_recv here,
726 			 * since X_recv = 0 for the first feedback packet (cf.
727 			 * RFC 3448, 6.3) -- gerrit
728 			 */
729 		}
730 		goto update_records;
731 	}
732 
733 	if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
734 		return; /* done receiving */
735 
736 	if (is_data_packet) {
737 		const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
738 		/*
739 		 * Update moving-average of s and the sum of received payload bytes
740 		 */
741 		hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9);
742 		hc->rx_bytes_recv += payload;
743 	}
744 
745 	/*
746 	 * Perform loss detection and handle pending losses
747 	 */
748 	if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist,
749 				skb, ndp, ccid3_first_li, sk)) {
750 		do_feedback = CCID3_FBACK_PARAM_CHANGE;
751 		goto done_receiving;
752 	}
753 
754 	if (tfrc_rx_hist_loss_pending(&hc->rx_hist))
755 		return; /* done receiving */
756 
757 	/*
758 	 * Handle data packets: RTT sampling and monitoring p
759 	 */
760 	if (unlikely(!is_data_packet))
761 		goto update_records;
762 
763 	if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) {
764 		const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
765 		/*
766 		 * Empty loss history: no loss so far, hence p stays 0.
767 		 * Sample RTT values, since an RTT estimate is required for the
768 		 * computation of p when the first loss occurs; RFC 3448, 6.3.1.
769 		 */
770 		if (sample != 0)
771 			hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9);
772 
773 	} else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
774 		/*
775 		 * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean
776 		 * has decreased (resp. p has increased), send feedback now.
777 		 */
778 		do_feedback = CCID3_FBACK_PARAM_CHANGE;
779 	}
780 
781 	/*
782 	 * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3
783 	 */
784 	if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
785 		do_feedback = CCID3_FBACK_PERIODIC;
786 
787 update_records:
788 	tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
789 
790 done_receiving:
791 	if (do_feedback)
792 		ccid3_hc_rx_send_feedback(sk, skb, do_feedback);
793 }
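/*
 * Note on the periodic-feedback check above: the CCVal window counter
 * advances every quarter RTT on the sender (mod 16), so a difference
 * greater than 3 between the current packet's counter and the value
 * recorded when feedback was last sent means at least one full RTT has
 * elapsed, triggering CCID3_FBACK_PERIODIC.
 */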
794 
795 static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
796 {
797 	struct ccid3_hc_rx_sock *hc = ccid_priv(ccid);
798 
799 	hc->rx_state = TFRC_RSTATE_NO_DATA;
800 	tfrc_lh_init(&hc->rx_li_hist);
801 	return tfrc_rx_hist_alloc(&hc->rx_hist);
802 }
803 
804 static void ccid3_hc_rx_exit(struct sock *sk)
805 {
806 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
807 
808 	tfrc_rx_hist_purge(&hc->rx_hist);
809 	tfrc_lh_cleanup(&hc->rx_li_hist);
810 }
811 
812 static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
813 {
814 	info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
815 	info->tcpi_options  |= TCPI_OPT_TIMESTAMPS;
816 	info->tcpi_rcv_rtt  = ccid3_hc_rx_sk(sk)->rx_rtt;
817 }
818 
819 static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
820 				  u32 __user *optval, int __user *optlen)
821 {
822 	const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
823 	struct tfrc_rx_info rx_info;
824 	const void *val;
825 
826 	switch (optname) {
827 	case DCCP_SOCKOPT_CCID_RX_INFO:
828 		if (len < sizeof(rx_info))
829 			return -EINVAL;
830 		rx_info.tfrcrx_x_recv = hc->rx_x_recv;
831 		rx_info.tfrcrx_rtt    = hc->rx_rtt;
832 		rx_info.tfrcrx_p      = tfrc_invert_loss_event_rate(hc->rx_pinv);
833 		len = sizeof(rx_info);
834 		val = &rx_info;
835 		break;
836 	default:
837 		return -ENOPROTOOPT;
838 	}
839 
840 	if (put_user(len, optlen) || copy_to_user(optval, val, len))
841 		return -EFAULT;
842 
843 	return 0;
844 }
845 
846 struct ccid_operations ccid3_ops = {
847 	.ccid_id		   = DCCPC_CCID3,
848 	.ccid_name		   = "TCP-Friendly Rate Control",
849 	.ccid_hc_tx_obj_size	   = sizeof(struct ccid3_hc_tx_sock),
850 	.ccid_hc_tx_init	   = ccid3_hc_tx_init,
851 	.ccid_hc_tx_exit	   = ccid3_hc_tx_exit,
852 	.ccid_hc_tx_send_packet	   = ccid3_hc_tx_send_packet,
853 	.ccid_hc_tx_packet_sent	   = ccid3_hc_tx_packet_sent,
854 	.ccid_hc_tx_packet_recv	   = ccid3_hc_tx_packet_recv,
855 	.ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
856 	.ccid_hc_rx_obj_size	   = sizeof(struct ccid3_hc_rx_sock),
857 	.ccid_hc_rx_init	   = ccid3_hc_rx_init,
858 	.ccid_hc_rx_exit	   = ccid3_hc_rx_exit,
859 	.ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
860 	.ccid_hc_rx_packet_recv	   = ccid3_hc_rx_packet_recv,
861 	.ccid_hc_rx_get_info	   = ccid3_hc_rx_get_info,
862 	.ccid_hc_tx_get_info	   = ccid3_hc_tx_get_info,
863 	.ccid_hc_rx_getsockopt	   = ccid3_hc_rx_getsockopt,
864 	.ccid_hc_tx_getsockopt	   = ccid3_hc_tx_getsockopt,
865 };
866 
867 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
868 module_param(ccid3_debug, bool, 0644);
869 MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages");
870 #endif
871