xref: /linux/net/ipv4/tcp_input.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Florian La Roche, <flla@stud.uni-sb.de>
15  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
17  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
18  *		Matthew Dillon, <dillon@apollo.west.oic.com>
19  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20  *		Jorge Cwik, <jorge@laser.satlink.net>
21  */
22 
23 /*
24  * Changes:
25  *		Pedro Roque	:	Fast Retransmit/Recovery.
26  *					Two receive queues.
27  *					Retransmit queue handled by TCP.
28  *					Better retransmit timer handling.
29  *					New congestion avoidance.
30  *					Header prediction.
31  *					Variable renaming.
32  *
33  *		Eric		:	Fast Retransmit.
34  *		Randy Scott	:	MSS option defines.
35  *		Eric Schenk	:	Fixes to slow start algorithm.
36  *		Eric Schenk	:	Yet another double ACK bug.
37  *		Eric Schenk	:	Delayed ACK bug fixes.
38  *		Eric Schenk	:	Floyd style fast retrans war avoidance.
39  *		David S. Miller	:	Don't allow zero congestion window.
40  *		Eric Schenk	:	Fix retransmitter so that it sends
41  *					next packet on ack of previous packet.
42  *		Andi Kleen	:	Moved open_request checking here
43  *					and process RSTs for open_requests.
44  *		Andi Kleen	:	Better prune_queue, and other fixes.
45  *		Andrey Savochkin:	Fix RTT measurements in the presence of
46  *					timestamps.
47  *		Andrey Savochkin:	Check sequence numbers correctly when
48  *					removing SACKs due to in sequence incoming
49  *					data segments.
50  *		Andi Kleen:		Make sure we never ack data there is not
51  *					enough room for. Also make this condition
52  *					a fatal error if it might still happen.
53  *		Andi Kleen:		Add tcp_measure_rcv_mss to make
54  *					connections with MSS<min(MTU,ann. MSS)
55  *					work without delayed acks.
56  *		Andi Kleen:		Process packets with PSH set in the
57  *					fast path.
58  *		J Hadi Salim:		ECN support
59  *	 	Andrei Gurtov,
60  *		Pasi Sarolahti,
61  *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
62  *					engine. Lots of bugs are found.
63  *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
64  */
65 
66 #include <linux/config.h>
67 #include <linux/mm.h>
68 #include <linux/module.h>
69 #include <linux/sysctl.h>
70 #include <net/tcp.h>
71 #include <net/inet_common.h>
72 #include <linux/ipsec.h>
73 #include <asm/unaligned.h>
74 #include <net/netdma.h>
75 
76 int sysctl_tcp_timestamps = 1;
77 int sysctl_tcp_window_scaling = 1;
78 int sysctl_tcp_sack = 1;
79 int sysctl_tcp_fack = 1;
80 int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
81 int sysctl_tcp_ecn;
82 int sysctl_tcp_dsack = 1;
83 int sysctl_tcp_app_win = 31;
84 int sysctl_tcp_adv_win_scale = 2;
85 
86 int sysctl_tcp_stdurg;
87 int sysctl_tcp_rfc1337;
88 int sysctl_tcp_max_orphans = NR_FILE;
89 int sysctl_tcp_frto;
90 int sysctl_tcp_nometrics_save;
91 
92 int sysctl_tcp_moderate_rcvbuf = 1;
93 int sysctl_tcp_abc = 1;
94 
95 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
96 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
97 #define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
98 #define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
99 #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
100 #define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
101 #define FLAG_ECE		0x40 /* ECE in this ACK				*/
102 #define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
103 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
104 
105 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
106 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
107 #define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
108 #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
109 
110 #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
111 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
112 #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
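/* rx_opt.sack_ok doubles as a small bit field here: bit 0 means SACK is in
 * use (IsReno is true when the field is zero), bit 1 means FACK heuristics
 * are enabled on top of SACK (cleared below once reordering is detected),
 * and bit 2 records that a D-SACK block has been received.
 */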
113 
114 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
115 
116 /* Adapt the MSS value used to make delayed ack decisions to the
117  * real world.
118  */
119 static void tcp_measure_rcv_mss(struct sock *sk,
120 				const struct sk_buff *skb)
121 {
122 	struct inet_connection_sock *icsk = inet_csk(sk);
123 	const unsigned int lss = icsk->icsk_ack.last_seg_size;
124 	unsigned int len;
125 
126 	icsk->icsk_ack.last_seg_size = 0;
127 
128 	/* skb->len may jitter because of SACKs, even if peer
129 	 * sends good full-sized frames.
130 	 */
131 	len = skb->len;
132 	if (len >= icsk->icsk_ack.rcv_mss) {
133 		icsk->icsk_ack.rcv_mss = len;
134 	} else {
135 		/* Otherwise, we make a more careful check, taking into account
136 		 * that the SACK block size is variable.
137 		 *
138 		 * "len" is the invariant segment length, including the TCP header.
139 		 */
140 		len += skb->data - skb->h.raw;
141 		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
142 		    /* If PSH is not set, the packet should be
143 		     * full sized, provided the peer TCP is not badly broken.
144 		     * This observation (if it is correct 8)) allows us
145 		     * to handle super-low MTU links fairly.
146 		     */
147 		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
148 		     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
149 			/* Also subtract the invariant part (if the peer is RFC
150 			 * compliant): the TCP header plus the fixed timestamp option
151 			 * length. The resulting "len" is the MSS, free of SACK jitter.
152 			 */
153 			len -= tcp_sk(sk)->tcp_header_len;
154 			icsk->icsk_ack.last_seg_size = len;
155 			if (len == lss) {
156 				icsk->icsk_ack.rcv_mss = len;
157 				return;
158 			}
159 		}
160 		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
161 	}
162 }
163 
164 static void tcp_incr_quickack(struct sock *sk)
165 {
166 	struct inet_connection_sock *icsk = inet_csk(sk);
167 	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
168 
169 	if (quickacks==0)
170 		quickacks=2;
171 	if (quickacks > icsk->icsk_ack.quick)
172 		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
173 }
174 
175 void tcp_enter_quickack_mode(struct sock *sk)
176 {
177 	struct inet_connection_sock *icsk = inet_csk(sk);
178 	tcp_incr_quickack(sk);
179 	icsk->icsk_ack.pingpong = 0;
180 	icsk->icsk_ack.ato = TCP_ATO_MIN;
181 }
182 
183 /* Send ACKs quickly, if "quick" count is not exhausted
184  * and the session is not interactive.
185  */
186 
187 static inline int tcp_in_quickack_mode(const struct sock *sk)
188 {
189 	const struct inet_connection_sock *icsk = inet_csk(sk);
190 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
191 }
192 
193 /* Buffer size and advertised window tuning.
194  *
195  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
196  */
197 
198 static void tcp_fixup_sndbuf(struct sock *sk)
199 {
200 	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
201 		     sizeof(struct sk_buff);
202 
203 	if (sk->sk_sndbuf < 3 * sndmem)
204 		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
205 }
206 
207 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
208  *
209  * All of tcp_full_space() is split into two parts: the "network" buffer,
210  * allocated forward and advertised in the receiver window (tp->rcv_wnd),
211  * and the "application buffer", required to isolate scheduling/application
212  * latencies from the network.
213  * window_clamp is the maximal advertised window. It can be less than
214  * tcp_full_space(), in which case tcp_full_space() - window_clamp
215  * is reserved for the "application" buffer. The smaller window_clamp is,
216  * the smoother our behaviour from the viewpoint of the network, but the
217  * lower the throughput and the higher the sensitivity to losses. 8)
218  *
219  * rcv_ssthresh is a stricter window_clamp used during the "slow start"
220  * phase to predict further behaviour of this connection.
221  * It is used for two goals:
222  * - to enforce header prediction at the sender, even when the application
223  *   requires some significant "application buffer". This is check #1.
224  * - to prevent pruning of the receive queue because of misprediction
225  *   of the receiver window. This is check #2.
226  *
227  * The scheme does not work when the sender sends good segments opening
228  * the window and then starts to feed us spaghetti. But it should work
229  * in common situations. Otherwise, we have to rely on queue collapsing.
230  */
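/* A rough illustration of the split, assuming the default
 * sysctl_tcp_adv_win_scale of 2 declared above: tcp_win_from_space() then
 * yields space - space/4, i.e. about 3/4 of the buffer counts as "network"
 * window while the remaining 1/4 absorbs skb and application overhead.
 * "Check #1" and "check #2" refer to the tests in tcp_grow_window() and
 * __tcp_grow_window() below.
 */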
231 
232 /* Slow part of check#2. */
233 static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
234 			     const struct sk_buff *skb)
235 {
236 	/* Optimize this! */
237 	int truesize = tcp_win_from_space(skb->truesize)/2;
238 	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
239 
240 	while (tp->rcv_ssthresh <= window) {
241 		if (truesize <= skb->len)
242 			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
243 
244 		truesize >>= 1;
245 		window >>= 1;
246 	}
247 	return 0;
248 }
249 
250 static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
251 			    struct sk_buff *skb)
252 {
253 	/* Check #1 */
254 	if (tp->rcv_ssthresh < tp->window_clamp &&
255 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
256 	    !tcp_memory_pressure) {
257 		int incr;
258 
259 		/* Check #2. Increase the window if an skb with this much
260 		 * overhead will still fit into the rcvbuf in the future.
261 		 */
262 		if (tcp_win_from_space(skb->truesize) <= skb->len)
263 			incr = 2*tp->advmss;
264 		else
265 			incr = __tcp_grow_window(sk, tp, skb);
266 
267 		if (incr) {
268 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
269 			inet_csk(sk)->icsk_ack.quick |= 1;
270 		}
271 	}
272 }
273 
274 /* 3. Tuning rcvbuf, when connection enters established state. */
275 
276 static void tcp_fixup_rcvbuf(struct sock *sk)
277 {
278 	struct tcp_sock *tp = tcp_sk(sk);
279 	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
280 
281 	/* Try to select rcvbuf so that 4 mss-sized segments
282 	 * will fit into the window and the corresponding skbs will fit into our rcvbuf.
283 	 * (was 3; 4 is the minimum needed to allow fast retransmit to work.)
284 	 */
285 	while (tcp_win_from_space(rcvmem) < tp->advmss)
286 		rcvmem += 128;
287 	if (sk->sk_rcvbuf < 4 * rcvmem)
288 		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
289 }
290 
291 /* 4. Try to fix up everything. It is done immediately after the connection
292  *    enters the established state.
293  */
294 static void tcp_init_buffer_space(struct sock *sk)
295 {
296 	struct tcp_sock *tp = tcp_sk(sk);
297 	int maxwin;
298 
299 	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
300 		tcp_fixup_rcvbuf(sk);
301 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
302 		tcp_fixup_sndbuf(sk);
303 
304 	tp->rcvq_space.space = tp->rcv_wnd;
305 
306 	maxwin = tcp_full_space(sk);
307 
308 	if (tp->window_clamp >= maxwin) {
309 		tp->window_clamp = maxwin;
310 
311 		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
312 			tp->window_clamp = max(maxwin -
313 					       (maxwin >> sysctl_tcp_app_win),
314 					       4 * tp->advmss);
315 	}
316 
317 	/* Force reservation of one segment. */
318 	if (sysctl_tcp_app_win &&
319 	    tp->window_clamp > 2 * tp->advmss &&
320 	    tp->window_clamp + tp->advmss > maxwin)
321 		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
322 
323 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
324 	tp->snd_cwnd_stamp = tcp_time_stamp;
325 }
326 
327 /* 5. Recalculate window clamp after socket hit its memory bounds. */
328 static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
329 {
330 	struct inet_connection_sock *icsk = inet_csk(sk);
331 
332 	icsk->icsk_ack.quick = 0;
333 
334 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
335 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
336 	    !tcp_memory_pressure &&
337 	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
338 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
339 				    sysctl_tcp_rmem[2]);
340 	}
341 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
342 		tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
343 }
344 
345 
346 /* Initialize RCV_MSS value.
347  * RCV_MSS is our guess about the MSS used by the peer.
348  * We have no direct information about the MSS.
349  * It's better to underestimate the RCV_MSS than to overestimate it.
350  * Overestimating it makes us ACK less frequently than needed.
351  * Underestimates are easier to detect and fix by tcp_measure_rcv_mss().
352  */
353 void tcp_initialize_rcv_mss(struct sock *sk)
354 {
355 	struct tcp_sock *tp = tcp_sk(sk);
356 	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
357 
358 	hint = min(hint, tp->rcv_wnd/2);
359 	hint = min(hint, TCP_MIN_RCVMSS);
360 	hint = max(hint, TCP_MIN_MSS);
361 
362 	inet_csk(sk)->icsk_ack.rcv_mss = hint;
363 }
364 
365 /* Receiver "autotuning" code.
366  *
367  * The algorithm for RTT estimation w/o timestamps is based on
368  * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
369  * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
370  *
371  * More detail on this code can be found at
372  * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
373  * though this reference is out of date.  A new paper
374  * is pending.
375  */
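/* Note: rcv_rtt_est.rtt is kept left-shifted by 3, like srtt.  The win_dep
 * argument distinguishes the two callers below: tcp_rcv_rtt_measure() passes 1
 * (one sample per window of received data, no smoothing), while
 * tcp_rcv_rtt_measure_ts() passes 0 (per-segment timestamp samples, smoothed
 * with a 1/8-gain EWMA).
 */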
376 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
377 {
378 	u32 new_sample = tp->rcv_rtt_est.rtt;
379 	long m = sample;
380 
381 	if (m == 0)
382 		m = 1;
383 
384 	if (new_sample != 0) {
385 		/* If we sample in larger samples in the non-timestamp
386 		 * case, we could grossly overestimate the RTT especially
387 		 * with chatty applications or bulk transfer apps which
388 		 * are stalled on filesystem I/O.
389 		 *
390 		 * Also, since we are only going for a minimum in the
391 		 * non-timestamp case, we do not smooth things out
392 		 * else with timestamps disabled convergence takes too
393 		 * long.
394 		 */
395 		if (!win_dep) {
396 			m -= (new_sample >> 3);
397 			new_sample += m;
398 		} else if (m < new_sample)
399 			new_sample = m << 3;
400 	} else {
401 		/* No previous measure. */
402 		new_sample = m << 3;
403 	}
404 
405 	if (tp->rcv_rtt_est.rtt != new_sample)
406 		tp->rcv_rtt_est.rtt = new_sample;
407 }
408 
409 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
410 {
411 	if (tp->rcv_rtt_est.time == 0)
412 		goto new_measure;
413 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
414 		return;
415 	tcp_rcv_rtt_update(tp,
416 			   jiffies - tp->rcv_rtt_est.time,
417 			   1);
418 
419 new_measure:
420 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
421 	tp->rcv_rtt_est.time = tcp_time_stamp;
422 }
423 
424 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
425 {
426 	struct tcp_sock *tp = tcp_sk(sk);
427 	if (tp->rx_opt.rcv_tsecr &&
428 	    (TCP_SKB_CB(skb)->end_seq -
429 	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
430 		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
431 }
432 
433 /*
434  * This function should be called every time data is copied to user space.
435  * It calculates the appropriate TCP receive buffer space.
436  */
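/* This is the core of the DRS scheme referenced above: once per estimated RTT
 * we look at how much data the application consumed (the advance of copied_seq
 * since the last measurement), double it as an estimate of what the sender may
 * keep in flight over the next RTT, and grow sk_rcvbuf and window_clamp towards
 * that target (bounded by sysctl_tcp_rmem[2]), provided sysctl_tcp_moderate_rcvbuf
 * is set and the user has not locked the receive buffer size.
 */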
437 void tcp_rcv_space_adjust(struct sock *sk)
438 {
439 	struct tcp_sock *tp = tcp_sk(sk);
440 	int time;
441 	int space;
442 
443 	if (tp->rcvq_space.time == 0)
444 		goto new_measure;
445 
446 	time = tcp_time_stamp - tp->rcvq_space.time;
447 	if (time < (tp->rcv_rtt_est.rtt >> 3) ||
448 	    tp->rcv_rtt_est.rtt == 0)
449 		return;
450 
451 	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
452 
453 	space = max(tp->rcvq_space.space, space);
454 
455 	if (tp->rcvq_space.space != space) {
456 		int rcvmem;
457 
458 		tp->rcvq_space.space = space;
459 
460 		if (sysctl_tcp_moderate_rcvbuf &&
461 		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
462 			int new_clamp = space;
463 
464 			/* Receive space grows, normalize in order to
465 			 * take into account packet headers and sk_buff
466 			 * structure overhead.
467 			 */
468 			space /= tp->advmss;
469 			if (!space)
470 				space = 1;
471 			rcvmem = (tp->advmss + MAX_TCP_HEADER +
472 				  16 + sizeof(struct sk_buff));
473 			while (tcp_win_from_space(rcvmem) < tp->advmss)
474 				rcvmem += 128;
475 			space *= rcvmem;
476 			space = min(space, sysctl_tcp_rmem[2]);
477 			if (space > sk->sk_rcvbuf) {
478 				sk->sk_rcvbuf = space;
479 
480 				/* Make the window clamp follow along.  */
481 				tp->window_clamp = new_clamp;
482 			}
483 		}
484 	}
485 
486 new_measure:
487 	tp->rcvq_space.seq = tp->copied_seq;
488 	tp->rcvq_space.time = tcp_time_stamp;
489 }
490 
491 /* There is something which you must keep in mind when you analyze the
492  * behavior of the tp->ato delayed ack timeout interval.  When a
493  * connection starts up, we want to ack as quickly as possible.  The
494  * problem is that "good" TCPs do slow start at the beginning of data
495  * transmission.  This means that until we send the first few ACKs the
496  * sender will sit on his end and only queue most of his data, because
497  * he can only send snd_cwnd unacked packets at any given time.  For
498  * each ACK we send, he increments snd_cwnd and transmits more of his
499  * queue.  -DaveM
500  */
501 static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
502 {
503 	struct inet_connection_sock *icsk = inet_csk(sk);
504 	u32 now;
505 
506 	inet_csk_schedule_ack(sk);
507 
508 	tcp_measure_rcv_mss(sk, skb);
509 
510 	tcp_rcv_rtt_measure(tp);
511 
512 	now = tcp_time_stamp;
513 
514 	if (!icsk->icsk_ack.ato) {
515 		/* The _first_ data packet received, initialize
516 		 * delayed ACK engine.
517 		 */
518 		tcp_incr_quickack(sk);
519 		icsk->icsk_ack.ato = TCP_ATO_MIN;
520 	} else {
521 		int m = now - icsk->icsk_ack.lrcvtime;
522 
523 		if (m <= TCP_ATO_MIN/2) {
524 			/* The fastest case is the first. */
525 			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
526 		} else if (m < icsk->icsk_ack.ato) {
527 			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
528 			if (icsk->icsk_ack.ato > icsk->icsk_rto)
529 				icsk->icsk_ack.ato = icsk->icsk_rto;
530 		} else if (m > icsk->icsk_rto) {
531 			/* Too long a gap. Apparently the sender failed to
532 			 * restart its window, so send ACKs quickly.
533 			 */
534 			tcp_incr_quickack(sk);
535 			sk_stream_mem_reclaim(sk);
536 		}
537 	}
538 	icsk->icsk_ack.lrcvtime = now;
539 
540 	TCP_ECN_check_ce(tp, skb);
541 
542 	if (skb->len >= 128)
543 		tcp_grow_window(sk, tp, skb);
544 }
545 
546 /* Called to compute a smoothed rtt estimate. The data fed to this
547  * routine either comes from timestamps, or from segments that were
548  * known _not_ to have been retransmitted [see Karn/Partridge
549  * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
550  * piece by Van Jacobson.
551  * NOTE: the next three routines used to be one big routine.
552  * To save cycles in the RFC 1323 implementation it was better to break
553  * it up into three procedures. -- erics
554  */
555 static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
556 {
557 	struct tcp_sock *tp = tcp_sk(sk);
558 	long m = mrtt; /* RTT */
559 
560 	/*	The following amusing code comes from Jacobson's
561 	 *	article in SIGCOMM '88.  Note that rtt and mdev
562 	 *	are scaled versions of rtt and mean deviation.
563 	 *	This is designed to be as fast as possible;
564 	 *	m stands for "measurement".
565 	 *
566 	 *	In a 1990 paper the rto value was changed to:
567 	 *	RTO = rtt + 4 * mdev
568 	 *
569 	 * Funny. This algorithm seems to be very broken.
570 	 * These formulae increase RTO when it should be decreased, increase it
571 	 * too slowly when it should be increased quickly, decrease it too quickly,
572 	 * etc. I guess in BSD RTO takes ONE value, so it absolutely
573 	 * does not matter how to _calculate_ it. Seems it was a trap
574 	 * that VJ failed to avoid. 8)
575 	 */
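	/* Fixed-point bookkeeping for the code below: tp->srtt holds 8 times the
	 * smoothed RTT and tp->mdev holds 4 times the mean deviation, so the rto
	 * computed later in tcp_set_rto() as (srtt >> 3) + rttvar matches the
	 * "RTO = rtt + 4 * mdev" formula quoted above.
	 */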
576 	if(m == 0)
577 		m = 1;
578 	if (tp->srtt != 0) {
579 		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
580 		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
581 		if (m < 0) {
582 			m = -m;		/* m is now abs(error) */
583 			m -= (tp->mdev >> 2);   /* similar update on mdev */
584 			/* This is similar to one of Eifel findings.
585 			 * Eifel blocks mdev updates when rtt decreases.
586 			 * This solution is a bit different: we use finer gain
587 			 * for mdev in this case (alpha*beta).
588 			 * Like Eifel it also prevents growth of rto,
589 			 * but it also limits the too-fast rto decreases
590 			 * that happen in pure Eifel.
591 			 */
592 			if (m > 0)
593 				m >>= 3;
594 		} else {
595 			m -= (tp->mdev >> 2);   /* similar update on mdev */
596 		}
597 		tp->mdev += m;	    	/* mdev = 3/4 mdev + 1/4 new */
598 		if (tp->mdev > tp->mdev_max) {
599 			tp->mdev_max = tp->mdev;
600 			if (tp->mdev_max > tp->rttvar)
601 				tp->rttvar = tp->mdev_max;
602 		}
603 		if (after(tp->snd_una, tp->rtt_seq)) {
604 			if (tp->mdev_max < tp->rttvar)
605 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
606 			tp->rtt_seq = tp->snd_nxt;
607 			tp->mdev_max = TCP_RTO_MIN;
608 		}
609 	} else {
610 		/* no previous measure. */
611 		tp->srtt = m<<3;	/* take the measured time to be rtt */
612 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
613 		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
614 		tp->rtt_seq = tp->snd_nxt;
615 	}
616 }
617 
618 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
619  * routine referred to above.
620  */
621 static inline void tcp_set_rto(struct sock *sk)
622 {
623 	const struct tcp_sock *tp = tcp_sk(sk);
624 	/* Old crap is replaced with new one. 8)
625 	 *
626 	 * More seriously:
627 	 * 1. If the rtt variance happened to be less than 50 msec, it is a hallucination.
628 	 *    It cannot be less, due to the utterly erratic ACK generation done
629 	 *    at least by Solaris and FreeBSD. "Erratic ACKs" have _nothing_
630 	 *    to do with delayed acks, because at cwnd>2 the true delack timeout
631 	 *    is invisible. Actually, Linux-2.4 also generates erratic
632 	 *    ACKs in some circumstances.
633 	 */
634 	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
635 
636 	/* 2. Fixups made earlier cannot be right.
637 	 *    If we do not estimate RTO correctly without them,
638 	 *    the whole algo is pure shit and should be replaced
639 	 *    with a correct one. That is exactly what we pretend to do.
640 	 */
641 }
642 
643 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
644  * guarantees that rto is higher.
645  */
646 static inline void tcp_bound_rto(struct sock *sk)
647 {
648 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
649 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
650 }
651 
652 /* Save metrics learned by this TCP session.
653    This function is called only when TCP finishes successfully,
654    i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
655  */
656 void tcp_update_metrics(struct sock *sk)
657 {
658 	struct tcp_sock *tp = tcp_sk(sk);
659 	struct dst_entry *dst = __sk_dst_get(sk);
660 
661 	if (sysctl_tcp_nometrics_save)
662 		return;
663 
664 	dst_confirm(dst);
665 
666 	if (dst && (dst->flags&DST_HOST)) {
667 		const struct inet_connection_sock *icsk = inet_csk(sk);
668 		int m;
669 
670 		if (icsk->icsk_backoff || !tp->srtt) {
671 			/* This session failed to estimate rtt. Why?
672 			 * Probably, no packets returned in time.
673 			 * Reset our results.
674 			 */
675 			if (!(dst_metric_locked(dst, RTAX_RTT)))
676 				dst->metrics[RTAX_RTT-1] = 0;
677 			return;
678 		}
679 
680 		m = dst_metric(dst, RTAX_RTT) - tp->srtt;
681 
682 		/* If the newly calculated rtt is larger than the stored one,
683 		 * store the new one. Otherwise, use EWMA. Remember,
684 		 * rtt overestimation is always better than underestimation.
685 		 */
686 		if (!(dst_metric_locked(dst, RTAX_RTT))) {
687 			if (m <= 0)
688 				dst->metrics[RTAX_RTT-1] = tp->srtt;
689 			else
690 				dst->metrics[RTAX_RTT-1] -= (m>>3);
691 		}
692 
693 		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
694 			if (m < 0)
695 				m = -m;
696 
697 			/* Scale deviation to rttvar fixed point */
698 			m >>= 1;
699 			if (m < tp->mdev)
700 				m = tp->mdev;
701 
702 			if (m >= dst_metric(dst, RTAX_RTTVAR))
703 				dst->metrics[RTAX_RTTVAR-1] = m;
704 			else
705 				dst->metrics[RTAX_RTTVAR-1] -=
706 					(dst->metrics[RTAX_RTTVAR-1] - m)>>2;
707 		}
708 
709 		if (tp->snd_ssthresh >= 0xFFFF) {
710 			/* Slow start still did not finish. */
711 			if (dst_metric(dst, RTAX_SSTHRESH) &&
712 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
713 			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
714 				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
715 			if (!dst_metric_locked(dst, RTAX_CWND) &&
716 			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
717 				dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
718 		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
719 			   icsk->icsk_ca_state == TCP_CA_Open) {
720 			/* Cong. avoidance phase, cwnd is reliable. */
721 			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
722 				dst->metrics[RTAX_SSTHRESH-1] =
723 					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
724 			if (!dst_metric_locked(dst, RTAX_CWND))
725 				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
726 		} else {
727 			/* Else slow start did not finish, cwnd is nonsense,
728 			   and ssthresh may also be invalid.
729 			 */
730 			if (!dst_metric_locked(dst, RTAX_CWND))
731 				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
732 			if (dst->metrics[RTAX_SSTHRESH-1] &&
733 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
734 			    tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
735 				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
736 		}
737 
738 		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
739 			if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
740 			    tp->reordering != sysctl_tcp_reordering)
741 				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
742 		}
743 	}
744 }
745 
746 /* Numbers are taken from RFC2414.  */
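/* RFC 2414 bounds the initial window to min(4*MSS, max(2*MSS, 4380 bytes));
 * the 1095 and 1460 byte thresholds below are the MSS values at which that
 * formula crosses from 4 to 3 and from 3 to 2 segments.
 */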
747 __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
748 {
749 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
750 
751 	if (!cwnd) {
752 		if (tp->mss_cache > 1460)
753 			cwnd = 2;
754 		else
755 			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
756 	}
757 	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
758 }
759 
760 /* Set the slow start threshold and cwnd without falling back to slow start */
761 void tcp_enter_cwr(struct sock *sk)
762 {
763 	struct tcp_sock *tp = tcp_sk(sk);
764 
765 	tp->prior_ssthresh = 0;
766 	tp->bytes_acked = 0;
767 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
768 		tp->undo_marker = 0;
769 		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
770 		tp->snd_cwnd = min(tp->snd_cwnd,
771 				   tcp_packets_in_flight(tp) + 1U);
772 		tp->snd_cwnd_cnt = 0;
773 		tp->high_seq = tp->snd_nxt;
774 		tp->snd_cwnd_stamp = tcp_time_stamp;
775 		TCP_ECN_queue_cwr(tp);
776 
777 		tcp_set_ca_state(sk, TCP_CA_CWR);
778 	}
779 }
780 
781 /* Initialize metrics on socket. */
782 
783 static void tcp_init_metrics(struct sock *sk)
784 {
785 	struct tcp_sock *tp = tcp_sk(sk);
786 	struct dst_entry *dst = __sk_dst_get(sk);
787 
788 	if (dst == NULL)
789 		goto reset;
790 
791 	dst_confirm(dst);
792 
793 	if (dst_metric_locked(dst, RTAX_CWND))
794 		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
795 	if (dst_metric(dst, RTAX_SSTHRESH)) {
796 		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
797 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
798 			tp->snd_ssthresh = tp->snd_cwnd_clamp;
799 	}
800 	if (dst_metric(dst, RTAX_REORDERING) &&
801 	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
802 		tp->rx_opt.sack_ok &= ~2;
803 		tp->reordering = dst_metric(dst, RTAX_REORDERING);
804 	}
805 
806 	if (dst_metric(dst, RTAX_RTT) == 0)
807 		goto reset;
808 
809 	if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
810 		goto reset;
811 
812 	/* The initial rtt is determined from the SYN, SYN-ACK exchange.
813 	 * The segment is small and the rtt may appear much
814 	 * less than the real one. Use per-dst memory
815 	 * to make it more realistic.
816 	 *
817 	 * A bit of theory. RTT is the time passed after a "normal" sized packet
818 	 * is sent until it is ACKed. In normal circumstances sending small
819 	 * packets forces the peer to delay ACKs, so the calculation is correct then too.
820 	 * The algorithm is adaptive and, provided we follow specs, it
821 	 * NEVER underestimates RTT. BUT! If the peer tries some clever trick,
822 	 * such as "quick acks" for long enough to decrease the RTT
823 	 * to a low value, and then abruptly stops doing it and starts to delay
824 	 * ACKs, expect trouble.
825 	 */
826 	if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
827 		tp->srtt = dst_metric(dst, RTAX_RTT);
828 		tp->rtt_seq = tp->snd_nxt;
829 	}
830 	if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
831 		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
832 		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
833 	}
834 	tcp_set_rto(sk);
835 	tcp_bound_rto(sk);
836 	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
837 		goto reset;
838 	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
839 	tp->snd_cwnd_stamp = tcp_time_stamp;
840 	return;
841 
842 reset:
843 	/* Play it conservative. If timestamps are not
844 	 * supported, TCP will fail to recalculate a correct
845 	 * rtt if the initial rto is too small. FORGET ALL AND RESET!
846 	 */
847 	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
848 		tp->srtt = 0;
849 		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
850 		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
851 	}
852 }
853 
854 static void tcp_update_reordering(struct sock *sk, const int metric,
855 				  const int ts)
856 {
857 	struct tcp_sock *tp = tcp_sk(sk);
858 	if (metric > tp->reordering) {
859 		tp->reordering = min(TCP_MAX_REORDERING, metric);
860 
861 		/* This exciting event is worth remembering. 8) */
862 		if (ts)
863 			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
864 		else if (IsReno(tp))
865 			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
866 		else if (IsFack(tp))
867 			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
868 		else
869 			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
870 #if FASTRETRANS_DEBUG > 1
871 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
872 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
873 		       tp->reordering,
874 		       tp->fackets_out,
875 		       tp->sacked_out,
876 		       tp->undo_marker ? tp->undo_retrans : 0);
877 #endif
878 		/* Reordering was seen, so disable FACK for now. */
879 		tp->rx_opt.sack_ok &= ~2;
880 	}
881 }
882 
883 /* This procedure tags the retransmission queue when SACKs arrive.
884  *
885  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
886  * Packets in queue with these bits set are counted in variables
887  * sacked_out, retrans_out and lost_out, correspondingly.
888  *
889  * Valid combinations are:
890  * Tag  InFlight	Description
891  * 0	1		- orig segment is in flight.
892  * S	0		- nothing flies, orig reached receiver.
893  * L	0		- nothing flies, orig lost by net.
894  * R	2		- both orig and retransmit are in flight.
895  * L|R	1		- orig is lost, retransmit is in flight.
896  * S|R  1		- orig reached receiver, retrans is still in flight.
897  * (L|S|R is logically valid, it could occur when L|R is sacked,
898  *  but it is equivalent to plain S and the code short-circuits it to S.
899  *  L|S is logically invalid, it would mean -1 packet in flight 8))
900  *
901  * These 6 states form a finite state machine, controlled by the following events:
902  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
903  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
904  * 3. Loss detection event of one of three flavors:
905  *	A. Scoreboard estimator decided the packet is lost.
906  *	   A'. Reno "three dupacks" marks head of queue lost.
907  *	   A''. Its FACK modification: everything up to snd.fack is lost.
908  *	B. A SACK arrives sacking data that was transmitted after a
909  *	   never-retransmitted hole.
910  *	C. A SACK arrives sacking SND.NXT at the moment when the
911  *	   segment was retransmitted.
912  * 4. D-SACK added new rule: D-SACK changes any tag to S.
913  *
914  * It is pleasant to note that the state diagram turns out to be commutative,
915  * so that we need not be bothered by the order of our actions
916  * when multiple events arrive simultaneously. (see the function below).
917  *
918  * Reordering detection.
919  * --------------------
920  * The reordering metric is the maximal distance by which a packet can be
921  * displaced in the packet stream. With SACKs we can estimate it:
922  *
923  * 1. A SACK fills an old hole and the corresponding segment was never
924  *    retransmitted -> reordering. Alas, we cannot use this
925  *    when the segment was retransmitted.
926  * 2. That last flaw is solved with D-SACK: a D-SACK arrives
927  *    for a retransmitted and already SACKed segment -> reordering.
928  * Neither of these heuristics is used in the Loss state, when we cannot
929  * account for retransmits accurately.
930  */
931 static int
932 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
933 {
934 	const struct inet_connection_sock *icsk = inet_csk(sk);
935 	struct tcp_sock *tp = tcp_sk(sk);
936 	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
937 	struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
938 	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
939 	int reord = tp->packets_out;
940 	int prior_fackets;
941 	u32 lost_retrans = 0;
942 	int flag = 0;
943 	int dup_sack = 0;
944 	int i;
945 
946 	if (!tp->sacked_out)
947 		tp->fackets_out = 0;
948 	prior_fackets = tp->fackets_out;
949 
950 	/* SACK fastpath:
951 	 * if the only SACK change is the increase of the end_seq of
952 	 * the first block, then only apply that SACK block
953 	 * and use retrans queue hinting; otherwise take the slow path. */
954 	flag = 1;
955 	for (i = 0; i< num_sacks; i++) {
956 		__u32 start_seq = ntohl(sp[i].start_seq);
957 		__u32 end_seq =	 ntohl(sp[i].end_seq);
958 
959 		if (i == 0){
960 			if (tp->recv_sack_cache[i].start_seq != start_seq)
961 				flag = 0;
962 		} else {
963 			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
964 			    (tp->recv_sack_cache[i].end_seq != end_seq))
965 				flag = 0;
966 		}
967 		tp->recv_sack_cache[i].start_seq = start_seq;
968 		tp->recv_sack_cache[i].end_seq = end_seq;
969 
970 		/* Check for D-SACK. */
971 		if (i == 0) {
972 			u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
973 
974 			if (before(start_seq, ack)) {
975 				dup_sack = 1;
976 				tp->rx_opt.sack_ok |= 4;
977 				NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
978 			} else if (num_sacks > 1 &&
979 				   !after(end_seq, ntohl(sp[1].end_seq)) &&
980 				   !before(start_seq, ntohl(sp[1].start_seq))) {
981 				dup_sack = 1;
982 				tp->rx_opt.sack_ok |= 4;
983 				NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
984 			}
985 
986 			/* D-SACK for already forgotten data...
987 			 * Do dumb counting. */
988 			if (dup_sack &&
989 			    !after(end_seq, prior_snd_una) &&
990 			    after(end_seq, tp->undo_marker))
991 				tp->undo_retrans--;
992 
993 			/* Eliminate too old ACKs, but take into
994 			 * account more or less fresh ones, they can
995 			 * contain valid SACK info.
996 			 */
997 			if (before(ack, prior_snd_una - tp->max_window))
998 				return 0;
999 		}
1000 	}
1001 
1002 	if (flag)
1003 		num_sacks = 1;
1004 	else {
1005 		int j;
1006 		tp->fastpath_skb_hint = NULL;
1007 
1008 		/* order SACK blocks to allow in order walk of the retrans queue */
1009 		for (i = num_sacks-1; i > 0; i--) {
1010 			for (j = 0; j < i; j++){
1011 				if (after(ntohl(sp[j].start_seq),
1012 					  ntohl(sp[j+1].start_seq))){
1013 					sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
1014 					sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
1015 					sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
1016 					sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
1017 				}
1018 
1019 			}
1020 		}
1021 	}
1022 
1023 	/* clear flag as used for different purpose in following code */
1024 	flag = 0;
1025 
1026 	for (i=0; i<num_sacks; i++, sp++) {
1027 		struct sk_buff *skb;
1028 		__u32 start_seq = ntohl(sp->start_seq);
1029 		__u32 end_seq = ntohl(sp->end_seq);
1030 		int fack_count;
1031 
1032 		/* Use SACK fastpath hint if valid */
1033 		if (tp->fastpath_skb_hint) {
1034 			skb = tp->fastpath_skb_hint;
1035 			fack_count = tp->fastpath_cnt_hint;
1036 		} else {
1037 			skb = sk->sk_write_queue.next;
1038 			fack_count = 0;
1039 		}
1040 
1041 		/* Event "B" in the comment above. */
1042 		if (after(end_seq, tp->high_seq))
1043 			flag |= FLAG_DATA_LOST;
1044 
1045 		sk_stream_for_retrans_queue_from(skb, sk) {
1046 			int in_sack, pcount;
1047 			u8 sacked;
1048 
1049 			tp->fastpath_skb_hint = skb;
1050 			tp->fastpath_cnt_hint = fack_count;
1051 
1052 			/* The retransmission queue is always in order, so
1053 			 * we can short-circuit the walk early.
1054 			 */
1055 			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1056 				break;
1057 
1058 			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1059 				!before(end_seq, TCP_SKB_CB(skb)->end_seq);
1060 
1061 			pcount = tcp_skb_pcount(skb);
1062 
1063 			if (pcount > 1 && !in_sack &&
1064 			    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1065 				unsigned int pkt_len;
1066 
1067 				in_sack = !after(start_seq,
1068 						 TCP_SKB_CB(skb)->seq);
1069 
1070 				if (!in_sack)
1071 					pkt_len = (start_seq -
1072 						   TCP_SKB_CB(skb)->seq);
1073 				else
1074 					pkt_len = (end_seq -
1075 						   TCP_SKB_CB(skb)->seq);
1076 				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
1077 					break;
1078 				pcount = tcp_skb_pcount(skb);
1079 			}
1080 
1081 			fack_count += pcount;
1082 
1083 			sacked = TCP_SKB_CB(skb)->sacked;
1084 
1085 			/* Account D-SACK for retransmitted packet. */
1086 			if ((dup_sack && in_sack) &&
1087 			    (sacked & TCPCB_RETRANS) &&
1088 			    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
1089 				tp->undo_retrans--;
1090 
1091 			/* The frame is ACKed. */
1092 			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
1093 				if (sacked&TCPCB_RETRANS) {
1094 					if ((dup_sack && in_sack) &&
1095 					    (sacked&TCPCB_SACKED_ACKED))
1096 						reord = min(fack_count, reord);
1097 				} else {
1098 					/* If it was in a hole, we detected reordering. */
1099 					if (fack_count < prior_fackets &&
1100 					    !(sacked&TCPCB_SACKED_ACKED))
1101 						reord = min(fack_count, reord);
1102 				}
1103 
1104 				/* Nothing to do; acked frame is about to be dropped. */
1105 				continue;
1106 			}
1107 
1108 			if ((sacked&TCPCB_SACKED_RETRANS) &&
1109 			    after(end_seq, TCP_SKB_CB(skb)->ack_seq) &&
1110 			    (!lost_retrans || after(end_seq, lost_retrans)))
1111 				lost_retrans = end_seq;
1112 
1113 			if (!in_sack)
1114 				continue;
1115 
1116 			if (!(sacked&TCPCB_SACKED_ACKED)) {
1117 				if (sacked & TCPCB_SACKED_RETRANS) {
1118 					/* If the segment is not tagged as lost,
1119 					 * we do not clear RETRANS, believing
1120 					 * that retransmission is still in flight.
1121 					 */
1122 					if (sacked & TCPCB_LOST) {
1123 						TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1124 						tp->lost_out -= tcp_skb_pcount(skb);
1125 						tp->retrans_out -= tcp_skb_pcount(skb);
1126 
1127 						/* clear lost hint */
1128 						tp->retransmit_skb_hint = NULL;
1129 					}
1130 				} else {
1131 					/* A new SACK for a never-retransmitted frame
1132 					 * which was in a hole. This is reordering.
1133 					 */
1134 					if (!(sacked & TCPCB_RETRANS) &&
1135 					    fack_count < prior_fackets)
1136 						reord = min(fack_count, reord);
1137 
1138 					if (sacked & TCPCB_LOST) {
1139 						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1140 						tp->lost_out -= tcp_skb_pcount(skb);
1141 
1142 						/* clear lost hint */
1143 						tp->retransmit_skb_hint = NULL;
1144 					}
1145 				}
1146 
1147 				TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
1148 				flag |= FLAG_DATA_SACKED;
1149 				tp->sacked_out += tcp_skb_pcount(skb);
1150 
1151 				if (fack_count > tp->fackets_out)
1152 					tp->fackets_out = fack_count;
1153 			} else {
1154 				if (dup_sack && (sacked&TCPCB_RETRANS))
1155 					reord = min(fack_count, reord);
1156 			}
1157 
1158 			/* D-SACK. We can detect redundant retransmission
1159 			 * in S|R and plain R frames and clear it.
1160 			 * undo_retrans is decreased above, L|R frames
1161 			 * are accounted above as well.
1162 			 */
1163 			if (dup_sack &&
1164 			    (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
1165 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1166 				tp->retrans_out -= tcp_skb_pcount(skb);
1167 				tp->retransmit_skb_hint = NULL;
1168 			}
1169 		}
1170 	}
1171 
1172 	/* Check for lost retransmit. This superb idea is
1173 	 * borrowed from "ratehalving". Event "C".
1174 	 * Later note: FACK people cheated me again 8),
1175 	 * we have to account for reordering! Ugly,
1176 	 * but should help.
1177 	 */
1178 	if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
1179 		struct sk_buff *skb;
1180 
1181 		sk_stream_for_retrans_queue(skb, sk) {
1182 			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
1183 				break;
1184 			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1185 				continue;
1186 			if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) &&
1187 			    after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
1188 			    (IsFack(tp) ||
1189 			     !before(lost_retrans,
1190 				     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
1191 				     tp->mss_cache))) {
1192 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1193 				tp->retrans_out -= tcp_skb_pcount(skb);
1194 
1195 				/* clear lost hint */
1196 				tp->retransmit_skb_hint = NULL;
1197 
1198 				if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1199 					tp->lost_out += tcp_skb_pcount(skb);
1200 					TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1201 					flag |= FLAG_DATA_SACKED;
1202 					NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
1203 				}
1204 			}
1205 		}
1206 	}
1207 
1208 	tp->left_out = tp->sacked_out + tp->lost_out;
1209 
1210 	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
1211 		tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
1212 
1213 #if FASTRETRANS_DEBUG > 0
1214 	BUG_TRAP((int)tp->sacked_out >= 0);
1215 	BUG_TRAP((int)tp->lost_out >= 0);
1216 	BUG_TRAP((int)tp->retrans_out >= 0);
1217 	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
1218 #endif
1219 	return flag;
1220 }
1221 
1222 /* RTO occurred, but do not yet enter loss state. Instead, transmit two new
1223  * segments to see from the next ACKs whether any data was really missing.
1224  * If the RTO was spurious, new ACKs should arrive.
1225  */
1226 void tcp_enter_frto(struct sock *sk)
1227 {
1228 	const struct inet_connection_sock *icsk = inet_csk(sk);
1229 	struct tcp_sock *tp = tcp_sk(sk);
1230 	struct sk_buff *skb;
1231 
1232 	tp->frto_counter = 1;
1233 
1234 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1235             tp->snd_una == tp->high_seq ||
1236             (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1237 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
1238 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1239 		tcp_ca_event(sk, CA_EVENT_FRTO);
1240 	}
1241 
1242 	/* Have to clear retransmission markers here to keep the bookkeeping
1243 	 * in shape, even though we are not yet in Loss state.
1244 	 * If something was really lost, it is eventually caught up
1245 	 * in tcp_enter_frto_loss.
1246 	 */
1247 	tp->retrans_out = 0;
1248 	tp->undo_marker = tp->snd_una;
1249 	tp->undo_retrans = 0;
1250 
1251 	sk_stream_for_retrans_queue(skb, sk) {
1252 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
1253 	}
1254 	tcp_sync_left_out(tp);
1255 
1256 	tcp_set_ca_state(sk, TCP_CA_Open);
1257 	tp->frto_highmark = tp->snd_nxt;
1258 }
1259 
1260 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
1261  * which indicates that we should follow the traditional RTO recovery,
1262  * i.e. mark everything lost and do go-back-N retransmission.
1263  */
1264 static void tcp_enter_frto_loss(struct sock *sk)
1265 {
1266 	struct tcp_sock *tp = tcp_sk(sk);
1267 	struct sk_buff *skb;
1268 	int cnt = 0;
1269 
1270 	tp->sacked_out = 0;
1271 	tp->lost_out = 0;
1272 	tp->fackets_out = 0;
1273 
1274 	sk_stream_for_retrans_queue(skb, sk) {
1275 		cnt += tcp_skb_pcount(skb);
1276 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1277 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1278 
1279 			/* Do not mark those segments lost that were
1280 			 * forward transmitted after RTO
1281 			 */
1282 			if (!after(TCP_SKB_CB(skb)->end_seq,
1283 				   tp->frto_highmark)) {
1284 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1285 				tp->lost_out += tcp_skb_pcount(skb);
1286 			}
1287 		} else {
1288 			tp->sacked_out += tcp_skb_pcount(skb);
1289 			tp->fackets_out = cnt;
1290 		}
1291 	}
1292 	tcp_sync_left_out(tp);
1293 
1294 	tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
1295 	tp->snd_cwnd_cnt = 0;
1296 	tp->snd_cwnd_stamp = tcp_time_stamp;
1297 	tp->undo_marker = 0;
1298 	tp->frto_counter = 0;
1299 
1300 	tp->reordering = min_t(unsigned int, tp->reordering,
1301 					     sysctl_tcp_reordering);
1302 	tcp_set_ca_state(sk, TCP_CA_Loss);
1303 	tp->high_seq = tp->frto_highmark;
1304 	TCP_ECN_queue_cwr(tp);
1305 
1306 	clear_all_retrans_hints(tp);
1307 }
1308 
1309 void tcp_clear_retrans(struct tcp_sock *tp)
1310 {
1311 	tp->left_out = 0;
1312 	tp->retrans_out = 0;
1313 
1314 	tp->fackets_out = 0;
1315 	tp->sacked_out = 0;
1316 	tp->lost_out = 0;
1317 
1318 	tp->undo_marker = 0;
1319 	tp->undo_retrans = 0;
1320 }
1321 
1322 /* Enter Loss state. If "how" is not zero, forget all SACK information
1323  * and reset tags completely, otherwise preserve SACKs. If receiver
1324  * dropped its ofo queue, we will know this due to reneging detection.
1325  */
1326 void tcp_enter_loss(struct sock *sk, int how)
1327 {
1328 	const struct inet_connection_sock *icsk = inet_csk(sk);
1329 	struct tcp_sock *tp = tcp_sk(sk);
1330 	struct sk_buff *skb;
1331 	int cnt = 0;
1332 
1333 	/* Reduce ssthresh if it has not yet been reduced within this window. */
1334 	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
1335 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1336 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
1337 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1338 		tcp_ca_event(sk, CA_EVENT_LOSS);
1339 	}
1340 	tp->snd_cwnd	   = 1;
1341 	tp->snd_cwnd_cnt   = 0;
1342 	tp->snd_cwnd_stamp = tcp_time_stamp;
1343 
1344 	tp->bytes_acked = 0;
1345 	tcp_clear_retrans(tp);
1346 
1347 	/* Push undo marker, if it was plain RTO and nothing
1348 	 * was retransmitted. */
1349 	if (!how)
1350 		tp->undo_marker = tp->snd_una;
1351 
1352 	sk_stream_for_retrans_queue(skb, sk) {
1353 		cnt += tcp_skb_pcount(skb);
1354 		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
1355 			tp->undo_marker = 0;
1356 		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
1357 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
1358 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
1359 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1360 			tp->lost_out += tcp_skb_pcount(skb);
1361 		} else {
1362 			tp->sacked_out += tcp_skb_pcount(skb);
1363 			tp->fackets_out = cnt;
1364 		}
1365 	}
1366 	tcp_sync_left_out(tp);
1367 
1368 	tp->reordering = min_t(unsigned int, tp->reordering,
1369 					     sysctl_tcp_reordering);
1370 	tcp_set_ca_state(sk, TCP_CA_Loss);
1371 	tp->high_seq = tp->snd_nxt;
1372 	TCP_ECN_queue_cwr(tp);
1373 
1374 	clear_all_retrans_hints(tp);
1375 }
1376 
1377 static int tcp_check_sack_reneging(struct sock *sk)
1378 {
1379 	struct sk_buff *skb;
1380 
1381 	/* If an ACK arrived pointing to a remembered SACK,
1382 	 * it means that our remembered SACKs do not reflect
1383 	 * the real state of the receiver, i.e.
1384 	 * the receiver _host_ is heavily congested (or buggy).
1385 	 * Do processing similar to an RTO timeout.
1386 	 */
1387 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
1388 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1389 		struct inet_connection_sock *icsk = inet_csk(sk);
1390 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
1391 
1392 		tcp_enter_loss(sk, 1);
1393 		icsk->icsk_retransmits++;
1394 		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
1395 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1396 					  icsk->icsk_rto, TCP_RTO_MAX);
1397 		return 1;
1398 	}
1399 	return 0;
1400 }
1401 
1402 static inline int tcp_fackets_out(struct tcp_sock *tp)
1403 {
1404 	return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
1405 }
1406 
1407 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
1408 {
1409 	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
1410 }
1411 
1412 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
1413 {
1414 	return tp->packets_out &&
1415 	       tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
1416 }
1417 
1418 /* Linux NewReno/SACK/FACK/ECN state machine.
1419  * --------------------------------------
1420  *
1421  * "Open"	Normal state, no dubious events, fast path.
1422  * "Disorder"   In all respects it is "Open",
1423  *		but requires a bit more attention. It is entered when
1424  *		we see some SACKs or dupacks. It is split off from "Open"
1425  *		mainly to move some processing from the fast path to the slow one.
1426  * "CWR"	CWND was reduced due to some Congestion Notification event.
1427  *		It can be ECN, ICMP source quench, local device congestion.
1428  * "Recovery"	CWND was reduced, we are fast-retransmitting.
1429  * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
1430  *
1431  * tcp_fastretrans_alert() is entered:
1432  * - on each incoming ACK, if the state is not "Open"
1433  * - when the arriving ACK is unusual, namely:
1434  *	* SACK
1435  *	* Duplicate ACK.
1436  *	* ECN ECE.
1437  *
1438  * Counting packets in flight is pretty simple.
1439  *
1440  *	in_flight = packets_out - left_out + retrans_out
1441  *
1442  *	packets_out is SND.NXT-SND.UNA counted in packets.
1443  *
1444  *	retrans_out is number of retransmitted segments.
1445  *
1446  *	left_out is the number of segments that have left the network but are not yet ACKed.
1447  *
1448  *		left_out = sacked_out + lost_out
1449  *
1450  *     sacked_out: Packets which arrived at the receiver out of order
1451  *		   and hence were not ACKed. With SACKs this number is simply
1452  *		   the amount of SACKed data. Even without SACKs
1453  *		   it is easy to give a pretty reliable estimate of this number
1454  *		   by counting duplicate ACKs.
1455  *
1456  *       lost_out: Packets lost by the network. TCP has no explicit
1457  *		   "loss notification" feedback from the network (for now).
1458  *		   This means that this number can only be _guessed_.
1459  *		   Actually, it is the heuristic used to predict losses that
1460  *		   distinguishes the different algorithms.
1461  *
1462  *	E.g. after an RTO, when the whole queue is considered lost,
1463  *	lost_out = packets_out and in_flight = retrans_out.
1464  *
1465  *		Essentially, we now have two algorithms for counting
1466  *		lost packets.
1467  *
1468  *		FACK: It is the simplest heuristic. As soon as we decide
1469  *		that something is lost, we decide that _all_ not-SACKed
1470  *		packets until the most forward SACK are lost. I.e.
1471  *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
1472  *		This is an absolutely correct estimate if the network does not
1473  *		reorder packets, and it loses any connection to reality when
1474  *		reordering takes place. We use FACK by default until reordering
1475  *		is suspected on the path to this destination.
1476  *
1477  *		NewReno: when Recovery is entered, we assume that one segment
1478  *		is lost (classic Reno). While we are in Recovery and
1479  *		a partial ACK arrives, we assume that one more packet
1480  *		is lost (NewReno). These heuristics are the same in NewReno
1481  *		and SACK.
1482  *
1483  *  Imagine, that's all! Forget about all this shamanism about CWND inflation
1484  *  and deflation etc. CWND is the real congestion window, never inflated; it
1485  *  changes only according to classic VJ rules.
1486  *
1487  * The really tricky (and carefully tuned) part of the algorithm
1488  * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
1489  * The first determines the moment _when_ we should reduce CWND and,
1490  * hence, slow down forward transmission. In fact, it determines the moment
1491  * when we decide that a hole is caused by loss rather than by reordering.
1492  *
1493  * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
1494  * the holes caused by lost packets.
1495  *
1496  * The most logically complicated part of the algorithm is the undo
1497  * heuristics. We detect false retransmits due to both too-early
1498  * fast retransmit (reordering) and an underestimated RTO, by analyzing
1499  * timestamps and D-SACKs. When we detect that some segments were
1500  * retransmitted by mistake and the CWND reduction was wrong, we undo the
1501  * window reduction and abort the recovery phase. This logic is hidden
1502  * inside several functions named tcp_try_undo_<something>.
1503  */
1504 
1505 /* This function decides when we should leave the Disordered state
1506  * and enter the Recovery phase, reducing the congestion window.
1507  *
1508  * Main question: may we continue forward transmission
1509  * with the same cwnd?
1510  */
1511 static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
1512 {
1513 	__u32 packets_out;
1514 
1515 	/* Trick#1: The loss is proven. */
1516 	if (tp->lost_out)
1517 		return 1;
1518 
1519 	/* Not-A-Trick#2 : Classic rule... */
1520 	if (tcp_fackets_out(tp) > tp->reordering)
1521 		return 1;
1522 
1523 	/* Trick#3 : when we use RFC2988 timer restart, fast
1524 	 * retransmit can be triggered by timeout of queue head.
1525 	 */
1526 	if (tcp_head_timedout(sk, tp))
1527 		return 1;
1528 
1529 	/* Trick#4: It is still not OK... But will it be useful to delay
1530 	 * recovery more?
1531 	 */
1532 	packets_out = tp->packets_out;
1533 	if (packets_out <= tp->reordering &&
1534 	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
1535 	    !tcp_may_send_now(sk, tp)) {
1536 		/* We have nothing to send. This connection is limited
1537 		 * either by receiver window or by application.
1538 		 */
1539 		return 1;
1540 	}
1541 
1542 	return 0;
1543 }
1544 
1545 /* If we receive more dupacks than we expected when counting segments
1546  * under the assumption of no reordering, interpret this as reordering.
1547  * The only other reason could be a bug in the receiver's TCP.
1548  */
1549 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1550 {
1551 	struct tcp_sock *tp = tcp_sk(sk);
1552 	u32 holes;
1553 
1554 	holes = max(tp->lost_out, 1U);
1555 	holes = min(holes, tp->packets_out);
1556 
1557 	if ((tp->sacked_out + holes) > tp->packets_out) {
1558 		tp->sacked_out = tp->packets_out - holes;
1559 		tcp_update_reordering(sk, tp->packets_out + addend, 0);
1560 	}
1561 }
1562 
1563 /* Emulate SACKs for SACKless connection: account for a new dupack. */
1564 
1565 static void tcp_add_reno_sack(struct sock *sk)
1566 {
1567 	struct tcp_sock *tp = tcp_sk(sk);
1568 	tp->sacked_out++;
1569 	tcp_check_reno_reordering(sk, 0);
1570 	tcp_sync_left_out(tp);
1571 }
1572 
1573 /* Account for ACK, ACKing some data in Reno Recovery phase. */
1574 
1575 static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
1576 {
1577 	if (acked > 0) {
1578 		/* One ACK acked hole. The rest eat duplicate ACKs. */
1579 		if (acked-1 >= tp->sacked_out)
1580 			tp->sacked_out = 0;
1581 		else
1582 			tp->sacked_out -= acked-1;
1583 	}
1584 	tcp_check_reno_reordering(sk, acked);
1585 	tcp_sync_left_out(tp);
1586 }
1587 
1588 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
1589 {
1590 	tp->sacked_out = 0;
1591 	tp->left_out = tp->lost_out;
1592 }
1593 
1594 /* Mark the head of the queue as lost. */
1595 static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
1596 			       int packets, u32 high_seq)
1597 {
1598 	struct sk_buff *skb;
1599 	int cnt;
1600 
1601 	BUG_TRAP(packets <= tp->packets_out);
1602 	if (tp->lost_skb_hint) {
1603 		skb = tp->lost_skb_hint;
1604 		cnt = tp->lost_cnt_hint;
1605 	} else {
1606 		skb = sk->sk_write_queue.next;
1607 		cnt = 0;
1608 	}
1609 
1610 	sk_stream_for_retrans_queue_from(skb, sk) {
1611 		/* TODO: do this better */
1612 		/* this is not the most efficient way to do this... */
1613 		tp->lost_skb_hint = skb;
1614 		tp->lost_cnt_hint = cnt;
1615 		cnt += tcp_skb_pcount(skb);
1616 		if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq))
1617 			break;
1618 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1619 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1620 			tp->lost_out += tcp_skb_pcount(skb);
1621 
1622 			/* clear xmit_retransmit_queue hints
1623 			 * if this newly lost skb lies before the current hint */
1624 			if(tp->retransmit_skb_hint != NULL &&
1625 			   before(TCP_SKB_CB(skb)->seq,
1626 				  TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
1627 
1628 				tp->retransmit_skb_hint = NULL;
1629 			}
1630 		}
1631 	}
1632 	tcp_sync_left_out(tp);
1633 }
1634 
1635 /* Account for newly detected lost packet(s). */
1636 
1637 static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1638 {
1639 	if (IsFack(tp)) {
1640 		int lost = tp->fackets_out - tp->reordering;
1641 		if (lost <= 0)
1642 			lost = 1;
1643 		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
1644 	} else {
1645 		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
1646 	}
1647 
1648 	/* New heuristic: this is possible only after we switched
1649 	 * to restarting the timer each time something is ACKed.
1650 	 * Hence, we can detect timed-out packets during fast
1651 	 * retransmit without falling back to slow start.
1652 	 */
1653 	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
1654 		struct sk_buff *skb;
1655 
1656 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
1657 			: sk->sk_write_queue.next;
1658 
1659 		sk_stream_for_retrans_queue_from(skb, sk) {
1660 			if (!tcp_skb_timedout(sk, skb))
1661 				break;
1662 
1663 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1664 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1665 				tp->lost_out += tcp_skb_pcount(skb);
1666 
1667 				/* clear xmit_retrans hint */
1668 				if (tp->retransmit_skb_hint &&
1669 				    before(TCP_SKB_CB(skb)->seq,
1670 					   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
1671 
1672 					tp->retransmit_skb_hint = NULL;
1673 			}
1674 		}
1675 
1676 		tp->scoreboard_skb_hint = skb;
1677 
1678 		tcp_sync_left_out(tp);
1679 	}
1680 }
1681 
1682 /* CWND moderation, preventing bursts due to too big ACKs
1683  * in dubious situations.
1684  */
1685 static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
1686 {
1687 	tp->snd_cwnd = min(tp->snd_cwnd,
1688 			   tcp_packets_in_flight(tp)+tcp_max_burst(tp));
1689 	tp->snd_cwnd_stamp = tcp_time_stamp;
1690 }
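
/* Worked example of the moderation above (illustrative numbers): if
 * snd_cwnd is 20 but only 5 segments are in flight and tcp_max_burst()
 * allows 3, snd_cwnd is clamped to 8, so a single oversized ACK cannot
 * trigger a 15-segment burst.
 */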
1691 
1692 /* The lower bound on the congestion window is the slow start threshold,
1693  * unless the congestion control algorithm chooses to override it.
1694  */
1695 static inline u32 tcp_cwnd_min(const struct sock *sk)
1696 {
1697 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1698 
1699 	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
1700 }
1701 
1702 /* Decrease cwnd each second ack. */
1703 static void tcp_cwnd_down(struct sock *sk)
1704 {
1705 	struct tcp_sock *tp = tcp_sk(sk);
1706 	int decr = tp->snd_cwnd_cnt + 1;
1707 
1708 	tp->snd_cwnd_cnt = decr&1;
1709 	decr >>= 1;
1710 
1711 	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
1712 		tp->snd_cwnd -= decr;
1713 
1714 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
1715 	tp->snd_cwnd_stamp = tcp_time_stamp;
1716 }
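
/* A minimal standalone sketch (user space, made-up starting values) of the
 * bookkeeping above: snd_cwnd_cnt keeps a one-bit remainder, so the window
 * shrinks by one segment on every second ACK until it hits the minimum.
 */
#if 0	/* illustrative only, not compiled as part of tcp_input.c */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 10, snd_cwnd_cnt = 0, cwnd_min = 2;
	int ack;

	for (ack = 1; ack <= 8; ack++) {
		unsigned int decr = snd_cwnd_cnt + 1;

		snd_cwnd_cnt = decr & 1;
		decr >>= 1;
		if (decr && snd_cwnd > cwnd_min)
			snd_cwnd -= decr;
		/* prints 10, 9, 9, 8, 8, 7, 7, 6 */
		printf("ack %d: cwnd=%u\n", ack, snd_cwnd);
	}
	return 0;
}
#endif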
1717 
1718 /* Nothing was retransmitted, or the returned timestamp is less
1719  * than the timestamp of the first retransmission.
1720  */
1721 static inline int tcp_packet_delayed(struct tcp_sock *tp)
1722 {
1723 	return !tp->retrans_stamp ||
1724 		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
1725 		 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
1726 }
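
/* In other words (assuming timestamps are in use): if the echoed TSecr
 * predates the timestamp of the first retransmission, the ACK must have
 * been triggered by the original transmission, so the retransmission was
 * spurious and the undo heuristics below may apply.
 */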
1727 
1728 /* Undo procedures. */
1729 
1730 #if FASTRETRANS_DEBUG > 1
1731 static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
1732 {
1733 	struct inet_sock *inet = inet_sk(sk);
1734 	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
1735 	       msg,
1736 	       NIPQUAD(inet->daddr), ntohs(inet->dport),
1737 	       tp->snd_cwnd, tp->left_out,
1738 	       tp->snd_ssthresh, tp->prior_ssthresh,
1739 	       tp->packets_out);
1740 }
1741 #else
1742 #define DBGUNDO(x...) do { } while (0)
1743 #endif
1744 
1745 static void tcp_undo_cwr(struct sock *sk, const int undo)
1746 {
1747 	struct tcp_sock *tp = tcp_sk(sk);
1748 
1749 	if (tp->prior_ssthresh) {
1750 		const struct inet_connection_sock *icsk = inet_csk(sk);
1751 
1752 		if (icsk->icsk_ca_ops->undo_cwnd)
1753 			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
1754 		else
1755 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
1756 
1757 		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
1758 			tp->snd_ssthresh = tp->prior_ssthresh;
1759 			TCP_ECN_withdraw_cwr(tp);
1760 		}
1761 	} else {
1762 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
1763 	}
1764 	tcp_moderate_cwnd(tp);
1765 	tp->snd_cwnd_stamp = tcp_time_stamp;
1766 
1767 	/* There is something screwy going on with the retrans hints after
1768 	   an undo */
1769 	clear_all_retrans_hints(tp);
1770 }
1771 
1772 static inline int tcp_may_undo(struct tcp_sock *tp)
1773 {
1774 	return tp->undo_marker &&
1775 		(!tp->undo_retrans || tcp_packet_delayed(tp));
1776 }
1777 
1778 /* People celebrate: "We love our President!" */
1779 static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1780 {
1781 	if (tcp_may_undo(tp)) {
1782 		/* Happy end! We did not retransmit anything
1783 		 * or our original transmission succeeded.
1784 		 */
1785 		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
1786 		tcp_undo_cwr(sk, 1);
1787 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
1788 			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1789 		else
1790 			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
1791 		tp->undo_marker = 0;
1792 	}
1793 	if (tp->snd_una == tp->high_seq && IsReno(tp)) {
1794 		/* Hold old state until something *above* high_seq
1795 		 * is ACKed. For Reno this is a MUST to prevent false
1796 		 * fast retransmits (RFC2582). SACK TCP is safe. */
1797 		tcp_moderate_cwnd(tp);
1798 		return 1;
1799 	}
1800 	tcp_set_ca_state(sk, TCP_CA_Open);
1801 	return 0;
1802 }
1803 
1804 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
1805 static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
1806 {
1807 	if (tp->undo_marker && !tp->undo_retrans) {
1808 		DBGUNDO(sk, tp, "D-SACK");
1809 		tcp_undo_cwr(sk, 1);
1810 		tp->undo_marker = 0;
1811 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
1812 	}
1813 }
1814 
1815 /* Undo during fast recovery after partial ACK. */
1816 
1817 static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
1818 				int acked)
1819 {
1820 	/* Partial ACK arrived. Force Hoe's retransmit. */
1821 	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
1822 
1823 	if (tcp_may_undo(tp)) {
1824 		/* Plain luck! The hole is filled with a delayed
1825 		 * packet, rather than with a retransmit.
1826 		 */
1827 		if (tp->retrans_out == 0)
1828 			tp->retrans_stamp = 0;
1829 
1830 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
1831 
1832 		DBGUNDO(sk, tp, "Hoe");
1833 		tcp_undo_cwr(sk, 0);
1834 		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
1835 
1836 		/* So... Do not make Hoe's retransmit yet.
1837 		 * If the first packet was delayed, the rest
1838 		 * are most probably delayed as well.
1839 		 */
1840 		failed = 0;
1841 	}
1842 	return failed;
1843 }
1844 
1845 /* Undo during loss recovery after partial ACK. */
1846 static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
1847 {
1848 	if (tcp_may_undo(tp)) {
1849 		struct sk_buff *skb;
1850 		sk_stream_for_retrans_queue(skb, sk) {
1851 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1852 		}
1853 
1854 		clear_all_retrans_hints(tp);
1855 
1856 		DBGUNDO(sk, tp, "partial loss");
1857 		tp->lost_out = 0;
1858 		tp->left_out = tp->sacked_out;
1859 		tcp_undo_cwr(sk, 1);
1860 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1861 		inet_csk(sk)->icsk_retransmits = 0;
1862 		tp->undo_marker = 0;
1863 		if (!IsReno(tp))
1864 			tcp_set_ca_state(sk, TCP_CA_Open);
1865 		return 1;
1866 	}
1867 	return 0;
1868 }
1869 
1870 static inline void tcp_complete_cwr(struct sock *sk)
1871 {
1872 	struct tcp_sock *tp = tcp_sk(sk);
1873 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
1874 	tp->snd_cwnd_stamp = tcp_time_stamp;
1875 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
1876 }
1877 
1878 static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
1879 {
1880 	tp->left_out = tp->sacked_out;
1881 
1882 	if (tp->retrans_out == 0)
1883 		tp->retrans_stamp = 0;
1884 
1885 	if (flag&FLAG_ECE)
1886 		tcp_enter_cwr(sk);
1887 
1888 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
1889 		int state = TCP_CA_Open;
1890 
1891 		if (tp->left_out || tp->retrans_out || tp->undo_marker)
1892 			state = TCP_CA_Disorder;
1893 
1894 		if (inet_csk(sk)->icsk_ca_state != state) {
1895 			tcp_set_ca_state(sk, state);
1896 			tp->high_seq = tp->snd_nxt;
1897 		}
1898 		tcp_moderate_cwnd(tp);
1899 	} else {
1900 		tcp_cwnd_down(sk);
1901 	}
1902 }
1903 
1904 static void tcp_mtup_probe_failed(struct sock *sk)
1905 {
1906 	struct inet_connection_sock *icsk = inet_csk(sk);
1907 
1908 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
1909 	icsk->icsk_mtup.probe_size = 0;
1910 }
1911 
1912 static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
1913 {
1914 	struct tcp_sock *tp = tcp_sk(sk);
1915 	struct inet_connection_sock *icsk = inet_csk(sk);
1916 
1917 	/* FIXME: breaks with very large cwnd */
1918 	tp->prior_ssthresh = tcp_current_ssthresh(sk);
1919 	tp->snd_cwnd = tp->snd_cwnd *
1920 		       tcp_mss_to_mtu(sk, tp->mss_cache) /
1921 		       icsk->icsk_mtup.probe_size;
1922 	tp->snd_cwnd_cnt = 0;
1923 	tp->snd_cwnd_stamp = tcp_time_stamp;
1924 	tp->rcv_ssthresh = tcp_current_ssthresh(sk);
1925 
1926 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
1927 	icsk->icsk_mtup.probe_size = 0;
1928 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
1929 }
1930 
1931 
1932 /* Process an event which can update packets-in-flight non-trivially.
1933  * The main goal of this function is to calculate a new estimate for
1934  * left_out, taking into account both packets sitting in the receiver's
1935  * buffer and packets lost by the network.
1936  *
1937  * Besides that, it performs CWND reduction when packet loss is detected
1938  * and changes the state of the machine accordingly.
1939  *
1940  * It does _not_ decide what to send; that is done in
1941  * tcp_xmit_retransmit_queue().
1942  */
1943 static void
1944 tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1945 		      int prior_packets, int flag)
1946 {
1947 	struct inet_connection_sock *icsk = inet_csk(sk);
1948 	struct tcp_sock *tp = tcp_sk(sk);
1949 	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
1950 
1951 	/* Some technical things:
1952 	 * 1. Reno does not count dupacks (sacked_out) automatically. */
1953 	if (!tp->packets_out)
1954 		tp->sacked_out = 0;
1955         /* 2. SACK counts snd_fack in packets inaccurately. */
1956 	if (tp->sacked_out == 0)
1957 		tp->fackets_out = 0;
1958 
1959         /* Now state machine starts.
1960 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
1961 	if (flag&FLAG_ECE)
1962 		tp->prior_ssthresh = 0;
1963 
1964 	/* B. In all the states check for reneging SACKs. */
1965 	if (tp->sacked_out && tcp_check_sack_reneging(sk))
1966 		return;
1967 
1968 	/* C. Process data loss notification, provided it is valid. */
1969 	if ((flag&FLAG_DATA_LOST) &&
1970 	    before(tp->snd_una, tp->high_seq) &&
1971 	    icsk->icsk_ca_state != TCP_CA_Open &&
1972 	    tp->fackets_out > tp->reordering) {
1973 		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
1974 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
1975 	}
1976 
1977 	/* D. Synchronize left_out to current state. */
1978 	tcp_sync_left_out(tp);
1979 
1980 	/* E. Check state exit conditions. State can be terminated
1981 	 *    when high_seq is ACKed. */
1982 	if (icsk->icsk_ca_state == TCP_CA_Open) {
1983 		if (!sysctl_tcp_frto)
1984 			BUG_TRAP(tp->retrans_out == 0);
1985 		tp->retrans_stamp = 0;
1986 	} else if (!before(tp->snd_una, tp->high_seq)) {
1987 		switch (icsk->icsk_ca_state) {
1988 		case TCP_CA_Loss:
1989 			icsk->icsk_retransmits = 0;
1990 			if (tcp_try_undo_recovery(sk, tp))
1991 				return;
1992 			break;
1993 
1994 		case TCP_CA_CWR:
1995 			/* CWR must be held until something *above* high_seq
1996 			 * is ACKed, so that the CWR bit reaches the receiver. */
1997 			if (tp->snd_una != tp->high_seq) {
1998 				tcp_complete_cwr(sk);
1999 				tcp_set_ca_state(sk, TCP_CA_Open);
2000 			}
2001 			break;
2002 
2003 		case TCP_CA_Disorder:
2004 			tcp_try_undo_dsack(sk, tp);
2005 			if (!tp->undo_marker ||
2006 			    /* In the SACK case do not go to Open, so that undo
2007 			     * can still be caught for all duplicate ACKs. */
2008 			    IsReno(tp) || tp->snd_una != tp->high_seq) {
2009 				tp->undo_marker = 0;
2010 				tcp_set_ca_state(sk, TCP_CA_Open);
2011 			}
2012 			break;
2013 
2014 		case TCP_CA_Recovery:
2015 			if (IsReno(tp))
2016 				tcp_reset_reno_sack(tp);
2017 			if (tcp_try_undo_recovery(sk, tp))
2018 				return;
2019 			tcp_complete_cwr(sk);
2020 			break;
2021 		}
2022 	}
2023 
2024 	/* F. Process state. */
2025 	switch (icsk->icsk_ca_state) {
2026 	case TCP_CA_Recovery:
2027 		if (prior_snd_una == tp->snd_una) {
2028 			if (IsReno(tp) && is_dupack)
2029 				tcp_add_reno_sack(sk);
2030 		} else {
2031 			int acked = prior_packets - tp->packets_out;
2032 			if (IsReno(tp))
2033 				tcp_remove_reno_sacks(sk, tp, acked);
2034 			is_dupack = tcp_try_undo_partial(sk, tp, acked);
2035 		}
2036 		break;
2037 	case TCP_CA_Loss:
2038 		if (flag&FLAG_DATA_ACKED)
2039 			icsk->icsk_retransmits = 0;
2040 		if (!tcp_try_undo_loss(sk, tp)) {
2041 			tcp_moderate_cwnd(tp);
2042 			tcp_xmit_retransmit_queue(sk);
2043 			return;
2044 		}
2045 		if (icsk->icsk_ca_state != TCP_CA_Open)
2046 			return;
2047 		/* Loss is undone; fall through to processing in Open state. */
2048 	default:
2049 		if (IsReno(tp)) {
2050 			if (tp->snd_una != prior_snd_una)
2051 				tcp_reset_reno_sack(tp);
2052 			if (is_dupack)
2053 				tcp_add_reno_sack(sk);
2054 		}
2055 
2056 		if (icsk->icsk_ca_state == TCP_CA_Disorder)
2057 			tcp_try_undo_dsack(sk, tp);
2058 
2059 		if (!tcp_time_to_recover(sk, tp)) {
2060 			tcp_try_to_open(sk, tp, flag);
2061 			return;
2062 		}
2063 
2064 		/* MTU probe failure: don't reduce cwnd */
2065 		if (icsk->icsk_ca_state < TCP_CA_CWR &&
2066 		    icsk->icsk_mtup.probe_size &&
2067 		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
2068 			tcp_mtup_probe_failed(sk);
2069 			/* Restores the reduction we did in tcp_mtup_probe() */
2070 			tp->snd_cwnd++;
2071 			tcp_simple_retransmit(sk);
2072 			return;
2073 		}
2074 
2075 		/* Otherwise enter Recovery state */
2076 
2077 		if (IsReno(tp))
2078 			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
2079 		else
2080 			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
2081 
2082 		tp->high_seq = tp->snd_nxt;
2083 		tp->prior_ssthresh = 0;
2084 		tp->undo_marker = tp->snd_una;
2085 		tp->undo_retrans = tp->retrans_out;
2086 
2087 		if (icsk->icsk_ca_state < TCP_CA_CWR) {
2088 			if (!(flag&FLAG_ECE))
2089 				tp->prior_ssthresh = tcp_current_ssthresh(sk);
2090 			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2091 			TCP_ECN_queue_cwr(tp);
2092 		}
2093 
2094 		tp->bytes_acked = 0;
2095 		tp->snd_cwnd_cnt = 0;
2096 		tcp_set_ca_state(sk, TCP_CA_Recovery);
2097 	}
2098 
2099 	if (is_dupack || tcp_head_timedout(sk, tp))
2100 		tcp_update_scoreboard(sk, tp);
2101 	tcp_cwnd_down(sk);
2102 	tcp_xmit_retransmit_queue(sk);
2103 }
2104 
2105 /* Read draft-ietf-tcplw-high-performance before mucking
2106  * with this code. (Supersedes RFC1323)
2107  */
2108 static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
2109 {
2110 	/* RTTM Rule: A TSecr value received in a segment is used to
2111 	 * update the averaged RTT measurement only if the segment
2112 	 * acknowledges some new data, i.e., only if it advances the
2113 	 * left edge of the send window.
2114 	 *
2115 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
2116 	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
2117 	 *
2118 	 * Changed: reset backoff as soon as we see the first valid sample.
2119 	 * If we do not, we get a badly overestimated rto. With timestamps,
2120 	 * samples are accepted even from very old segments: e.g., when rtt=1
2121 	 * increases to 8, we retransmit 5 times, and when the delayed answer
2122 	 * arrives after 8 seconds, rto becomes 120 seconds! If at least one
2123 	 * segment in the window is lost... Voila.	 			--ANK (010210)
2124 	 */
2125 	struct tcp_sock *tp = tcp_sk(sk);
2126 	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
2127 	tcp_rtt_estimator(sk, seq_rtt);
2128 	tcp_set_rto(sk);
2129 	inet_csk(sk)->icsk_backoff = 0;
2130 	tcp_bound_rto(sk);
2131 }
2132 
2133 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
2134 {
2135 	/* We don't have a timestamp. Can only use
2136 	 * packets that are not retransmitted to determine
2137 	 * rtt estimates. Also, we must not reset the
2138 	 * backoff for rto until we get a non-retransmitted
2139 	 * packet. This allows us to deal with a situation
2140 	 * where the network delay has increased suddenly.
2141 	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
2142 	 */
2143 
2144 	if (flag & FLAG_RETRANS_DATA_ACKED)
2145 		return;
2146 
2147 	tcp_rtt_estimator(sk, seq_rtt);
2148 	tcp_set_rto(sk);
2149 	inet_csk(sk)->icsk_backoff = 0;
2150 	tcp_bound_rto(sk);
2151 }
2152 
2153 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
2154 				      const s32 seq_rtt)
2155 {
2156 	const struct tcp_sock *tp = tcp_sk(sk);
2157 	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
2158 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
2159 		tcp_ack_saw_tstamp(sk, flag);
2160 	else if (seq_rtt >= 0)
2161 		tcp_ack_no_tstamp(sk, seq_rtt, flag);
2162 }
2163 
2164 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
2165 			   u32 in_flight, int good)
2166 {
2167 	const struct inet_connection_sock *icsk = inet_csk(sk);
2168 	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
2169 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
2170 }
2171 
2172 /* Restart timer after forward progress on connection.
2173  * RFC2988 recommends restarting the timer to now + rto.
2174  */
2175 
2176 static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
2177 {
2178 	if (!tp->packets_out) {
2179 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
2180 	} else {
2181 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2182 	}
2183 }
2184 
2185 static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
2186 			 __u32 now, __s32 *seq_rtt)
2187 {
2188 	struct tcp_sock *tp = tcp_sk(sk);
2189 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2190 	__u32 seq = tp->snd_una;
2191 	__u32 packets_acked;
2192 	int acked = 0;
2193 
2194 	/* If we get here, the whole TSO packet has not been
2195 	 * acked.
2196 	 */
2197 	BUG_ON(!after(scb->end_seq, seq));
2198 
2199 	packets_acked = tcp_skb_pcount(skb);
2200 	if (tcp_trim_head(sk, skb, seq - scb->seq))
2201 		return 0;
2202 	packets_acked -= tcp_skb_pcount(skb);
2203 
2204 	if (packets_acked) {
2205 		__u8 sacked = scb->sacked;
2206 
2207 		acked |= FLAG_DATA_ACKED;
2208 		if (sacked) {
2209 			if (sacked & TCPCB_RETRANS) {
2210 				if (sacked & TCPCB_SACKED_RETRANS)
2211 					tp->retrans_out -= packets_acked;
2212 				acked |= FLAG_RETRANS_DATA_ACKED;
2213 				*seq_rtt = -1;
2214 			} else if (*seq_rtt < 0)
2215 				*seq_rtt = now - scb->when;
2216 			if (sacked & TCPCB_SACKED_ACKED)
2217 				tp->sacked_out -= packets_acked;
2218 			if (sacked & TCPCB_LOST)
2219 				tp->lost_out -= packets_acked;
2220 			if (sacked & TCPCB_URG) {
2221 				if (tp->urg_mode &&
2222 				    !before(seq, tp->snd_up))
2223 					tp->urg_mode = 0;
2224 			}
2225 		} else if (*seq_rtt < 0)
2226 			*seq_rtt = now - scb->when;
2227 
2228 		if (tp->fackets_out) {
2229 			__u32 dval = min(tp->fackets_out, packets_acked);
2230 			tp->fackets_out -= dval;
2231 		}
2232 		tp->packets_out -= packets_acked;
2233 
2234 		BUG_ON(tcp_skb_pcount(skb) == 0);
2235 		BUG_ON(!before(scb->seq, scb->end_seq));
2236 	}
2237 
2238 	return acked;
2239 }
2240 
2241 static u32 tcp_usrtt(const struct sk_buff *skb)
2242 {
2243 	struct timeval tv, now;
2244 
2245 	do_gettimeofday(&now);
2246 	skb_get_timestamp(skb, &tv);
2247 	return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
2248 }
2249 
2250 /* Remove acknowledged frames from the retransmission queue. */
2251 static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2252 {
2253 	struct tcp_sock *tp = tcp_sk(sk);
2254 	const struct inet_connection_sock *icsk = inet_csk(sk);
2255 	struct sk_buff *skb;
2256 	__u32 now = tcp_time_stamp;
2257 	int acked = 0;
2258 	__s32 seq_rtt = -1;
2259 	u32 pkts_acked = 0;
2260 	void (*rtt_sample)(struct sock *sk, u32 usrtt)
2261 		= icsk->icsk_ca_ops->rtt_sample;
2262 
2263 	while ((skb = skb_peek(&sk->sk_write_queue)) &&
2264 	       skb != sk->sk_send_head) {
2265 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2266 		__u8 sacked = scb->sacked;
2267 
2268 		/* If our packet is before the ack sequence we can
2269 		 * discard it as it's confirmed to have arrived at
2270 		 * the other end.
2271 		 */
2272 		if (after(scb->end_seq, tp->snd_una)) {
2273 			if (tcp_skb_pcount(skb) > 1 &&
2274 			    after(tp->snd_una, scb->seq))
2275 				acked |= tcp_tso_acked(sk, skb,
2276 						       now, &seq_rtt);
2277 			break;
2278 		}
2279 
2280 		/* Initial outgoing SYN's get put onto the write_queue
2281 		 * just like anything else we transmit.  It is not
2282 		 * true data, and if we misinform our callers that
2283 		 * this ACK acks real data, we will erroneously exit
2284 		 * connection startup slow start one packet too
2285 		 * quickly.  This is severely frowned upon behavior.
2286 		 */
2287 		if (!(scb->flags & TCPCB_FLAG_SYN)) {
2288 			acked |= FLAG_DATA_ACKED;
2289 			++pkts_acked;
2290 		} else {
2291 			acked |= FLAG_SYN_ACKED;
2292 			tp->retrans_stamp = 0;
2293 		}
2294 
2295 		/* MTU probing checks */
2296 		if (icsk->icsk_mtup.probe_size) {
2297 			if (!after(tp->mtu_probe.probe_seq_end, TCP_SKB_CB(skb)->end_seq)) {
2298 				tcp_mtup_probe_success(sk, skb);
2299 			}
2300 		}
2301 
2302 		if (sacked) {
2303 			if (sacked & TCPCB_RETRANS) {
2304 				if(sacked & TCPCB_SACKED_RETRANS)
2305 					tp->retrans_out -= tcp_skb_pcount(skb);
2306 				acked |= FLAG_RETRANS_DATA_ACKED;
2307 				seq_rtt = -1;
2308 			} else if (seq_rtt < 0) {
2309 				seq_rtt = now - scb->when;
2310 				if (rtt_sample)
2311 					(*rtt_sample)(sk, tcp_usrtt(skb));
2312 			}
2313 			if (sacked & TCPCB_SACKED_ACKED)
2314 				tp->sacked_out -= tcp_skb_pcount(skb);
2315 			if (sacked & TCPCB_LOST)
2316 				tp->lost_out -= tcp_skb_pcount(skb);
2317 			if (sacked & TCPCB_URG) {
2318 				if (tp->urg_mode &&
2319 				    !before(scb->end_seq, tp->snd_up))
2320 					tp->urg_mode = 0;
2321 			}
2322 		} else if (seq_rtt < 0) {
2323 			seq_rtt = now - scb->when;
2324 			if (rtt_sample)
2325 				(*rtt_sample)(sk, tcp_usrtt(skb));
2326 		}
2327 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
2328 		tcp_packets_out_dec(tp, skb);
2329 		__skb_unlink(skb, &sk->sk_write_queue);
2330 		sk_stream_free_skb(sk, skb);
2331 		clear_all_retrans_hints(tp);
2332 	}
2333 
2334 	if (acked&FLAG_ACKED) {
2335 		tcp_ack_update_rtt(sk, acked, seq_rtt);
2336 		tcp_ack_packets_out(sk, tp);
2337 
2338 		if (icsk->icsk_ca_ops->pkts_acked)
2339 			icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
2340 	}
2341 
2342 #if FASTRETRANS_DEBUG > 0
2343 	BUG_TRAP((int)tp->sacked_out >= 0);
2344 	BUG_TRAP((int)tp->lost_out >= 0);
2345 	BUG_TRAP((int)tp->retrans_out >= 0);
2346 	if (!tp->packets_out && tp->rx_opt.sack_ok) {
2347 		const struct inet_connection_sock *icsk = inet_csk(sk);
2348 		if (tp->lost_out) {
2349 			printk(KERN_DEBUG "Leak l=%u %d\n",
2350 			       tp->lost_out, icsk->icsk_ca_state);
2351 			tp->lost_out = 0;
2352 		}
2353 		if (tp->sacked_out) {
2354 			printk(KERN_DEBUG "Leak s=%u %d\n",
2355 			       tp->sacked_out, icsk->icsk_ca_state);
2356 			tp->sacked_out = 0;
2357 		}
2358 		if (tp->retrans_out) {
2359 			printk(KERN_DEBUG "Leak r=%u %d\n",
2360 			       tp->retrans_out, icsk->icsk_ca_state);
2361 			tp->retrans_out = 0;
2362 		}
2363 	}
2364 #endif
2365 	*seq_rtt_p = seq_rtt;
2366 	return acked;
2367 }
2368 
2369 static void tcp_ack_probe(struct sock *sk)
2370 {
2371 	const struct tcp_sock *tp = tcp_sk(sk);
2372 	struct inet_connection_sock *icsk = inet_csk(sk);
2373 
2374 	/* Did this ACK open up a usable window? */
2375 
2376 	if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
2377 		   tp->snd_una + tp->snd_wnd)) {
2378 		icsk->icsk_backoff = 0;
2379 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
2380 		/* Socket must be woken up by a subsequent tcp_data_snd_check().
2381 		 * This function is not for random use!
2382 		 */
2383 	} else {
2384 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2385 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2386 					  TCP_RTO_MAX);
2387 	}
2388 }
2389 
2390 static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
2391 {
2392 	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
2393 		inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
2394 }
2395 
2396 static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
2397 {
2398 	const struct tcp_sock *tp = tcp_sk(sk);
2399 	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
2400 		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
2401 }
2402 
2403 /* Check that window update is acceptable.
2404  * The function assumes that snd_una<=ack<=snd_next.
2405  */
2406 static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
2407 					const u32 ack_seq, const u32 nwin)
2408 {
2409 	return (after(ack, tp->snd_una) ||
2410 		after(ack_seq, tp->snd_wl1) ||
2411 		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
2412 }
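
/* Illustration of the predicate above (made-up values): an ACK that does
 * not advance snd_una and carries an old ack_seq is still accepted if it
 * advertises a larger window, e.g. nwin growing from 32K to 64K with
 * ack_seq == snd_wl1; an exact duplicate of the previous ACK is rejected.
 */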
2413 
2414 /* Update our send window.
2415  *
2416  * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
2417  * and in FreeBSD; NetBSD's is even worse) is wrong.
2418  */
2419 static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
2420 				 struct sk_buff *skb, u32 ack, u32 ack_seq)
2421 {
2422 	int flag = 0;
2423 	u32 nwin = ntohs(skb->h.th->window);
2424 
2425 	if (likely(!skb->h.th->syn))
2426 		nwin <<= tp->rx_opt.snd_wscale;
2427 
2428 	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
2429 		flag |= FLAG_WIN_UPDATE;
2430 		tcp_update_wl(tp, ack, ack_seq);
2431 
2432 		if (tp->snd_wnd != nwin) {
2433 			tp->snd_wnd = nwin;
2434 
2435 			/* Note: this is the only place where the
2436 			 * fast path is re-enabled for the sending side.
2437 			 */
2438 			tp->pred_flags = 0;
2439 			tcp_fast_path_check(sk, tp);
2440 
2441 			if (nwin > tp->max_window) {
2442 				tp->max_window = nwin;
2443 				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
2444 			}
2445 		}
2446 	}
2447 
2448 	tp->snd_una = ack;
2449 
2450 	return flag;
2451 }
2452 
2453 static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2454 {
2455 	struct tcp_sock *tp = tcp_sk(sk);
2456 
2457 	tcp_sync_left_out(tp);
2458 
2459 	if (tp->snd_una == prior_snd_una ||
2460 	    !before(tp->snd_una, tp->frto_highmark)) {
2461 		/* RTO was caused by loss, start retransmitting in
2462 		 * go-back-N slow start
2463 		 */
2464 		tcp_enter_frto_loss(sk);
2465 		return;
2466 	}
2467 
2468 	if (tp->frto_counter == 1) {
2469 		/* First ACK after RTO advances the window: allow two new
2470 		 * segments out.
2471 		 */
2472 		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
2473 	} else {
2474 		/* Also the second ACK after RTO advances the window.
2475 		 * The RTO was likely spurious. Reduce cwnd and continue
2476 		 * in congestion avoidance
2477 		 */
2478 		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2479 		tcp_moderate_cwnd(tp);
2480 	}
2481 
2482 	/* F-RTO affects only the two new ACKs following the RTO.
2483 	 * At the latest on the third ACK, TCP behavior is back to normal.
2484 	 */
2485 	tp->frto_counter = (tp->frto_counter + 1) % 3;
2486 }
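
/* Rough F-RTO timeline for the checks above (assuming the RTO was in fact
 * spurious): the first ACK after the RTO advances snd_una, so two new
 * segments are allowed out; the second ACK also advances snd_una, so cwnd
 * is pulled back to ssthresh and we continue in congestion avoidance. An
 * ACK that does not advance snd_una (or that reaches frto_highmark)
 * instead falls back to tcp_enter_frto_loss() and go-back-N slow start.
 */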
2487 
2488 /* This routine deals with incoming acks, but not outgoing ones. */
2489 static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2490 {
2491 	struct inet_connection_sock *icsk = inet_csk(sk);
2492 	struct tcp_sock *tp = tcp_sk(sk);
2493 	u32 prior_snd_una = tp->snd_una;
2494 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
2495 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
2496 	u32 prior_in_flight;
2497 	s32 seq_rtt;
2498 	int prior_packets;
2499 
2500 	/* If the ack is newer than anything we have sent, or older than
2501 	 * previous acks, then we can probably ignore it.
2502 	 */
2503 	if (after(ack, tp->snd_nxt))
2504 		goto uninteresting_ack;
2505 
2506 	if (before(ack, prior_snd_una))
2507 		goto old_ack;
2508 
2509 	if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR)
2510 		tp->bytes_acked += ack - prior_snd_una;
2511 
2512 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
2513 		/* Window is constant, pure forward advance.
2514 		 * No more checks are required.
2515 		 * Note, we use the fact that SND.UNA>=SND.WL2.
2516 		 */
2517 		tcp_update_wl(tp, ack, ack_seq);
2518 		tp->snd_una = ack;
2519 		flag |= FLAG_WIN_UPDATE;
2520 
2521 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
2522 
2523 		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
2524 	} else {
2525 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
2526 			flag |= FLAG_DATA;
2527 		else
2528 			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
2529 
2530 		flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
2531 
2532 		if (TCP_SKB_CB(skb)->sacked)
2533 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2534 
2535 		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2536 			flag |= FLAG_ECE;
2537 
2538 		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
2539 	}
2540 
2541 	/* We passed data and got it acked, remove any soft error
2542 	 * log. Something worked...
2543 	 */
2544 	sk->sk_err_soft = 0;
2545 	tp->rcv_tstamp = tcp_time_stamp;
2546 	prior_packets = tp->packets_out;
2547 	if (!prior_packets)
2548 		goto no_queue;
2549 
2550 	prior_in_flight = tcp_packets_in_flight(tp);
2551 
2552 	/* See if we can take anything off of the retransmit queue. */
2553 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
2554 
2555 	if (tp->frto_counter)
2556 		tcp_process_frto(sk, prior_snd_una);
2557 
2558 	if (tcp_ack_is_dubious(sk, flag)) {
2559 		/* Advance CWND, if state allows this. */
2560 		if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2561 			tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
2562 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2563 	} else {
2564 		if ((flag & FLAG_DATA_ACKED))
2565 			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
2566 	}
2567 
2568 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
2569 		dst_confirm(sk->sk_dst_cache);
2570 
2571 	return 1;
2572 
2573 no_queue:
2574 	icsk->icsk_probes_out = 0;
2575 
2576 	/* If this ack opens up a zero window, clear backoff.  It was
2577 	 * being used to time the probes, and is probably far higher than
2578 	 * it needs to be for normal retransmission.
2579 	 */
2580 	if (sk->sk_send_head)
2581 		tcp_ack_probe(sk);
2582 	return 1;
2583 
2584 old_ack:
2585 	if (TCP_SKB_CB(skb)->sacked)
2586 		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2587 
2588 uninteresting_ack:
2589 	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
2590 	return 0;
2591 }
2592 
2593 
2594 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
2595  * But, this can also be called on packets in the established flow when
2596  * the fast version below fails.
2597  */
2598 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
2599 {
2600 	unsigned char *ptr;
2601 	struct tcphdr *th = skb->h.th;
2602 	int length=(th->doff*4)-sizeof(struct tcphdr);
2603 
2604 	ptr = (unsigned char *)(th + 1);
2605 	opt_rx->saw_tstamp = 0;
2606 
2607 	while(length>0) {
2608 	  	int opcode=*ptr++;
2609 		int opsize;
2610 
2611 		switch (opcode) {
2612 			case TCPOPT_EOL:
2613 				return;
2614 			case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
2615 				length--;
2616 				continue;
2617 			default:
2618 				opsize=*ptr++;
2619 				if (opsize < 2) /* "silly options" */
2620 					return;
2621 				if (opsize > length)
2622 					return;	/* don't parse partial options */
2623 	  			switch(opcode) {
2624 				case TCPOPT_MSS:
2625 					if(opsize==TCPOLEN_MSS && th->syn && !estab) {
2626 						u16 in_mss = ntohs(get_unaligned((__u16 *)ptr));
2627 						if (in_mss) {
2628 							if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
2629 								in_mss = opt_rx->user_mss;
2630 							opt_rx->mss_clamp = in_mss;
2631 						}
2632 					}
2633 					break;
2634 				case TCPOPT_WINDOW:
2635 					if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
2636 						if (sysctl_tcp_window_scaling) {
2637 							__u8 snd_wscale = *(__u8 *) ptr;
2638 							opt_rx->wscale_ok = 1;
2639 							if (snd_wscale > 14) {
2640 								if(net_ratelimit())
2641 									printk(KERN_INFO "tcp_parse_options: Illegal window "
2642 									       "scaling value %d >14 received.\n",
2643 									       snd_wscale);
2644 								snd_wscale = 14;
2645 							}
2646 							opt_rx->snd_wscale = snd_wscale;
2647 						}
2648 					break;
2649 				case TCPOPT_TIMESTAMP:
2650 					if(opsize==TCPOLEN_TIMESTAMP) {
2651 						if ((estab && opt_rx->tstamp_ok) ||
2652 						    (!estab && sysctl_tcp_timestamps)) {
2653 							opt_rx->saw_tstamp = 1;
2654 							opt_rx->rcv_tsval = ntohl(get_unaligned((__u32 *)ptr));
2655 							opt_rx->rcv_tsecr = ntohl(get_unaligned((__u32 *)(ptr+4)));
2656 						}
2657 					}
2658 					break;
2659 				case TCPOPT_SACK_PERM:
2660 					if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
2661 						if (sysctl_tcp_sack) {
2662 							opt_rx->sack_ok = 1;
2663 							tcp_sack_reset(opt_rx);
2664 						}
2665 					}
2666 					break;
2667 
2668 				case TCPOPT_SACK:
2669 					if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
2670 					   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
2671 					   opt_rx->sack_ok) {
2672 						TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
2673 					}
2674 	  			};
2675 	  			ptr+=opsize-2;
2676 	  			length-=opsize;
2677 	  	};
2678 	}
2679 }
2680 
2681 /* Fast parse options. This hopes to only see timestamps.
2682  * If it is wrong it falls back on tcp_parse_options().
2683  */
2684 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
2685 				  struct tcp_sock *tp)
2686 {
2687 	if (th->doff == sizeof(struct tcphdr)>>2) {
2688 		tp->rx_opt.saw_tstamp = 0;
2689 		return 0;
2690 	} else if (tp->rx_opt.tstamp_ok &&
2691 		   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
2692 		__u32 *ptr = (__u32 *)(th + 1);
2693 		if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
2694 				  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
2695 			tp->rx_opt.saw_tstamp = 1;
2696 			++ptr;
2697 			tp->rx_opt.rcv_tsval = ntohl(*ptr);
2698 			++ptr;
2699 			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
2700 			return 1;
2701 		}
2702 	}
2703 	tcp_parse_options(skb, &tp->rx_opt, 1);
2704 	return 1;
2705 }
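
/* The single-word compare above matches the canonical timestamp layout
 * recommended by RFC 1323 appendix A: NOP, NOP, TIMESTAMP, length 10.
 * A standalone user-space sketch of the same constant (illustrative only):
 */
#if 0	/* not compiled as part of tcp_input.c */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* kind=NOP(1), kind=NOP(1), kind=TIMESTAMP(8), length=10 */
	unsigned char aligned_tsopt[4] = { 1, 1, 8, 10 };
	unsigned int word;

	memcpy(&word, aligned_tsopt, sizeof(word));
	/* prints 0x0101080a, the host-order form of the option word */
	printf("0x%08x\n", ntohl(word));
	return 0;
}
#endif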
2706 
2707 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
2708 {
2709 	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
2710 	tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
2711 }
2712 
2713 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
2714 {
2715 	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
2716 		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
2717 		 * extra check below makes sure this can only happen
2718 		 * for pure ACK frames.  -DaveM
2719 		 *
2720 		 * Not only that; it also occurs for expired timestamps.
2721 		 */
2722 
2723 		if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
2724 		   xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
2725 			tcp_store_ts_recent(tp);
2726 	}
2727 }
2728 
2729 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
2730  *
2731  * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
2732  * it can pass through the stack. So, the following predicate verifies that
2733  * this segment is not used for anything but congestion avoidance or
2734  * fast retransmit. Moreover, we are even able to eliminate most of such
2735  * second order effects, if we apply some small "replay" window (~RTO)
2736  * to timestamp space.
2737  *
2738  * All these measures still do not guarantee that we reject wrapped ACKs
2739  * on networks with high bandwidth, when sequence space is recycled quickly,
2740  * but they guarantee that such events will be very rare and will not affect
2741  * the connection seriously. This doesn't look nice, but alas, PAWS is a
2742  * really buggy extension.
2743  *
2744  * [ Later note. Even worse! It is buggy for segments _with_ data. The RFC
2745  * states that events where a retransmit arrives after the original data are
2746  * rare. That is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
2747  * the biggest problem on large power networks even with minor reordering.
2748  * OK, let's give it a small replay window. If the peer clock is even 1 Hz,
2749  * it is safe up to a bandwidth of 18 Gigabit/sec. 8) ]
2750  */
2751 
2752 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
2753 {
2754 	struct tcp_sock *tp = tcp_sk(sk);
2755 	struct tcphdr *th = skb->h.th;
2756 	u32 seq = TCP_SKB_CB(skb)->seq;
2757 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
2758 
2759 	return (/* 1. Pure ACK with correct sequence number. */
2760 		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
2761 
2762 		/* 2. ... and duplicate ACK. */
2763 		ack == tp->snd_una &&
2764 
2765 		/* 3. ... and does not update window. */
2766 		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
2767 
2768 		/* 4. ... and sits in replay window. */
2769 		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
2770 }
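
/* Sketch of rule 4 above (illustrative, assuming a peer timestamp clock of
 * about 1 kHz): with an RTO of 200 ms the replay window is roughly 200
 * timestamp ticks, so a pure duplicate ACK whose TSval lags ts_recent by
 * less than that still gets through for fast retransmit purposes, while a
 * genuinely ancient wrapped ACK does not.
 */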
2771 
2772 static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
2773 {
2774 	const struct tcp_sock *tp = tcp_sk(sk);
2775 	return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
2776 		xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
2777 		!tcp_disordered_ack(sk, skb));
2778 }
2779 
2780 /* Check segment sequence number for validity.
2781  *
2782  * Segment controls are considered valid, if the segment
2783  * fits to the window after truncation to the window. Acceptability
2784  * of data (and SYN, FIN, of course) is checked separately.
2785  * See tcp_data_queue(), for example.
2786  *
2787  * Also, controls (RST is the main one) are accepted using RCV.WUP instead
2788  * of RCV.NXT. The peer may still not have advanced his SND.UNA when we
2789  * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
2790  * (borrowed from freebsd)
2791  */
2792 
2793 static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
2794 {
2795 	return	!before(end_seq, tp->rcv_wup) &&
2796 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
2797 }
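
/* Illustration with made-up numbers: with rcv_wup = 1000, rcv_nxt = 1500
 * and a 10000-byte receive window, a segment spanning 900..1100 is
 * acceptable (its end is not before rcv_wup and its start is not beyond
 * rcv_nxt + window), while one ending below 1000 or starting above 11500
 * is rejected.
 */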
2798 
2799 /* When we get a reset we do this. */
2800 static void tcp_reset(struct sock *sk)
2801 {
2802 	/* We want the right error as BSD sees it (and indeed as we do). */
2803 	switch (sk->sk_state) {
2804 		case TCP_SYN_SENT:
2805 			sk->sk_err = ECONNREFUSED;
2806 			break;
2807 		case TCP_CLOSE_WAIT:
2808 			sk->sk_err = EPIPE;
2809 			break;
2810 		case TCP_CLOSE:
2811 			return;
2812 		default:
2813 			sk->sk_err = ECONNRESET;
2814 	}
2815 
2816 	if (!sock_flag(sk, SOCK_DEAD))
2817 		sk->sk_error_report(sk);
2818 
2819 	tcp_done(sk);
2820 }
2821 
2822 /*
2823  * 	Process the FIN bit. This now behaves as it is supposed to:
2824  *	the FIN takes effect when it is validly part of sequence
2825  *	space, not earlier, while we still have holes.
2826  *
2827  *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
2828  *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
2829  *	TIME-WAIT)
2830  *
2831  *	If we are in FINWAIT-1, a received FIN indicates simultaneous
2832  *	close and we go into CLOSING (and later onto TIME-WAIT)
2833  *
2834  *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
2835  */
2836 static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2837 {
2838 	struct tcp_sock *tp = tcp_sk(sk);
2839 
2840 	inet_csk_schedule_ack(sk);
2841 
2842 	sk->sk_shutdown |= RCV_SHUTDOWN;
2843 	sock_set_flag(sk, SOCK_DONE);
2844 
2845 	switch (sk->sk_state) {
2846 		case TCP_SYN_RECV:
2847 		case TCP_ESTABLISHED:
2848 			/* Move to CLOSE_WAIT */
2849 			tcp_set_state(sk, TCP_CLOSE_WAIT);
2850 			inet_csk(sk)->icsk_ack.pingpong = 1;
2851 			break;
2852 
2853 		case TCP_CLOSE_WAIT:
2854 		case TCP_CLOSING:
2855 			/* Received a retransmission of the FIN, do
2856 			 * nothing.
2857 			 */
2858 			break;
2859 		case TCP_LAST_ACK:
2860 			/* RFC793: Remain in the LAST-ACK state. */
2861 			break;
2862 
2863 		case TCP_FIN_WAIT1:
2864 			/* This case occurs when a simultaneous close
2865 			 * happens, we must ack the received FIN and
2866 			 * enter the CLOSING state.
2867 			 */
2868 			tcp_send_ack(sk);
2869 			tcp_set_state(sk, TCP_CLOSING);
2870 			break;
2871 		case TCP_FIN_WAIT2:
2872 			/* Received a FIN -- send ACK and enter TIME_WAIT. */
2873 			tcp_send_ack(sk);
2874 			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
2875 			break;
2876 		default:
2877 			/* Only TCP_LISTEN and TCP_CLOSE are left, in these
2878 			 * cases we should never reach this piece of code.
2879 			 */
2880 			printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
2881 			       __FUNCTION__, sk->sk_state);
2882 			break;
2883 	};
2884 
2885 	/* It _is_ possible that we have something out-of-order _after_ the FIN.
2886 	 * Probably we should reset in this case. For now, drop them.
2887 	 */
2888 	__skb_queue_purge(&tp->out_of_order_queue);
2889 	if (tp->rx_opt.sack_ok)
2890 		tcp_sack_reset(&tp->rx_opt);
2891 	sk_stream_mem_reclaim(sk);
2892 
2893 	if (!sock_flag(sk, SOCK_DEAD)) {
2894 		sk->sk_state_change(sk);
2895 
2896 		/* Do not send POLL_HUP for half duplex close. */
2897 		if (sk->sk_shutdown == SHUTDOWN_MASK ||
2898 		    sk->sk_state == TCP_CLOSE)
2899 			sk_wake_async(sk, 1, POLL_HUP);
2900 		else
2901 			sk_wake_async(sk, 1, POLL_IN);
2902 	}
2903 }
2904 
2905 static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
2906 {
2907 	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
2908 		if (before(seq, sp->start_seq))
2909 			sp->start_seq = seq;
2910 		if (after(end_seq, sp->end_seq))
2911 			sp->end_seq = end_seq;
2912 		return 1;
2913 	}
2914 	return 0;
2915 }
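
/* A standalone user-space sketch of the block growing done above, using
 * the same wraparound-safe comparison idiom as before()/after(); all
 * sequence numbers are made up.
 */
#if 0	/* illustrative only, not compiled as part of tcp_input.c */
#include <stdio.h>
#include <stdint.h>

static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

int main(void)
{
	uint32_t start = 100, end = 200;	/* existing SACK block */
	uint32_t seq = 150, end_seq = 250;	/* newly reported range */

	/* ranges touch or overlap, so grow the block to their union */
	if (!seq_after(seq, end) && !seq_after(start, end_seq)) {
		if (seq_before(seq, start))
			start = seq;
		if (seq_after(end_seq, end))
			end = end_seq;
	}
	printf("%u..%u\n", (unsigned)start, (unsigned)end);	/* prints 100..250 */
	return 0;
}
#endif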
2916 
2917 static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
2918 {
2919 	if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
2920 		if (before(seq, tp->rcv_nxt))
2921 			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
2922 		else
2923 			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
2924 
2925 		tp->rx_opt.dsack = 1;
2926 		tp->duplicate_sack[0].start_seq = seq;
2927 		tp->duplicate_sack[0].end_seq = end_seq;
2928 		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
2929 	}
2930 }
2931 
2932 static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
2933 {
2934 	if (!tp->rx_opt.dsack)
2935 		tcp_dsack_set(tp, seq, end_seq);
2936 	else
2937 		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
2938 }
2939 
2940 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
2941 {
2942 	struct tcp_sock *tp = tcp_sk(sk);
2943 
2944 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
2945 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2946 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
2947 		tcp_enter_quickack_mode(sk);
2948 
2949 		if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
2950 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2951 
2952 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
2953 				end_seq = tp->rcv_nxt;
2954 			tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
2955 		}
2956 	}
2957 
2958 	tcp_send_ack(sk);
2959 }
2960 
2961 /* These routines update the SACK block as out-of-order packets arrive or
2962  * in-order packets close up the sequence space.
2963  */
2964 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
2965 {
2966 	int this_sack;
2967 	struct tcp_sack_block *sp = &tp->selective_acks[0];
2968 	struct tcp_sack_block *swalk = sp+1;
2969 
2970 	/* See if the recent change to the first SACK eats into
2971 	 * or hits the sequence space of other SACK blocks, if so coalesce.
2972 	 */
2973 	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
2974 		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
2975 			int i;
2976 
2977 			/* Zap SWALK, by moving every further SACK up by one slot.
2978 			 * Decrease num_sacks.
2979 			 */
2980 			tp->rx_opt.num_sacks--;
2981 			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
2982 			for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
2983 				sp[i] = sp[i+1];
2984 			continue;
2985 		}
2986 		this_sack++, swalk++;
2987 	}
2988 }
2989 
2990 static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
2991 {
2992 	__u32 tmp;
2993 
2994 	tmp = sack1->start_seq;
2995 	sack1->start_seq = sack2->start_seq;
2996 	sack2->start_seq = tmp;
2997 
2998 	tmp = sack1->end_seq;
2999 	sack1->end_seq = sack2->end_seq;
3000 	sack2->end_seq = tmp;
3001 }
3002 
3003 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
3004 {
3005 	struct tcp_sock *tp = tcp_sk(sk);
3006 	struct tcp_sack_block *sp = &tp->selective_acks[0];
3007 	int cur_sacks = tp->rx_opt.num_sacks;
3008 	int this_sack;
3009 
3010 	if (!cur_sacks)
3011 		goto new_sack;
3012 
3013 	for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
3014 		if (tcp_sack_extend(sp, seq, end_seq)) {
3015 			/* Rotate this_sack to the first one. */
3016 			for (; this_sack>0; this_sack--, sp--)
3017 				tcp_sack_swap(sp, sp-1);
3018 			if (cur_sacks > 1)
3019 				tcp_sack_maybe_coalesce(tp);
3020 			return;
3021 		}
3022 	}
3023 
3024 	/* Could not find an adjacent existing SACK, build a new one,
3025 	 * put it at the front, and shift everyone else down.  We
3026 	 * always know there is at least one SACK present already here.
3027 	 *
3028 	 * If the sack array is full, forget about the last one.
3029 	 */
3030 	if (this_sack >= 4) {
3031 		this_sack--;
3032 		tp->rx_opt.num_sacks--;
3033 		sp--;
3034 	}
3035 	for(; this_sack > 0; this_sack--, sp--)
3036 		*sp = *(sp-1);
3037 
3038 new_sack:
3039 	/* Build the new head SACK, and we're done. */
3040 	sp->start_seq = seq;
3041 	sp->end_seq = end_seq;
3042 	tp->rx_opt.num_sacks++;
3043 	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
3044 }
3045 
3046 /* RCV.NXT advances, some SACKs should be eaten. */
3047 
3048 static void tcp_sack_remove(struct tcp_sock *tp)
3049 {
3050 	struct tcp_sack_block *sp = &tp->selective_acks[0];
3051 	int num_sacks = tp->rx_opt.num_sacks;
3052 	int this_sack;
3053 
3054 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
3055 	if (skb_queue_empty(&tp->out_of_order_queue)) {
3056 		tp->rx_opt.num_sacks = 0;
3057 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
3058 		return;
3059 	}
3060 
3061 	for(this_sack = 0; this_sack < num_sacks; ) {
3062 		/* Check if the start of the sack is covered by RCV.NXT. */
3063 		if (!before(tp->rcv_nxt, sp->start_seq)) {
3064 			int i;
3065 
3066 			/* RCV.NXT must cover all the block! */
3067 			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
3068 
3069 			/* Zap this SACK, by moving forward any other SACKS. */
3070 			for (i=this_sack+1; i < num_sacks; i++)
3071 				tp->selective_acks[i-1] = tp->selective_acks[i];
3072 			num_sacks--;
3073 			continue;
3074 		}
3075 		this_sack++;
3076 		sp++;
3077 	}
3078 	if (num_sacks != tp->rx_opt.num_sacks) {
3079 		tp->rx_opt.num_sacks = num_sacks;
3080 		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
3081 	}
3082 }
3083 
3084 /* This one checks to see if we can put data from the
3085  * out_of_order queue into the receive_queue.
3086  */
3087 static void tcp_ofo_queue(struct sock *sk)
3088 {
3089 	struct tcp_sock *tp = tcp_sk(sk);
3090 	__u32 dsack_high = tp->rcv_nxt;
3091 	struct sk_buff *skb;
3092 
3093 	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
3094 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
3095 			break;
3096 
3097 		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
3098 			__u32 dsack = dsack_high;
3099 			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
3100 				dsack_high = TCP_SKB_CB(skb)->end_seq;
3101 			tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
3102 		}
3103 
3104 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3105 			SOCK_DEBUG(sk, "ofo packet was already received\n");
3106 			__skb_unlink(skb, &tp->out_of_order_queue);
3107 			__kfree_skb(skb);
3108 			continue;
3109 		}
3110 		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
3111 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
3112 			   TCP_SKB_CB(skb)->end_seq);
3113 
3114 		__skb_unlink(skb, &tp->out_of_order_queue);
3115 		__skb_queue_tail(&sk->sk_receive_queue, skb);
3116 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3117 		if(skb->h.th->fin)
3118 			tcp_fin(skb, sk, skb->h.th);
3119 	}
3120 }
3121 
3122 static int tcp_prune_queue(struct sock *sk);
3123 
3124 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
3125 {
3126 	struct tcphdr *th = skb->h.th;
3127 	struct tcp_sock *tp = tcp_sk(sk);
3128 	int eaten = -1;
3129 
3130 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
3131 		goto drop;
3132 
3133 	__skb_pull(skb, th->doff*4);
3134 
3135 	TCP_ECN_accept_cwr(tp, skb);
3136 
3137 	if (tp->rx_opt.dsack) {
3138 		tp->rx_opt.dsack = 0;
3139 		tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
3140 						    4 - tp->rx_opt.tstamp_ok);
3141 	}
3142 
3143 	/*  Queue data for delivery to the user.
3144 	 *  Packets in sequence go to the receive queue.
3145 	 *  Out of sequence packets to the out_of_order_queue.
3146 	 */
3147 	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
3148 		if (tcp_receive_window(tp) == 0)
3149 			goto out_of_window;
3150 
3151 		/* Ok. In sequence. In window. */
3152 		if (tp->ucopy.task == current &&
3153 		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
3154 		    sock_owned_by_user(sk) && !tp->urg_data) {
3155 			int chunk = min_t(unsigned int, skb->len,
3156 							tp->ucopy.len);
3157 
3158 			__set_current_state(TASK_RUNNING);
3159 
3160 			local_bh_enable();
3161 			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
3162 				tp->ucopy.len -= chunk;
3163 				tp->copied_seq += chunk;
3164 				eaten = (chunk == skb->len && !th->fin);
3165 				tcp_rcv_space_adjust(sk);
3166 			}
3167 			local_bh_disable();
3168 		}
3169 
3170 		if (eaten <= 0) {
3171 queue_and_out:
3172 			if (eaten < 0 &&
3173 			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
3174 			     !sk_stream_rmem_schedule(sk, skb))) {
3175 				if (tcp_prune_queue(sk) < 0 ||
3176 				    !sk_stream_rmem_schedule(sk, skb))
3177 					goto drop;
3178 			}
3179 			sk_stream_set_owner_r(skb, sk);
3180 			__skb_queue_tail(&sk->sk_receive_queue, skb);
3181 		}
3182 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3183 		if(skb->len)
3184 			tcp_event_data_recv(sk, tp, skb);
3185 		if(th->fin)
3186 			tcp_fin(skb, sk, th);
3187 
3188 		if (!skb_queue_empty(&tp->out_of_order_queue)) {
3189 			tcp_ofo_queue(sk);
3190 
3191 			/* RFC2581 4.2: SHOULD send an immediate ACK when
3192 			 * a gap in the queue is filled.
3193 			 */
3194 			if (skb_queue_empty(&tp->out_of_order_queue))
3195 				inet_csk(sk)->icsk_ack.pingpong = 0;
3196 		}
3197 
3198 		if (tp->rx_opt.num_sacks)
3199 			tcp_sack_remove(tp);
3200 
3201 		tcp_fast_path_check(sk, tp);
3202 
3203 		if (eaten > 0)
3204 			__kfree_skb(skb);
3205 		else if (!sock_flag(sk, SOCK_DEAD))
3206 			sk->sk_data_ready(sk, 0);
3207 		return;
3208 	}
3209 
3210 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3211 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
3212 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
3213 		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
3214 
3215 out_of_window:
3216 		tcp_enter_quickack_mode(sk);
3217 		inet_csk_schedule_ack(sk);
3218 drop:
3219 		__kfree_skb(skb);
3220 		return;
3221 	}
3222 
3223 	/* Out of window. E.g., a zero window probe. */
3224 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
3225 		goto out_of_window;
3226 
3227 	tcp_enter_quickack_mode(sk);
3228 
3229 	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3230 		/* Partial packet, seq < rcv_next < end_seq */
3231 		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
3232 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
3233 			   TCP_SKB_CB(skb)->end_seq);
3234 
3235 		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
3236 
3237 		/* If the window is closed, drop the tail of the packet. But only
3238 		 * after remembering the D-SACK for its head, done on the previous line.
3239 		 */
3240 		if (!tcp_receive_window(tp))
3241 			goto out_of_window;
3242 		goto queue_and_out;
3243 	}
3244 
3245 	TCP_ECN_check_ce(tp, skb);
3246 
3247 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
3248 	    !sk_stream_rmem_schedule(sk, skb)) {
3249 		if (tcp_prune_queue(sk) < 0 ||
3250 		    !sk_stream_rmem_schedule(sk, skb))
3251 			goto drop;
3252 	}
3253 
3254 	/* Disable header prediction. */
3255 	tp->pred_flags = 0;
3256 	inet_csk_schedule_ack(sk);
3257 
3258 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
3259 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
3260 
3261 	sk_stream_set_owner_r(skb, sk);
3262 
3263 	if (!skb_peek(&tp->out_of_order_queue)) {
3264 		/* Initial out of order segment, build 1 SACK. */
3265 		if (tp->rx_opt.sack_ok) {
3266 			tp->rx_opt.num_sacks = 1;
3267 			tp->rx_opt.dsack     = 0;
3268 			tp->rx_opt.eff_sacks = 1;
3269 			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
3270 			tp->selective_acks[0].end_seq =
3271 						TCP_SKB_CB(skb)->end_seq;
3272 		}
3273 		__skb_queue_head(&tp->out_of_order_queue,skb);
3274 	} else {
3275 		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
3276 		u32 seq = TCP_SKB_CB(skb)->seq;
3277 		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3278 
3279 		if (seq == TCP_SKB_CB(skb1)->end_seq) {
3280 			__skb_append(skb1, skb, &tp->out_of_order_queue);
3281 
3282 			if (!tp->rx_opt.num_sacks ||
3283 			    tp->selective_acks[0].end_seq != seq)
3284 				goto add_sack;
3285 
3286 			/* Common case: data arrives in order after the hole. */
3287 			tp->selective_acks[0].end_seq = end_seq;
3288 			return;
3289 		}
3290 
3291 		/* Find place to insert this segment. */
3292 		do {
3293 			if (!after(TCP_SKB_CB(skb1)->seq, seq))
3294 				break;
3295 		} while ((skb1 = skb1->prev) !=
3296 			 (struct sk_buff*)&tp->out_of_order_queue);
3297 
3298 		/* Does this skb overlap the previous one? */
3299 		if (skb1 != (struct sk_buff*)&tp->out_of_order_queue &&
3300 		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
3301 			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
3302 				/* All the bits are present. Drop. */
3303 				__kfree_skb(skb);
3304 				tcp_dsack_set(tp, seq, end_seq);
3305 				goto add_sack;
3306 			}
3307 			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
3308 				/* Partial overlap. */
3309 				tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
3310 			} else {
3311 				skb1 = skb1->prev;
3312 			}
3313 		}
3314 		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
3315 
3316 		/* And remove segments entirely covered by the new one. */
3317 		while ((skb1 = skb->next) !=
3318 		       (struct sk_buff*)&tp->out_of_order_queue &&
3319 		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
3320 		       if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
3321 			       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
3322 			       break;
3323 		       }
3324 		       __skb_unlink(skb1, &tp->out_of_order_queue);
3325 		       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
3326 		       __kfree_skb(skb1);
3327 		}
3328 
3329 add_sack:
3330 		if (tp->rx_opt.sack_ok)
3331 			tcp_sack_new_ofo_skb(sk, seq, end_seq);
3332 	}
3333 }
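
/* An illustrative walk-through of the out-of-order handling above (a sketch,
 * not part of the original code): suppose rcv_nxt == 1000 and segment
 * [1500,2000) arrives.  It is queued on the out_of_order_queue and, with
 * SACK enabled, produces the single block selective_acks[0] = {1500, 2000}.
 * If [2000,2500) arrives next, it hits the "data arrive in order after hole"
 * common case and merely extends selective_acks[0].end_seq to 2500.  A
 * retransmitted copy of [1500,2000) instead lands in the "All the bits are
 * present" branch: the skb is freed and a D-SACK for [1500,2000) is set.
 */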
3334 
3335 /* Collapse a contiguous sequence of skbs head..tail with
3336  * sequence numbers start..end.
3337  * Segments with FIN/SYN are not collapsed (only because this
3338  * simplifies the code).
3339  */
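/* An illustrative call (a sketch only, mirroring what tcp_prune_queue()
 * below does for the receive queue; it is not an additional call site):
 *
 *	tcp_collapse(sk, &sk->sk_receive_queue,
 *		     sk->sk_receive_queue.next,
 *		     (struct sk_buff *)&sk->sk_receive_queue,
 *		     tp->copied_seq, tp->rcv_nxt);
 *
 * head..tail delimit the list positions and start..end the sequence range
 * to be repacked into freshly allocated, tightly sized skbs.
 */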
3340 static void
3341 tcp_collapse(struct sock *sk, struct sk_buff_head *list,
3342 	     struct sk_buff *head, struct sk_buff *tail,
3343 	     u32 start, u32 end)
3344 {
3345 	struct sk_buff *skb;
3346 
3347 	/* First, check that queue is collapsible and find
3348 	 * the point where collapsing can be useful. */
3349 	for (skb = head; skb != tail; ) {
3350 		/* No new bits? This can happen on the ofo queue. */
3351 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3352 			struct sk_buff *next = skb->next;
3353 			__skb_unlink(skb, list);
3354 			__kfree_skb(skb);
3355 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3356 			skb = next;
3357 			continue;
3358 		}
3359 
3360 		/* The first skb to collapse is:
3361 		 * - not SYN/FIN and
3362 		 * - bloated or contains data before "start" or
3363 		 *   overlaps the next one.
3364 		 */
3365 		if (!skb->h.th->syn && !skb->h.th->fin &&
3366 		    (tcp_win_from_space(skb->truesize) > skb->len ||
3367 		     before(TCP_SKB_CB(skb)->seq, start) ||
3368 		     (skb->next != tail &&
3369 		      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
3370 			break;
3371 
3372 		/* Decided to skip this, advance start seq. */
3373 		start = TCP_SKB_CB(skb)->end_seq;
3374 		skb = skb->next;
3375 	}
3376 	if (skb == tail || skb->h.th->syn || skb->h.th->fin)
3377 		return;
3378 
3379 	while (before(start, end)) {
3380 		struct sk_buff *nskb;
3381 		int header = skb_headroom(skb);
3382 		int copy = SKB_MAX_ORDER(header, 0);
3383 
3384 		/* Header too big? This can happen with IPv6. */
3385 		if (copy < 0)
3386 			return;
3387 		if (end-start < copy)
3388 			copy = end-start;
3389 		nskb = alloc_skb(copy+header, GFP_ATOMIC);
3390 		if (!nskb)
3391 			return;
3392 		skb_reserve(nskb, header);
3393 		memcpy(nskb->head, skb->head, header);
3394 		nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
3395 		nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
3396 		nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
3397 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
3398 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
3399 		__skb_insert(nskb, skb->prev, skb, list);
3400 		sk_stream_set_owner_r(nskb, sk);
3401 
3402 		/* Copy data, releasing collapsed skbs. */
3403 		while (copy > 0) {
3404 			int offset = start - TCP_SKB_CB(skb)->seq;
3405 			int size = TCP_SKB_CB(skb)->end_seq - start;
3406 
3407 			BUG_ON(offset < 0);
3408 			if (size > 0) {
3409 				size = min(copy, size);
3410 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
3411 					BUG();
3412 				TCP_SKB_CB(nskb)->end_seq += size;
3413 				copy -= size;
3414 				start += size;
3415 			}
3416 			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3417 				struct sk_buff *next = skb->next;
3418 				__skb_unlink(skb, list);
3419 				__kfree_skb(skb);
3420 				NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3421 				skb = next;
3422 				if (skb == tail || skb->h.th->syn || skb->h.th->fin)
3423 					return;
3424 			}
3425 		}
3426 	}
3427 }
3428 
3429 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
3430  * and tcp_collapse() them until all the queue is collapsed.
3431  */
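/* An illustrative sketch (not from the original source): if the ofo queue
 * holds [100,200), [150,250) and [400,500), the first two overlap and form
 * one contiguous run, so tcp_collapse() is called on it with start = 100,
 * end = 250; the gap before 400 terminates that run, and [400,500) is then
 * collapsed on its own once the end of the queue is reached.
 */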
3432 static void tcp_collapse_ofo_queue(struct sock *sk)
3433 {
3434 	struct tcp_sock *tp = tcp_sk(sk);
3435 	struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
3436 	struct sk_buff *head;
3437 	u32 start, end;
3438 
3439 	if (skb == NULL)
3440 		return;
3441 
3442 	start = TCP_SKB_CB(skb)->seq;
3443 	end = TCP_SKB_CB(skb)->end_seq;
3444 	head = skb;
3445 
3446 	for (;;) {
3447 		skb = skb->next;
3448 
3449 		/* The segment is terminated when we see a gap or when
3450 		 * we reach the end of the queue. */
3451 		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
3452 		    after(TCP_SKB_CB(skb)->seq, end) ||
3453 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
3454 			tcp_collapse(sk, &tp->out_of_order_queue,
3455 				     head, skb, start, end);
3456 			head = skb;
3457 			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3458 				break;
3459 			/* Start new segment */
3460 			start = TCP_SKB_CB(skb)->seq;
3461 			end = TCP_SKB_CB(skb)->end_seq;
3462 		} else {
3463 			if (before(TCP_SKB_CB(skb)->seq, start))
3464 				start = TCP_SKB_CB(skb)->seq;
3465 			if (after(TCP_SKB_CB(skb)->end_seq, end))
3466 				end = TCP_SKB_CB(skb)->end_seq;
3467 		}
3468 	}
3469 }
3470 
3471 /* Reduce allocated memory if we can, trying to get
3472  * the socket within its memory limits again.
3473  *
3474  * Return less than zero if we should start dropping frames
3475  * until the socket-owning process reads some of the data
3476  * to stabilize the situation.
3477  */
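/* A caller sketch (illustrative only; it mirrors how tcp_data_queue() above
 * reacts when the receive buffer is over its limit):
 *
 *	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 *	    !sk_stream_rmem_schedule(sk, skb)) {
 *		if (tcp_prune_queue(sk) < 0 ||
 *		    !sk_stream_rmem_schedule(sk, skb))
 *			goto drop;
 *	}
 *
 * Internally the escalation order is: clamp the window, collapse the queues,
 * purge the out-of-order queue, and only then give up and return -1 so the
 * caller silently drops the segment (the peer will retransmit it).
 */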
3478 static int tcp_prune_queue(struct sock *sk)
3479 {
3480 	struct tcp_sock *tp = tcp_sk(sk);
3481 
3482 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
3483 
3484 	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
3485 
3486 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
3487 		tcp_clamp_window(sk, tp);
3488 	else if (tcp_memory_pressure)
3489 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3490 
3491 	tcp_collapse_ofo_queue(sk);
3492 	tcp_collapse(sk, &sk->sk_receive_queue,
3493 		     sk->sk_receive_queue.next,
3494 		     (struct sk_buff*)&sk->sk_receive_queue,
3495 		     tp->copied_seq, tp->rcv_nxt);
3496 	sk_stream_mem_reclaim(sk);
3497 
3498 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
3499 		return 0;
3500 
3501 	/* Collapsing did not help, destructive actions follow.
3502 	 * This must not ever occur. */
3503 
3504 	/* First, purge the out_of_order queue. */
3505 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
3506 		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
3507 		__skb_queue_purge(&tp->out_of_order_queue);
3508 
3509 		/* Reset SACK state.  A conforming SACK implementation will
3510 		 * do the same at a timeout based retransmit.  When a connection
3511 		 * is in a sad state like this, we care only about integrity
3512 		 * of the connection not performance.
3513 		 */
3514 		if (tp->rx_opt.sack_ok)
3515 			tcp_sack_reset(&tp->rx_opt);
3516 		sk_stream_mem_reclaim(sk);
3517 	}
3518 
3519 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
3520 		return 0;
3521 
3522 	/* If we are really being abused, tell the caller to silently
3523 	 * drop receive data on the floor.  It will get retransmitted
3524 	 * and hopefully then we'll have sufficient space.
3525 	 */
3526 	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
3527 
3528 	/* Massive buffer overcommit. */
3529 	tp->pred_flags = 0;
3530 	return -1;
3531 }
3532 
3533 
3534 /* RFC 2861, slow part. Adjust cwnd after it was not full during one RTO.
3535  * As additional protection, we do not touch cwnd during retransmission
3536  * phases, or if the application hit its sndbuf limit recently.
3537  */
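/* A worked example (illustrative numbers, not from the original source):
 * with snd_cwnd = 10 and snd_cwnd_used = 4, win_used = max(4, 2) = 4 is
 * below cwnd, so ssthresh is refreshed and cwnd shrinks towards what was
 * actually used: snd_cwnd = (10 + 4) >> 1 = 7.
 */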
3538 void tcp_cwnd_application_limited(struct sock *sk)
3539 {
3540 	struct tcp_sock *tp = tcp_sk(sk);
3541 
3542 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
3543 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
3544 		/* Limited by application or receiver window. */
3545 		u32 win_used = max(tp->snd_cwnd_used, 2U);
3546 		if (win_used < tp->snd_cwnd) {
3547 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
3548 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
3549 		}
3550 		tp->snd_cwnd_used = 0;
3551 	}
3552 	tp->snd_cwnd_stamp = tcp_time_stamp;
3553 }
3554 
3555 static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
3556 {
3557 	/* If the user specified a specific send buffer setting, do
3558 	 * not modify it.
3559 	 */
3560 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
3561 		return 0;
3562 
3563 	/* If we are under global TCP memory pressure, do not expand.  */
3564 	if (tcp_memory_pressure)
3565 		return 0;
3566 
3567 	/* If we are under soft global TCP memory pressure, do not expand.  */
3568 	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
3569 		return 0;
3570 
3571 	/* If we filled the congestion window, do not expand.  */
3572 	if (tp->packets_out >= tp->snd_cwnd)
3573 		return 0;
3574 
3575 	return 1;
3576 }
3577 
3578 /* When an incoming ACK allows us to free an skb from the write_queue,
3579  * we remember this event in the SOCK_QUEUE_SHRUNK flag and wake up the
3580  * socket on exit from the TCP input handler.
3581  *
3582  * PROBLEM: sndbuf expansion does not work well with largesend.
3583  */
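/* A worked example (illustrative numbers only): with mss_cache = 1460,
 * snd_cwnd = 20 and reordering = 3, sndmem is the memory for one maximal
 * skb (1460 + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff)) and demanded =
 * max(20, 3 + 1) = 20, so the sndbuf target becomes 2 * 20 such skbs,
 * capped at sysctl_tcp_wmem[2].
 */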
3584 static void tcp_new_space(struct sock *sk)
3585 {
3586 	struct tcp_sock *tp = tcp_sk(sk);
3587 
3588 	if (tcp_should_expand_sndbuf(sk, tp)) {
3589  		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
3590 			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
3591 		    demanded = max_t(unsigned int, tp->snd_cwnd,
3592 						   tp->reordering + 1);
3593 		sndmem *= 2*demanded;
3594 		if (sndmem > sk->sk_sndbuf)
3595 			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
3596 		tp->snd_cwnd_stamp = tcp_time_stamp;
3597 	}
3598 
3599 	sk->sk_write_space(sk);
3600 }
3601 
3602 static void tcp_check_space(struct sock *sk)
3603 {
3604 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
3605 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
3606 		if (sk->sk_socket &&
3607 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
3608 			tcp_new_space(sk);
3609 	}
3610 }
3611 
3612 static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
3613 {
3614 	tcp_push_pending_frames(sk, tp);
3615 	tcp_check_space(sk);
3616 }
3617 
3618 /*
3619  * Check if sending an ack is needed.
3620  */
3621 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3622 {
3623 	struct tcp_sock *tp = tcp_sk(sk);
3624 
3625 	    /* More than one full frame received... */
3626 	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
3627 	     /* ... and right edge of window advances far enough.
3628 	      * (tcp_recvmsg() will send ACK otherwise). Or...
3629 	      */
3630 	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
3631 	    /* We ACK each frame or... */
3632 	    tcp_in_quickack_mode(sk) ||
3633 	    /* We have out of order data. */
3634 	    (ofo_possible &&
3635 	     skb_peek(&tp->out_of_order_queue))) {
3636 		/* Then ack it now */
3637 		tcp_send_ack(sk);
3638 	} else {
3639 		/* Else, send delayed ack. */
3640 		tcp_send_delayed_ack(sk);
3641 	}
3642 }
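
/* An illustrative scenario (not part of the original code): with
 * icsk_ack.rcv_mss = 1460, two full segments received since the last ACK
 * give rcv_nxt - rcv_wup = 2920 > rcv_mss; if the advertised right edge can
 * also advance (__tcp_select_window(sk) >= rcv_wnd) the ACK goes out
 * immediately, giving the usual ACK-every-second-segment behaviour.
 * Otherwise, absent quickack mode and out-of-order data, the ACK is delayed.
 */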
3643 
3644 static inline void tcp_ack_snd_check(struct sock *sk)
3645 {
3646 	if (!inet_csk_ack_scheduled(sk)) {
3647 		/* We sent a data segment already. */
3648 		return;
3649 	}
3650 	__tcp_ack_snd_check(sk, 1);
3651 }
3652 
3653 /*
3654  *	This routine is only called when we have urgent data
3655  *	signaled. It's the 'slow' part of tcp_urg. It could be
3656  *	moved inline now as tcp_urg is only called from one
3657  *	place. We handle URGent data wrong; we have to, as
3658  *	BSD still doesn't use the correction from RFC 961.
3659  *	For 1003.1g we should support a new option TCP_STDURG to permit
3660  *	either form (or just set the sysctl tcp_stdurg).
3661  */
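/* A worked example (illustrative values): with sysctl_tcp_stdurg == 0 the
 * urgent pointer is interpreted BSD-style, pointing one past the urgent
 * byte, so for th->seq = 1000 and th->urg_ptr = 5 the code below computes
 * ptr = 5 - 1 + 1000 = 1004 as the sequence number of the urgent byte.
 * With tcp_stdurg set, the pointer is taken as-is and not decremented.
 */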
3662 
3663 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3664 {
3665 	struct tcp_sock *tp = tcp_sk(sk);
3666 	u32 ptr = ntohs(th->urg_ptr);
3667 
3668 	if (ptr && !sysctl_tcp_stdurg)
3669 		ptr--;
3670 	ptr += ntohl(th->seq);
3671 
3672 	/* Ignore urgent data that we've already seen and read. */
3673 	if (after(tp->copied_seq, ptr))
3674 		return;
3675 
3676 	/* Do not replay the urg ptr.
3677 	 *
3678 	 * NOTE: an interesting situation not covered by the specs.
3679 	 * A misbehaving sender may send an urg ptr pointing to a segment
3680 	 * which we already have in the ofo queue. We are not able to fetch
3681 	 * such data and will stay in TCP_URG_NOTYET until it is eaten
3682 	 * by recvmsg(). It seems we are not obliged to handle such wicked
3683 	 * situations, but it is worth thinking about the possibility of a
3684 	 * DoS using some hypothetical application-level deadlock.
3685 	 */
3686 	if (before(ptr, tp->rcv_nxt))
3687 		return;
3688 
3689 	/* Do we already have a newer (or duplicate) urgent pointer? */
3690 	if (tp->urg_data && !after(ptr, tp->urg_seq))
3691 		return;
3692 
3693 	/* Tell the world about our new urgent pointer. */
3694 	sk_send_sigurg(sk);
3695 
3696 	/* We may be adding urgent data when the last byte read was
3697 	 * urgent. To do this requires some care. We cannot just ignore
3698 	 * tp->copied_seq since we would read the last urgent byte again
3699 	 * as data, nor can we alter copied_seq until this data arrives
3700 	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
3701 	 *
3702 	 * NOTE. Double Dutch. Rendering into plain English: the author of the
3703 	 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
3704 	 * and expected both A and B to disappear from the stream. This is _wrong_.
3705 	 * Though this happens in BSD with high probability, it is only occasional.
3706 	 * Any application relying on this is buggy. Note also that the fix "works"
3707 	 * only in this artificial test. Insert some normal data between A and B
3708 	 * and we will diverge from BSD again. Verdict: it is better to remove it,
3709 	 * to trap buggy users.
3710 	 */
3711 	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
3712 	    !sock_flag(sk, SOCK_URGINLINE) &&
3713 	    tp->copied_seq != tp->rcv_nxt) {
3714 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
3715 		tp->copied_seq++;
3716 		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3717 			__skb_unlink(skb, &sk->sk_receive_queue);
3718 			__kfree_skb(skb);
3719 		}
3720 	}
3721 
3722 	tp->urg_data   = TCP_URG_NOTYET;
3723 	tp->urg_seq    = ptr;
3724 
3725 	/* Disable header prediction. */
3726 	tp->pred_flags = 0;
3727 }
3728 
3729 /* This is the 'fast' part of urgent handling. */
3730 static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
3731 {
3732 	struct tcp_sock *tp = tcp_sk(sk);
3733 
3734 	/* Check if we get a new urgent pointer - normally not. */
3735 	if (th->urg)
3736 		tcp_check_urg(sk,th);
3737 
3738 	/* Do we wait for any urgent data? - normally not... */
3739 	if (tp->urg_data == TCP_URG_NOTYET) {
3740 		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
3741 			  th->syn;
3742 
3743 		/* Is the urgent pointer pointing into this packet? */
3744 		if (ptr < skb->len) {
3745 			u8 tmp;
3746 			if (skb_copy_bits(skb, ptr, &tmp, 1))
3747 				BUG();
3748 			tp->urg_data = TCP_URG_VALID | tmp;
3749 			if (!sock_flag(sk, SOCK_DEAD))
3750 				sk->sk_data_ready(sk, 0);
3751 		}
3752 	}
3753 }
3754 
3755 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
3756 {
3757 	struct tcp_sock *tp = tcp_sk(sk);
3758 	int chunk = skb->len - hlen;
3759 	int err;
3760 
3761 	local_bh_enable();
3762 	if (skb->ip_summed==CHECKSUM_UNNECESSARY)
3763 		err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
3764 	else
3765 		err = skb_copy_and_csum_datagram_iovec(skb, hlen,
3766 						       tp->ucopy.iov);
3767 
3768 	if (!err) {
3769 		tp->ucopy.len -= chunk;
3770 		tp->copied_seq += chunk;
3771 		tcp_rcv_space_adjust(sk);
3772 	}
3773 
3774 	local_bh_disable();
3775 	return err;
3776 }
3777 
3778 static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
3779 {
3780 	int result;
3781 
3782 	if (sock_owned_by_user(sk)) {
3783 		local_bh_enable();
3784 		result = __tcp_checksum_complete(skb);
3785 		local_bh_disable();
3786 	} else {
3787 		result = __tcp_checksum_complete(skb);
3788 	}
3789 	return result;
3790 }
3791 
3792 static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
3793 {
3794 	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
3795 		__tcp_checksum_complete_user(sk, skb);
3796 }
3797 
3798 #ifdef CONFIG_NET_DMA
3799 static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
3800 {
3801 	struct tcp_sock *tp = tcp_sk(sk);
3802 	int chunk = skb->len - hlen;
3803 	int dma_cookie;
3804 	int copied_early = 0;
3805 
3806 	if (tp->ucopy.wakeup)
3807 		return 0;
3808 
3809 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
3810 		tp->ucopy.dma_chan = get_softnet_dma();
3811 
3812 	if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
3813 
3814 		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
3815 			skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
3816 
3817 		if (dma_cookie < 0)
3818 			goto out;
3819 
3820 		tp->ucopy.dma_cookie = dma_cookie;
3821 		copied_early = 1;
3822 
3823 		tp->ucopy.len -= chunk;
3824 		tp->copied_seq += chunk;
3825 		tcp_rcv_space_adjust(sk);
3826 
3827 		if ((tp->ucopy.len == 0) ||
3828 		    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
3829 		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
3830 			tp->ucopy.wakeup = 1;
3831 			sk->sk_data_ready(sk, 0);
3832 		}
3833 	} else if (chunk > 0) {
3834 		tp->ucopy.wakeup = 1;
3835 		sk->sk_data_ready(sk, 0);
3836 	}
3837 out:
3838 	return copied_early;
3839 }
3840 #endif /* CONFIG_NET_DMA */
3841 
3842 /*
3843  *	TCP receive function for the ESTABLISHED state.
3844  *
3845  *	It is split into a fast path and a slow path. The fast path is
3846  * 	disabled when:
3847  *	- A zero window was announced from us - zero window probing
3848  *        is only handled properly in the slow path.
3849  *	- Out of order segments arrived.
3850  *	- Urgent data is expected.
3851  *	- There is no buffer space left
3852  *	- Unexpected TCP flags/window values/header lengths are received
3853  *	  (detected by checking the TCP header against pred_flags)
3854  *	- Data is sent in both directions. Fast path only supports pure senders
3855  *	  or pure receivers (this means either the sequence number or the ack
3856  *	  value must stay constant)
3857  *	- Unexpected TCP option.
3858  *
3859  *	When these conditions are not satisfied it drops into a standard
3860  *	receive procedure patterned after RFC793 to handle all cases.
3861  *	The first three cases are guaranteed by proper pred_flags setting,
3862  *	the rest is checked inline. Fast processing is turned on in
3863  *	tcp_data_queue when everything is OK.
3864  */
3865 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3866 			struct tcphdr *th, unsigned len)
3867 {
3868 	struct tcp_sock *tp = tcp_sk(sk);
3869 
3870 	/*
3871 	 *	Header prediction.
3872 	 *	The code loosely follows the one in the famous
3873 	 *	"30 instruction TCP receive" Van Jacobson mail.
3874 	 *
3875 	 *	Van's trick is to deposit buffers into socket queue
3876 	 *	on a device interrupt, to call tcp_recv function
3877 	 *	on the receive process context and checksum and copy
3878 	 *	the buffer to user space. smart...
3879 	 *
3880 	 *	Our current scheme is not silly either but we take the
3881 	 *	extra cost of the net_bh soft interrupt processing...
3882 	 *	We do checksum and copy also but from device to kernel.
3883 	 */
3884 
3885 	tp->rx_opt.saw_tstamp = 0;
3886 
3887 	/*	pred_flags is 0xS?10 << 16 + snd_wnd
3888 	 *	if header_prediction is to be made
3889 	 *	'S' will always be tp->tcp_header_len >> 2
3890 	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
3891 	 *	turn it off (when there are holes in the receive
3892 	 *	space, for instance).
3893 	 *	PSH flag is ignored.
3894 	 */
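	/* A worked example (illustrative, derived from the description above):
	 * with timestamps enabled, tcp_header_len = 20 + 12 = 32, so S = 8 and
	 * pred_flags = 0x8010 << 16 + snd_wnd, i.e. doff == 8, only the ACK
	 * bit set, and the advertised window unchanged; any deviation in the
	 * incoming header drops the segment into the slow path.
	 */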
3895 
3896 	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
3897 		TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
3898 		int tcp_header_len = tp->tcp_header_len;
3899 
3900 		/* Timestamp header prediction: tcp_header_len
3901 		 * is automatically equal to th->doff*4 due to pred_flags
3902 		 * match.
3903 		 */
3904 
3905 		/* Check timestamp */
3906 		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
3907 			__u32 *ptr = (__u32 *)(th + 1);
3908 
3909 			/* No? Slow path! */
3910 			if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3911 					  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
3912 				goto slow_path;
3913 
3914 			tp->rx_opt.saw_tstamp = 1;
3915 			++ptr;
3916 			tp->rx_opt.rcv_tsval = ntohl(*ptr);
3917 			++ptr;
3918 			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3919 
3920 			/* If PAWS failed, check it more carefully in slow path */
3921 			if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
3922 				goto slow_path;
3923 
3924 			/* DO NOT update ts_recent here: if the checksum fails
3925 			 * and the timestamp was the corrupted part, it would result
3926 			 * in a hung connection since we would drop all
3927 			 * future packets due to the PAWS test.
3928 			 */
3929 		}
3930 
3931 		if (len <= tcp_header_len) {
3932 			/* Bulk data transfer: sender */
3933 			if (len == tcp_header_len) {
3934 				/* Predicted packet is in window by definition.
3935 				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3936 				 * Hence, check seq<=rcv_wup reduces to:
3937 				 */
3938 				if (tcp_header_len ==
3939 				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
3940 				    tp->rcv_nxt == tp->rcv_wup)
3941 					tcp_store_ts_recent(tp);
3942 
3943 				/* We know that such packets are checksummed
3944 				 * on entry.
3945 				 */
3946 				tcp_ack(sk, skb, 0);
3947 				__kfree_skb(skb);
3948 				tcp_data_snd_check(sk, tp);
3949 				return 0;
3950 			} else { /* Header too small */
3951 				TCP_INC_STATS_BH(TCP_MIB_INERRS);
3952 				goto discard;
3953 			}
3954 		} else {
3955 			int eaten = 0;
3956 			int copied_early = 0;
3957 
3958 			if (tp->copied_seq == tp->rcv_nxt &&
3959 			    len - tcp_header_len <= tp->ucopy.len) {
3960 #ifdef CONFIG_NET_DMA
3961 				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
3962 					copied_early = 1;
3963 					eaten = 1;
3964 				}
3965 #endif
3966 				if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
3967 					__set_current_state(TASK_RUNNING);
3968 
3969 					if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
3970 						eaten = 1;
3971 				}
3972 				if (eaten) {
3973 					/* Predicted packet is in window by definition.
3974 					 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3975 					 * Hence, check seq<=rcv_wup reduces to:
3976 					 */
3977 					if (tcp_header_len ==
3978 					    (sizeof(struct tcphdr) +
3979 					     TCPOLEN_TSTAMP_ALIGNED) &&
3980 					    tp->rcv_nxt == tp->rcv_wup)
3981 						tcp_store_ts_recent(tp);
3982 
3983 					tcp_rcv_rtt_measure_ts(sk, skb);
3984 
3985 					__skb_pull(skb, tcp_header_len);
3986 					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3987 					NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
3988 				}
3989 				if (copied_early)
3990 					tcp_cleanup_rbuf(sk, skb->len);
3991 			}
3992 			if (!eaten) {
3993 				if (tcp_checksum_complete_user(sk, skb))
3994 					goto csum_error;
3995 
3996 				/* Predicted packet is in window by definition.
3997 				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3998 				 * Hence, check seq<=rcv_wup reduces to:
3999 				 */
4000 				if (tcp_header_len ==
4001 				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
4002 				    tp->rcv_nxt == tp->rcv_wup)
4003 					tcp_store_ts_recent(tp);
4004 
4005 				tcp_rcv_rtt_measure_ts(sk, skb);
4006 
4007 				if ((int)skb->truesize > sk->sk_forward_alloc)
4008 					goto step5;
4009 
4010 				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
4011 
4012 				/* Bulk data transfer: receiver */
4013 				__skb_pull(skb,tcp_header_len);
4014 				__skb_queue_tail(&sk->sk_receive_queue, skb);
4015 				sk_stream_set_owner_r(skb, sk);
4016 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4017 			}
4018 
4019 			tcp_event_data_recv(sk, tp, skb);
4020 
4021 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
4022 				/* Well, only one small jumplet in fast path... */
4023 				tcp_ack(sk, skb, FLAG_DATA);
4024 				tcp_data_snd_check(sk, tp);
4025 				if (!inet_csk_ack_scheduled(sk))
4026 					goto no_ack;
4027 			}
4028 
4029 			__tcp_ack_snd_check(sk, 0);
4030 no_ack:
4031 #ifdef CONFIG_NET_DMA
4032 			if (copied_early)
4033 				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
4034 			else
4035 #endif
4036 			if (eaten)
4037 				__kfree_skb(skb);
4038 			else
4039 				sk->sk_data_ready(sk, 0);
4040 			return 0;
4041 		}
4042 	}
4043 
4044 slow_path:
4045 	if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb))
4046 		goto csum_error;
4047 
4048 	/*
4049 	 * RFC1323: H1. Apply PAWS check first.
4050 	 */
4051 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4052 	    tcp_paws_discard(sk, skb)) {
4053 		if (!th->rst) {
4054 			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
4055 			tcp_send_dupack(sk, skb);
4056 			goto discard;
4057 		}
4058 		/* Resets are accepted even if PAWS failed.
4059 
4060 		   ts_recent update must be made after we are sure
4061 		   that the packet is in window.
4062 		 */
4063 	}
4064 
4065 	/*
4066 	 *	Standard slow path.
4067 	 */
4068 
4069 	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
4070 		/* RFC793, page 37: "In all states except SYN-SENT, all reset
4071 		 * (RST) segments are validated by checking their SEQ-fields."
4072 		 * And page 69: "If an incoming segment is not acceptable,
4073 		 * an acknowledgment should be sent in reply (unless the RST bit
4074 		 * is set, if so drop the segment and return)".
4075 		 */
4076 		if (!th->rst)
4077 			tcp_send_dupack(sk, skb);
4078 		goto discard;
4079 	}
4080 
4081 	if(th->rst) {
4082 		tcp_reset(sk);
4083 		goto discard;
4084 	}
4085 
4086 	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4087 
4088 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4089 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
4090 		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
4091 		tcp_reset(sk);
4092 		return 1;
4093 	}
4094 
4095 step5:
4096 	if(th->ack)
4097 		tcp_ack(sk, skb, FLAG_SLOWPATH);
4098 
4099 	tcp_rcv_rtt_measure_ts(sk, skb);
4100 
4101 	/* Process urgent data. */
4102 	tcp_urg(sk, skb, th);
4103 
4104 	/* step 7: process the segment text */
4105 	tcp_data_queue(sk, skb);
4106 
4107 	tcp_data_snd_check(sk, tp);
4108 	tcp_ack_snd_check(sk);
4109 	return 0;
4110 
4111 csum_error:
4112 	TCP_INC_STATS_BH(TCP_MIB_INERRS);
4113 
4114 discard:
4115 	__kfree_skb(skb);
4116 	return 0;
4117 }
4118 
4119 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
4120 					 struct tcphdr *th, unsigned len)
4121 {
4122 	struct tcp_sock *tp = tcp_sk(sk);
4123 	struct inet_connection_sock *icsk = inet_csk(sk);
4124 	int saved_clamp = tp->rx_opt.mss_clamp;
4125 
4126 	tcp_parse_options(skb, &tp->rx_opt, 0);
4127 
4128 	if (th->ack) {
4129 		/* rfc793:
4130 		 * "If the state is SYN-SENT then
4131 		 *    first check the ACK bit
4132 		 *      If the ACK bit is set
4133 		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
4134 		 *        a reset (unless the RST bit is set, if so drop
4135 		 *        the segment and return)"
4136 		 *
4137 		 *  We do not send data with SYN, so that RFC-correct
4138 		 *  test reduces to:
4139 		 */
4140 		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
4141 			goto reset_and_undo;
4142 
4143 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4144 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
4145 			     tcp_time_stamp)) {
4146 			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
4147 			goto reset_and_undo;
4148 		}
4149 
4150 		/* Now ACK is acceptable.
4151 		 *
4152 		 * "If the RST bit is set
4153 		 *    If the ACK was acceptable then signal the user "error:
4154 		 *    connection reset", drop the segment, enter CLOSED state,
4155 		 *    delete TCB, and return."
4156 		 */
4157 
4158 		if (th->rst) {
4159 			tcp_reset(sk);
4160 			goto discard;
4161 		}
4162 
4163 		/* rfc793:
4164 		 *   "fifth, if neither of the SYN or RST bits is set then
4165 		 *    drop the segment and return."
4166 		 *
4167 		 *    See note below!
4168 		 *                                        --ANK(990513)
4169 		 */
4170 		if (!th->syn)
4171 			goto discard_and_undo;
4172 
4173 		/* rfc793:
4174 		 *   "If the SYN bit is on ...
4175 		 *    are acceptable then ...
4176 		 *    (our SYN has been ACKed), change the connection
4177 		 *    state to ESTABLISHED..."
4178 		 */
4179 
4180 		TCP_ECN_rcv_synack(tp, th);
4181 		if (tp->ecn_flags&TCP_ECN_OK)
4182 			sock_set_flag(sk, SOCK_NO_LARGESEND);
4183 
4184 		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
4185 		tcp_ack(sk, skb, FLAG_SLOWPATH);
4186 
4187 		/* Ok.. it's good. Set up sequence numbers and
4188 		 * move to established.
4189 		 */
4190 		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
4191 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
4192 
4193 		/* RFC1323: The window in SYN & SYN/ACK segments is
4194 		 * never scaled.
4195 		 */
4196 		tp->snd_wnd = ntohs(th->window);
4197 		tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
4198 
4199 		if (!tp->rx_opt.wscale_ok) {
4200 			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
4201 			tp->window_clamp = min(tp->window_clamp, 65535U);
4202 		}
4203 
4204 		if (tp->rx_opt.saw_tstamp) {
4205 			tp->rx_opt.tstamp_ok	   = 1;
4206 			tp->tcp_header_len =
4207 				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
4208 			tp->advmss	    -= TCPOLEN_TSTAMP_ALIGNED;
4209 			tcp_store_ts_recent(tp);
4210 		} else {
4211 			tp->tcp_header_len = sizeof(struct tcphdr);
4212 		}
4213 
4214 		if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
4215 			tp->rx_opt.sack_ok |= 2;
4216 
4217 		tcp_mtup_init(sk);
4218 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
4219 		tcp_initialize_rcv_mss(sk);
4220 
4221 		/* Remember, tcp_poll() does not lock socket!
4222 		 * Change state from SYN-SENT only after copied_seq
4223 		 * is initialized. */
4224 		tp->copied_seq = tp->rcv_nxt;
4225 		mb();
4226 		tcp_set_state(sk, TCP_ESTABLISHED);
4227 
4228 		/* Make sure socket is routed, for correct metrics.  */
4229 		icsk->icsk_af_ops->rebuild_header(sk);
4230 
4231 		tcp_init_metrics(sk);
4232 
4233 		tcp_init_congestion_control(sk);
4234 
4235 		/* Prevent spurious tcp_cwnd_restart() on first data
4236 		 * packet.
4237 		 */
4238 		tp->lsndtime = tcp_time_stamp;
4239 
4240 		tcp_init_buffer_space(sk);
4241 
4242 		if (sock_flag(sk, SOCK_KEEPOPEN))
4243 			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
4244 
4245 		if (!tp->rx_opt.snd_wscale)
4246 			__tcp_fast_path_on(tp, tp->snd_wnd);
4247 		else
4248 			tp->pred_flags = 0;
4249 
4250 		if (!sock_flag(sk, SOCK_DEAD)) {
4251 			sk->sk_state_change(sk);
4252 			sk_wake_async(sk, 0, POLL_OUT);
4253 		}
4254 
4255 		if (sk->sk_write_pending ||
4256 		    icsk->icsk_accept_queue.rskq_defer_accept ||
4257 		    icsk->icsk_ack.pingpong) {
4258 			/* Save one ACK. Data will be ready after
4259 			 * several ticks, if write_pending is set.
4260 			 *
4261 			 * It may be deleted, but with this feature tcpdumps
4262 			 * look so _wonderfully_ clever, that I was not able
4263 			 * to stand against the temptation 8)     --ANK
4264 			 */
4265 			inet_csk_schedule_ack(sk);
4266 			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
4267 			icsk->icsk_ack.ato	 = TCP_ATO_MIN;
4268 			tcp_incr_quickack(sk);
4269 			tcp_enter_quickack_mode(sk);
4270 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
4271 						  TCP_DELACK_MAX, TCP_RTO_MAX);
4272 
4273 discard:
4274 			__kfree_skb(skb);
4275 			return 0;
4276 		} else {
4277 			tcp_send_ack(sk);
4278 		}
4279 		return -1;
4280 	}
4281 
4282 	/* No ACK in the segment */
4283 
4284 	if (th->rst) {
4285 		/* rfc793:
4286 		 * "If the RST bit is set
4287 		 *
4288 		 *      Otherwise (no ACK) drop the segment and return."
4289 		 */
4290 
4291 		goto discard_and_undo;
4292 	}
4293 
4294 	/* PAWS check. */
4295 	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
4296 		goto discard_and_undo;
4297 
4298 	if (th->syn) {
4299 		/* We see a SYN without an ACK. It is an attempt at
4300 		 * simultaneous connect with crossed SYNs.
4301 		 * In particular, it can be a connect to self.
4302 		 */
4303 		tcp_set_state(sk, TCP_SYN_RECV);
4304 
4305 		if (tp->rx_opt.saw_tstamp) {
4306 			tp->rx_opt.tstamp_ok = 1;
4307 			tcp_store_ts_recent(tp);
4308 			tp->tcp_header_len =
4309 				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
4310 		} else {
4311 			tp->tcp_header_len = sizeof(struct tcphdr);
4312 		}
4313 
4314 		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
4315 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
4316 
4317 		/* RFC1323: The window in SYN & SYN/ACK segments is
4318 		 * never scaled.
4319 		 */
4320 		tp->snd_wnd    = ntohs(th->window);
4321 		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
4322 		tp->max_window = tp->snd_wnd;
4323 
4324 		TCP_ECN_rcv_syn(tp, th);
4325 		if (tp->ecn_flags&TCP_ECN_OK)
4326 			sock_set_flag(sk, SOCK_NO_LARGESEND);
4327 
4328 		tcp_mtup_init(sk);
4329 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
4330 		tcp_initialize_rcv_mss(sk);
4331 
4332 
4333 		tcp_send_synack(sk);
4334 #if 0
4335 		/* Note, we could accept data and URG from this segment.
4336 		 * There is nothing to prevent this.
4337 		 *
4338 		 * However, if we ignore data in ACK-less segments sometimes,
4339 		 * we have no reason to accept it at other times.
4340 		 * Also, it seems the code doing this in step6 of tcp_rcv_state_process
4341 		 * is not flawless. So, discard the packet for sanity.
4342 		 * Uncomment this return to process the data.
4343 		 */
4344 		return -1;
4345 #else
4346 		goto discard;
4347 #endif
4348 	}
4349 	/* "fifth, if neither of the SYN or RST bits is set then
4350 	 * drop the segment and return."
4351 	 */
4352 
4353 discard_and_undo:
4354 	tcp_clear_options(&tp->rx_opt);
4355 	tp->rx_opt.mss_clamp = saved_clamp;
4356 	goto discard;
4357 
4358 reset_and_undo:
4359 	tcp_clear_options(&tp->rx_opt);
4360 	tp->rx_opt.mss_clamp = saved_clamp;
4361 	return 1;
4362 }
4363 
4364 
4365 /*
4366  *	This function implements the receiving procedure of RFC 793 for
4367  *	all states except ESTABLISHED and TIME_WAIT.
4368  *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
4369  *	address independent.
4370  */
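/* An illustrative flow (a sketch summarizing the code below): a LISTEN
 * socket receiving a bare SYN is handed to icsk_af_ops->conn_request(); a
 * SYN_SENT socket goes through tcp_rcv_synsent_state_process(); all other
 * states run the RFC 793 step sequence: PAWS, sequence check, RST, in-window
 * SYN, ACK processing, URG, and finally the segment text via
 * tcp_data_queue().
 */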
4371 
4372 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4373 			  struct tcphdr *th, unsigned len)
4374 {
4375 	struct tcp_sock *tp = tcp_sk(sk);
4376 	struct inet_connection_sock *icsk = inet_csk(sk);
4377 	int queued = 0;
4378 
4379 	tp->rx_opt.saw_tstamp = 0;
4380 
4381 	switch (sk->sk_state) {
4382 	case TCP_CLOSE:
4383 		goto discard;
4384 
4385 	case TCP_LISTEN:
4386 		if(th->ack)
4387 			return 1;
4388 
4389 		if(th->rst)
4390 			goto discard;
4391 
4392 		if(th->syn) {
4393 			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
4394 				return 1;
4395 
4396 			/* Now we have several options: In theory there is
4397 			 * nothing else in the frame. KA9Q has an option to
4398 			 * send data with the syn, BSD accepts data with the
4399 			 * syn up to the [to be] advertised window and
4400 			 * Solaris 2.1 gives you a protocol error. For now
4401 			 * we just ignore it; that fits the spec precisely
4402 			 * and avoids incompatibilities. It would be nice in
4403 			 * future to drop through and process the data.
4404 			 *
4405 			 * Now that TTCP is starting to be used we ought to
4406 			 * queue this data.
4407 			 * But, this leaves one open to an easy denial of
4408 		 	 * service attack, and SYN cookies can't defend
4409 			 * against this problem. So, we drop the data
4410 			 * in the interest of security over speed.
4411 			 */
4412 			goto discard;
4413 		}
4414 		goto discard;
4415 
4416 	case TCP_SYN_SENT:
4417 		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
4418 		if (queued >= 0)
4419 			return queued;
4420 
4421 		/* Do step6 onward by hand. */
4422 		tcp_urg(sk, skb, th);
4423 		__kfree_skb(skb);
4424 		tcp_data_snd_check(sk, tp);
4425 		return 0;
4426 	}
4427 
4428 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4429 	    tcp_paws_discard(sk, skb)) {
4430 		if (!th->rst) {
4431 			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
4432 			tcp_send_dupack(sk, skb);
4433 			goto discard;
4434 		}
4435 		/* Reset is accepted even if it did not pass PAWS. */
4436 	}
4437 
4438 	/* step 1: check sequence number */
4439 	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
4440 		if (!th->rst)
4441 			tcp_send_dupack(sk, skb);
4442 		goto discard;
4443 	}
4444 
4445 	/* step 2: check RST bit */
4446 	if(th->rst) {
4447 		tcp_reset(sk);
4448 		goto discard;
4449 	}
4450 
4451 	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4452 
4453 	/* step 3: check security and precedence [ignored] */
4454 
4455 	/*	step 4:
4456 	 *
4457 	 *	Check for a SYN in window.
4458 	 */
4459 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4460 		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
4461 		tcp_reset(sk);
4462 		return 1;
4463 	}
4464 
4465 	/* step 5: check the ACK field */
4466 	if (th->ack) {
4467 		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
4468 
4469 		switch(sk->sk_state) {
4470 		case TCP_SYN_RECV:
4471 			if (acceptable) {
4472 				tp->copied_seq = tp->rcv_nxt;
4473 				mb();
4474 				tcp_set_state(sk, TCP_ESTABLISHED);
4475 				sk->sk_state_change(sk);
4476 
4477 				/* Note that this wakeup is only for the marginal
4478 				 * crossed-SYN case. Passively opened sockets
4479 				 * are not woken up, because sk->sk_sleep ==
4480 				 * NULL and sk->sk_socket == NULL.
4481 				 */
4482 				if (sk->sk_socket) {
4483 					sk_wake_async(sk,0,POLL_OUT);
4484 				}
4485 
4486 				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
4487 				tp->snd_wnd = ntohs(th->window) <<
4488 					      tp->rx_opt.snd_wscale;
4489 				tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
4490 					    TCP_SKB_CB(skb)->seq);
4491 
4492 				/* tcp_ack considers this ACK a duplicate
4493 				 * and does not calculate the rtt.
4494 				 * Fix it at least with timestamps.
4495 				 */
4496 				if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4497 				    !tp->srtt)
4498 					tcp_ack_saw_tstamp(sk, 0);
4499 
4500 				if (tp->rx_opt.tstamp_ok)
4501 					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
4502 
4503 				/* Make sure socket is routed, for
4504 				 * correct metrics.
4505 				 */
4506 				icsk->icsk_af_ops->rebuild_header(sk);
4507 
4508 				tcp_init_metrics(sk);
4509 
4510 				tcp_init_congestion_control(sk);
4511 
4512 				/* Prevent spurious tcp_cwnd_restart() on
4513 				 * first data packet.
4514 				 */
4515 				tp->lsndtime = tcp_time_stamp;
4516 
4517 				tcp_mtup_init(sk);
4518 				tcp_initialize_rcv_mss(sk);
4519 				tcp_init_buffer_space(sk);
4520 				tcp_fast_path_on(tp);
4521 			} else {
4522 				return 1;
4523 			}
4524 			break;
4525 
4526 		case TCP_FIN_WAIT1:
4527 			if (tp->snd_una == tp->write_seq) {
4528 				tcp_set_state(sk, TCP_FIN_WAIT2);
4529 				sk->sk_shutdown |= SEND_SHUTDOWN;
4530 				dst_confirm(sk->sk_dst_cache);
4531 
4532 				if (!sock_flag(sk, SOCK_DEAD))
4533 					/* Wake up lingering close() */
4534 					sk->sk_state_change(sk);
4535 				else {
4536 					int tmo;
4537 
4538 					if (tp->linger2 < 0 ||
4539 					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4540 					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
4541 						tcp_done(sk);
4542 						NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
4543 						return 1;
4544 					}
4545 
4546 					tmo = tcp_fin_time(sk);
4547 					if (tmo > TCP_TIMEWAIT_LEN) {
4548 						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
4549 					} else if (th->fin || sock_owned_by_user(sk)) {
4550 						/* Bad case. We could lose such a FIN otherwise.
4551 						 * It is not a big problem, but it looks confusing
4552 						 * and is not so rare an event. We can still lose it now,
4553 						 * if it spins in bh_lock_sock(), but that is a really
4554 						 * marginal case.
4555 						 */
4556 						inet_csk_reset_keepalive_timer(sk, tmo);
4557 					} else {
4558 						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
4559 						goto discard;
4560 					}
4561 				}
4562 			}
4563 			break;
4564 
4565 		case TCP_CLOSING:
4566 			if (tp->snd_una == tp->write_seq) {
4567 				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4568 				goto discard;
4569 			}
4570 			break;
4571 
4572 		case TCP_LAST_ACK:
4573 			if (tp->snd_una == tp->write_seq) {
4574 				tcp_update_metrics(sk);
4575 				tcp_done(sk);
4576 				goto discard;
4577 			}
4578 			break;
4579 		}
4580 	} else
4581 		goto discard;
4582 
4583 	/* step 6: check the URG bit */
4584 	tcp_urg(sk, skb, th);
4585 
4586 	/* step 7: process the segment text */
4587 	switch (sk->sk_state) {
4588 	case TCP_CLOSE_WAIT:
4589 	case TCP_CLOSING:
4590 	case TCP_LAST_ACK:
4591 		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
4592 			break;
4593 	case TCP_FIN_WAIT1:
4594 	case TCP_FIN_WAIT2:
4595 		/* RFC 793 says to queue data in these states,
4596 		 * RFC 1122 says we MUST send a reset.
4597 		 * BSD 4.4 also does reset.
4598 		 */
4599 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
4600 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4601 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
4602 				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
4603 				tcp_reset(sk);
4604 				return 1;
4605 			}
4606 		}
4607 		/* Fall through */
4608 	case TCP_ESTABLISHED:
4609 		tcp_data_queue(sk, skb);
4610 		queued = 1;
4611 		break;
4612 	}
4613 
4614 	/* tcp_data could move socket to TIME-WAIT */
4615 	if (sk->sk_state != TCP_CLOSE) {
4616 		tcp_data_snd_check(sk, tp);
4617 		tcp_ack_snd_check(sk);
4618 	}
4619 
4620 	if (!queued) {
4621 discard:
4622 		__kfree_skb(skb);
4623 	}
4624 	return 0;
4625 }
4626 
4627 EXPORT_SYMBOL(sysctl_tcp_ecn);
4628 EXPORT_SYMBOL(sysctl_tcp_reordering);
4629 EXPORT_SYMBOL(tcp_parse_options);
4630 EXPORT_SYMBOL(tcp_rcv_established);
4631 EXPORT_SYMBOL(tcp_rcv_state_process);
4632 EXPORT_SYMBOL(tcp_initialize_rcv_mss);
4633