xref: /linux/net/ipv4/tcp_output.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Florian La Roche, <flla@stud.uni-sb.de>
15  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
17  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
18  *		Matthew Dillon, <dillon@apollo.west.oic.com>
19  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20  *		Jorge Cwik, <jorge@laser.satlink.net>
21  */
22 
23 /*
24  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
25  *				:	Fragmentation on mtu decrease
26  *				:	Segment collapse on retransmit
27  *				:	AF independence
28  *
29  *		Linus Torvalds	:	send_delayed_ack
30  *		David S. Miller	:	Charge memory using the right skb
31  *					during syn/ack processing.
32  *		David S. Miller :	Output engine completely rewritten.
33  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
34  *		Cacophonix Gaul :	draft-minshall-nagle-01
35  *		J Hadi Salim	:	ECN support
36  *
37  */
38 
39 #include <net/tcp.h>
40 
41 #include <linux/compiler.h>
42 #include <linux/module.h>
43 #include <linux/smp_lock.h>
44 
45 /* People can turn this off for buggy TCPs found in printers etc. */
46 int sysctl_tcp_retrans_collapse __read_mostly = 1;
47 
48 /* People can turn this on to work with those rare, broken TCPs that
49  * interpret the window field as a signed quantity.
50  */
51 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
52 
53 /* This limits the percentage of the congestion window which we
54  * will allow a single TSO frame to consume.  Building TSO frames
55  * which are too large can cause TCP streams to be bursty.
56  */
57 int sysctl_tcp_tso_win_divisor __read_mostly = 3;
58 
59 int sysctl_tcp_mtu_probing __read_mostly = 0;
60 int sysctl_tcp_base_mss __read_mostly = 512;
61 
62 /* By default, RFC2861 behavior.  */
63 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
64 
65 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
66 			     struct sk_buff *skb)
67 {
68 	sk->sk_send_head = skb->next;
69 	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
70 		sk->sk_send_head = NULL;
71 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
72 	tcp_packets_out_inc(sk, tp, skb);
73 }
74 
75 /* Send SND.NXT, if the window was not shrunk.
76  * If the window has been shrunk, what should we send? It is not clear at all.
77  * Using SND.UNA we will fail to open the window; SND.NXT is out of window. :-(
78  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
79  * invalid. OK, let's do this for now:
80  */
81 static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
82 {
83 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
84 		return tp->snd_nxt;
85 	else
86 		return tp->snd_una+tp->snd_wnd;
87 }
88 
89 /* Calculate mss to advertise in SYN segment.
90  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
91  *
92  * 1. It is independent of path mtu.
93  * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
94  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
95  *    attached devices, because some buggy hosts are confused by
96  *    large MSS.
97  * 4. We do not do 3; we advertise an MSS calculated from the first
98  *    hop device MTU, but allow it to be raised to ip_rt_min_advmss.
99  *    This may be overridden via information stored in the routing table.
100  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
101  *    probably even Jumbo".
102  */
103 static __u16 tcp_advertise_mss(struct sock *sk)
104 {
105 	struct tcp_sock *tp = tcp_sk(sk);
106 	struct dst_entry *dst = __sk_dst_get(sk);
107 	int mss = tp->advmss;
108 
109 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
110 		mss = dst_metric(dst, RTAX_ADVMSS);
111 		tp->advmss = mss;
112 	}
113 
114 	return (__u16)mss;
115 }
116 
117 /* RFC2861. Reset CWND after an idle period longer than RTO to the
118  * "restart window".  This is the first part of the cwnd validation mechanism. */
119 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
120 {
121 	struct tcp_sock *tp = tcp_sk(sk);
122 	s32 delta = tcp_time_stamp - tp->lsndtime;
123 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
124 	u32 cwnd = tp->snd_cwnd;
125 
126 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
127 
128 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
129 	restart_cwnd = min(restart_cwnd, cwnd);
130 
131 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
132 		cwnd >>= 1;
133 	tp->snd_cwnd = max(cwnd, restart_cwnd);
134 	tp->snd_cwnd_stamp = tcp_time_stamp;
135 	tp->snd_cwnd_used = 0;
136 }
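/* Illustrative example (not part of the original source): each full RTO of
 * idle time in the loop above roughly halves the congestion window, but
 * never below the restart window.  E.g., assuming snd_cwnd == 32,
 * restart_cwnd == 4 and an idle period a bit longer than 3 * RTO, cwnd is
 * halved three times (32 -> 16 -> 8 -> 4) and sending restarts with
 * snd_cwnd == 4.
 */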
137 
138 static void tcp_event_data_sent(struct tcp_sock *tp,
139 				struct sk_buff *skb, struct sock *sk)
140 {
141 	struct inet_connection_sock *icsk = inet_csk(sk);
142 	const u32 now = tcp_time_stamp;
143 
144 	if (sysctl_tcp_slow_start_after_idle &&
145 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
146 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
147 
148 	tp->lsndtime = now;
149 
150 	/* If this is a reply sent within ato of the last received
151 	 * packet, enter pingpong mode.
152 	 */
153 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
154 		icsk->icsk_ack.pingpong = 1;
155 }
156 
157 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
158 {
159 	tcp_dec_quickack_mode(sk, pkts);
160 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
161 }
162 
163 /* Determine a window scaling and initial window to offer.
164  * Based on the assumption that the given amount of space
165  * will be offered. Store the results in the tp structure.
166  * NOTE: for smooth operation initial space offering should
167  * be a multiple of mss if possible. We assume here that mss >= 1.
168  * This MUST be enforced by all callers.
169  */
170 void tcp_select_initial_window(int __space, __u32 mss,
171 			       __u32 *rcv_wnd, __u32 *window_clamp,
172 			       int wscale_ok, __u8 *rcv_wscale)
173 {
174 	unsigned int space = (__space < 0 ? 0 : __space);
175 
176 	/* If no clamp set the clamp to the max possible scaled window */
177 	if (*window_clamp == 0)
178 		(*window_clamp) = (65535 << 14);
179 	space = min(*window_clamp, space);
180 
181 	/* Quantize space offering to a multiple of mss if possible. */
182 	if (space > mss)
183 		space = (space / mss) * mss;
184 
185 	/* NOTE: offering an initial window larger than 32767
186 	 * will break some buggy TCP stacks. If the admin tells us
187 	 * it is likely we could be speaking with such a buggy stack
188 	 * we will truncate our initial window offering to 32K-1
189 	 * unless the remote has sent us a window scaling option,
190 	 * which we interpret as a sign the remote TCP is not
191 	 * misinterpreting the window field as a signed quantity.
192 	 */
193 	if (sysctl_tcp_workaround_signed_windows)
194 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
195 	else
196 		(*rcv_wnd) = space;
197 
198 	(*rcv_wscale) = 0;
199 	if (wscale_ok) {
200 		/* Set window scaling on max possible window
201 		 * See RFC1323 for an explanation of the limit to 14
202 		 */
203 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 		space = min_t(u32, space, *window_clamp);
205 		while (space > 65535 && (*rcv_wscale) < 14) {
206 			space >>= 1;
207 			(*rcv_wscale)++;
208 		}
209 	}
210 
211 	/* Set the initial window to a value large enough for senders
212 	 * that follow RFC2414.  Senders not following this RFC will be
213 	 * satisfied with 2.
214 	 */
215 	if (mss > (1<<*rcv_wscale)) {
216 		int init_cwnd = 4;
217 		if (mss > 1460*3)
218 			init_cwnd = 2;
219 		else if (mss > 1460)
220 			init_cwnd = 3;
221 		if (*rcv_wnd > init_cwnd*mss)
222 			*rcv_wnd = init_cwnd*mss;
223 	}
224 
225 	/* Set the clamp no higher than max representable value */
226 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
227 }
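/* Illustrative example (assumed values, not from the original source):
 * suppose max(sysctl_tcp_rmem[2], sysctl_rmem_max) is 262144 bytes and the
 * mss is 1460.  The wscale loop above halves 262144 until it fits in 16
 * bits: 262144 -> 131072 -> 65536 -> 32768, giving rcv_wscale == 3.  Since
 * mss <= 1460, init_cwnd stays 4, so the initial rcv_wnd is clamped to at
 * most 4 * 1460 == 5840 bytes per the RFC2414 logic above.
 */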
228 
229 /* Choose a new window to advertise, update state in tcp_sock for the
230  * socket, and return result with RFC1323 scaling applied.  The return
231  * value can be stuffed directly into th->window for an outgoing
232  * frame.
233  */
234 static u16 tcp_select_window(struct sock *sk)
235 {
236 	struct tcp_sock *tp = tcp_sk(sk);
237 	u32 cur_win = tcp_receive_window(tp);
238 	u32 new_win = __tcp_select_window(sk);
239 
240 	/* Never shrink the offered window */
241 	if(new_win < cur_win) {
242 		/* Danger Will Robinson!
243 		 * Don't update rcv_wup/rcv_wnd here or else
244 		 * we will not be able to advertise a zero
245 		 * window in time.  --DaveM
246 		 *
247 		 * Relax Will Robinson.
248 		 */
249 		new_win = cur_win;
250 	}
251 	tp->rcv_wnd = new_win;
252 	tp->rcv_wup = tp->rcv_nxt;
253 
254 	/* Make sure we do not exceed the maximum possible
255 	 * scaled window.
256 	 */
257 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
258 		new_win = min(new_win, MAX_TCP_WINDOW);
259 	else
260 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
261 
262 	/* RFC1323 scaling applied */
263 	new_win >>= tp->rx_opt.rcv_wscale;
264 
265 	/* If we advertise zero window, disable fast path. */
266 	if (new_win == 0)
267 		tp->pred_flags = 0;
268 
269 	return new_win;
270 }
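/* Illustrative example (not part of the original source): with
 * rx_opt.rcv_wscale == 3 and a selected window of 50000 bytes, the value
 * written into th->window is 50000 >> 3 == 6250; the peer reconstructs
 * 6250 << 3 == 50000.  Any low-order bits below the scale factor are
 * simply dropped from the advertisement.
 */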
271 
272 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
273 					 __u32 tstamp, __u8 **md5_hash)
274 {
275 	if (tp->rx_opt.tstamp_ok) {
276 		*ptr++ = htonl((TCPOPT_NOP << 24) |
277 			       (TCPOPT_NOP << 16) |
278 			       (TCPOPT_TIMESTAMP << 8) |
279 			       TCPOLEN_TIMESTAMP);
280 		*ptr++ = htonl(tstamp);
281 		*ptr++ = htonl(tp->rx_opt.ts_recent);
282 	}
283 	if (tp->rx_opt.eff_sacks) {
284 		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
285 		int this_sack;
286 
287 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
288 			       (TCPOPT_NOP  << 16) |
289 			       (TCPOPT_SACK <<  8) |
290 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
291 						     TCPOLEN_SACK_PERBLOCK)));
292 		for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
293 			*ptr++ = htonl(sp[this_sack].start_seq);
294 			*ptr++ = htonl(sp[this_sack].end_seq);
295 		}
296 		if (tp->rx_opt.dsack) {
297 			tp->rx_opt.dsack = 0;
298 			tp->rx_opt.eff_sacks--;
299 		}
300 	}
301 #ifdef CONFIG_TCP_MD5SIG
302 	if (md5_hash) {
303 		*ptr++ = htonl((TCPOPT_NOP << 24) |
304 			       (TCPOPT_NOP << 16) |
305 			       (TCPOPT_MD5SIG << 8) |
306 			       TCPOLEN_MD5SIG);
307 		*md5_hash = (__u8 *)ptr;
308 	}
309 #endif
310 }
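/* Illustrative note (not part of the original source): the timestamp word
 * built above appears on the wire as the bytes 01 01 08 0a (NOP, NOP,
 * kind=8, len=10), followed by the 4-byte TSval and 4-byte TSecr, for 12
 * bytes total -- which is TCPOLEN_TSTAMP_ALIGNED.
 */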
311 
312 /* Construct a tcp options header for a SYN or SYN_ACK packet.
313  * If this is ever changed, make sure to change the definition of
314  * MAX_SYN_SIZE to match the new maximum number of options that you
315  * can generate.
316  *
317  * Note - that with the RFC2385 TCP option, we make room for the
318  * 16 byte MD5 hash. This will be filled in later, so the pointer for the
319  * location to be filled is passed back up.
320  */
321 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
322 				  int offer_wscale, int wscale, __u32 tstamp,
323 				  __u32 ts_recent, __u8 **md5_hash)
324 {
325 	/* We always get an MSS option.
326 	 * The option bytes which will be seen in normal data
327 	 * packets, should timestamps be in use, must be counted in the
328 	 * MSS advertised.  But we subtract them from tp->mss_cache so
329 	 * that calculations in tcp_sendmsg are simpler etc.
330 	 * So account for this fact here if necessary.  If we
331 	 * don't do this correctly, as a receiver we won't
332 	 * recognize data packets as being full sized when we
333 	 * should, and thus we won't abide by the delayed ACK
334 	 * rules correctly.
335 	 * SACKs don't matter, we never delay an ACK when we
336 	 * have any of those going out.
337 	 */
338 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
339 	if (ts) {
340 		if(sack)
341 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
342 				       (TCPOLEN_SACK_PERM << 16) |
343 				       (TCPOPT_TIMESTAMP << 8) |
344 				       TCPOLEN_TIMESTAMP);
345 		else
346 			*ptr++ = htonl((TCPOPT_NOP << 24) |
347 				       (TCPOPT_NOP << 16) |
348 				       (TCPOPT_TIMESTAMP << 8) |
349 				       TCPOLEN_TIMESTAMP);
350 		*ptr++ = htonl(tstamp);		/* TSVAL */
351 		*ptr++ = htonl(ts_recent);	/* TSECR */
352 	} else if(sack)
353 		*ptr++ = htonl((TCPOPT_NOP << 24) |
354 			       (TCPOPT_NOP << 16) |
355 			       (TCPOPT_SACK_PERM << 8) |
356 			       TCPOLEN_SACK_PERM);
357 	if (offer_wscale)
358 		*ptr++ = htonl((TCPOPT_NOP << 24) |
359 			       (TCPOPT_WINDOW << 16) |
360 			       (TCPOLEN_WINDOW << 8) |
361 			       (wscale));
362 #ifdef CONFIG_TCP_MD5SIG
363 	/*
364 	 * If MD5 is enabled, then we set the option, and include the size
365 	 * (always 18). The actual MD5 hash is added just before the
366 	 * packet is sent.
367 	 */
368 	if (md5_hash) {
369 		*ptr++ = htonl((TCPOPT_NOP << 24) |
370 			       (TCPOPT_NOP << 16) |
371 			       (TCPOPT_MD5SIG << 8) |
372 			       TCPOLEN_MD5SIG);
373 		*md5_hash = (__u8 *) ptr;
374 	}
375 #endif
376 }
377 
378 /* This routine actually transmits TCP packets queued up by
379  * tcp_do_sendmsg().  This is used by both the initial
380  * transmission and possible later retransmissions.
381  * All SKB's seen here are completely headerless.  It is our
382  * job to build the TCP header, and pass the packet down to
383  * IP so it can do the same plus pass the packet off to the
384  * device.
385  *
386  * We are working here with either a clone of the original
387  * SKB, or a fresh unique copy made by the retransmit engine.
388  */
389 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
390 {
391 	const struct inet_connection_sock *icsk = inet_csk(sk);
392 	struct inet_sock *inet;
393 	struct tcp_sock *tp;
394 	struct tcp_skb_cb *tcb;
395 	int tcp_header_size;
396 #ifdef CONFIG_TCP_MD5SIG
397 	struct tcp_md5sig_key *md5;
398 	__u8 *md5_hash_location;
399 #endif
400 	struct tcphdr *th;
401 	int sysctl_flags;
402 	int err;
403 
404 	BUG_ON(!skb || !tcp_skb_pcount(skb));
405 
406 	/* If congestion control is doing timestamping, we must
407 	 * take such a timestamp before we potentially clone/copy.
408 	 */
409 	if (icsk->icsk_ca_ops->rtt_sample)
410 		__net_timestamp(skb);
411 
412 	if (likely(clone_it)) {
413 		if (unlikely(skb_cloned(skb)))
414 			skb = pskb_copy(skb, gfp_mask);
415 		else
416 			skb = skb_clone(skb, gfp_mask);
417 		if (unlikely(!skb))
418 			return -ENOBUFS;
419 	}
420 
421 	inet = inet_sk(sk);
422 	tp = tcp_sk(sk);
423 	tcb = TCP_SKB_CB(skb);
424 	tcp_header_size = tp->tcp_header_len;
425 
426 #define SYSCTL_FLAG_TSTAMPS	0x1
427 #define SYSCTL_FLAG_WSCALE	0x2
428 #define SYSCTL_FLAG_SACK	0x4
429 
430 	sysctl_flags = 0;
431 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
432 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
433 		if(sysctl_tcp_timestamps) {
434 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
435 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
436 		}
437 		if (sysctl_tcp_window_scaling) {
438 			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
439 			sysctl_flags |= SYSCTL_FLAG_WSCALE;
440 		}
441 		if (sysctl_tcp_sack) {
442 			sysctl_flags |= SYSCTL_FLAG_SACK;
443 			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
444 				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
445 		}
446 	} else if (unlikely(tp->rx_opt.eff_sacks)) {
447 		/* A SACK is 2 pad bytes, a 2 byte header, plus
448 		 * 2 32-bit sequence numbers for each SACK block.
449 		 */
450 		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
451 				    (tp->rx_opt.eff_sacks *
452 				     TCPOLEN_SACK_PERBLOCK));
453 	}
454 
455 	if (tcp_packets_in_flight(tp) == 0)
456 		tcp_ca_event(sk, CA_EVENT_TX_START);
457 
458 #ifdef CONFIG_TCP_MD5SIG
459 	/*
460 	 * Are we doing MD5 on this segment? If so - make
461 	 * room for it.
462 	 */
463 	md5 = tp->af_specific->md5_lookup(sk, sk);
464 	if (md5)
465 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
466 #endif
467 
468 	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
469 	skb->h.th = th;
470 	skb_set_owner_w(skb, sk);
471 
472 	/* Build TCP header and checksum it. */
473 	th->source		= inet->sport;
474 	th->dest		= inet->dport;
475 	th->seq			= htonl(tcb->seq);
476 	th->ack_seq		= htonl(tp->rcv_nxt);
477 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
478 					tcb->flags);
479 
480 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
481 		/* RFC1323: The window in SYN & SYN/ACK segments
482 		 * is never scaled.
483 		 */
484 		th->window	= htons(min(tp->rcv_wnd, 65535U));
485 	} else {
486 		th->window	= htons(tcp_select_window(sk));
487 	}
488 	th->check		= 0;
489 	th->urg_ptr		= 0;
490 
491 	if (unlikely(tp->urg_mode &&
492 		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
493 		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
494 		th->urg			= 1;
495 	}
496 
497 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
498 		tcp_syn_build_options((__be32 *)(th + 1),
499 				      tcp_advertise_mss(sk),
500 				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
501 				      (sysctl_flags & SYSCTL_FLAG_SACK),
502 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
503 				      tp->rx_opt.rcv_wscale,
504 				      tcb->when,
505 				      tp->rx_opt.ts_recent,
506 
507 #ifdef CONFIG_TCP_MD5SIG
508 				      md5 ? &md5_hash_location :
509 #endif
510 				      NULL);
511 	} else {
512 		tcp_build_and_update_options((__be32 *)(th + 1),
513 					     tp, tcb->when,
514 #ifdef CONFIG_TCP_MD5SIG
515 					     md5 ? &md5_hash_location :
516 #endif
517 					     NULL);
518 		TCP_ECN_send(sk, tp, skb, tcp_header_size);
519 	}
520 
521 #ifdef CONFIG_TCP_MD5SIG
522 	/* Calculate the MD5 hash, as we have all we need now */
523 	if (md5) {
524 		tp->af_specific->calc_md5_hash(md5_hash_location,
525 					       md5,
526 					       sk, NULL, NULL,
527 					       skb->h.th,
528 					       sk->sk_protocol,
529 					       skb->len);
530 	}
531 #endif
532 
533 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
534 
535 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
536 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
537 
538 	if (skb->len != tcp_header_size)
539 		tcp_event_data_sent(tp, skb, sk);
540 
541 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
542 		TCP_INC_STATS(TCP_MIB_OUTSEGS);
543 
544 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
545 	if (likely(err <= 0))
546 		return err;
547 
548 	tcp_enter_cwr(sk);
549 
550 	return net_xmit_eval(err);
551 
552 #undef SYSCTL_FLAG_TSTAMPS
553 #undef SYSCTL_FLAG_WSCALE
554 #undef SYSCTL_FLAG_SACK
555 }
556 
557 
558 /* This routine just queues the buffer.
559  *
560  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
561  * otherwise the socket can stall.
562  */
563 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
564 {
565 	struct tcp_sock *tp = tcp_sk(sk);
566 
567 	/* Advance write_seq and place onto the write_queue. */
568 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
569 	skb_header_release(skb);
570 	__skb_queue_tail(&sk->sk_write_queue, skb);
571 	sk_charge_skb(sk, skb);
572 
573 	/* Queue it, remembering where we must start sending. */
574 	if (sk->sk_send_head == NULL)
575 		sk->sk_send_head = skb;
576 }
577 
578 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
579 {
580 	if (skb->len <= mss_now || !sk_can_gso(sk)) {
581 		/* Avoid the costly divide in the normal
582 		 * non-TSO case.
583 		 */
584 		skb_shinfo(skb)->gso_segs = 1;
585 		skb_shinfo(skb)->gso_size = 0;
586 		skb_shinfo(skb)->gso_type = 0;
587 	} else {
588 		unsigned int factor;
589 
590 		factor = skb->len + (mss_now - 1);
591 		factor /= mss_now;
592 		skb_shinfo(skb)->gso_segs = factor;
593 		skb_shinfo(skb)->gso_size = mss_now;
594 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
595 	}
596 }
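/* Illustrative example (not part of the original source): for a GSO
 * capable socket with skb->len == 4000 and mss_now == 1460, the code above
 * computes gso_segs = (4000 + 1459) / 1460 == 3, i.e. the skb counts as
 * three segments, with gso_size set to 1460.
 */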
597 
598 /* Function to create two new TCP segments.  Shrinks the given segment
599  * to the specified size and appends a new segment with the rest of the
600  * packet to the list.  This won't be called frequently, I hope.
601  * Remember, these are still headerless SKBs at this point.
602  */
603 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
604 {
605 	struct tcp_sock *tp = tcp_sk(sk);
606 	struct sk_buff *buff;
607 	int nsize, old_factor;
608 	int nlen;
609 	u16 flags;
610 
611 	BUG_ON(len > skb->len);
612 
613 	clear_all_retrans_hints(tp);
614 	nsize = skb_headlen(skb) - len;
615 	if (nsize < 0)
616 		nsize = 0;
617 
618 	if (skb_cloned(skb) &&
619 	    skb_is_nonlinear(skb) &&
620 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
621 		return -ENOMEM;
622 
623 	/* Get a new skb... force flag on. */
624 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
625 	if (buff == NULL)
626 		return -ENOMEM; /* We'll just try again later. */
627 
628 	sk_charge_skb(sk, buff);
629 	nlen = skb->len - len - nsize;
630 	buff->truesize += nlen;
631 	skb->truesize -= nlen;
632 
633 	/* Correct the sequence numbers. */
634 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
635 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
636 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
637 
638 	/* PSH and FIN should only be set in the second packet. */
639 	flags = TCP_SKB_CB(skb)->flags;
640 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
641 	TCP_SKB_CB(buff)->flags = flags;
642 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
643 	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
644 
645 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
646 		/* Copy and checksum data tail into the new buffer. */
647 		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
648 						       nsize, 0);
649 
650 		skb_trim(skb, len);
651 
652 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
653 	} else {
654 		skb->ip_summed = CHECKSUM_PARTIAL;
655 		skb_split(skb, buff, len);
656 	}
657 
658 	buff->ip_summed = skb->ip_summed;
659 
660 	/* Looks stupid, but our code really uses the 'when' field of
661 	 * skbs which it has never sent before. --ANK
662 	 */
663 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
664 	buff->tstamp = skb->tstamp;
665 
666 	old_factor = tcp_skb_pcount(skb);
667 
668 	/* Fix up tso_factor for both original and new SKB.  */
669 	tcp_set_skb_tso_segs(sk, skb, mss_now);
670 	tcp_set_skb_tso_segs(sk, buff, mss_now);
671 
672 	/* If this packet has been sent out already, we must
673 	 * adjust the various packet counters.
674 	 */
675 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
676 		int diff = old_factor - tcp_skb_pcount(skb) -
677 			tcp_skb_pcount(buff);
678 
679 		tp->packets_out -= diff;
680 
681 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
682 			tp->sacked_out -= diff;
683 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
684 			tp->retrans_out -= diff;
685 
686 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
687 			tp->lost_out -= diff;
688 			tp->left_out -= diff;
689 		}
690 
691 		if (diff > 0) {
692 			/* Adjust Reno SACK estimate. */
693 			if (!tp->rx_opt.sack_ok) {
694 				tp->sacked_out -= diff;
695 				if ((int)tp->sacked_out < 0)
696 					tp->sacked_out = 0;
697 				tcp_sync_left_out(tp);
698 			}
699 
700 			tp->fackets_out -= diff;
701 			if ((int)tp->fackets_out < 0)
702 				tp->fackets_out = 0;
703 		}
704 	}
705 
706 	/* Link BUFF into the send queue. */
707 	skb_header_release(buff);
708 	__skb_append(skb, buff, &sk->sk_write_queue);
709 
710 	return 0;
711 }
712 
713 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
714  * eventually). The difference is that the pulled data is not copied, but
715  * immediately discarded.
716  */
717 static void __pskb_trim_head(struct sk_buff *skb, int len)
718 {
719 	int i, k, eat;
720 
721 	eat = len;
722 	k = 0;
723 	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
724 		if (skb_shinfo(skb)->frags[i].size <= eat) {
725 			put_page(skb_shinfo(skb)->frags[i].page);
726 			eat -= skb_shinfo(skb)->frags[i].size;
727 		} else {
728 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
729 			if (eat) {
730 				skb_shinfo(skb)->frags[k].page_offset += eat;
731 				skb_shinfo(skb)->frags[k].size -= eat;
732 				eat = 0;
733 			}
734 			k++;
735 		}
736 	}
737 	skb_shinfo(skb)->nr_frags = k;
738 
739 	skb->tail = skb->data;
740 	skb->data_len -= len;
741 	skb->len = skb->data_len;
742 }
743 
744 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
745 {
746 	if (skb_cloned(skb) &&
747 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
748 		return -ENOMEM;
749 
750 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
751 	if (unlikely(len < skb_headlen(skb)))
752 		__skb_pull(skb, len);
753 	else
754 		__pskb_trim_head(skb, len - skb_headlen(skb));
755 
756 	TCP_SKB_CB(skb)->seq += len;
757 	skb->ip_summed = CHECKSUM_PARTIAL;
758 
759 	skb->truesize	     -= len;
760 	sk->sk_wmem_queued   -= len;
761 	sk->sk_forward_alloc += len;
762 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
763 
764 	/* Any change of skb->len requires recalculation of tso
765 	 * factor and mss.
766 	 */
767 	if (tcp_skb_pcount(skb) > 1)
768 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
769 
770 	return 0;
771 }
772 
773 /* Not accounting for SACKs here. */
774 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
775 {
776 	struct tcp_sock *tp = tcp_sk(sk);
777 	struct inet_connection_sock *icsk = inet_csk(sk);
778 	int mss_now;
779 
780 	/* Calculate base mss without TCP options:
781 	   It is MMS_S - sizeof(tcphdr) of rfc1122
782 	 */
783 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
784 
785 	/* Clamp it (mss_clamp does not include tcp options) */
786 	if (mss_now > tp->rx_opt.mss_clamp)
787 		mss_now = tp->rx_opt.mss_clamp;
788 
789 	/* Now subtract optional transport overhead */
790 	mss_now -= icsk->icsk_ext_hdr_len;
791 
792 	/* Then reserve room for full set of TCP options and 8 bytes of data */
793 	if (mss_now < 48)
794 		mss_now = 48;
795 
796 	/* Now subtract TCP options size, not including SACKs */
797 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
798 
799 	return mss_now;
800 }
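/* Illustrative example (assumed IPv4 values, not from the original
 * source): with pmtu == 1500, a 20-byte IP header and a 20-byte TCP
 * header, mss_now starts at 1460.  If timestamps are in use,
 * tp->tcp_header_len is 20 + 12, so the final result is 1460 - 12 == 1448
 * bytes of payload per segment.
 */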
801 
802 /* Inverse of above */
803 int tcp_mss_to_mtu(struct sock *sk, int mss)
804 {
805 	struct tcp_sock *tp = tcp_sk(sk);
806 	struct inet_connection_sock *icsk = inet_csk(sk);
807 	int mtu;
808 
809 	mtu = mss +
810 	      tp->tcp_header_len +
811 	      icsk->icsk_ext_hdr_len +
812 	      icsk->icsk_af_ops->net_header_len;
813 
814 	return mtu;
815 }
816 
817 void tcp_mtup_init(struct sock *sk)
818 {
819 	struct tcp_sock *tp = tcp_sk(sk);
820 	struct inet_connection_sock *icsk = inet_csk(sk);
821 
822 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
823 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
824 			       icsk->icsk_af_ops->net_header_len;
825 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
826 	icsk->icsk_mtup.probe_size = 0;
827 }
828 
829 /* This function synchronizes snd mss to the current pmtu/exthdr set.
830 
831    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
832    account for TCP options, but includes only the bare TCP header.
833 
834    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
835    It is the minimum of user_mss and the mss received with the SYN.
836    It also does not include TCP options.
837 
838    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
839 
840    tp->mss_cache is current effective sending mss, including
841    all tcp options except for SACKs. It is evaluated,
842    taking into account current pmtu, but never exceeds
843    tp->rx_opt.mss_clamp.
844 
845    NOTE1. rfc1122 clearly states that advertised MSS
846    DOES NOT include either tcp or ip options.
847 
848    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
849    are READ ONLY outside this function.		--ANK (980731)
850  */
851 
852 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
853 {
854 	struct tcp_sock *tp = tcp_sk(sk);
855 	struct inet_connection_sock *icsk = inet_csk(sk);
856 	int mss_now;
857 
858 	if (icsk->icsk_mtup.search_high > pmtu)
859 		icsk->icsk_mtup.search_high = pmtu;
860 
861 	mss_now = tcp_mtu_to_mss(sk, pmtu);
862 
863 	/* Bound mss with half of window */
864 	if (tp->max_window && mss_now > (tp->max_window>>1))
865 		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
866 
867 	/* And store cached results */
868 	icsk->icsk_pmtu_cookie = pmtu;
869 	if (icsk->icsk_mtup.enabled)
870 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
871 	tp->mss_cache = mss_now;
872 
873 	return mss_now;
874 }
875 
876 /* Compute the current effective MSS, taking SACKs and IP options,
877  * and even PMTU discovery events into account.
878  *
879  * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
880  * cannot be large. However, taking into account rare use of URG, this
881  * is not a big flaw.
882  */
883 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
884 {
885 	struct tcp_sock *tp = tcp_sk(sk);
886 	struct dst_entry *dst = __sk_dst_get(sk);
887 	u32 mss_now;
888 	u16 xmit_size_goal;
889 	int doing_tso = 0;
890 
891 	mss_now = tp->mss_cache;
892 
893 	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
894 		doing_tso = 1;
895 
896 	if (dst) {
897 		u32 mtu = dst_mtu(dst);
898 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
899 			mss_now = tcp_sync_mss(sk, mtu);
900 	}
901 
902 	if (tp->rx_opt.eff_sacks)
903 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
904 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
905 
906 #ifdef CONFIG_TCP_MD5SIG
907 	if (tp->af_specific->md5_lookup(sk, sk))
908 		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
909 #endif
910 
911 	xmit_size_goal = mss_now;
912 
913 	if (doing_tso) {
914 		xmit_size_goal = (65535 -
915 				  inet_csk(sk)->icsk_af_ops->net_header_len -
916 				  inet_csk(sk)->icsk_ext_hdr_len -
917 				  tp->tcp_header_len);
918 
919 		if (tp->max_window &&
920 		    (xmit_size_goal > (tp->max_window >> 1)))
921 			xmit_size_goal = max((tp->max_window >> 1),
922 					     68U - tp->tcp_header_len);
923 
924 		xmit_size_goal -= (xmit_size_goal % mss_now);
925 	}
926 	tp->xmit_size_goal = xmit_size_goal;
927 
928 	return mss_now;
929 }
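/* Illustrative example (assumed values, not from the original source): for
 * an IPv4 TSO socket with mss_now == 1448 and timestamps enabled,
 * xmit_size_goal starts at 65535 - 20 - 0 - 32 == 65483 and, ignoring the
 * max_window bound, is rounded down to a multiple of the mss:
 * 65483 - (65483 % 1448) == 65160 bytes per TSO frame.
 */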
930 
931 /* Congestion window validation. (RFC2861) */
932 
933 static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
934 {
935 	__u32 packets_out = tp->packets_out;
936 
937 	if (packets_out >= tp->snd_cwnd) {
938 		/* Network is fully fed. */
939 		tp->snd_cwnd_used = 0;
940 		tp->snd_cwnd_stamp = tcp_time_stamp;
941 	} else {
942 		/* Network starves. */
943 		if (tp->packets_out > tp->snd_cwnd_used)
944 			tp->snd_cwnd_used = tp->packets_out;
945 
946 		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
947 			tcp_cwnd_application_limited(sk);
948 	}
949 }
950 
951 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
952 {
953 	u32 window, cwnd_len;
954 
955 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
956 	cwnd_len = mss_now * cwnd;
957 	return min(window, cwnd_len);
958 }
959 
960 /* Can at least one segment of SKB be sent right now, according to the
961  * congestion window rules?  If so, return how many segments are allowed.
962  */
963 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
964 {
965 	u32 in_flight, cwnd;
966 
967 	/* Don't be strict about the congestion window for the final FIN.  */
968 	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
969 	    tcp_skb_pcount(skb) == 1)
970 		return 1;
971 
972 	in_flight = tcp_packets_in_flight(tp);
973 	cwnd = tp->snd_cwnd;
974 	if (in_flight < cwnd)
975 		return (cwnd - in_flight);
976 
977 	return 0;
978 }
979 
980 /* This must be invoked the first time we consider transmitting
981  * SKB onto the wire.
982  */
983 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
984 {
985 	int tso_segs = tcp_skb_pcount(skb);
986 
987 	if (!tso_segs ||
988 	    (tso_segs > 1 &&
989 	     tcp_skb_mss(skb) != mss_now)) {
990 		tcp_set_skb_tso_segs(sk, skb, mss_now);
991 		tso_segs = tcp_skb_pcount(skb);
992 	}
993 	return tso_segs;
994 }
995 
996 static inline int tcp_minshall_check(const struct tcp_sock *tp)
997 {
998 	return after(tp->snd_sml,tp->snd_una) &&
999 		!after(tp->snd_sml, tp->snd_nxt);
1000 }
1001 
1002 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1003  * 1. It is full sized.
1004  * 2. Or it contains FIN. (already checked by caller)
1005  * 3. Or TCP_NODELAY was set.
1006  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1007  *    With Minshall's modification: all sent small packets are ACKed.
1008  */
1009 
1010 static inline int tcp_nagle_check(const struct tcp_sock *tp,
1011 				  const struct sk_buff *skb,
1012 				  unsigned mss_now, int nonagle)
1013 {
1014 	return (skb->len < mss_now &&
1015 		((nonagle&TCP_NAGLE_CORK) ||
1016 		 (!nonagle &&
1017 		  tp->packets_out &&
1018 		  tcp_minshall_check(tp))));
1019 }
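/* Illustrative example (not part of the original source): with Nagle
 * enabled (nonagle == 0), a 100-byte skb (less than mss_now) and a
 * previously sent sub-mss segment still unacknowledged
 * (tcp_minshall_check() true), tcp_nagle_check() returns nonzero, so
 * tcp_nagle_test() below returns 0 and the small packet is held back until
 * the earlier one is ACKed.
 */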
1020 
1021 /* Return non-zero if the Nagle test allows this packet to be
1022  * sent now.
1023  */
1024 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1025 				 unsigned int cur_mss, int nonagle)
1026 {
1027 	/* The Nagle rule does not apply to frames which sit in the middle of the
1028 	 * write_queue (they have no chance to get new data).
1029 	 *
1030 	 * This is implemented in the callers, where they modify the 'nonagle'
1031 	 * argument based upon the location of SKB in the send queue.
1032 	 */
1033 	if (nonagle & TCP_NAGLE_PUSH)
1034 		return 1;
1035 
1036 	/* Don't use the nagle rule for urgent data (or for the final FIN).  */
1037 	if (tp->urg_mode ||
1038 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1039 		return 1;
1040 
1041 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1042 		return 1;
1043 
1044 	return 0;
1045 }
1046 
1047 /* Does at least the first segment of SKB fit into the send window? */
1048 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1049 {
1050 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1051 
1052 	if (skb->len > cur_mss)
1053 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1054 
1055 	return !after(end_seq, tp->snd_una + tp->snd_wnd);
1056 }
1057 
1058 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
1059  * should be put on the wire right now.  If so, it returns the number of
1060  * packets allowed by the congestion window.
1061  */
1062 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1063 				 unsigned int cur_mss, int nonagle)
1064 {
1065 	struct tcp_sock *tp = tcp_sk(sk);
1066 	unsigned int cwnd_quota;
1067 
1068 	tcp_init_tso_segs(sk, skb, cur_mss);
1069 
1070 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1071 		return 0;
1072 
1073 	cwnd_quota = tcp_cwnd_test(tp, skb);
1074 	if (cwnd_quota &&
1075 	    !tcp_snd_wnd_test(tp, skb, cur_mss))
1076 		cwnd_quota = 0;
1077 
1078 	return cwnd_quota;
1079 }
1080 
1081 static inline int tcp_skb_is_last(const struct sock *sk,
1082 				  const struct sk_buff *skb)
1083 {
1084 	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1085 }
1086 
1087 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
1088 {
1089 	struct sk_buff *skb = sk->sk_send_head;
1090 
1091 	return (skb &&
1092 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1093 			     (tcp_skb_is_last(sk, skb) ?
1094 			      TCP_NAGLE_PUSH :
1095 			      tp->nonagle)));
1096 }
1097 
1098 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1099  * which is put after SKB on the list.  It is very much like
1100  * tcp_fragment() except that it may make several kinds of assumptions
1101  * in order to speed up the splitting operation.  In particular, we
1102  * know that all the data is in scatter-gather pages, and that the
1103  * packet has never been sent out before (and thus is not cloned).
1104  */
1105 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1106 {
1107 	struct sk_buff *buff;
1108 	int nlen = skb->len - len;
1109 	u16 flags;
1110 
1111 	/* All of a TSO frame must be composed of paged data.  */
1112 	if (skb->len != skb->data_len)
1113 		return tcp_fragment(sk, skb, len, mss_now);
1114 
1115 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1116 	if (unlikely(buff == NULL))
1117 		return -ENOMEM;
1118 
1119 	sk_charge_skb(sk, buff);
1120 	buff->truesize += nlen;
1121 	skb->truesize -= nlen;
1122 
1123 	/* Correct the sequence numbers. */
1124 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1125 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1126 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1127 
1128 	/* PSH and FIN should only be set in the second packet. */
1129 	flags = TCP_SKB_CB(skb)->flags;
1130 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1131 	TCP_SKB_CB(buff)->flags = flags;
1132 
1133 	/* This packet was never sent out yet, so no SACK bits. */
1134 	TCP_SKB_CB(buff)->sacked = 0;
1135 
1136 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1137 	skb_split(skb, buff, len);
1138 
1139 	/* Fix up tso_factor for both original and new SKB.  */
1140 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1141 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1142 
1143 	/* Link BUFF into the send queue. */
1144 	skb_header_release(buff);
1145 	__skb_append(skb, buff, &sk->sk_write_queue);
1146 
1147 	return 0;
1148 }
1149 
1150 /* Try to defer sending, if possible, in order to minimize the amount
1151  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1152  *
1153  * This algorithm is from John Heffner.
1154  */
1155 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1156 {
1157 	const struct inet_connection_sock *icsk = inet_csk(sk);
1158 	u32 send_win, cong_win, limit, in_flight;
1159 
1160 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1161 		goto send_now;
1162 
1163 	if (icsk->icsk_ca_state != TCP_CA_Open)
1164 		goto send_now;
1165 
1166 	/* Defer for less than two clock ticks. */
1167 	if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1168 		goto send_now;
1169 
1170 	in_flight = tcp_packets_in_flight(tp);
1171 
1172 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1173 	       (tp->snd_cwnd <= in_flight));
1174 
1175 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1176 
1177 	/* From in_flight test above, we know that cwnd > in_flight.  */
1178 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1179 
1180 	limit = min(send_win, cong_win);
1181 
1182 	/* If a full-sized TSO skb can be sent, do it. */
1183 	if (limit >= 65536)
1184 		goto send_now;
1185 
1186 	if (sysctl_tcp_tso_win_divisor) {
1187 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1188 
1189 		/* If at least some fraction of a window is available,
1190 		 * just use it.
1191 		 */
1192 		chunk /= sysctl_tcp_tso_win_divisor;
1193 		if (limit >= chunk)
1194 			goto send_now;
1195 	} else {
1196 		/* Different approach, try not to defer past a single
1197 		 * ACK.  Receiver should ACK every other full sized
1198 		 * frame, so if we have space for more than 3 frames
1199 		 * then send now.
1200 		 */
1201 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1202 			goto send_now;
1203 	}
1204 
1205 	/* Ok, it looks like it is advisable to defer.  */
1206 	tp->tso_deferred = 1 | (jiffies<<1);
1207 
1208 	return 1;
1209 
1210 send_now:
1211 	tp->tso_deferred = 0;
1212 	return 0;
1213 }
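/* Illustrative example (assumed values, not from the original source):
 * with the default sysctl_tcp_tso_win_divisor of 3, mss_cache == 1448,
 * snd_cwnd == 40, 30 packets in flight and snd_wnd == 64240, cong_win is
 * 10 * 1448 == 14480 and chunk is min(64240, 40 * 1448) / 3 == 19306.
 * Since limit == min(send_win, 14480) is below chunk (and below 65536),
 * the TSO frame is deferred in the hope that more of the window opens up
 * before it must be split.
 */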
1214 
1215 /* Create a new MTU probe if we are ready.
1216  * Returns 0 if we should wait to probe (no cwnd available),
1217  *         1 if a probe was sent,
1218  *         -1 otherwise */
1219 static int tcp_mtu_probe(struct sock *sk)
1220 {
1221 	struct tcp_sock *tp = tcp_sk(sk);
1222 	struct inet_connection_sock *icsk = inet_csk(sk);
1223 	struct sk_buff *skb, *nskb, *next;
1224 	int len;
1225 	int probe_size;
1226 	unsigned int pif;
1227 	int copy;
1228 	int mss_now;
1229 
1230 	/* Not currently probing/verifying,
1231 	 * not in recovery,
1232 	 * have enough cwnd, and
1233 	 * not SACKing (the variable headers throw things off) */
1234 	if (!icsk->icsk_mtup.enabled ||
1235 	    icsk->icsk_mtup.probe_size ||
1236 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1237 	    tp->snd_cwnd < 11 ||
1238 	    tp->rx_opt.eff_sacks)
1239 		return -1;
1240 
1241 	/* Very simple search strategy: just double the MSS. */
1242 	mss_now = tcp_current_mss(sk, 0);
1243 	probe_size = 2*tp->mss_cache;
1244 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1245 		/* TODO: set timer for probe_converge_event */
1246 		return -1;
1247 	}
1248 
1249 	/* Have enough data in the send queue to probe? */
1250 	len = 0;
1251 	if ((skb = sk->sk_send_head) == NULL)
1252 		return -1;
1253 	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1254 		skb = skb->next;
1255 	if (len < probe_size)
1256 		return -1;
1257 
1258 	/* Receive window check. */
1259 	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
1260 		if (tp->snd_wnd < probe_size)
1261 			return -1;
1262 		else
1263 			return 0;
1264 	}
1265 
1266 	/* Do we need to wait to drain cwnd? */
1267 	pif = tcp_packets_in_flight(tp);
1268 	if (pif + 2 > tp->snd_cwnd) {
1269 		/* With no packets in flight, don't stall. */
1270 		if (pif == 0)
1271 			return -1;
1272 		else
1273 			return 0;
1274 	}
1275 
1276 	/* We're allowed to probe.  Build it now. */
1277 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1278 		return -1;
1279 	sk_charge_skb(sk, nskb);
1280 
1281 	skb = sk->sk_send_head;
1282 	__skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
1283 	sk->sk_send_head = nskb;
1284 
1285 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1286 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1287 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1288 	TCP_SKB_CB(nskb)->sacked = 0;
1289 	nskb->csum = 0;
1290 	nskb->ip_summed = skb->ip_summed;
1291 
1292 	len = 0;
1293 	while (len < probe_size) {
1294 		next = skb->next;
1295 
1296 		copy = min_t(int, skb->len, probe_size - len);
1297 		if (nskb->ip_summed)
1298 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1299 		else
1300 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1301 					 skb_put(nskb, copy), copy, nskb->csum);
1302 
1303 		if (skb->len <= copy) {
1304 			/* We've eaten all the data from this skb.
1305 			 * Throw it away. */
1306 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1307 			__skb_unlink(skb, &sk->sk_write_queue);
1308 			sk_stream_free_skb(sk, skb);
1309 		} else {
1310 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1311 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1312 			if (!skb_shinfo(skb)->nr_frags) {
1313 				skb_pull(skb, copy);
1314 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1315 					skb->csum = csum_partial(skb->data, skb->len, 0);
1316 			} else {
1317 				__pskb_trim_head(skb, copy);
1318 				tcp_set_skb_tso_segs(sk, skb, mss_now);
1319 			}
1320 			TCP_SKB_CB(skb)->seq += copy;
1321 		}
1322 
1323 		len += copy;
1324 		skb = next;
1325 	}
1326 	tcp_init_tso_segs(sk, nskb, nskb->len);
1327 
1328 	/* We're ready to send.  If this fails, the probe will
1329 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1330 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1331 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1332 		/* Decrement cwnd here because we are sending
1333 		 * effectively two packets. */
1334 		tp->snd_cwnd--;
1335 		update_send_head(sk, tp, nskb);
1336 
1337 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1338 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1339 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1340 
1341 		return 1;
1342 	}
1343 
1344 	return -1;
1345 }
1346 
1347 
1348 /* This routine writes packets to the network.  It advances the
1349  * send_head.  This happens as incoming acks open up the remote
1350  * window for us.
1351  *
1352  * Returns 1, if no segments are in flight and we have queued segments, but
1353  * cannot send anything now because of SWS or another problem.
1354  */
1355 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1356 {
1357 	struct tcp_sock *tp = tcp_sk(sk);
1358 	struct sk_buff *skb;
1359 	unsigned int tso_segs, sent_pkts;
1360 	int cwnd_quota;
1361 	int result;
1362 
1363 	/* If we are closed, the bytes will have to remain here.
1364 	 * In time closedown will finish, we empty the write queue and all
1365 	 * will be happy.
1366 	 */
1367 	if (unlikely(sk->sk_state == TCP_CLOSE))
1368 		return 0;
1369 
1370 	sent_pkts = 0;
1371 
1372 	/* Do MTU probing. */
1373 	if ((result = tcp_mtu_probe(sk)) == 0) {
1374 		return 0;
1375 	} else if (result > 0) {
1376 		sent_pkts = 1;
1377 	}
1378 
1379 	while ((skb = sk->sk_send_head)) {
1380 		unsigned int limit;
1381 
1382 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1383 		BUG_ON(!tso_segs);
1384 
1385 		cwnd_quota = tcp_cwnd_test(tp, skb);
1386 		if (!cwnd_quota)
1387 			break;
1388 
1389 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1390 			break;
1391 
1392 		if (tso_segs == 1) {
1393 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1394 						     (tcp_skb_is_last(sk, skb) ?
1395 						      nonagle : TCP_NAGLE_PUSH))))
1396 				break;
1397 		} else {
1398 			if (tcp_tso_should_defer(sk, tp, skb))
1399 				break;
1400 		}
1401 
1402 		limit = mss_now;
1403 		if (tso_segs > 1) {
1404 			limit = tcp_window_allows(tp, skb,
1405 						  mss_now, cwnd_quota);
1406 
1407 			if (skb->len < limit) {
1408 				unsigned int trim = skb->len % mss_now;
1409 
1410 				if (trim)
1411 					limit = skb->len - trim;
1412 			}
1413 		}
1414 
1415 		if (skb->len > limit &&
1416 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1417 			break;
1418 
1419 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1420 
1421 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
1422 			break;
1423 
1424 		/* Advance the send_head.  This one is sent out.
1425 		 * This call will increment packets_out.
1426 		 */
1427 		update_send_head(sk, tp, skb);
1428 
1429 		tcp_minshall_update(tp, mss_now, skb);
1430 		sent_pkts++;
1431 	}
1432 
1433 	if (likely(sent_pkts)) {
1434 		tcp_cwnd_validate(sk, tp);
1435 		return 0;
1436 	}
1437 	return !tp->packets_out && sk->sk_send_head;
1438 }
1439 
1440 /* Push out any pending frames which were held back due to
1441  * TCP_CORK or attempt at coalescing tiny packets.
1442  * The socket must be locked by the caller.
1443  */
1444 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1445 			       unsigned int cur_mss, int nonagle)
1446 {
1447 	struct sk_buff *skb = sk->sk_send_head;
1448 
1449 	if (skb) {
1450 		if (tcp_write_xmit(sk, cur_mss, nonagle))
1451 			tcp_check_probe_timer(sk, tp);
1452 	}
1453 }
1454 
1455 /* Send _single_ skb sitting at the send head. This function requires
1456  * true push pending frames to set up the probe timer etc.
1457  */
1458 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1459 {
1460 	struct tcp_sock *tp = tcp_sk(sk);
1461 	struct sk_buff *skb = sk->sk_send_head;
1462 	unsigned int tso_segs, cwnd_quota;
1463 
1464 	BUG_ON(!skb || skb->len < mss_now);
1465 
1466 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1467 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1468 
1469 	if (likely(cwnd_quota)) {
1470 		unsigned int limit;
1471 
1472 		BUG_ON(!tso_segs);
1473 
1474 		limit = mss_now;
1475 		if (tso_segs > 1) {
1476 			limit = tcp_window_allows(tp, skb,
1477 						  mss_now, cwnd_quota);
1478 
1479 			if (skb->len < limit) {
1480 				unsigned int trim = skb->len % mss_now;
1481 
1482 				if (trim)
1483 					limit = skb->len - trim;
1484 			}
1485 		}
1486 
1487 		if (skb->len > limit &&
1488 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1489 			return;
1490 
1491 		/* Send it out now. */
1492 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1493 
1494 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1495 			update_send_head(sk, tp, skb);
1496 			tcp_cwnd_validate(sk, tp);
1497 			return;
1498 		}
1499 	}
1500 }
1501 
1502 /* This function returns the amount that we can raise the
1503  * usable window based on the following constraints
1504  *
1505  * 1. The window can never be shrunk once it is offered (RFC 793)
1506  * 2. We limit memory per socket
1507  *
1508  * RFC 1122:
1509  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1510  *  RCV.NXT + RCV.WND fixed until:
1511  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1512  *
1513  * i.e. don't raise the right edge of the window until you can raise
1514  * it at least MSS bytes.
1515  *
1516  * Unfortunately, the recommended algorithm breaks header prediction,
1517  * since header prediction assumes th->window stays fixed.
1518  *
1519  * Strictly speaking, keeping th->window fixed violates the receiver
1520  * side SWS prevention criteria. The problem is that under this rule
1521  * a stream of single byte packets will cause the right side of the
1522  * window to always advance by a single byte.
1523  *
1524  * Of course, if the sender implements sender side SWS prevention
1525  * then this will not be a problem.
1526  *
1527  * BSD seems to make the following compromise:
1528  *
1529  *	If the free space is less than 1/4 of the maximum
1530  *	space available and the free space is less than 1/2 mss,
1531  *	then set the window to 0.
1532  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1533  *	Otherwise, just prevent the window from shrinking
1534  *	and from being larger than the largest representable value.
1535  *
1536  * This prevents incremental opening of the window in the regime
1537  * where TCP is limited by the speed of the reader side taking
1538  * data out of the TCP receive queue. It does nothing about
1539  * those cases where the window is constrained on the sender side
1540  * because the pipeline is full.
1541  *
1542  * BSD also seems to "accidentally" limit itself to windows that are a
1543  * multiple of MSS, at least until the free space gets quite small.
1544  * This would appear to be a side effect of the mbuf implementation.
1545  * Combining these two algorithms results in the observed behavior
1546  * of having a fixed window size at almost all times.
1547  *
1548  * Below we obtain similar behavior by forcing the offered window to
1549  * a multiple of the mss when it is feasible to do so.
1550  *
1551  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1552  * Regular options like TIMESTAMP are taken into account.
1553  */
1554 u32 __tcp_select_window(struct sock *sk)
1555 {
1556 	struct inet_connection_sock *icsk = inet_csk(sk);
1557 	struct tcp_sock *tp = tcp_sk(sk);
1558 	/* MSS for the peer's data.  Previous versions used mss_clamp
1559 	 * here.  I don't know if the value based on our guesses
1560 	 * of the peer's MSS is better for performance.  It's more correct
1561 	 * but may be worse for performance because of rcv_mss
1562 	 * fluctuations.  --SAW  1998/11/1
1563 	 */
1564 	int mss = icsk->icsk_ack.rcv_mss;
1565 	int free_space = tcp_space(sk);
1566 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1567 	int window;
1568 
1569 	if (mss > full_space)
1570 		mss = full_space;
1571 
1572 	if (free_space < full_space/2) {
1573 		icsk->icsk_ack.quick = 0;
1574 
1575 		if (tcp_memory_pressure)
1576 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
1577 
1578 		if (free_space < mss)
1579 			return 0;
1580 	}
1581 
1582 	if (free_space > tp->rcv_ssthresh)
1583 		free_space = tp->rcv_ssthresh;
1584 
1585 	/* Don't do rounding if we are using window scaling, since the
1586 	 * scaled window will not line up with the MSS boundary anyway.
1587 	 */
1588 	window = tp->rcv_wnd;
1589 	if (tp->rx_opt.rcv_wscale) {
1590 		window = free_space;
1591 
1592 		/* Advertise enough space so that it won't get scaled away.
1593 		 * Important case: prevent zero window announcement if
1594 		 * 1<<rcv_wscale > mss.
1595 		 */
1596 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1597 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1598 				  << tp->rx_opt.rcv_wscale);
1599 	} else {
1600 		/* Get the largest window that is a nice multiple of mss.
1601 		 * Window clamp already applied above.
1602 		 * If our current window offering is within 1 mss of the
1603 		 * free space we just keep it. This prevents the divide
1604 		 * and multiply from happening most of the time.
1605 		 * We also don't do any window rounding when the free space
1606 		 * is too small.
1607 		 */
1608 		if (window <= free_space - mss || window > free_space)
1609 			window = (free_space/mss)*mss;
1610 	}
1611 
1612 	return window;
1613 }
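/* Illustrative example (not part of the original source): without window
 * scaling, with mss == 1460, free_space == 10000 and a current rcv_wnd of
 * 4380, the rounding condition above holds (4380 <= 10000 - 1460), so the
 * offered window becomes (10000 / 1460) * 1460 == 8760, the largest
 * multiple of the mss that fits in the free space.
 */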
1614 
1615 /* Attempt to collapse two adjacent SKB's during retransmission. */
1616 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1617 {
1618 	struct tcp_sock *tp = tcp_sk(sk);
1619 	struct sk_buff *next_skb = skb->next;
1620 
1621 	/* The first test we must make is that neither of these two
1622 	 * SKB's are still referenced by someone else.
1623 	 */
1624 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
1625 		int skb_size = skb->len, next_skb_size = next_skb->len;
1626 		u16 flags = TCP_SKB_CB(skb)->flags;
1627 
1628 		/* Also punt if next skb has been SACK'd. */
1629 		if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1630 			return;
1631 
1632 		/* Next skb is out of window. */
1633 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
1634 			return;
1635 
1636 		/* Punt if not enough space exists in the first SKB for
1637 		 * the data in the second, or the total combined payload
1638 		 * would exceed the MSS.
1639 		 */
1640 		if ((next_skb_size > skb_tailroom(skb)) ||
1641 		    ((skb_size + next_skb_size) > mss_now))
1642 			return;
1643 
1644 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
1645 		       tcp_skb_pcount(next_skb) != 1);
1646 
1647 		/* changing transmit queue under us so clear hints */
1648 		clear_all_retrans_hints(tp);
1649 
1650 		/* Ok.	We will be able to collapse the packet. */
1651 		__skb_unlink(next_skb, &sk->sk_write_queue);
1652 
1653 		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1654 
1655 		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1656 			skb->ip_summed = CHECKSUM_PARTIAL;
1657 
1658 		if (skb->ip_summed != CHECKSUM_PARTIAL)
1659 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1660 
1661 		/* Update sequence range on original skb. */
1662 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1663 
1664 		/* Merge over control information. */
1665 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1666 		TCP_SKB_CB(skb)->flags = flags;
1667 
1668 		/* All done, get rid of second SKB and account for it so
1669 		 * packet counting does not break.
1670 		 */
1671 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
1672 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
1673 			tp->retrans_out -= tcp_skb_pcount(next_skb);
1674 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
1675 			tp->lost_out -= tcp_skb_pcount(next_skb);
1676 			tp->left_out -= tcp_skb_pcount(next_skb);
1677 		}
1678 		/* Reno case is special. Sigh... */
1679 		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
1680 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
1681 			tp->left_out -= tcp_skb_pcount(next_skb);
1682 		}
1683 
1684 		/* Not quite right: it can be > snd.fack, but
1685 		 * it is better to underestimate fackets.
1686 		 */
1687 		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
1688 		tcp_packets_out_dec(tp, next_skb);
1689 		sk_stream_free_skb(sk, next_skb);
1690 	}
1691 }
1692 
1693 /* Do a simple retransmit without using the backoff mechanisms in
1694  * tcp_timer. This is used for path mtu discovery.
1695  * The socket is already locked here.
1696  */
1697 void tcp_simple_retransmit(struct sock *sk)
1698 {
1699 	const struct inet_connection_sock *icsk = inet_csk(sk);
1700 	struct tcp_sock *tp = tcp_sk(sk);
1701 	struct sk_buff *skb;
1702 	unsigned int mss = tcp_current_mss(sk, 0);
1703 	int lost = 0;
1704 
1705 	sk_stream_for_retrans_queue(skb, sk) {
1706 		if (skb->len > mss &&
1707 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1708 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1709 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1710 				tp->retrans_out -= tcp_skb_pcount(skb);
1711 			}
1712 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
1713 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1714 				tp->lost_out += tcp_skb_pcount(skb);
1715 				lost = 1;
1716 			}
1717 		}
1718 	}
1719 
1720 	clear_all_retrans_hints(tp);
1721 
1722 	if (!lost)
1723 		return;
1724 
1725 	tcp_sync_left_out(tp);
1726 
1727 	/* Don't muck with the congestion window here.
1728 	 * Reason is that we do not increase amount of _data_
1729 	 * in network, but units changed and effective
1730 	 * cwnd/ssthresh really reduced now.
1731 	 */
1732 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
1733 		tp->high_seq = tp->snd_nxt;
1734 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
1735 		tp->prior_ssthresh = 0;
1736 		tp->undo_marker = 0;
1737 		tcp_set_ca_state(sk, TCP_CA_Loss);
1738 	}
1739 	tcp_xmit_retransmit_queue(sk);
1740 }
1741 
1742 /* This retransmits one SKB.  Policy decisions and retransmit queue
1743  * state updates are done by the caller.  Returns non-zero if an
1744  * error occurred which prevented the send.
1745  */
1746 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1747 {
1748 	struct tcp_sock *tp = tcp_sk(sk);
1749 	struct inet_connection_sock *icsk = inet_csk(sk);
1750 	unsigned int cur_mss = tcp_current_mss(sk, 0);
1751 	int err;
1752 
1753 	/* Inconclusive MTU probe */
1754 	if (icsk->icsk_mtup.probe_size) {
1755 		icsk->icsk_mtup.probe_size = 0;
1756 	}
1757 
1758 	/* Do not send more than we have queued. 1/4 is reserved for possible
1759 	 * copying overhead: fragmentation, tunneling, mangling etc.
1760 	 */
1761 	if (atomic_read(&sk->sk_wmem_alloc) >
1762 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1763 		return -EAGAIN;
1764 
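	/* Part of this skb may already have been ACKed; trim the
	 * acknowledged prefix so we only retransmit the unacked tail.
	 */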
1765 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1766 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1767 			BUG();
1768 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1769 			return -ENOMEM;
1770 	}
1771 
1772 	/* If the receiver has shrunk its window and skb falls outside
1773 	 * the new window, do not retransmit it.  The exception is when
1774 	 * the window has shrunk to zero, in which case our retransmit
1775 	 * serves as a zero window probe.
1776 	 */
1777 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
1778 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
1779 		return -EAGAIN;
1780 
1781 	if (skb->len > cur_mss) {
1782 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1783 			return -ENOMEM; /* We'll try again later. */
1784 	}
1785 
1786 	/* Collapse two adjacent packets if worthwhile and we can. */
1787 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1788 	   (skb->len < (cur_mss >> 1)) &&
1789 	   (skb->next != sk->sk_send_head) &&
1790 	   (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
1791 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
1792 	   (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
1793 	   (sysctl_tcp_retrans_collapse != 0))
1794 		tcp_retrans_try_collapse(sk, skb, cur_mss);
1795 
1796 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1797 		return -EHOSTUNREACH; /* Routing failure or similar. */
1798 
1799 	/* Some Solaris stacks overoptimize and ignore the FIN on a
1800 	 * retransmit when old data is attached.  So strip it off
1801 	 * since it is cheap to do so and saves bytes on the network.
1802 	 */
1803 	if (skb->len > 0 &&
1804 	   (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1805 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1806 		if (!pskb_trim(skb, 0)) {
1807 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
1808 			skb_shinfo(skb)->gso_segs = 1;
1809 			skb_shinfo(skb)->gso_size = 0;
1810 			skb_shinfo(skb)->gso_type = 0;
1811 			skb->ip_summed = CHECKSUM_NONE;
1812 			skb->csum = 0;
1813 		}
1814 	}
1815 
1816 	/* Make a copy if the clone we made for the first transmission
1817 	 * is still in somebody's hands, else make a clone.
1818 	 */
1819 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1820 
1821 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1822 
1823 	if (err == 0) {
1824 		/* Update global TCP statistics. */
1825 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
1826 
1827 		tp->total_retrans++;
1828 
1829 #if FASTRETRANS_DEBUG > 0
1830 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1831 			if (net_ratelimit())
1832 				printk(KERN_DEBUG "retrans_out leaked.\n");
1833 		}
1834 #endif
1835 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1836 		tp->retrans_out += tcp_skb_pcount(skb);
1837 
1838 		/* Save stamp of the first retransmit. */
1839 		if (!tp->retrans_stamp)
1840 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1841 
1842 		tp->undo_retrans++;
1843 
1844 		/* snd_nxt is stored to detect loss of retransmitted segment,
1845 		 * see tcp_input.c tcp_sacktag_write_queue().
1846 		 */
1847 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1848 	}
1849 	return err;
1850 }
1851 
1852 /* This gets called after a retransmit timeout, and the initially
1853  * retransmitted data is acknowledged.  It tries to continue
1854  * resending the rest of the retransmit queue, until either
1855  * we've sent it all or the congestion window limit is reached.
1856  * If doing SACK, the first ACK which comes back for a timeout
1857  * based retransmit packet might feed us FACK information again.
1858  * If so, we use it to avoid unnecessary retransmissions.
1859  */
1860 void tcp_xmit_retransmit_queue(struct sock *sk)
1861 {
1862 	const struct inet_connection_sock *icsk = inet_csk(sk);
1863 	struct tcp_sock *tp = tcp_sk(sk);
1864 	struct sk_buff *skb;
1865 	int packet_cnt;
1866 
1867 	if (tp->retransmit_skb_hint) {
1868 		skb = tp->retransmit_skb_hint;
1869 		packet_cnt = tp->retransmit_cnt_hint;
1870 	} else {
1871 		skb = sk->sk_write_queue.next;
1872 		packet_cnt = 0;
1873 	}
1874 
1875 	/* First pass: retransmit lost packets. */
1876 	if (tp->lost_out) {
1877 		sk_stream_for_retrans_queue_from(skb, sk) {
1878 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
1879 
1880 			/* we could do better than to assign each time */
1881 			tp->retransmit_skb_hint = skb;
1882 			tp->retransmit_cnt_hint = packet_cnt;
1883 
1884 			/* Assume this retransmit will generate
1885 			 * only one packet for congestion window
1886 			 * calculation purposes.  This works because
1887 			 * tcp_retransmit_skb() will chop up the
1888 			 * packet to be MSS sized and all the
1889 			 * packet counting works out.
1890 			 */
1891 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1892 				return;
1893 
1894 			if (sacked & TCPCB_LOST) {
1895 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1896 					if (tcp_retransmit_skb(sk, skb)) {
1897 						tp->retransmit_skb_hint = NULL;
1898 						return;
1899 					}
1900 					if (icsk->icsk_ca_state != TCP_CA_Loss)
1901 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1902 					else
1903 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1904 
1905 					if (skb ==
1906 					    skb_peek(&sk->sk_write_queue))
1907 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1908 									  inet_csk(sk)->icsk_rto,
1909 									  TCP_RTO_MAX);
1910 				}
1911 
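				/* packet_cnt tracks how many lost-marked
				 * segments we have walked; once it reaches
				 * lost_out there is nothing left to do in
				 * this pass.
				 */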
1912 				packet_cnt += tcp_skb_pcount(skb);
1913 				if (packet_cnt >= tp->lost_out)
1914 					break;
1915 			}
1916 		}
1917 	}
1918 
1919 	/* OK, demanded retransmission is finished. */
1920 
1921 	/* Forward retransmissions are possible only during Recovery. */
1922 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
1923 		return;
1924 
1925 	/* No forward retransmissions in Reno are possible. */
1926 	/* Forward retransmissions are not possible in Reno. */
1927 		return;
1928 
1929  * Yeah, we have to make a difficult choice between forward transmission
1930 	 * and retransmission... Both ways have their merits...
1931 	 *
1932  * For now we do not retransmit anything while we still have new
1933  * segments to send.
1934 	 */
1935 
1936 	if (tcp_may_send_now(sk, tp))
1937 		return;
1938 
1939 	if (tp->forward_skb_hint) {
1940 		skb = tp->forward_skb_hint;
1941 		packet_cnt = tp->forward_cnt_hint;
1942 	} else {
1943 		skb = sk->sk_write_queue.next;
1944 		packet_cnt = 0;
1945 	}
1946 
1947 	sk_stream_for_retrans_queue_from(skb, sk) {
1948 		tp->forward_cnt_hint = packet_cnt;
1949 		tp->forward_skb_hint = skb;
1950 
1951 		/* Similar to the retransmit loop above we
1952 		 * can pretend that the retransmitted SKB
1953 		 * we send out here will be composed of one
1954 		 * real MSS sized packet because tcp_retransmit_skb()
1955 		 * will fragment it if necessary.
1956 		 */
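		/* Never forward-retransmit past the highest SACKed segment;
		 * fackets_out approximates that boundary.
		 */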
1957 		if (++packet_cnt > tp->fackets_out)
1958 			break;
1959 
1960 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1961 			break;
1962 
1963 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
1964 			continue;
1965 
1966 		/* Ok, retransmit it. */
1967 		if (tcp_retransmit_skb(sk, skb)) {
1968 			tp->forward_skb_hint = NULL;
1969 			break;
1970 		}
1971 
1972 		if (skb == skb_peek(&sk->sk_write_queue))
1973 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1974 						  inet_csk(sk)->icsk_rto,
1975 						  TCP_RTO_MAX);
1976 
1977 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
1978 	}
1979 }
1980 
1981 
1982 /* Send a FIN.  The caller locks the socket for us.  This cannot be
1983  * allowed to fail queueing a FIN frame under any circumstances.
1984  */
1985 void tcp_send_fin(struct sock *sk)
1986 {
1987 	struct tcp_sock *tp = tcp_sk(sk);
1988 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
1989 	int mss_now;
1990 
1991 	/* Optimization, tack on the FIN if we have a queue of
1992 	 * unsent frames.  But be careful about outgoing SACKs
1993 	 * and IP options.
1994 	 */
1995 	mss_now = tcp_current_mss(sk, 1);
1996 
1997 	if (sk->sk_send_head != NULL) {
1998 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
1999 		TCP_SKB_CB(skb)->end_seq++;
2000 		tp->write_seq++;
2001 	} else {
2002 		/* Socket is locked, keep trying until memory is available. */
2003 		for (;;) {
2004 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
2005 			if (skb)
2006 				break;
2007 			yield();
2008 		}
2009 
2010 		/* Reserve space for headers and prepare control bits. */
2011 		skb_reserve(skb, MAX_TCP_HEADER);
2012 		skb->csum = 0;
2013 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2014 		TCP_SKB_CB(skb)->sacked = 0;
2015 		skb_shinfo(skb)->gso_segs = 1;
2016 		skb_shinfo(skb)->gso_size = 0;
2017 		skb_shinfo(skb)->gso_type = 0;
2018 
2019 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2020 		TCP_SKB_CB(skb)->seq = tp->write_seq;
2021 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2022 		tcp_queue_skb(sk, skb);
2023 	}
2024 	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
2025 }
2026 
2027 /* We get here when a process closes a file descriptor (either due to
2028  * an explicit close() or as a byproduct of exit()'ing) and there
2029  * was unread data in the receive queue.  This behavior is recommended
2030  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
2031  */
2032 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2033 {
2034 	struct tcp_sock *tp = tcp_sk(sk);
2035 	struct sk_buff *skb;
2036 
2037 	/* NOTE: No TCP options attached and we never retransmit this. */
2038 	skb = alloc_skb(MAX_TCP_HEADER, priority);
2039 	if (!skb) {
2040 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2041 		return;
2042 	}
2043 
2044 	/* Reserve space for headers and prepare control bits. */
2045 	skb_reserve(skb, MAX_TCP_HEADER);
2046 	skb->csum = 0;
2047 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2048 	TCP_SKB_CB(skb)->sacked = 0;
2049 	skb_shinfo(skb)->gso_segs = 1;
2050 	skb_shinfo(skb)->gso_size = 0;
2051 	skb_shinfo(skb)->gso_type = 0;
2052 
2053 	/* Send it off. */
2054 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
2055 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2056 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2057 	if (tcp_transmit_skb(sk, skb, 0, priority))
2058 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2059 }
2060 
2061 /* WARNING: This routine must only be called when we have already sent
2062  * a SYN packet that crossed the incoming SYN that caused this routine
2063  * to get called. If this assumption fails then the initial rcv_wnd
2064  * and rcv_wscale values will not be correct.
2065  */
2066 int tcp_send_synack(struct sock *sk)
2067 {
2068 	struct sk_buff *skb;
2069 
2070 	skb = skb_peek(&sk->sk_write_queue);
2071 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
2072 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2073 		return -EFAULT;
2074 	}
2075 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
2076 		if (skb_cloned(skb)) {
2077 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2078 			if (nskb == NULL)
2079 				return -ENOMEM;
2080 			__skb_unlink(skb, &sk->sk_write_queue);
2081 			skb_header_release(nskb);
2082 			__skb_queue_head(&sk->sk_write_queue, nskb);
2083 			sk_stream_free_skb(sk, skb);
2084 			sk_charge_skb(sk, nskb);
2085 			skb = nskb;
2086 		}
2087 
2088 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2089 		TCP_ECN_send_synack(tcp_sk(sk), skb);
2090 	}
2091 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2092 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2093 }
2094 
2095 /*
2096  * Prepare a SYN-ACK.
2097  */
2098 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2099 				 struct request_sock *req)
2100 {
2101 	struct inet_request_sock *ireq = inet_rsk(req);
2102 	struct tcp_sock *tp = tcp_sk(sk);
2103 	struct tcphdr *th;
2104 	int tcp_header_size;
2105 	struct sk_buff *skb;
2106 #ifdef CONFIG_TCP_MD5SIG
2107 	struct tcp_md5sig_key *md5;
2108 	__u8 *md5_hash_location;
2109 #endif
2110 
2111 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2112 	if (skb == NULL)
2113 		return NULL;
2114 
2115 	/* Reserve space for headers. */
2116 	skb_reserve(skb, MAX_TCP_HEADER);
2117 
2118 	skb->dst = dst_clone(dst);
2119 
2120 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
2121 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
2122 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
2123 			   /* SACK_PERM is in the place of NOP NOP of TS */
2124 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2125 
2126 #ifdef CONFIG_TCP_MD5SIG
2127 	/* Are we doing MD5 on this segment? If so - make room for it */
2128 	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2129 	if (md5)
2130 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2131 #endif
2132 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
2133 
2134 	memset(th, 0, sizeof(struct tcphdr));
2135 	th->syn = 1;
2136 	th->ack = 1;
2137 	TCP_ECN_make_synack(req, th);
2138 	th->source = inet_sk(sk)->sport;
2139 	th->dest = ireq->rmt_port;
2140 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2141 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2142 	TCP_SKB_CB(skb)->sacked = 0;
2143 	skb_shinfo(skb)->gso_segs = 1;
2144 	skb_shinfo(skb)->gso_size = 0;
2145 	skb_shinfo(skb)->gso_type = 0;
2146 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2147 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2148 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2149 		__u8 rcv_wscale;
2150 		/* Set this up on the first call only */
2151 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2152 		/* tcp_full_space because it is guaranteed to be the first packet */
2153 		tcp_select_initial_window(tcp_full_space(sk),
2154 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2155 			&req->rcv_wnd,
2156 			&req->window_clamp,
2157 			ireq->wscale_ok,
2158 			&rcv_wscale);
2159 		ireq->rcv_wscale = rcv_wscale;
2160 	}
2161 
2162 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2163 	th->window = htons(min(req->rcv_wnd, 65535U));
2164 
2165 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2166 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2167 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2168 			      TCP_SKB_CB(skb)->when,
2169 			      req->ts_recent,
2170 			      (
2171 #ifdef CONFIG_TCP_MD5SIG
2172 			       md5 ? &md5_hash_location :
2173 #endif
2174 			       NULL)
2175 			      );
2176 
2177 	skb->csum = 0;
2178 	th->doff = (tcp_header_size >> 2);
2179 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
2180 
2181 #ifdef CONFIG_TCP_MD5SIG
2182 	/* Okay, we have all we need - do the md5 hash if needed */
2183 	if (md5) {
2184 		tp->af_specific->calc_md5_hash(md5_hash_location,
2185 					       md5,
2186 					       NULL, dst, req,
2187 					       skb->h.th, sk->sk_protocol,
2188 					       skb->len);
2189 	}
2190 #endif
2191 
2192 	return skb;
2193 }
2194 
2195 /*
2196  * Do all connect socket setups that can be done AF independent.
2197  */
2198 static void tcp_connect_init(struct sock *sk)
2199 {
2200 	struct dst_entry *dst = __sk_dst_get(sk);
2201 	struct tcp_sock *tp = tcp_sk(sk);
2202 	__u8 rcv_wscale;
2203 
2204 	/* We'll fix this up when we get a response from the other end.
2205 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2206 	 */
2207 	tp->tcp_header_len = sizeof(struct tcphdr) +
2208 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2209 
2210 #ifdef CONFIG_TCP_MD5SIG
2211 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2212 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2213 #endif
2214 
2215 	/* If the user set TCP_MAXSEG, use it as the mss clamp */
2216 	if (tp->rx_opt.user_mss)
2217 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2218 	tp->max_window = 0;
2219 	tcp_mtup_init(sk);
2220 	tcp_sync_mss(sk, dst_mtu(dst));
2221 
2222 	if (!tp->window_clamp)
2223 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2224 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2225 	tcp_initialize_rcv_mss(sk);
2226 
2227 	tcp_select_initial_window(tcp_full_space(sk),
2228 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2229 				  &tp->rcv_wnd,
2230 				  &tp->window_clamp,
2231 				  sysctl_tcp_window_scaling,
2232 				  &rcv_wscale);
2233 
2234 	tp->rx_opt.rcv_wscale = rcv_wscale;
2235 	tp->rcv_ssthresh = tp->rcv_wnd;
2236 
2237 	sk->sk_err = 0;
2238 	sock_reset_flag(sk, SOCK_DONE);
2239 	tp->snd_wnd = 0;
2240 	tcp_init_wl(tp, tp->write_seq, 0);
2241 	tp->snd_una = tp->write_seq;
2242 	tp->snd_sml = tp->write_seq;
2243 	tp->rcv_nxt = 0;
2244 	tp->rcv_wup = 0;
2245 	tp->copied_seq = 0;
2246 
2247 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2248 	inet_csk(sk)->icsk_retransmits = 0;
2249 	tcp_clear_retrans(tp);
2250 }
2251 
2252 /*
2253  * Build a SYN and send it off.
2254  */
2255 int tcp_connect(struct sock *sk)
2256 {
2257 	struct tcp_sock *tp = tcp_sk(sk);
2258 	struct sk_buff *buff;
2259 
2260 	tcp_connect_init(sk);
2261 
2262 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2263 	if (unlikely(buff == NULL))
2264 		return -ENOBUFS;
2265 
2266 	/* Reserve space for headers. */
2267 	skb_reserve(buff, MAX_TCP_HEADER);
2268 
2269 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2270 	TCP_ECN_send_syn(sk, tp, buff);
2271 	TCP_SKB_CB(buff)->sacked = 0;
2272 	skb_shinfo(buff)->gso_segs = 1;
2273 	skb_shinfo(buff)->gso_size = 0;
2274 	skb_shinfo(buff)->gso_type = 0;
2275 	buff->csum = 0;
2276 	tp->snd_nxt = tp->write_seq;
2277 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
2278 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2279 
2280 	/* Send it off. */
2281 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2282 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2283 	skb_header_release(buff);
2284 	__skb_queue_tail(&sk->sk_write_queue, buff);
2285 	sk_charge_skb(sk, buff);
2286 	tp->packets_out += tcp_skb_pcount(buff);
2287 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2288 
2289 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2290 	 * in order to make this packet get counted in tcpOutSegs.
2291 	 */
2292 	tp->snd_nxt = tp->write_seq;
2293 	tp->pushed_seq = tp->write_seq;
2294 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2295 
2296 	/* Timer for repeating the SYN until an answer. */
2297 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2298 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2299 	return 0;
2300 }
2301 
2302 /* Send out a delayed ack, the caller does the policy checking
2303  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
2304  * for details.
2305  */
2306 void tcp_send_delayed_ack(struct sock *sk)
2307 {
2308 	struct inet_connection_sock *icsk = inet_csk(sk);
2309 	int ato = icsk->icsk_ack.ato;
2310 	unsigned long timeout;
2311 
2312 	if (ato > TCP_DELACK_MIN) {
2313 		const struct tcp_sock *tp = tcp_sk(sk);
2314 		int max_ato = HZ/2;
2315 
2316 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2317 			max_ato = TCP_DELACK_MAX;
2318 
2319 		/* Slow path, intersegment interval is "high". */
2320 
2321 		/* If some rtt estimate is known, use it to bound delayed ack.
2322 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2323 		 * directly.
2324 		 */
2325 		if (tp->srtt) {
2326 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
2327 
2328 			if (rtt < max_ato)
2329 				max_ato = rtt;
2330 		}
2331 
2332 		ato = min(ato, max_ato);
2333 	}
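	/* When an RTT estimate exists, ato is now at most one smoothed RTT
	 * (or TCP_DELACK_MIN if the RTT is smaller than that), so the
	 * delayed ACK should go out well before the peer's RTO can fire.
	 */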
2334 
2335 	/* Stay within the limit we were given */
2336 	timeout = jiffies + ato;
2337 
2338 	/* Use the new timeout only if there wasn't an older one already pending. */
2339 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2340 		/* If delack timer was blocked or is about to expire,
2341 		 * send ACK now.
2342 		 */
2343 		if (icsk->icsk_ack.blocked ||
2344 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2345 			tcp_send_ack(sk);
2346 			return;
2347 		}
2348 
2349 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2350 			timeout = icsk->icsk_ack.timeout;
2351 	}
2352 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2353 	icsk->icsk_ack.timeout = timeout;
2354 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2355 }
2356 
2357 /* This routine sends an ack and also updates the window. */
2358 void tcp_send_ack(struct sock *sk)
2359 {
2360 	/* If we have been reset, we may not send again. */
2361 	if (sk->sk_state != TCP_CLOSE) {
2362 		struct tcp_sock *tp = tcp_sk(sk);
2363 		struct sk_buff *buff;
2364 
2365 		/* We are not putting this on the write queue, so
2366 		 * tcp_transmit_skb() will set the ownership to this
2367 		 * sock.
2368 		 */
2369 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2370 		if (buff == NULL) {
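			/* Allocation failed: fall back to scheduling a
			 * delayed ACK so the ACK is not lost entirely.
			 */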
2371 			inet_csk_schedule_ack(sk);
2372 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2373 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2374 						  TCP_DELACK_MAX, TCP_RTO_MAX);
2375 			return;
2376 		}
2377 
2378 		/* Reserve space for headers and prepare control bits. */
2379 		skb_reserve(buff, MAX_TCP_HEADER);
2380 		buff->csum = 0;
2381 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2382 		TCP_SKB_CB(buff)->sacked = 0;
2383 		skb_shinfo(buff)->gso_segs = 1;
2384 		skb_shinfo(buff)->gso_size = 0;
2385 		skb_shinfo(buff)->gso_type = 0;
2386 
2387 		/* Send it off, this clears delayed acks for us. */
2388 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2389 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2390 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2391 	}
2392 }
2393 
2394 /* This routine sends a packet with an out of date sequence
2395  * number. It assumes the other end will try to ack it.
2396  *
2397  * Question: what should we do while in urgent mode?
2398  * 4.4BSD forces sending a single byte of data. We cannot send
2399  * out of window data, because we have SND.NXT==SND.MAX...
2400  *
2401  * Current solution: send TWO zero-length segments in urgent mode:
2402  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
2403  * out-of-date one with SND.UNA-1 to probe the window.
2404  */
2405 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2406 {
2407 	struct tcp_sock *tp = tcp_sk(sk);
2408 	struct sk_buff *skb;
2409 
2410 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
2411 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2412 	if (skb == NULL)
2413 		return -1;
2414 
2415 	/* Reserve space for headers and set control bits. */
2416 	skb_reserve(skb, MAX_TCP_HEADER);
2417 	skb->csum = 0;
2418 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2419 	TCP_SKB_CB(skb)->sacked = urgent;
2420 	skb_shinfo(skb)->gso_segs = 1;
2421 	skb_shinfo(skb)->gso_size = 0;
2422 	skb_shinfo(skb)->gso_type = 0;
2423 
2424 	/* Use a previous sequence.  This should cause the other
2425 	 * end to send an ack.  Don't queue or clone SKB, just
2426 	 * send it.
2427 	 */
2428 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
2429 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2430 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2431 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2432 }
2433 
2434 int tcp_write_wakeup(struct sock *sk)
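/* Force a segment onto the wire to make the peer ACK: either push the next
 * queued segment, fragmenting it down to what the current window allows, or,
 * if nothing is queued, send a zero-length probe with an old sequence number.
 */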
2435 {
2436 	if (sk->sk_state != TCP_CLOSE) {
2437 		struct tcp_sock *tp = tcp_sk(sk);
2438 		struct sk_buff *skb;
2439 
2440 		if ((skb = sk->sk_send_head) != NULL &&
2441 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
2442 			int err;
2443 			unsigned int mss = tcp_current_mss(sk, 0);
2444 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
2445 
2446 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2447 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2448 
2449 			/* We are probing the opening of a window
2450 			 * but the window size is != 0; this must have been
2451 			 * the result of sender-side SWS avoidance.
2452 			 */
2453 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2454 			    skb->len > mss) {
2455 				seg_size = min(seg_size, mss);
2456 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2457 				if (tcp_fragment(sk, skb, seg_size, mss))
2458 					return -1;
2459 			} else if (!tcp_skb_pcount(skb))
2460 				tcp_set_skb_tso_segs(sk, skb, mss);
2461 
2462 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2463 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2464 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2465 			if (!err) {
2466 				update_send_head(sk, tp, skb);
2467 			}
2468 			return err;
2469 		} else {
2470 			if (tp->urg_mode &&
2471 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
2472 				tcp_xmit_probe_skb(sk, TCPCB_URG);
2473 			return tcp_xmit_probe_skb(sk, 0);
2474 		}
2475 	}
2476 	return -1;
2477 }
2478 
2479 /* A window probe timeout has occurred.  If the window is not closed,
2480  * send a partial packet, else send a zero window probe.
2481  */
2482 void tcp_send_probe0(struct sock *sk)
2483 {
2484 	struct inet_connection_sock *icsk = inet_csk(sk);
2485 	struct tcp_sock *tp = tcp_sk(sk);
2486 	int err;
2487 
2488 	err = tcp_write_wakeup(sk);
2489 
2490 	if (tp->packets_out || !sk->sk_send_head) {
2491 		/* Cancel probe timer, if it is not required. */
2492 		icsk->icsk_probes_out = 0;
2493 		icsk->icsk_backoff = 0;
2494 		return;
2495 	}
2496 
2497 	if (err <= 0) {
2498 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2499 			icsk->icsk_backoff++;
2500 		icsk->icsk_probes_out++;
2501 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2502 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2503 					  TCP_RTO_MAX);
2504 	} else {
2505 		/* If the packet was not sent due to local congestion,
2506 		 * do not back off and do not remember icsk_probes_out.
2507 		 * Let local senders fight for local resources.
2508 		 *
2509 		 * Still use the backoff accumulated so far.
2510 		 */
2511 		if (!icsk->icsk_probes_out)
2512 			icsk->icsk_probes_out = 1;
2513 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2514 					  min(icsk->icsk_rto << icsk->icsk_backoff,
2515 					      TCP_RESOURCE_PROBE_INTERVAL),
2516 					  TCP_RTO_MAX);
2517 	}
2518 }
2519 
2520 EXPORT_SYMBOL(tcp_connect);
2521 EXPORT_SYMBOL(tcp_make_synack);
2522 EXPORT_SYMBOL(tcp_simple_retransmit);
2523 EXPORT_SYMBOL(tcp_sync_mss);
2524 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2525 EXPORT_SYMBOL(tcp_mtup_init);
2526