xref: /linux/net/ipv4/tcp_output.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
14  *		Florian La Roche, <flla@stud.uni-sb.de>
15  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
17  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
18  *		Matthew Dillon, <dillon@apollo.west.oic.com>
19  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20  *		Jorge Cwik, <jorge@laser.satlink.net>
21  */
22 
23 /*
24  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
25  *				:	Fragmentation on mtu decrease
26  *				:	Segment collapse on retransmit
27  *				:	AF independence
28  *
29  *		Linus Torvalds	:	send_delayed_ack
30  *		David S. Miller	:	Charge memory using the right skb
31  *					during syn/ack processing.
32  *		David S. Miller :	Output engine completely rewritten.
33  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
34  *		Cacophonix Gaul :	draft-minshall-nagle-01
35  *		J Hadi Salim	:	ECN support
36  *
37  */
38 
39 #include <net/tcp.h>
40 
41 #include <linux/compiler.h>
42 #include <linux/module.h>
43 #include <linux/smp_lock.h>
44 
45 /* People can turn this off for buggy TCPs found in printers etc. */
46 int sysctl_tcp_retrans_collapse = 1;
47 
48 /* People can turn this on to work with those rare, broken TCPs that
49  * interpret the window field as a signed quantity.
50  */
51 int sysctl_tcp_workaround_signed_windows = 0;
52 
53 /* This limits the percentage of the congestion window which we
54  * will allow a single TSO frame to consume.  Building TSO frames
55  * which are too large can cause TCP streams to be bursty.
56  */
57 int sysctl_tcp_tso_win_divisor = 3;
58 
59 int sysctl_tcp_mtu_probing = 0;
60 int sysctl_tcp_base_mss = 512;
61 
62 /* By default, RFC2861 behavior.  */
63 int sysctl_tcp_slow_start_after_idle = 1;
64 
65 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
66 			     struct sk_buff *skb)
67 {
68 	sk->sk_send_head = skb->next;
69 	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
70 		sk->sk_send_head = NULL;
71 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
72 	tcp_packets_out_inc(sk, tp, skb);
73 }
74 
75 /* SND.NXT, if window was not shrunk.
76  * If the window has been shrunk, what should we send? It is not clear at all.
77  * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
78  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
79  * invalid. OK, let's settle on this for now:
80  */
81 static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
82 {
83 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
84 		return tp->snd_nxt;
85 	else
86 		return tp->snd_una+tp->snd_wnd;
87 }
88 
89 /* Calculate mss to advertise in SYN segment.
90  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
91  *
92  * 1. It is independent of path mtu.
93  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
94  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
95  *    attached devices, because some buggy hosts are confused by
96  *    large MSS.
97  * 4. We do not implement 3; instead we advertise an MSS calculated from
98  *    the first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
99  *    This may be overridden via information stored in the routing table.
100  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
101  *    probably even Jumbo".
102  */
103 static __u16 tcp_advertise_mss(struct sock *sk)
104 {
105 	struct tcp_sock *tp = tcp_sk(sk);
106 	struct dst_entry *dst = __sk_dst_get(sk);
107 	int mss = tp->advmss;
108 
109 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
110 		mss = dst_metric(dst, RTAX_ADVMSS);
111 		tp->advmss = mss;
112 	}
113 
114 	return (__u16)mss;
115 }
116 
117 /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
118  * This is the first part of the cwnd validation mechanism. */
119 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
120 {
121 	struct tcp_sock *tp = tcp_sk(sk);
122 	s32 delta = tcp_time_stamp - tp->lsndtime;
123 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
124 	u32 cwnd = tp->snd_cwnd;
125 
126 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
127 
128 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
129 	restart_cwnd = min(restart_cwnd, cwnd);
130 
131 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
132 		cwnd >>= 1;
133 	tp->snd_cwnd = max(cwnd, restart_cwnd);
134 	tp->snd_cwnd_stamp = tcp_time_stamp;
135 	tp->snd_cwnd_used = 0;
136 }
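/* A rough sketch of the effect, with assumed values: the loop above halves
 * cwnd once for every full RTO the connection has sat idle, and the final
 * max() keeps the result from ending up below restart_cwnd.  E.g. with
 * snd_cwnd = 32, restart_cwnd = 2 and an idle time of about 3.5 * RTO,
 * cwnd is halved three times (32 -> 16 -> 8 -> 4) and snd_cwnd becomes
 * max(4, 2) = 4.
 */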
137 
138 static void tcp_event_data_sent(struct tcp_sock *tp,
139 				struct sk_buff *skb, struct sock *sk)
140 {
141 	struct inet_connection_sock *icsk = inet_csk(sk);
142 	const u32 now = tcp_time_stamp;
143 
144 	if (sysctl_tcp_slow_start_after_idle &&
145 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
146 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
147 
148 	tp->lsndtime = now;
149 
150 	/* If this is a reply sent within 'ato' of the last received
151 	 * packet, enter pingpong (delayed ACK) mode.
152 	 */
153 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
154 		icsk->icsk_ack.pingpong = 1;
155 }
156 
157 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
158 {
159 	tcp_dec_quickack_mode(sk, pkts);
160 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
161 }
162 
163 /* Determine a window scaling and initial window to offer.
164  * Based on the assumption that the given amount of space
165  * will be offered. Store the results in the tp structure.
166  * NOTE: for smooth operation initial space offering should
167  * be a multiple of mss if possible. We assume here that mss >= 1.
168  * This MUST be enforced by all callers.
169  */
170 void tcp_select_initial_window(int __space, __u32 mss,
171 			       __u32 *rcv_wnd, __u32 *window_clamp,
172 			       int wscale_ok, __u8 *rcv_wscale)
173 {
174 	unsigned int space = (__space < 0 ? 0 : __space);
175 
176 	/* If no clamp is set, set the clamp to the max possible scaled window */
177 	if (*window_clamp == 0)
178 		(*window_clamp) = (65535 << 14);
179 	space = min(*window_clamp, space);
180 
181 	/* Quantize space offering to a multiple of mss if possible. */
182 	if (space > mss)
183 		space = (space / mss) * mss;
184 
185 	/* NOTE: offering an initial window larger than 32767
186 	 * will break some buggy TCP stacks. If the admin tells us
187 	 * it is likely we could be speaking with such a buggy stack
188 	 * we will truncate our initial window offering to 32K-1
189 	 * unless the remote has sent us a window scaling option,
190 	 * which we interpret as a sign the remote TCP is not
191 	 * misinterpreting the window field as a signed quantity.
192 	 */
193 	if (sysctl_tcp_workaround_signed_windows)
194 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
195 	else
196 		(*rcv_wnd) = space;
197 
198 	(*rcv_wscale) = 0;
199 	if (wscale_ok) {
200 		/* Set window scaling on max possible window
201 		 * See RFC1323 for an explanation of the limit to 14
202 		 */
203 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 		while (space > 65535 && (*rcv_wscale) < 14) {
205 			space >>= 1;
206 			(*rcv_wscale)++;
207 		}
208 	}
209 
210 	/* Set the initial window to a value large enough for senders
211 	 * following RFC2414. Senders not following this RFC
212 	 * will be satisfied with 2 segments.
213 	 */
214 	if (mss > (1<<*rcv_wscale)) {
215 		int init_cwnd = 4;
216 		if (mss > 1460*3)
217 			init_cwnd = 2;
218 		else if (mss > 1460)
219 			init_cwnd = 3;
220 		if (*rcv_wnd > init_cwnd*mss)
221 			*rcv_wnd = init_cwnd*mss;
222 	}
223 
224 	/* Set the clamp no higher than max representable value */
225 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
226 }
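/* A sketch with assumed values: if wscale_ok and the larger of
 * sysctl_tcp_rmem[2] and sysctl_rmem_max is 4 MB, the loop above picks
 * rcv_wscale = 7 (4194304 >> 6 = 65536 is still above 65535, while
 * 4194304 >> 7 = 32768 is not), which can represent windows up to
 * 65535 << 7, roughly 8 MB.  With an mss of 1460 the initial rcv_wnd is
 * additionally capped to 4 * 1460 = 5840 bytes by the RFC2414 logic above.
 */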
227 
228 /* Choose a new window to advertise, update state in tcp_sock for the
229  * socket, and return result with RFC1323 scaling applied.  The return
230  * value can be stuffed directly into th->window for an outgoing
231  * frame.
232  */
233 static u16 tcp_select_window(struct sock *sk)
234 {
235 	struct tcp_sock *tp = tcp_sk(sk);
236 	u32 cur_win = tcp_receive_window(tp);
237 	u32 new_win = __tcp_select_window(sk);
238 
239 	/* Never shrink the offered window */
240 	if(new_win < cur_win) {
241 		/* Danger Will Robinson!
242 		 * Don't update rcv_wup/rcv_wnd here or else
243 		 * we will not be able to advertise a zero
244 		 * window in time.  --DaveM
245 		 *
246 		 * Relax Will Robinson.
247 		 */
248 		new_win = cur_win;
249 	}
250 	tp->rcv_wnd = new_win;
251 	tp->rcv_wup = tp->rcv_nxt;
252 
253 	/* Make sure we do not exceed the maximum possible
254 	 * scaled window.
255 	 */
256 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
257 		new_win = min(new_win, MAX_TCP_WINDOW);
258 	else
259 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
260 
261 	/* RFC1323 scaling applied */
262 	new_win >>= tp->rx_opt.rcv_wscale;
263 
264 	/* If we advertise zero window, disable fast path. */
265 	if (new_win == 0)
266 		tp->pred_flags = 0;
267 
268 	return new_win;
269 }
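/* For illustration, with assumed values: if __tcp_select_window() returns
 * 262144 and rcv_wscale is 7, the value placed in th->window is
 * 262144 >> 7 = 2048.  Without window scaling and with the signed-window
 * workaround enabled, the same request would instead be clamped to
 * MAX_TCP_WINDOW (32767) before being advertised.
 */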
270 
271 static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
272 					 __u32 tstamp)
273 {
274 	if (tp->rx_opt.tstamp_ok) {
275 		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
276 					  (TCPOPT_NOP << 16) |
277 					  (TCPOPT_TIMESTAMP << 8) |
278 					  TCPOLEN_TIMESTAMP);
279 		*ptr++ = htonl(tstamp);
280 		*ptr++ = htonl(tp->rx_opt.ts_recent);
281 	}
282 	if (tp->rx_opt.eff_sacks) {
283 		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
284 		int this_sack;
285 
286 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
287 			       (TCPOPT_NOP  << 16) |
288 			       (TCPOPT_SACK <<  8) |
289 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
290 						     TCPOLEN_SACK_PERBLOCK)));
291 		for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
292 			*ptr++ = htonl(sp[this_sack].start_seq);
293 			*ptr++ = htonl(sp[this_sack].end_seq);
294 		}
295 		if (tp->rx_opt.dsack) {
296 			tp->rx_opt.dsack = 0;
297 			tp->rx_opt.eff_sacks--;
298 		}
299 	}
300 }
301 
302 /* Construct a tcp options header for a SYN or SYN_ACK packet.
303  * If this is ever changed, make sure to change the definition of
304  * MAX_SYN_SIZE to match the new maximum number of options that you
305  * can generate.
306  */
307 static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
308 				  int offer_wscale, int wscale, __u32 tstamp,
309 				  __u32 ts_recent)
310 {
311 	/* We always get an MSS option.
312 	 * If timestamps are used, the option bytes which will be seen
313 	 * in normal data packets must be included in the MSS we
314 	 * advertise.  But we subtract them from tp->mss_cache so
315 	 * that calculations in tcp_sendmsg are simpler etc.
316 	 * So account for this fact here if necessary.  If we
317 	 * don't do this correctly, as a receiver we won't
318 	 * recognize data packets as being full sized when we
319 	 * should, and thus we won't abide by the delayed ACK
320 	 * rules correctly.
321 	 * SACKs don't matter, we never delay an ACK when we
322 	 * have any of those going out.
323 	 */
324 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
325 	if (ts) {
326 		if(sack)
327 			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
328 						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
329 		else
330 			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
331 						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
332 		*ptr++ = htonl(tstamp);		/* TSVAL */
333 		*ptr++ = htonl(ts_recent);	/* TSECR */
334 	} else if(sack)
335 		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
336 					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
337 	if (offer_wscale)
338 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
339 }
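/* Option layout sketch for a SYN carrying all of the above (assumed case:
 * mss, timestamps, SACK-permitted and window scaling all enabled):
 * 4 bytes of MSS, 12 bytes for the combined SACK_PERM + TIMESTAMP word plus
 * TSVAL/TSECR, and 4 bytes of NOP + WINDOW scale, i.e. 20 option bytes and
 * a 40 byte TCP header.  This matches the SYN sizing in tcp_transmit_skb()
 * (TCPOLEN_MSS + TCPOLEN_TSTAMP_ALIGNED + TCPOLEN_WSCALE_ALIGNED); the
 * separate TCPOLEN_SACKPERM_ALIGNED is only needed when timestamps are off.
 */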
340 
341 /* This routine actually transmits TCP packets queued up by
342  * tcp_do_sendmsg().  It is used by both the initial
343  * transmission and possible later retransmissions.
344  * All SKB's seen here are completely headerless.  It is our
345  * job to build the TCP header, and pass the packet down to
346  * IP so it can do the same plus pass the packet off to the
347  * device.
348  *
349  * We are working here with either a clone of the original
350  * SKB, or a fresh unique copy made by the retransmit engine.
351  */
352 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
353 {
354 	const struct inet_connection_sock *icsk = inet_csk(sk);
355 	struct inet_sock *inet;
356 	struct tcp_sock *tp;
357 	struct tcp_skb_cb *tcb;
358 	int tcp_header_size;
359 	struct tcphdr *th;
360 	int sysctl_flags;
361 	int err;
362 
363 	BUG_ON(!skb || !tcp_skb_pcount(skb));
364 
365 	/* If congestion control is doing timestamping, we must
366 	 * take such a timestamp before we potentially clone/copy.
367 	 */
368 	if (icsk->icsk_ca_ops->rtt_sample)
369 		__net_timestamp(skb);
370 
371 	if (likely(clone_it)) {
372 		if (unlikely(skb_cloned(skb)))
373 			skb = pskb_copy(skb, gfp_mask);
374 		else
375 			skb = skb_clone(skb, gfp_mask);
376 		if (unlikely(!skb))
377 			return -ENOBUFS;
378 	}
379 
380 	inet = inet_sk(sk);
381 	tp = tcp_sk(sk);
382 	tcb = TCP_SKB_CB(skb);
383 	tcp_header_size = tp->tcp_header_len;
384 
385 #define SYSCTL_FLAG_TSTAMPS	0x1
386 #define SYSCTL_FLAG_WSCALE	0x2
387 #define SYSCTL_FLAG_SACK	0x4
388 
389 	sysctl_flags = 0;
390 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
391 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
392 		if(sysctl_tcp_timestamps) {
393 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
394 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
395 		}
396 		if (sysctl_tcp_window_scaling) {
397 			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
398 			sysctl_flags |= SYSCTL_FLAG_WSCALE;
399 		}
400 		if (sysctl_tcp_sack) {
401 			sysctl_flags |= SYSCTL_FLAG_SACK;
402 			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
403 				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
404 		}
405 	} else if (unlikely(tp->rx_opt.eff_sacks)) {
406 		/* A SACK is 2 pad bytes, a 2 byte header, plus
407 		 * 2 32-bit sequence numbers for each SACK block.
408 		 */
409 		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
410 				    (tp->rx_opt.eff_sacks *
411 				     TCPOLEN_SACK_PERBLOCK));
412 	}
413 
414 	if (tcp_packets_in_flight(tp) == 0)
415 		tcp_ca_event(sk, CA_EVENT_TX_START);
416 
417 	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
418 	skb->h.th = th;
419 	skb_set_owner_w(skb, sk);
420 
421 	/* Build TCP header and checksum it. */
422 	th->source		= inet->sport;
423 	th->dest		= inet->dport;
424 	th->seq			= htonl(tcb->seq);
425 	th->ack_seq		= htonl(tp->rcv_nxt);
426 	*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
427 					tcb->flags);
428 
429 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
430 		/* RFC1323: The window in SYN & SYN/ACK segments
431 		 * is never scaled.
432 		 */
433 		th->window	= htons(tp->rcv_wnd);
434 	} else {
435 		th->window	= htons(tcp_select_window(sk));
436 	}
437 	th->check		= 0;
438 	th->urg_ptr		= 0;
439 
440 	if (unlikely(tp->urg_mode &&
441 		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
442 		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
443 		th->urg			= 1;
444 	}
445 
446 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
447 		tcp_syn_build_options((__u32 *)(th + 1),
448 				      tcp_advertise_mss(sk),
449 				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
450 				      (sysctl_flags & SYSCTL_FLAG_SACK),
451 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
452 				      tp->rx_opt.rcv_wscale,
453 				      tcb->when,
454 				      tp->rx_opt.ts_recent);
455 	} else {
456 		tcp_build_and_update_options((__u32 *)(th + 1),
457 					     tp, tcb->when);
458 		TCP_ECN_send(sk, tp, skb, tcp_header_size);
459 	}
460 
461 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
462 
463 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
464 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
465 
466 	if (skb->len != tcp_header_size)
467 		tcp_event_data_sent(tp, skb, sk);
468 
469 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
470 
471 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
472 	if (likely(err <= 0))
473 		return err;
474 
475 	tcp_enter_cwr(sk);
476 
477 	/* NET_XMIT_CN is special. It does not guarantee
478 	 * that this packet is lost. It tells us that the device
479 	 * is about to start dropping packets, or is already
480 	 * dropping some packets of the same priority, and
481 	 * is asking us to send less aggressively.
482 	 */
483 	return err == NET_XMIT_CN ? 0 : err;
484 
485 #undef SYSCTL_FLAG_TSTAMPS
486 #undef SYSCTL_FLAG_WSCALE
487 #undef SYSCTL_FLAG_SACK
488 }
489 
490 
491 /* This routine just queues the buffer.
492  *
493  * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
494  * otherwise the socket can stall.
495  */
496 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
497 {
498 	struct tcp_sock *tp = tcp_sk(sk);
499 
500 	/* Advance write_seq and place onto the write_queue. */
501 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
502 	skb_header_release(skb);
503 	__skb_queue_tail(&sk->sk_write_queue, skb);
504 	sk_charge_skb(sk, skb);
505 
506 	/* Queue it, remembering where we must start sending. */
507 	if (sk->sk_send_head == NULL)
508 		sk->sk_send_head = skb;
509 }
510 
511 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
512 {
513 	if (skb->len <= mss_now || !sk_can_gso(sk)) {
514 		/* Avoid the costly divide in the normal
515 		 * non-TSO case.
516 		 */
517 		skb_shinfo(skb)->gso_segs = 1;
518 		skb_shinfo(skb)->gso_size = 0;
519 		skb_shinfo(skb)->gso_type = 0;
520 	} else {
521 		unsigned int factor;
522 
523 		factor = skb->len + (mss_now - 1);
524 		factor /= mss_now;
525 		skb_shinfo(skb)->gso_segs = factor;
526 		skb_shinfo(skb)->gso_size = mss_now;
527 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
528 	}
529 }
530 
531 /* Function to create two new TCP segments.  Shrinks the given segment
532  * to the specified size and appends a new segment with the rest of the
533  * packet to the list.  This won't be called frequently, I hope.
534  * Remember, these are still headerless SKBs at this point.
535  */
536 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
537 {
538 	struct tcp_sock *tp = tcp_sk(sk);
539 	struct sk_buff *buff;
540 	int nsize, old_factor;
541 	int nlen;
542 	u16 flags;
543 
544 	BUG_ON(len > skb->len);
545 
546  	clear_all_retrans_hints(tp);
547 	nsize = skb_headlen(skb) - len;
548 	if (nsize < 0)
549 		nsize = 0;
550 
551 	if (skb_cloned(skb) &&
552 	    skb_is_nonlinear(skb) &&
553 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
554 		return -ENOMEM;
555 
556 	/* Get a new skb... force flag on. */
557 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
558 	if (buff == NULL)
559 		return -ENOMEM; /* We'll just try again later. */
560 
561 	sk_charge_skb(sk, buff);
562 	nlen = skb->len - len - nsize;
563 	buff->truesize += nlen;
564 	skb->truesize -= nlen;
565 
566 	/* Correct the sequence numbers. */
567 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
568 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
569 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
570 
571 	/* PSH and FIN should only be set in the second packet. */
572 	flags = TCP_SKB_CB(skb)->flags;
573 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
574 	TCP_SKB_CB(buff)->flags = flags;
575 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
576 	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
577 
578 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
579 		/* Copy and checksum data tail into the new buffer. */
580 		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
581 						       nsize, 0);
582 
583 		skb_trim(skb, len);
584 
585 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
586 	} else {
587 		skb->ip_summed = CHECKSUM_HW;
588 		skb_split(skb, buff, len);
589 	}
590 
591 	buff->ip_summed = skb->ip_summed;
592 
593 	/* Looks stupid, but our code really uses the 'when' field of
594 	 * skbs which it has never sent before. --ANK
595 	 */
596 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
597 	buff->tstamp = skb->tstamp;
598 
599 	old_factor = tcp_skb_pcount(skb);
600 
601 	/* Fix up tso_factor for both original and new SKB.  */
602 	tcp_set_skb_tso_segs(sk, skb, mss_now);
603 	tcp_set_skb_tso_segs(sk, buff, mss_now);
604 
605 	/* If this packet has been sent out already, we must
606 	 * adjust the various packet counters.
607 	 */
608 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
609 		int diff = old_factor - tcp_skb_pcount(skb) -
610 			tcp_skb_pcount(buff);
611 
612 		tp->packets_out -= diff;
613 
614 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
615 			tp->sacked_out -= diff;
616 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
617 			tp->retrans_out -= diff;
618 
619 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
620 			tp->lost_out -= diff;
621 			tp->left_out -= diff;
622 		}
623 
624 		if (diff > 0) {
625 			/* Adjust Reno SACK estimate. */
626 			if (!tp->rx_opt.sack_ok) {
627 				tp->sacked_out -= diff;
628 				if ((int)tp->sacked_out < 0)
629 					tp->sacked_out = 0;
630 				tcp_sync_left_out(tp);
631 			}
632 
633 			tp->fackets_out -= diff;
634 			if ((int)tp->fackets_out < 0)
635 				tp->fackets_out = 0;
636 		}
637 	}
638 
639 	/* Link BUFF into the send queue. */
640 	skb_header_release(buff);
641 	__skb_append(skb, buff, &sk->sk_write_queue);
642 
643 	return 0;
644 }
645 
646 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
647  * eventually). The difference is that the pulled data is not copied, but
648  * immediately discarded.
649  */
650 static void __pskb_trim_head(struct sk_buff *skb, int len)
651 {
652 	int i, k, eat;
653 
654 	eat = len;
655 	k = 0;
656 	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
657 		if (skb_shinfo(skb)->frags[i].size <= eat) {
658 			put_page(skb_shinfo(skb)->frags[i].page);
659 			eat -= skb_shinfo(skb)->frags[i].size;
660 		} else {
661 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
662 			if (eat) {
663 				skb_shinfo(skb)->frags[k].page_offset += eat;
664 				skb_shinfo(skb)->frags[k].size -= eat;
665 				eat = 0;
666 			}
667 			k++;
668 		}
669 	}
670 	skb_shinfo(skb)->nr_frags = k;
671 
672 	skb->tail = skb->data;
673 	skb->data_len -= len;
674 	skb->len = skb->data_len;
675 }
676 
677 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
678 {
679 	if (skb_cloned(skb) &&
680 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
681 		return -ENOMEM;
682 
683 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
684 	if (unlikely(len < skb_headlen(skb)))
685 		__skb_pull(skb, len);
686 	else
687 		__pskb_trim_head(skb, len - skb_headlen(skb));
688 
689 	TCP_SKB_CB(skb)->seq += len;
690 	skb->ip_summed = CHECKSUM_HW;
691 
692 	skb->truesize	     -= len;
693 	sk->sk_wmem_queued   -= len;
694 	sk->sk_forward_alloc += len;
695 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
696 
697 	/* Any change of skb->len requires recalculation of tso
698 	 * factor and mss.
699 	 */
700 	if (tcp_skb_pcount(skb) > 1)
701 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
702 
703 	return 0;
704 }
705 
706 /* Not accounting for SACKs here. */
707 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
708 {
709 	struct tcp_sock *tp = tcp_sk(sk);
710 	struct inet_connection_sock *icsk = inet_csk(sk);
711 	int mss_now;
712 
713 	/* Calculate the base mss without TCP options:
714 	   it is MMS_S - sizeof(tcphdr) of RFC 1122.
715 	 */
716 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
717 
718 	/* Clamp it (mss_clamp does not include tcp options) */
719 	if (mss_now > tp->rx_opt.mss_clamp)
720 		mss_now = tp->rx_opt.mss_clamp;
721 
722 	/* Now subtract optional transport overhead */
723 	mss_now -= icsk->icsk_ext_hdr_len;
724 
725 	/* Then reserve room for full set of TCP options and 8 bytes of data */
726 	if (mss_now < 48)
727 		mss_now = 48;
728 
729 	/* Now subtract TCP options size, not including SACKs */
730 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
731 
732 	return mss_now;
733 }
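/* Worked example with assumed values: for a plain IPv4 path with a
 * 1500 byte MTU and no extension headers, mss_now = 1500 - 20 (IP) - 20
 * (TCP) = 1460; with the timestamp option negotiated tcp_header_len is
 * 20 + 12, so a further 12 bytes are subtracted, giving the familiar
 * 1448 byte segments.  This all assumes mss_clamp does not bite first.
 */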
734 
735 /* Inverse of above */
736 int tcp_mss_to_mtu(struct sock *sk, int mss)
737 {
738 	struct tcp_sock *tp = tcp_sk(sk);
739 	struct inet_connection_sock *icsk = inet_csk(sk);
740 	int mtu;
741 
742 	mtu = mss +
743 	      tp->tcp_header_len +
744 	      icsk->icsk_ext_hdr_len +
745 	      icsk->icsk_af_ops->net_header_len;
746 
747 	return mtu;
748 }
749 
750 void tcp_mtup_init(struct sock *sk)
751 {
752 	struct tcp_sock *tp = tcp_sk(sk);
753 	struct inet_connection_sock *icsk = inet_csk(sk);
754 
755 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
756 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
757 	                       icsk->icsk_af_ops->net_header_len;
758 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
759 	icsk->icsk_mtup.probe_size = 0;
760 }
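/* Rough numbers, assuming plain IPv4 and an mss_clamp of 1460: search_high
 * starts at 1460 + 20 + 20 = 1500 bytes of MTU, while search_low =
 * tcp_mss_to_mtu(sk, 512) is 512 plus the current TCP and IP header sizes,
 * e.g. 512 + 20 + 20 = 552 when no TCP options are in use.
 */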
761 
762 /* This function synchronizes snd mss to the current pmtu/exthdr set.
763 
764    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
765    account for TCP options, but covers only the bare TCP header.
766 
767    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
768    It is the minimum of user_mss and the mss received with the SYN.
769    It also does not include TCP options.
770 
771    inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
772 
773    tp->mss_cache is the current effective sending mss, including
774    all tcp options except for SACKs. It is evaluated
775    taking into account the current pmtu, but never exceeds
776    tp->rx_opt.mss_clamp.
777 
778    NOTE1. rfc1122 clearly states that advertised MSS
779    DOES NOT include either tcp or ip options.
780 
781    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
782    are READ ONLY outside this function.		--ANK (980731)
783  */
784 
785 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
786 {
787 	struct tcp_sock *tp = tcp_sk(sk);
788 	struct inet_connection_sock *icsk = inet_csk(sk);
789 	int mss_now;
790 
791 	if (icsk->icsk_mtup.search_high > pmtu)
792 		icsk->icsk_mtup.search_high = pmtu;
793 
794 	mss_now = tcp_mtu_to_mss(sk, pmtu);
795 
796 	/* Bound mss with half of window */
797 	if (tp->max_window && mss_now > (tp->max_window>>1))
798 		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
799 
800 	/* And store cached results */
801 	icsk->icsk_pmtu_cookie = pmtu;
802 	if (icsk->icsk_mtup.enabled)
803 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
804 	tp->mss_cache = mss_now;
805 
806 	return mss_now;
807 }
808 
809 /* Compute the current effective MSS, taking SACKs and IP options,
810  * and even PMTU discovery events into account.
811  *
812  * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
813  * cannot be large. However, taking into account rare use of URG, this
814  * is not a big flaw.
815  */
816 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
817 {
818 	struct tcp_sock *tp = tcp_sk(sk);
819 	struct dst_entry *dst = __sk_dst_get(sk);
820 	u32 mss_now;
821 	u16 xmit_size_goal;
822 	int doing_tso = 0;
823 
824 	mss_now = tp->mss_cache;
825 
826 	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
827 		doing_tso = 1;
828 
829 	if (dst) {
830 		u32 mtu = dst_mtu(dst);
831 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
832 			mss_now = tcp_sync_mss(sk, mtu);
833 	}
834 
835 	if (tp->rx_opt.eff_sacks)
836 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
837 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
838 
839 	xmit_size_goal = mss_now;
840 
841 	if (doing_tso) {
842 		xmit_size_goal = (65535 -
843 				  inet_csk(sk)->icsk_af_ops->net_header_len -
844 				  inet_csk(sk)->icsk_ext_hdr_len -
845 				  tp->tcp_header_len);
846 
847 		if (tp->max_window &&
848 		    (xmit_size_goal > (tp->max_window >> 1)))
849 			xmit_size_goal = max((tp->max_window >> 1),
850 					     68U - tp->tcp_header_len);
851 
852 		xmit_size_goal -= (xmit_size_goal % mss_now);
853 	}
854 	tp->xmit_size_goal = xmit_size_goal;
855 
856 	return mss_now;
857 }
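/* Sketch of the TSO size goal with assumed values: over IPv4 with
 * timestamps, xmit_size_goal starts from 65535 - 20 - 0 - 32 = 65483 and is
 * then rounded down to a multiple of mss_now; for mss_now = 1448 that gives
 * 45 * 1448 = 65160 bytes per super-packet (the half-max_window bound is
 * assumed not to apply here).
 */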
858 
859 /* Congestion window validation. (RFC2861) */
860 
861 static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
862 {
863 	__u32 packets_out = tp->packets_out;
864 
865 	if (packets_out >= tp->snd_cwnd) {
866 		/* Network is fully fed. */
867 		tp->snd_cwnd_used = 0;
868 		tp->snd_cwnd_stamp = tcp_time_stamp;
869 	} else {
870 		/* Network starves. */
871 		if (tp->packets_out > tp->snd_cwnd_used)
872 			tp->snd_cwnd_used = tp->packets_out;
873 
874 		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
875 			tcp_cwnd_application_limited(sk);
876 	}
877 }
878 
879 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
880 {
881 	u32 window, cwnd_len;
882 
883 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
884 	cwnd_len = mss_now * cwnd;
885 	return min(window, cwnd_len);
886 }
887 
888 /* Can at least one segment of SKB be sent right now, according to the
889  * congestion window rules?  If so, return how many segments are allowed.
890  */
891 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
892 {
893 	u32 in_flight, cwnd;
894 
895 	/* Don't be strict about the congestion window for the final FIN.  */
896 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
897 		return 1;
898 
899 	in_flight = tcp_packets_in_flight(tp);
900 	cwnd = tp->snd_cwnd;
901 	if (in_flight < cwnd)
902 		return (cwnd - in_flight);
903 
904 	return 0;
905 }
906 
907 /* This must be invoked the first time we consider transmitting
908  * SKB onto the wire.
909  */
910 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
911 {
912 	int tso_segs = tcp_skb_pcount(skb);
913 
914 	if (!tso_segs ||
915 	    (tso_segs > 1 &&
916 	     tcp_skb_mss(skb) != mss_now)) {
917 		tcp_set_skb_tso_segs(sk, skb, mss_now);
918 		tso_segs = tcp_skb_pcount(skb);
919 	}
920 	return tso_segs;
921 }
922 
923 static inline int tcp_minshall_check(const struct tcp_sock *tp)
924 {
925 	return after(tp->snd_sml,tp->snd_una) &&
926 		!after(tp->snd_sml, tp->snd_nxt);
927 }
928 
929 /* Return 0 if the packet can be sent now without violating Nagle's rules:
930  * 1. It is full sized.
931  * 2. Or it contains FIN. (already checked by caller)
932  * 3. Or TCP_NODELAY was set.
933  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
934  *    With Minshall's modification: all sent small packets are ACKed.
935  */
936 
937 static inline int tcp_nagle_check(const struct tcp_sock *tp,
938 				  const struct sk_buff *skb,
939 				  unsigned mss_now, int nonagle)
940 {
941 	return (skb->len < mss_now &&
942 		((nonagle&TCP_NAGLE_CORK) ||
943 		 (!nonagle &&
944 		  tp->packets_out &&
945 		  tcp_minshall_check(tp))));
946 }
947 
948 /* Return non-zero if the Nagle test allows this packet to be
949  * sent now.
950  */
951 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
952 				 unsigned int cur_mss, int nonagle)
953 {
954 	/* The Nagle rule does not apply to frames which sit in the middle of the
955 	 * write_queue (they have no chance of getting new data).
956 	 *
957 	 * This is implemented in the callers, where they modify the 'nonagle'
958 	 * argument based upon the location of SKB in the send queue.
959 	 */
960 	if (nonagle & TCP_NAGLE_PUSH)
961 		return 1;
962 
963 	/* Don't use the nagle rule for urgent data (or for the final FIN).  */
964 	if (tp->urg_mode ||
965 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
966 		return 1;
967 
968 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
969 		return 1;
970 
971 	return 0;
972 }
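/* Example of the Minshall/Nagle interaction, under assumed conditions: a
 * 100 byte sub-mss segment written without TCP_NODELAY, while an earlier
 * small segment is still unacknowledged (tcp_minshall_check() true), makes
 * tcp_nagle_check() return true, so tcp_nagle_test() returns 0 and the
 * segment waits until the outstanding data is ACKed or it grows to a full
 * mss.
 */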
973 
974 /* Does at least the first segment of SKB fit into the send window? */
975 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
976 {
977 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
978 
979 	if (skb->len > cur_mss)
980 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
981 
982 	return !after(end_seq, tp->snd_una + tp->snd_wnd);
983 }
984 
985 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
986  * should be put on the wire right now.  If so, it returns the number of
987  * packets allowed by the congestion window.
988  */
989 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
990 				 unsigned int cur_mss, int nonagle)
991 {
992 	struct tcp_sock *tp = tcp_sk(sk);
993 	unsigned int cwnd_quota;
994 
995 	tcp_init_tso_segs(sk, skb, cur_mss);
996 
997 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
998 		return 0;
999 
1000 	cwnd_quota = tcp_cwnd_test(tp, skb);
1001 	if (cwnd_quota &&
1002 	    !tcp_snd_wnd_test(tp, skb, cur_mss))
1003 		cwnd_quota = 0;
1004 
1005 	return cwnd_quota;
1006 }
1007 
1008 static inline int tcp_skb_is_last(const struct sock *sk,
1009 				  const struct sk_buff *skb)
1010 {
1011 	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1012 }
1013 
1014 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
1015 {
1016 	struct sk_buff *skb = sk->sk_send_head;
1017 
1018 	return (skb &&
1019 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1020 			     (tcp_skb_is_last(sk, skb) ?
1021 			      TCP_NAGLE_PUSH :
1022 			      tp->nonagle)));
1023 }
1024 
1025 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1026  * which is put after SKB on the list.  It is very much like
1027  * tcp_fragment() except that it may make several kinds of assumptions
1028  * in order to speed up the splitting operation.  In particular, we
1029  * know that all the data is in scatter-gather pages, and that the
1030  * packet has never been sent out before (and thus is not cloned).
1031  */
1032 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1033 {
1034 	struct sk_buff *buff;
1035 	int nlen = skb->len - len;
1036 	u16 flags;
1037 
1038 	/* All of a TSO frame must be composed of paged data.  */
1039 	if (skb->len != skb->data_len)
1040 		return tcp_fragment(sk, skb, len, mss_now);
1041 
1042 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1043 	if (unlikely(buff == NULL))
1044 		return -ENOMEM;
1045 
1046 	sk_charge_skb(sk, buff);
1047 	buff->truesize += nlen;
1048 	skb->truesize -= nlen;
1049 
1050 	/* Correct the sequence numbers. */
1051 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1052 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1053 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1054 
1055 	/* PSH and FIN should only be set in the second packet. */
1056 	flags = TCP_SKB_CB(skb)->flags;
1057 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1058 	TCP_SKB_CB(buff)->flags = flags;
1059 
1060 	/* This packet was never sent out yet, so no SACK bits. */
1061 	TCP_SKB_CB(buff)->sacked = 0;
1062 
1063 	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
1064 	skb_split(skb, buff, len);
1065 
1066 	/* Fix up tso_factor for both original and new SKB.  */
1067 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1068 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1069 
1070 	/* Link BUFF into the send queue. */
1071 	skb_header_release(buff);
1072 	__skb_append(skb, buff, &sk->sk_write_queue);
1073 
1074 	return 0;
1075 }
1076 
1077 /* Try to defer sending, if possible, in order to minimize the amount
1078  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1079  *
1080  * This algorithm is from John Heffner.
1081  */
1082 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1083 {
1084 	const struct inet_connection_sock *icsk = inet_csk(sk);
1085 	u32 send_win, cong_win, limit, in_flight;
1086 
1087 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1088 		return 0;
1089 
1090 	if (icsk->icsk_ca_state != TCP_CA_Open)
1091 		return 0;
1092 
1093 	in_flight = tcp_packets_in_flight(tp);
1094 
1095 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1096 	       (tp->snd_cwnd <= in_flight));
1097 
1098 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1099 
1100 	/* From in_flight test above, we know that cwnd > in_flight.  */
1101 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1102 
1103 	limit = min(send_win, cong_win);
1104 
1105 	/* If a full-sized TSO skb can be sent, do it. */
1106 	if (limit >= 65536)
1107 		return 0;
1108 
1109 	if (sysctl_tcp_tso_win_divisor) {
1110 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1111 
1112 		/* If at least some fraction of a window is available,
1113 		 * just use it.
1114 		 */
1115 		chunk /= sysctl_tcp_tso_win_divisor;
1116 		if (limit >= chunk)
1117 			return 0;
1118 	} else {
1119 		/* Different approach, try not to defer past a single
1120 		 * ACK.  Receiver should ACK every other full sized
1121 		 * frame, so if we have space for more than 3 frames
1122 		 * then send now.
1123 		 */
1124 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1125 			return 0;
1126 	}
1127 
1128 	/* Ok, it looks like it is advisable to defer.  */
1129 	return 1;
1130 }
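/* Deferral sketch with assumed numbers: snd_cwnd = 40, in_flight = 30,
 * mss_cache = 1460 and an ample send window give cong_win = 10 * 1460 =
 * 14600, well short of a full 64 KB TSO frame.  With tcp_tso_win_divisor = 3
 * the allowance is min(snd_wnd, 40 * 1460) / 3, about 19466 bytes, so 14600
 * falls below it and we defer, hoping a later ACK lets us send one larger
 * burst.
 */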
1131 
1132 /* Create a new MTU probe if we are ready.
1133  * Returns 0 if we should wait to probe (no cwnd available),
1134  *         1 if a probe was sent,
1135  *         -1 otherwise */
1136 static int tcp_mtu_probe(struct sock *sk)
1137 {
1138 	struct tcp_sock *tp = tcp_sk(sk);
1139 	struct inet_connection_sock *icsk = inet_csk(sk);
1140 	struct sk_buff *skb, *nskb, *next;
1141 	int len;
1142 	int probe_size;
1143 	unsigned int pif;
1144 	int copy;
1145 	int mss_now;
1146 
1147 	/* Not currently probing/verifying,
1148 	 * not in recovery,
1149 	 * have enough cwnd, and
1150 	 * not SACKing (the variable headers throw things off) */
1151 	if (!icsk->icsk_mtup.enabled ||
1152 	    icsk->icsk_mtup.probe_size ||
1153 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1154 	    tp->snd_cwnd < 11 ||
1155 	    tp->rx_opt.eff_sacks)
1156 		return -1;
1157 
1158 	/* Very simple search strategy: just double the MSS. */
1159 	mss_now = tcp_current_mss(sk, 0);
1160 	probe_size = 2*tp->mss_cache;
1161 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1162 		/* TODO: set timer for probe_converge_event */
1163 		return -1;
1164 	}
1165 
1166 	/* Have enough data in the send queue to probe? */
1167 	len = 0;
1168 	if ((skb = sk->sk_send_head) == NULL)
1169 		return -1;
1170 	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1171 		skb = skb->next;
1172 	if (len < probe_size)
1173 		return -1;
1174 
1175 	/* Receive window check. */
1176 	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
1177 		if (tp->snd_wnd < probe_size)
1178 			return -1;
1179 		else
1180 			return 0;
1181 	}
1182 
1183 	/* Do we need to wait to drain cwnd? */
1184 	pif = tcp_packets_in_flight(tp);
1185 	if (pif + 2 > tp->snd_cwnd) {
1186 		/* With no packets in flight, don't stall. */
1187 		if (pif == 0)
1188 			return -1;
1189 		else
1190 			return 0;
1191 	}
1192 
1193 	/* We're allowed to probe.  Build it now. */
1194 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1195 		return -1;
1196 	sk_charge_skb(sk, nskb);
1197 
1198 	skb = sk->sk_send_head;
1199 	__skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
1200 	sk->sk_send_head = nskb;
1201 
1202 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1203 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1204 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1205 	TCP_SKB_CB(nskb)->sacked = 0;
1206 	nskb->csum = 0;
1207 	if (skb->ip_summed == CHECKSUM_HW)
1208 		nskb->ip_summed = CHECKSUM_HW;
1209 
1210 	len = 0;
1211 	while (len < probe_size) {
1212 		next = skb->next;
1213 
1214 		copy = min_t(int, skb->len, probe_size - len);
1215 		if (nskb->ip_summed)
1216 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1217 		else
1218 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1219 			                 skb_put(nskb, copy), copy, nskb->csum);
1220 
1221 		if (skb->len <= copy) {
1222 			/* We've eaten all the data from this skb.
1223 			 * Throw it away. */
1224 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1225 			__skb_unlink(skb, &sk->sk_write_queue);
1226 			sk_stream_free_skb(sk, skb);
1227 		} else {
1228 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1229 			                           ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1230 			if (!skb_shinfo(skb)->nr_frags) {
1231 				skb_pull(skb, copy);
1232 				if (skb->ip_summed != CHECKSUM_HW)
1233 					skb->csum = csum_partial(skb->data, skb->len, 0);
1234 			} else {
1235 				__pskb_trim_head(skb, copy);
1236 				tcp_set_skb_tso_segs(sk, skb, mss_now);
1237 			}
1238 			TCP_SKB_CB(skb)->seq += copy;
1239 		}
1240 
1241 		len += copy;
1242 		skb = next;
1243 	}
1244 	tcp_init_tso_segs(sk, nskb, nskb->len);
1245 
1246 	/* We're ready to send.  If this fails, the probe will
1247 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1248 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1249 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1250 		/* Decrement cwnd here because we are sending
1251 		* effectively two packets. */
1252 		tp->snd_cwnd--;
1253 		update_send_head(sk, tp, nskb);
1254 
1255 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1256 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1257 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1258 
1259 		return 1;
1260 	}
1261 
1262 	return -1;
1263 }
1264 
1265 
1266 /* This routine writes packets to the network.  It advances the
1267  * send_head.  This happens as incoming acks open up the remote
1268  * window for us.
1269  *
1270  * Returns 1 if no segments are in flight and we have queued segments, but
1271  * cannot send anything now because of SWS or another problem.
1272  */
1273 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1274 {
1275 	struct tcp_sock *tp = tcp_sk(sk);
1276 	struct sk_buff *skb;
1277 	unsigned int tso_segs, sent_pkts;
1278 	int cwnd_quota;
1279 	int result;
1280 
1281 	/* If we are closed, the bytes will have to remain here.
1282 	 * In time the closedown will finish, we will empty the write queue and all
1283 	 * will be happy.
1284 	 */
1285 	if (unlikely(sk->sk_state == TCP_CLOSE))
1286 		return 0;
1287 
1288 	sent_pkts = 0;
1289 
1290 	/* Do MTU probing. */
1291 	if ((result = tcp_mtu_probe(sk)) == 0) {
1292 		return 0;
1293 	} else if (result > 0) {
1294 		sent_pkts = 1;
1295 	}
1296 
1297 	while ((skb = sk->sk_send_head)) {
1298 		unsigned int limit;
1299 
1300 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1301 		BUG_ON(!tso_segs);
1302 
1303 		cwnd_quota = tcp_cwnd_test(tp, skb);
1304 		if (!cwnd_quota)
1305 			break;
1306 
1307 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1308 			break;
1309 
1310 		if (tso_segs == 1) {
1311 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1312 						     (tcp_skb_is_last(sk, skb) ?
1313 						      nonagle : TCP_NAGLE_PUSH))))
1314 				break;
1315 		} else {
1316 			if (tcp_tso_should_defer(sk, tp, skb))
1317 				break;
1318 		}
1319 
1320 		limit = mss_now;
1321 		if (tso_segs > 1) {
1322 			limit = tcp_window_allows(tp, skb,
1323 						  mss_now, cwnd_quota);
1324 
1325 			if (skb->len < limit) {
1326 				unsigned int trim = skb->len % mss_now;
1327 
1328 				if (trim)
1329 					limit = skb->len - trim;
1330 			}
1331 		}
1332 
1333 		if (skb->len > limit &&
1334 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1335 			break;
1336 
1337 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1338 
1339 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
1340 			break;
1341 
1342 		/* Advance the send_head.  This one is sent out.
1343 		 * This call will increment packets_out.
1344 		 */
1345 		update_send_head(sk, tp, skb);
1346 
1347 		tcp_minshall_update(tp, mss_now, skb);
1348 		sent_pkts++;
1349 	}
1350 
1351 	if (likely(sent_pkts)) {
1352 		tcp_cwnd_validate(sk, tp);
1353 		return 0;
1354 	}
1355 	return !tp->packets_out && sk->sk_send_head;
1356 }
1357 
1358 /* Push out any pending frames which were held back due to
1359  * TCP_CORK or attempt at coalescing tiny packets.
1360  * The socket must be locked by the caller.
1361  */
1362 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1363 			       unsigned int cur_mss, int nonagle)
1364 {
1365 	struct sk_buff *skb = sk->sk_send_head;
1366 
1367 	if (skb) {
1368 		if (tcp_write_xmit(sk, cur_mss, nonagle))
1369 			tcp_check_probe_timer(sk, tp);
1370 	}
1371 }
1372 
1373 /* Send the _single_ skb sitting at the send head. This function requires
1374  * a true push of pending frames to set up the probe timer etc.
1375  */
1376 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1377 {
1378 	struct tcp_sock *tp = tcp_sk(sk);
1379 	struct sk_buff *skb = sk->sk_send_head;
1380 	unsigned int tso_segs, cwnd_quota;
1381 
1382 	BUG_ON(!skb || skb->len < mss_now);
1383 
1384 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1385 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1386 
1387 	if (likely(cwnd_quota)) {
1388 		unsigned int limit;
1389 
1390 		BUG_ON(!tso_segs);
1391 
1392 		limit = mss_now;
1393 		if (tso_segs > 1) {
1394 			limit = tcp_window_allows(tp, skb,
1395 						  mss_now, cwnd_quota);
1396 
1397 			if (skb->len < limit) {
1398 				unsigned int trim = skb->len % mss_now;
1399 
1400 				if (trim)
1401 					limit = skb->len - trim;
1402 			}
1403 		}
1404 
1405 		if (skb->len > limit &&
1406 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1407 			return;
1408 
1409 		/* Send it out now. */
1410 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1411 
1412 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1413 			update_send_head(sk, tp, skb);
1414 			tcp_cwnd_validate(sk, tp);
1415 			return;
1416 		}
1417 	}
1418 }
1419 
1420 /* This function returns the amount that we can raise the
1421  * usable window based on the following constraints
1422  *
1423  * 1. The window can never be shrunk once it is offered (RFC 793)
1424  * 2. We limit memory per socket
1425  *
1426  * RFC 1122:
1427  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1428  *  RECV.NEXT + RCV.WIN fixed until:
1429  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1430  *
1431  * i.e. don't raise the right edge of the window until you can raise
1432  * it at least MSS bytes.
1433  *
1434  * Unfortunately, the recommended algorithm breaks header prediction,
1435  * since header prediction assumes th->window stays fixed.
1436  *
1437  * Strictly speaking, keeping th->window fixed violates the receiver
1438  * side SWS prevention criteria. The problem is that under this rule
1439  * a stream of single byte packets will cause the right side of the
1440  * window to always advance by a single byte.
1441  *
1442  * Of course, if the sender implements sender side SWS prevention
1443  * then this will not be a problem.
1444  *
1445  * BSD seems to make the following compromise:
1446  *
1447  *	If the free space is less than 1/4 of the maximum
1448  *	space available and the free space is less than 1/2 mss,
1449  *	then set the window to 0.
1450  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1451  *	Otherwise, just prevent the window from shrinking
1452  *	and from being larger than the largest representable value.
1453  *
1454  * This prevents incremental opening of the window in the regime
1455  * where TCP is limited by the speed of the reader side taking
1456  * data out of the TCP receive queue. It does nothing about
1457  * those cases where the window is constrained on the sender side
1458  * because the pipeline is full.
1459  *
1460  * BSD also seems to "accidentally" limit itself to windows that are a
1461  * multiple of MSS, at least until the free space gets quite small.
1462  * This would appear to be a side effect of the mbuf implementation.
1463  * Combining these two algorithms results in the observed behavior
1464  * of having a fixed window size at almost all times.
1465  *
1466  * Below we obtain similar behavior by forcing the offered window to
1467  * a multiple of the mss when it is feasible to do so.
1468  *
1469  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1470  * Regular options like TIMESTAMP are taken into account.
1471  */
1472 u32 __tcp_select_window(struct sock *sk)
1473 {
1474 	struct inet_connection_sock *icsk = inet_csk(sk);
1475 	struct tcp_sock *tp = tcp_sk(sk);
1476 	/* MSS for the peer's data.  Previous versions used mss_clamp
1477 	 * here.  I don't know if the value based on our guesses
1478 	 * of peer's MSS is better for the performance.  It's more correct
1479 	 * but may be worse for the performance because of rcv_mss
1480 	 * fluctuations.  --SAW  1998/11/1
1481 	 */
1482 	int mss = icsk->icsk_ack.rcv_mss;
1483 	int free_space = tcp_space(sk);
1484 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1485 	int window;
1486 
1487 	if (mss > full_space)
1488 		mss = full_space;
1489 
1490 	if (free_space < full_space/2) {
1491 		icsk->icsk_ack.quick = 0;
1492 
1493 		if (tcp_memory_pressure)
1494 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
1495 
1496 		if (free_space < mss)
1497 			return 0;
1498 	}
1499 
1500 	if (free_space > tp->rcv_ssthresh)
1501 		free_space = tp->rcv_ssthresh;
1502 
1503 	/* Don't do rounding if we are using window scaling, since the
1504 	 * scaled window will not line up with the MSS boundary anyway.
1505 	 */
1506 	window = tp->rcv_wnd;
1507 	if (tp->rx_opt.rcv_wscale) {
1508 		window = free_space;
1509 
1510 		/* Advertise enough space so that it won't get scaled away.
1511 		 * Important case: prevent zero window announcement if
1512 		 * 1<<rcv_wscale > mss.
1513 		 */
1514 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1515 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1516 				  << tp->rx_opt.rcv_wscale);
1517 	} else {
1518 		/* Get the largest window that is a nice multiple of mss.
1519 		 * Window clamp already applied above.
1520 		 * If our current window offering is within 1 mss of the
1521 		 * free space we just keep it. This prevents the divide
1522 		 * and multiply from happening most of the time.
1523 		 * We also don't do any window rounding when the free space
1524 		 * is too small.
1525 		 */
1526 		if (window <= free_space - mss || window > free_space)
1527 			window = (free_space/mss)*mss;
1528 	}
1529 
1530 	return window;
1531 }
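/* Rounding sketch for the non-scaled branch, with assumed values: with
 * rcv_wscale = 0, mss = 1460, free_space = 10000 and a currently advertised
 * window of 4000, the window is recomputed as (10000 / 1460) * 1460 = 8760.
 * Had the current window already been 8760, it would be within one mss of
 * free_space and kept as-is, avoiding the divide.
 */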
1532 
1533 /* Attempt to collapse two adjacent SKB's during retransmission. */
1534 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1535 {
1536 	struct tcp_sock *tp = tcp_sk(sk);
1537 	struct sk_buff *next_skb = skb->next;
1538 
1539 	/* The first test we must make is that neither of these two
1540 	 * SKB's are still referenced by someone else.
1541 	 */
1542 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
1543 		int skb_size = skb->len, next_skb_size = next_skb->len;
1544 		u16 flags = TCP_SKB_CB(skb)->flags;
1545 
1546 		/* Also punt if next skb has been SACK'd. */
1547 		if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1548 			return;
1549 
1550 		/* Next skb is out of window. */
1551 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
1552 			return;
1553 
1554 		/* Punt if not enough space exists in the first SKB for
1555 		 * the data in the second, or the total combined payload
1556 		 * would exceed the MSS.
1557 		 */
1558 		if ((next_skb_size > skb_tailroom(skb)) ||
1559 		    ((skb_size + next_skb_size) > mss_now))
1560 			return;
1561 
1562 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
1563 		       tcp_skb_pcount(next_skb) != 1);
1564 
1565 		/* changing transmit queue under us so clear hints */
1566 		clear_all_retrans_hints(tp);
1567 
1568 		/* Ok.	We will be able to collapse the packet. */
1569 		__skb_unlink(next_skb, &sk->sk_write_queue);
1570 
1571 		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1572 
1573 		if (next_skb->ip_summed == CHECKSUM_HW)
1574 			skb->ip_summed = CHECKSUM_HW;
1575 
1576 		if (skb->ip_summed != CHECKSUM_HW)
1577 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1578 
1579 		/* Update sequence range on original skb. */
1580 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1581 
1582 		/* Merge over control information. */
1583 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1584 		TCP_SKB_CB(skb)->flags = flags;
1585 
1586 		/* All done, get rid of second SKB and account for it so
1587 		 * packet counting does not break.
1588 		 */
1589 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
1590 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
1591 			tp->retrans_out -= tcp_skb_pcount(next_skb);
1592 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
1593 			tp->lost_out -= tcp_skb_pcount(next_skb);
1594 			tp->left_out -= tcp_skb_pcount(next_skb);
1595 		}
1596 		/* Reno case is special. Sigh... */
1597 		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
1598 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
1599 			tp->left_out -= tcp_skb_pcount(next_skb);
1600 		}
1601 
1602 		/* Not quite right: it can be > snd.fack, but
1603 		 * it is better to underestimate fackets.
1604 		 */
1605 		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
1606 		tcp_packets_out_dec(tp, next_skb);
1607 		sk_stream_free_skb(sk, next_skb);
1608 	}
1609 }
1610 
1611 /* Do a simple retransmit without using the backoff mechanisms in
1612  * tcp_timer. This is used for path mtu discovery.
1613  * The socket is already locked here.
1614  */
1615 void tcp_simple_retransmit(struct sock *sk)
1616 {
1617 	const struct inet_connection_sock *icsk = inet_csk(sk);
1618 	struct tcp_sock *tp = tcp_sk(sk);
1619 	struct sk_buff *skb;
1620 	unsigned int mss = tcp_current_mss(sk, 0);
1621 	int lost = 0;
1622 
1623 	sk_stream_for_retrans_queue(skb, sk) {
1624 		if (skb->len > mss &&
1625 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1626 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1627 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1628 				tp->retrans_out -= tcp_skb_pcount(skb);
1629 			}
1630 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
1631 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1632 				tp->lost_out += tcp_skb_pcount(skb);
1633 				lost = 1;
1634 			}
1635 		}
1636 	}
1637 
1638 	clear_all_retrans_hints(tp);
1639 
1640 	if (!lost)
1641 		return;
1642 
1643 	tcp_sync_left_out(tp);
1644 
1645  	/* Don't muck with the congestion window here.
1646 	 * The reason is that we do not increase the amount of _data_
1647 	 * in the network, but the units have changed and the effective
1648 	 * cwnd/ssthresh really are reduced now.
1649 	 */
1650 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
1651 		tp->high_seq = tp->snd_nxt;
1652 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
1653 		tp->prior_ssthresh = 0;
1654 		tp->undo_marker = 0;
1655 		tcp_set_ca_state(sk, TCP_CA_Loss);
1656 	}
1657 	tcp_xmit_retransmit_queue(sk);
1658 }
1659 
1660 /* This retransmits one SKB.  Policy decisions and retransmit queue
1661  * state updates are done by the caller.  Returns non-zero if an
1662  * error occurred which prevented the send.
1663  */
1664 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1665 {
1666 	struct tcp_sock *tp = tcp_sk(sk);
1667 	struct inet_connection_sock *icsk = inet_csk(sk);
1668  	unsigned int cur_mss = tcp_current_mss(sk, 0);
1669 	int err;
1670 
1671 	/* Inconclusive MTU probe */
1672 	if (icsk->icsk_mtup.probe_size) {
1673 		icsk->icsk_mtup.probe_size = 0;
1674 	}
1675 
1676 	/* Do not send more than we have queued. 1/4 is reserved for possible
1677 	 * copying overhead: fragmentation, tunneling, mangling etc.
1678 	 */
1679 	if (atomic_read(&sk->sk_wmem_alloc) >
1680 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1681 		return -EAGAIN;
1682 
1683 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1684 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1685 			BUG();
1686 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1687 			return -ENOMEM;
1688 	}
1689 
1690 	/* If the receiver has shrunk its window, and the skb is out of
1691 	 * the new window, do not retransmit it. The exception is the
1692 	 * case when the window is shrunk to zero, in which case
1693 	 * our retransmit serves as a zero window probe.
1694 	 */
1695 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
1696 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
1697 		return -EAGAIN;
1698 
1699 	if (skb->len > cur_mss) {
1700 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1701 			return -ENOMEM; /* We'll try again later. */
1702 	}
1703 
1704 	/* Collapse two adjacent packets if worthwhile and we can. */
1705 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1706 	   (skb->len < (cur_mss >> 1)) &&
1707 	   (skb->next != sk->sk_send_head) &&
1708 	   (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
1709 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
1710 	   (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
1711 	   (sysctl_tcp_retrans_collapse != 0))
1712 		tcp_retrans_try_collapse(sk, skb, cur_mss);
1713 
1714 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1715 		return -EHOSTUNREACH; /* Routing failure or similar. */
1716 
1717 	/* Some Solaris stacks overoptimize and ignore the FIN on a
1718 	 * retransmit when old data is attached.  So strip it off
1719 	 * since it is cheap to do so and saves bytes on the network.
1720 	 */
1721 	if (skb->len > 0 &&
1722 	   (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1723 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1724 		if (!pskb_trim(skb, 0)) {
1725 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
1726 			skb_shinfo(skb)->gso_segs = 1;
1727 			skb_shinfo(skb)->gso_size = 0;
1728 			skb_shinfo(skb)->gso_type = 0;
1729 			skb->ip_summed = CHECKSUM_NONE;
1730 			skb->csum = 0;
1731 		}
1732 	}
1733 
1734 	/* If the clone we made for the first transmission is still in
1735 	 * somebody's hands, make a copy; otherwise just make a clone.
1736 	 */
1737 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1738 
1739 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1740 
1741 	if (err == 0) {
1742 		/* Update global TCP statistics. */
1743 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
1744 
1745 		tp->total_retrans++;
1746 
1747 #if FASTRETRANS_DEBUG > 0
1748 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1749 			if (net_ratelimit())
1750 				printk(KERN_DEBUG "retrans_out leaked.\n");
1751 		}
1752 #endif
1753 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1754 		tp->retrans_out += tcp_skb_pcount(skb);
1755 
1756 		/* Save stamp of the first retransmit. */
1757 		if (!tp->retrans_stamp)
1758 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1759 
1760 		tp->undo_retrans++;
1761 
1762 		/* snd_nxt is stored to detect loss of retransmitted segment,
1763 		 * see tcp_input.c tcp_sacktag_write_queue().
1764 		 */
1765 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1766 	}
1767 	return err;
1768 }
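
/*
 * Illustrative sketch, not part of the original file: the shape of a
 * timeout-style caller of tcp_retransmit_skb().  The real logic lives in
 * tcp_timer.c:tcp_retransmit_timer(), which also handles backoff,
 * ca_state changes and giving up; those details are omitted here.
 */
static void example_retransmit_head(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

	if (skb == NULL)
		return;

	/* tcp_retransmit_skb() returns non-zero if the segment did not go
	 * out (-EAGAIN/-ENOMEM under local congestion, -EHOSTUNREACH on a
	 * routing failure).  A timeout-driven caller can simply ignore the
	 * error and let the re-armed timer retry later.
	 */
	tcp_retransmit_skb(sk, skb);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, TCP_RTO_MAX);
}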
1769 
1770 /* This gets called after a retransmit timeout, and the initially
1771  * retransmitted data is acknowledged.  It tries to continue
1772  * resending the rest of the retransmit queue, until either
1773  * we've sent it all or the congestion window limit is reached.
1774  * If doing SACK, the first ACK which comes back for a timeout-based
1775  * retransmit packet might feed us FACK information again.
1776  * If so, we use it to avoid unnecessary retransmissions.
1777  */
1778 void tcp_xmit_retransmit_queue(struct sock *sk)
1779 {
1780 	const struct inet_connection_sock *icsk = inet_csk(sk);
1781 	struct tcp_sock *tp = tcp_sk(sk);
1782 	struct sk_buff *skb;
1783 	int packet_cnt;
1784 
1785 	if (tp->retransmit_skb_hint) {
1786 		skb = tp->retransmit_skb_hint;
1787 		packet_cnt = tp->retransmit_cnt_hint;
1788 	} else {
1789 		skb = sk->sk_write_queue.next;
1790 		packet_cnt = 0;
1791 	}
1792 
1793 	/* First pass: retransmit lost packets. */
1794 	if (tp->lost_out) {
1795 		sk_stream_for_retrans_queue_from(skb, sk) {
1796 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
1797 
1798 			/* we could do better than to assign each time */
1799 			tp->retransmit_skb_hint = skb;
1800 			tp->retransmit_cnt_hint = packet_cnt;
1801 
1802 			/* Assume this retransmit will generate
1803 			 * only one packet for congestion window
1804 			 * calculation purposes.  This works because
1805 			 * tcp_retransmit_skb() will chop up the
1806 			 * packet to be MSS sized and all the
1807 			 * packet counting works out.
1808 			 */
1809 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1810 				return;
1811 
1812 			if (sacked & TCPCB_LOST) {
1813 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1814 					if (tcp_retransmit_skb(sk, skb)) {
1815 						tp->retransmit_skb_hint = NULL;
1816 						return;
1817 					}
1818 					if (icsk->icsk_ca_state != TCP_CA_Loss)
1819 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1820 					else
1821 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1822 
1823 					if (skb ==
1824 					    skb_peek(&sk->sk_write_queue))
1825 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1826 									  inet_csk(sk)->icsk_rto,
1827 									  TCP_RTO_MAX);
1828 				}
1829 
1830 				packet_cnt += tcp_skb_pcount(skb);
1831 				if (packet_cnt >= tp->lost_out)
1832 					break;
1833 			}
1834 		}
1835 	}
1836 
1837 	/* OK, demanded retransmission is finished. */
1838 
1839 	/* Forward retransmissions are possible only during Recovery. */
1840 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
1841 		return;
1842 
1843 	/* No forward retransmissions in Reno are possible. */
1844 	if (!tp->rx_opt.sack_ok)
1845 		return;
1846 
1847 	/* We have to make a difficult choice between forward transmission
1848 	 * and retransmission; both ways have their merits.
1849 	 *
1850 	 * For now we do not retransmit anything while we still have new
1851 	 * segments to send.
1852 	 */
1853 
1854 	if (tcp_may_send_now(sk, tp))
1855 		return;
1856 
1857 	if (tp->forward_skb_hint) {
1858 		skb = tp->forward_skb_hint;
1859 		packet_cnt = tp->forward_cnt_hint;
1860 	} else {
1861 		skb = sk->sk_write_queue.next;
1862 		packet_cnt = 0;
1863 	}
1864 
1865 	sk_stream_for_retrans_queue_from(skb, sk) {
1866 		tp->forward_cnt_hint = packet_cnt;
1867 		tp->forward_skb_hint = skb;
1868 
1869 		/* Similar to the retransmit loop above we
1870 		 * can pretend that the retransmitted SKB
1871 		 * we send out here will be composed of one
1872 		 * real MSS sized packet because tcp_retransmit_skb()
1873 		 * will fragment it if necessary.
1874 		 */
1875 		if (++packet_cnt > tp->fackets_out)
1876 			break;
1877 
1878 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1879 			break;
1880 
1881 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
1882 			continue;
1883 
1884 		/* Ok, retransmit it. */
1885 		if (tcp_retransmit_skb(sk, skb)) {
1886 			tp->forward_skb_hint = NULL;
1887 			break;
1888 		}
1889 
1890 		if (skb == skb_peek(&sk->sk_write_queue))
1891 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1892 						  inet_csk(sk)->icsk_rto,
1893 						  TCP_RTO_MAX);
1894 
1895 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
1896 	}
1897 }
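
/*
 * Illustrative sketch, not part of the original file: the two gates that
 * terminate the first (lost-segment) pass above, pulled out as a helper
 * for clarity.  The helper name is hypothetical.
 */
static int example_more_lost_to_send(struct tcp_sock *tp, int packet_cnt)
{
	/* Stop once every segment counted in lost_out has been covered,
	 * or once the congestion window is full of in-flight packets.
	 */
	return packet_cnt < tp->lost_out &&
	       tcp_packets_in_flight(tp) < tp->snd_cwnd;
}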
1898 
1899 
1900 /* Send a fin.  The caller locks the socket for us.  This cannot be
1901  * allowed to fail queueing a FIN frame under any circumstances.
1902  */
1903 void tcp_send_fin(struct sock *sk)
1904 {
1905 	struct tcp_sock *tp = tcp_sk(sk);
1906 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
1907 	int mss_now;
1908 
1909 	/* Optimization, tack on the FIN if we have a queue of
1910 	 * unsent frames.  But be careful about outgoing SACKs
1911 	 * and IP options.
1912 	 */
1913 	mss_now = tcp_current_mss(sk, 1);
1914 
1915 	if (sk->sk_send_head != NULL) {
1916 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
1917 		TCP_SKB_CB(skb)->end_seq++;
1918 		tp->write_seq++;
1919 	} else {
1920 		/* Socket is locked, keep trying until memory is available. */
1921 		for (;;) {
1922 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
1923 			if (skb)
1924 				break;
1925 			yield();
1926 		}
1927 
1928 		/* Reserve space for headers and prepare control bits. */
1929 		skb_reserve(skb, MAX_TCP_HEADER);
1930 		skb->csum = 0;
1931 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
1932 		TCP_SKB_CB(skb)->sacked = 0;
1933 		skb_shinfo(skb)->gso_segs = 1;
1934 		skb_shinfo(skb)->gso_size = 0;
1935 		skb_shinfo(skb)->gso_type = 0;
1936 
1937 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
1938 		TCP_SKB_CB(skb)->seq = tp->write_seq;
1939 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
1940 		tcp_queue_skb(sk, skb);
1941 	}
1942 	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
1943 }
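
/*
 * Illustrative sketch, not part of the original file: how the send side
 * of a shutdown might use tcp_send_fin().  The real state handling is in
 * tcp.c (tcp_shutdown()/tcp_close()), which also moves the socket through
 * tcp_close_state(); only the FIN-owing states are checked here.
 */
static void example_shutdown_send_side(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT))
		tcp_send_fin(sk);
}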
1944 
1945 /* We get here when a process closes a file descriptor (either due to
1946  * an explicit close() or as a byproduct of exit()'ing) and there
1947  * was unread data in the receive queue.  This behavior is recommended
1948  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
1949  */
1950 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
1951 {
1952 	struct tcp_sock *tp = tcp_sk(sk);
1953 	struct sk_buff *skb;
1954 
1955 	/* NOTE: No TCP options attached and we never retransmit this. */
1956 	skb = alloc_skb(MAX_TCP_HEADER, priority);
1957 	if (!skb) {
1958 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
1959 		return;
1960 	}
1961 
1962 	/* Reserve space for headers and prepare control bits. */
1963 	skb_reserve(skb, MAX_TCP_HEADER);
1964 	skb->csum = 0;
1965 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
1966 	TCP_SKB_CB(skb)->sacked = 0;
1967 	skb_shinfo(skb)->gso_segs = 1;
1968 	skb_shinfo(skb)->gso_size = 0;
1969 	skb_shinfo(skb)->gso_type = 0;
1970 
1971 	/* Send it off. */
1972 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
1973 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
1974 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1975 	if (tcp_transmit_skb(sk, skb, 0, priority))
1976 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
1977 }
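
/*
 * Illustrative sketch, not part of the original file: the abort-on-close
 * case described above.  tcp.c:tcp_close() does roughly this when unread
 * data is left in the receive queue: drop to TCP_CLOSE and answer with a
 * RST instead of a FIN.
 */
static void example_abort_on_close(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_send_active_reset(sk, GFP_KERNEL);
}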
1978 
1979 /* WARNING: This routine must only be called when we have already sent
1980  * a SYN packet that crossed the incoming SYN that caused this routine
1981  * to get called. If this assumption fails then the initial rcv_wnd
1982  * and rcv_wscale values will not be correct.
1983  */
1984 int tcp_send_synack(struct sock *sk)
1985 {
1986 	struct sk_buff* skb;
1987 
1988 	skb = skb_peek(&sk->sk_write_queue);
1989 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
1990 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
1991 		return -EFAULT;
1992 	}
1993 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
1994 		if (skb_cloned(skb)) {
1995 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
1996 			if (nskb == NULL)
1997 				return -ENOMEM;
1998 			__skb_unlink(skb, &sk->sk_write_queue);
1999 			skb_header_release(nskb);
2000 			__skb_queue_head(&sk->sk_write_queue, nskb);
2001 			sk_stream_free_skb(sk, skb);
2002 			sk_charge_skb(sk, nskb);
2003 			skb = nskb;
2004 		}
2005 
2006 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2007 		TCP_ECN_send_synack(tcp_sk(sk), skb);
2008 	}
2009 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2010 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2011 }
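
/*
 * Illustrative sketch, not part of the original file: the simultaneous-
 * open case that makes tcp_send_synack() necessary.  When our SYN crosses
 * the peer's SYN, the receive path moves the socket to TCP_SYN_RECV and
 * turns the queued SYN into a SYN-ACK; everything else the real state
 * machine in tcp_input.c does is omitted here.
 */
static int example_crossed_syn(struct sock *sk)
{
	tcp_set_state(sk, TCP_SYN_RECV);
	return tcp_send_synack(sk);
}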
2012 
2013 /*
2014  * Prepare a SYN-ACK.
2015  */
2016 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2017 				 struct request_sock *req)
2018 {
2019 	struct inet_request_sock *ireq = inet_rsk(req);
2020 	struct tcp_sock *tp = tcp_sk(sk);
2021 	struct tcphdr *th;
2022 	int tcp_header_size;
2023 	struct sk_buff *skb;
2024 
2025 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2026 	if (skb == NULL)
2027 		return NULL;
2028 
2029 	/* Reserve space for headers. */
2030 	skb_reserve(skb, MAX_TCP_HEADER);
2031 
2032 	skb->dst = dst_clone(dst);
2033 
2034 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
2035 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
2036 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
2037 			   /* SACK_PERM is in the place of NOP NOP of TS */
2038 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2039 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
2040 
2041 	memset(th, 0, sizeof(struct tcphdr));
2042 	th->syn = 1;
2043 	th->ack = 1;
2044 	TCP_ECN_make_synack(req, th);
2045 	th->source = inet_sk(sk)->sport;
2046 	th->dest = ireq->rmt_port;
2047 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2048 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2049 	TCP_SKB_CB(skb)->sacked = 0;
2050 	skb_shinfo(skb)->gso_segs = 1;
2051 	skb_shinfo(skb)->gso_size = 0;
2052 	skb_shinfo(skb)->gso_type = 0;
2053 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2054 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2055 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2056 		__u8 rcv_wscale;
2057 		/* Set this up on the first call only */
2058 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2059 		/* tcp_full_space because it is guaranteed to be the first packet */
2060 		tcp_select_initial_window(tcp_full_space(sk),
2061 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2062 			&req->rcv_wnd,
2063 			&req->window_clamp,
2064 			ireq->wscale_ok,
2065 			&rcv_wscale);
2066 		ireq->rcv_wscale = rcv_wscale;
2067 	}
2068 
2069 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2070 	th->window = htons(req->rcv_wnd);
2071 
2072 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2073 	tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2074 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2075 			      TCP_SKB_CB(skb)->when,
2076 			      req->ts_recent);
2077 
2078 	skb->csum = 0;
2079 	th->doff = (tcp_header_size >> 2);
2080 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
2081 	return skb;
2082 }
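
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for tcp_make_synack().  An AF-specific caller (for example
 * tcp_v4_send_synack()) builds the SYN-ACK, fills in the transport
 * checksum and hands the skb to its own output routine; those AF details
 * are left out, so this sketch just frees the skb it built.
 */
static int example_build_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req)
{
	struct sk_buff *skb = tcp_make_synack(sk, dst, req);

	if (skb == NULL)
		return -ENOMEM;

	/* A real caller transmits the skb here instead of freeing it. */
	kfree_skb(skb);
	return 0;
}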
2083 
2084 /*
2085  * Do all connect socket setups that can be done AF independent.
2086  */
2087 static void tcp_connect_init(struct sock *sk)
2088 {
2089 	struct dst_entry *dst = __sk_dst_get(sk);
2090 	struct tcp_sock *tp = tcp_sk(sk);
2091 	__u8 rcv_wscale;
2092 
2093 	/* We'll fix this up when we get a response from the other end.
2094 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2095 	 */
2096 	tp->tcp_header_len = sizeof(struct tcphdr) +
2097 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2098 
2099 	/* If user gave his TCP_MAXSEG, record it to clamp */
2100 	if (tp->rx_opt.user_mss)
2101 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2102 	tp->max_window = 0;
2103 	tcp_mtup_init(sk);
2104 	tcp_sync_mss(sk, dst_mtu(dst));
2105 
2106 	if (!tp->window_clamp)
2107 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2108 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2109 	tcp_initialize_rcv_mss(sk);
2110 
2111 	tcp_select_initial_window(tcp_full_space(sk),
2112 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2113 				  &tp->rcv_wnd,
2114 				  &tp->window_clamp,
2115 				  sysctl_tcp_window_scaling,
2116 				  &rcv_wscale);
2117 
2118 	tp->rx_opt.rcv_wscale = rcv_wscale;
2119 	tp->rcv_ssthresh = tp->rcv_wnd;
2120 
2121 	sk->sk_err = 0;
2122 	sock_reset_flag(sk, SOCK_DONE);
2123 	tp->snd_wnd = 0;
2124 	tcp_init_wl(tp, tp->write_seq, 0);
2125 	tp->snd_una = tp->write_seq;
2126 	tp->snd_sml = tp->write_seq;
2127 	tp->rcv_nxt = 0;
2128 	tp->rcv_wup = 0;
2129 	tp->copied_seq = 0;
2130 
2131 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2132 	inet_csk(sk)->icsk_retransmits = 0;
2133 	tcp_clear_retrans(tp);
2134 }
2135 
2136 /*
2137  * Build a SYN and send it off.
2138  */
2139 int tcp_connect(struct sock *sk)
2140 {
2141 	struct tcp_sock *tp = tcp_sk(sk);
2142 	struct sk_buff *buff;
2143 
2144 	tcp_connect_init(sk);
2145 
2146 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2147 	if (unlikely(buff == NULL))
2148 		return -ENOBUFS;
2149 
2150 	/* Reserve space for headers. */
2151 	skb_reserve(buff, MAX_TCP_HEADER);
2152 
2153 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2154 	TCP_ECN_send_syn(sk, tp, buff);
2155 	TCP_SKB_CB(buff)->sacked = 0;
2156 	skb_shinfo(buff)->gso_segs = 1;
2157 	skb_shinfo(buff)->gso_size = 0;
2158 	skb_shinfo(buff)->gso_type = 0;
2159 	buff->csum = 0;
2160 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
2161 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2162 	tp->snd_nxt = tp->write_seq;
2163 	tp->pushed_seq = tp->write_seq;
2164 
2165 	/* Send it off. */
2166 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2167 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2168 	skb_header_release(buff);
2169 	__skb_queue_tail(&sk->sk_write_queue, buff);
2170 	sk_charge_skb(sk, buff);
2171 	tp->packets_out += tcp_skb_pcount(buff);
2172 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2173 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2174 
2175 	/* Timer for repeating the SYN until an answer. */
2176 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2177 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2178 	return 0;
2179 }
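
/*
 * Illustrative sketch, not part of the original file: how an AF-specific
 * connect() finishes.  By the time tcp_connect() is called the route,
 * ports and initial tp->write_seq have been set up and the socket is in
 * TCP_SYN_SENT; on failure the caller falls back to TCP_CLOSE, roughly as
 * tcp_v4_connect() does.
 */
static int example_finish_connect(struct sock *sk)
{
	int err = tcp_connect(sk);

	if (err)
		tcp_set_state(sk, TCP_CLOSE);
	return err;
}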
2180 
2181 /* Send out a delayed ack, the caller does the policy checking
2182  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
2183  * for details.
2184  */
2185 void tcp_send_delayed_ack(struct sock *sk)
2186 {
2187 	struct inet_connection_sock *icsk = inet_csk(sk);
2188 	int ato = icsk->icsk_ack.ato;
2189 	unsigned long timeout;
2190 
2191 	if (ato > TCP_DELACK_MIN) {
2192 		const struct tcp_sock *tp = tcp_sk(sk);
2193 		int max_ato = HZ/2;
2194 
2195 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2196 			max_ato = TCP_DELACK_MAX;
2197 
2198 		/* Slow path, intersegment interval is "high". */
2199 
2200 		/* If some rtt estimate is known, use it to bound delayed ack.
2201 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2202 		 * directly.
2203 		 */
2204 		if (tp->srtt) {
2205 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
2206 
2207 			if (rtt < max_ato)
2208 				max_ato = rtt;
2209 		}
2210 
2211 		ato = min(ato, max_ato);
2212 	}
2213 
2214 	/* Stay within the limit we were given */
2215 	timeout = jiffies + ato;
2216 
2217 	/* Use new timeout only if there wasn't an older one earlier. */
2218 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2219 		/* If delack timer was blocked or is about to expire,
2220 		 * send ACK now.
2221 		 */
2222 		if (icsk->icsk_ack.blocked ||
2223 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2224 			tcp_send_ack(sk);
2225 			return;
2226 		}
2227 
2228 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2229 			timeout = icsk->icsk_ack.timeout;
2230 	}
2231 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2232 	icsk->icsk_ack.timeout = timeout;
2233 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2234 }
2235 
2236 /* This routine sends an ack and also updates the window. */
2237 void tcp_send_ack(struct sock *sk)
2238 {
2239 	/* If we have been reset, we may not send again. */
2240 	if (sk->sk_state != TCP_CLOSE) {
2241 		struct tcp_sock *tp = tcp_sk(sk);
2242 		struct sk_buff *buff;
2243 
2244 		/* We are not putting this on the write queue, so
2245 		 * tcp_transmit_skb() will set the ownership to this
2246 		 * sock.
2247 		 */
2248 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2249 		if (buff == NULL) {
2250 			inet_csk_schedule_ack(sk);
2251 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2252 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2253 						  TCP_DELACK_MAX, TCP_RTO_MAX);
2254 			return;
2255 		}
2256 
2257 		/* Reserve space for headers and prepare control bits. */
2258 		skb_reserve(buff, MAX_TCP_HEADER);
2259 		buff->csum = 0;
2260 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2261 		TCP_SKB_CB(buff)->sacked = 0;
2262 		skb_shinfo(buff)->gso_segs = 1;
2263 		skb_shinfo(buff)->gso_size = 0;
2264 		skb_shinfo(buff)->gso_type = 0;
2265 
2266 		/* Send it off, this clears delayed acks for us. */
2267 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2268 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2269 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2270 	}
2271 }
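
/*
 * Illustrative sketch, not part of the original file: the shape of the
 * decision made by tcp_input.c:__tcp_ack_snd_check().  The actual "ACK
 * now" conditions (full frames received, window updates, quick-ack mode,
 * out-of-order data) are collapsed into a single flag here.
 */
static void example_ack_snd_check(struct sock *sk, int ack_now)
{
	if (ack_now)
		tcp_send_ack(sk);		/* goes out immediately */
	else
		tcp_send_delayed_ack(sk);	/* armed on the delack timer */
}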
2272 
2273 /* This routine sends a packet with an out of date sequence
2274  * number. It assumes the other end will try to ack it.
2275  *
2276  * Question: what should we do in urgent mode?
2277  * 4.4BSD forces sending single byte of data. We cannot send
2278  * out of window data, because we have SND.NXT==SND.MAX...
2279  *
2280  * Current solution: send TWO zero-length segments in urgent mode:
2281  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
2282  * out-of-date with SEG.SEQ=SND.UNA-1, to probe the window.
2283  */
2284 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2285 {
2286 	struct tcp_sock *tp = tcp_sk(sk);
2287 	struct sk_buff *skb;
2288 
2289 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
2290 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2291 	if (skb == NULL)
2292 		return -1;
2293 
2294 	/* Reserve space for headers and set control bits. */
2295 	skb_reserve(skb, MAX_TCP_HEADER);
2296 	skb->csum = 0;
2297 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2298 	TCP_SKB_CB(skb)->sacked = urgent;
2299 	skb_shinfo(skb)->gso_segs = 1;
2300 	skb_shinfo(skb)->gso_size = 0;
2301 	skb_shinfo(skb)->gso_type = 0;
2302 
2303 	/* Use a previous sequence.  This should cause the other
2304 	 * end to send an ack.  Don't queue or clone SKB, just
2305 	 * send it.
2306 	 */
2307 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
2308 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2309 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2310 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2311 }
2312 
2313 int tcp_write_wakeup(struct sock *sk)
2314 {
2315 	if (sk->sk_state != TCP_CLOSE) {
2316 		struct tcp_sock *tp = tcp_sk(sk);
2317 		struct sk_buff *skb;
2318 
2319 		if ((skb = sk->sk_send_head) != NULL &&
2320 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
2321 			int err;
2322 			unsigned int mss = tcp_current_mss(sk, 0);
2323 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
2324 
2325 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2326 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2327 
2328 			/* We are probing the opening of a window
2329 			 * but the window size is != 0; this must have
2330 			 * been the result of SWS avoidance (sender side).
2331 			 */
2332 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2333 			    skb->len > mss) {
2334 				seg_size = min(seg_size, mss);
2335 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2336 				if (tcp_fragment(sk, skb, seg_size, mss))
2337 					return -1;
2338 			} else if (!tcp_skb_pcount(skb))
2339 				tcp_set_skb_tso_segs(sk, skb, mss);
2340 
2341 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2342 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2343 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2344 			if (!err) {
2345 				update_send_head(sk, tp, skb);
2346 			}
2347 			return err;
2348 		} else {
2349 			if (tp->urg_mode &&
2350 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
2351 				tcp_xmit_probe_skb(sk, TCPCB_URG);
2352 			return tcp_xmit_probe_skb(sk, 0);
2353 		}
2354 	}
2355 	return -1;
2356 }
2357 
2358 /* A window probe timeout has occurred.  If window is not closed send
2359  * a partial packet else a zero probe.
2360  */
2361 void tcp_send_probe0(struct sock *sk)
2362 {
2363 	struct inet_connection_sock *icsk = inet_csk(sk);
2364 	struct tcp_sock *tp = tcp_sk(sk);
2365 	int err;
2366 
2367 	err = tcp_write_wakeup(sk);
2368 
2369 	if (tp->packets_out || !sk->sk_send_head) {
2370 		/* Cancel probe timer, if it is not required. */
2371 		icsk->icsk_probes_out = 0;
2372 		icsk->icsk_backoff = 0;
2373 		return;
2374 	}
2375 
2376 	if (err <= 0) {
2377 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2378 			icsk->icsk_backoff++;
2379 		icsk->icsk_probes_out++;
2380 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2381 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2382 					  TCP_RTO_MAX);
2383 	} else {
2384 		/* If the packet was not sent due to local congestion,
2385 		 * do not back off and do not remember icsk_probes_out.
2386 		 * Let local senders fight for local resources.
2387 		 *
2388 		 * Still, use the accumulated backoff.
2389 		 */
2390 		if (!icsk->icsk_probes_out)
2391 			icsk->icsk_probes_out = 1;
2392 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2393 					  min(icsk->icsk_rto << icsk->icsk_backoff,
2394 					      TCP_RESOURCE_PROBE_INTERVAL),
2395 					  TCP_RTO_MAX);
2396 	}
2397 }
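
/*
 * Illustrative sketch, not part of the original file: the zero-window
 * probe timer.  tcp_timer.c:tcp_probe_timer() is the real caller; it also
 * kills the connection once too many probes have gone unanswered, which
 * is reduced to a simple bound here.
 */
static void example_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	/* Nothing to probe for if data is in flight or nothing is queued. */
	if (tp->packets_out || !sk->sk_send_head)
		return;

	if (icsk->icsk_probes_out <= sysctl_tcp_retries2)
		tcp_send_probe0(sk);
}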
2398 
2399 EXPORT_SYMBOL(tcp_connect);
2400 EXPORT_SYMBOL(tcp_make_synack);
2401 EXPORT_SYMBOL(tcp_simple_retransmit);
2402 EXPORT_SYMBOL(tcp_sync_mss);
2403 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2404 EXPORT_SYMBOL(tcp_mtup_init);
2405