xref: /linux/net/ipv4/tcp_output.c (revision 3ab224be6d69de912ee21302745ea45a99274dbc)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  */
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds /*
241da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
251da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
261da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
271da177e4SLinus Torvalds  *				:	AF independence
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
301da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
311da177e4SLinus Torvalds  *					during syn/ack processing.
321da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
331da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
341da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
351da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
361da177e4SLinus Torvalds  *
371da177e4SLinus Torvalds  */
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
421da177e4SLinus Torvalds #include <linux/module.h>
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds /* People can turn this off for buggy TCPs found in printers etc. */
45ab32ea5dSBrian Haley int sysctl_tcp_retrans_collapse __read_mostly = 1;
461da177e4SLinus Torvalds 
4715d99e02SRick Jones /* People can turn this on to work with those rare, broken TCPs that
4815d99e02SRick Jones  * interpret the window field as a signed quantity.
4915d99e02SRick Jones  */
50ab32ea5dSBrian Haley int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
5115d99e02SRick Jones 
521da177e4SLinus Torvalds /* This limits the percentage of the congestion window which we
531da177e4SLinus Torvalds  * will allow a single TSO frame to consume.  Building TSO frames
541da177e4SLinus Torvalds  * which are too large can cause TCP streams to be bursty.
551da177e4SLinus Torvalds  */
56ab32ea5dSBrian Haley int sysctl_tcp_tso_win_divisor __read_mostly = 3;
571da177e4SLinus Torvalds 
58ab32ea5dSBrian Haley int sysctl_tcp_mtu_probing __read_mostly = 0;
59ab32ea5dSBrian Haley int sysctl_tcp_base_mss __read_mostly = 512;
605d424d5aSJohn Heffner 
6135089bb2SDavid S. Miller /* By default, RFC2861 behavior.  */
62ab32ea5dSBrian Haley int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
6335089bb2SDavid S. Miller 
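/* Account for newly transmitted packets in flight and arm the
 * retransmit timer when the first unacknowledged packet hits the wire.
 */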
646ff03ac3SIlpo Järvinen static inline void tcp_packets_out_inc(struct sock *sk,
656ff03ac3SIlpo Järvinen 				       const struct sk_buff *skb)
666ff03ac3SIlpo Järvinen {
676ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
686ff03ac3SIlpo Järvinen 	int orig = tp->packets_out;
696ff03ac3SIlpo Järvinen 
706ff03ac3SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
716ff03ac3SIlpo Järvinen 	if (!orig)
726ff03ac3SIlpo Järvinen 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
736ff03ac3SIlpo Järvinen 					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
746ff03ac3SIlpo Järvinen }
756ff03ac3SIlpo Järvinen 
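/* Advance the send head past a just-transmitted skb, update SND.NXT
 * and the in-flight packet accounting.
 */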
769e412ba7SIlpo Järvinen static void update_send_head(struct sock *sk, struct sk_buff *skb)
771da177e4SLinus Torvalds {
789e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
799e412ba7SIlpo Järvinen 
80fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
811da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
829e412ba7SIlpo Järvinen 	tcp_packets_out_inc(sk, skb);
838512430eSIlpo Järvinen 
848512430eSIlpo Järvinen 	/* Don't override Nagle indefinitely with F-RTO */
858512430eSIlpo Järvinen 	if (tp->frto_counter == 2)
868512430eSIlpo Järvinen 		tp->frto_counter = 3;
871da177e4SLinus Torvalds }
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds /* SND.NXT, if window was not shrunk.
901da177e4SLinus Torvalds  * If the window has been shrunk, what should we use? It is not clear at all.
911da177e4SLinus Torvalds  * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
921da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
931da177e4SLinus Torvalds  * invalid. OK, let's settle on this for now:
941da177e4SLinus Torvalds  */
959e412ba7SIlpo Järvinen static inline __u32 tcp_acceptable_seq(struct sock *sk)
961da177e4SLinus Torvalds {
979e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
989e412ba7SIlpo Järvinen 
991da177e4SLinus Torvalds 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
1001da177e4SLinus Torvalds 		return tp->snd_nxt;
1011da177e4SLinus Torvalds 	else
1021da177e4SLinus Torvalds 		return tp->snd_una+tp->snd_wnd;
1031da177e4SLinus Torvalds }
1041da177e4SLinus Torvalds 
1051da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1061da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1071da177e4SLinus Torvalds  *
1081da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1091da177e4SLinus Torvalds  * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
1101da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1111da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1121da177e4SLinus Torvalds  *    large MSS.
1131da177e4SLinus Torvalds  * 4. We do not follow 3; we advertise an MSS calculated from the first
1141da177e4SLinus Torvalds  *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
1151da177e4SLinus Torvalds  *    This may be overridden via information stored in the routing table.
1161da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1171da177e4SLinus Torvalds  *    probably even Jumbo".
1181da177e4SLinus Torvalds  */
1191da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1201da177e4SLinus Torvalds {
1211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1221da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
1231da177e4SLinus Torvalds 	int mss = tp->advmss;
1241da177e4SLinus Torvalds 
1251da177e4SLinus Torvalds 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
1261da177e4SLinus Torvalds 		mss = dst_metric(dst, RTAX_ADVMSS);
1271da177e4SLinus Torvalds 		tp->advmss = mss;
1281da177e4SLinus Torvalds 	}
1291da177e4SLinus Torvalds 
1301da177e4SLinus Torvalds 	return (__u16)mss;
1311da177e4SLinus Torvalds }
1321da177e4SLinus Torvalds 
1331da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
1341da177e4SLinus Torvalds  * This is the first part of the cwnd validation mechanism. */
135463c84b9SArnaldo Carvalho de Melo static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
1361da177e4SLinus Torvalds {
137463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1381da177e4SLinus Torvalds 	s32 delta = tcp_time_stamp - tp->lsndtime;
1391da177e4SLinus Torvalds 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
1401da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1411da177e4SLinus Torvalds 
1426687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1431da177e4SLinus Torvalds 
1446687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1451da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1461da177e4SLinus Torvalds 
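	/* Halve cwnd for every full RTO that elapsed while idle, but
	 * never drop below the restart window.
	 */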
147463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1481da177e4SLinus Torvalds 		cwnd >>= 1;
1491da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
1501da177e4SLinus Torvalds 	tp->snd_cwnd_stamp = tcp_time_stamp;
1511da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1521da177e4SLinus Torvalds }
1531da177e4SLinus Torvalds 
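/* Record the transmission time of a data segment, restarting the
 * congestion window first if the connection has been idle for longer
 * than RTO (RFC2861).
 */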
15440efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
1551da177e4SLinus Torvalds 				struct sk_buff *skb, struct sock *sk)
1561da177e4SLinus Torvalds {
157463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
158463c84b9SArnaldo Carvalho de Melo 	const u32 now = tcp_time_stamp;
1591da177e4SLinus Torvalds 
16035089bb2SDavid S. Miller 	if (sysctl_tcp_slow_start_after_idle &&
16135089bb2SDavid S. Miller 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
162463c84b9SArnaldo Carvalho de Melo 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
1631da177e4SLinus Torvalds 
1641da177e4SLinus Torvalds 	tp->lsndtime = now;
1651da177e4SLinus Torvalds 
1661da177e4SLinus Torvalds 	/* If this is a reply sent within ATO of the last received
1671da177e4SLinus Torvalds 	 * packet, enter pingpong mode.
1681da177e4SLinus Torvalds 	 */
169463c84b9SArnaldo Carvalho de Melo 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
170463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1711da177e4SLinus Torvalds }
1721da177e4SLinus Torvalds 
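/* Account for an ACK we just sent: consume one quick-ACK credit and
 * cancel the delayed-ACK timer.
 */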
17340efc6faSStephen Hemminger static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
1741da177e4SLinus Torvalds {
175463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
176463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1801da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1811da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
1821da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
1831da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
1841da177e4SLinus Torvalds  * This MUST be enforced by all callers.
1851da177e4SLinus Torvalds  */
1861da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
1871da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
1881da177e4SLinus Torvalds 			       int wscale_ok, __u8 *rcv_wscale)
1891da177e4SLinus Torvalds {
1901da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
1911da177e4SLinus Torvalds 
1921da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
1931da177e4SLinus Torvalds 	if (*window_clamp == 0)
1941da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
1951da177e4SLinus Torvalds 	space = min(*window_clamp, space);
1961da177e4SLinus Torvalds 
1971da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
1981da177e4SLinus Torvalds 	if (space > mss)
1991da177e4SLinus Torvalds 		space = (space / mss) * mss;
2001da177e4SLinus Torvalds 
2011da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
20215d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
20315d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
20415d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
20515d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
20615d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
20715d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2081da177e4SLinus Torvalds 	 */
20915d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
2101da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
21115d99e02SRick Jones 	else
21215d99e02SRick Jones 		(*rcv_wnd) = space;
21315d99e02SRick Jones 
2141da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2151da177e4SLinus Torvalds 	if (wscale_ok) {
2161da177e4SLinus Torvalds 		/* Set window scaling on max possible window
2171da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2181da177e4SLinus Torvalds 		 */
2191da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
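		/* E.g. a 256 KB receive buffer ends up with rcv_wscale = 3,
		 * since 262144 >> 3 = 32768 fits in the 16-bit window field.
		 */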
220316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2211da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2221da177e4SLinus Torvalds 			space >>= 1;
2231da177e4SLinus Torvalds 			(*rcv_wscale)++;
2241da177e4SLinus Torvalds 		}
2251da177e4SLinus Torvalds 	}
2261da177e4SLinus Torvalds 
2271da177e4SLinus Torvalds 	/* Set the initial window to a value large enough for senders
2286b251858SDavid S. Miller 	 * following RFC2414.  Senders not following this RFC
2291da177e4SLinus Torvalds 	 * will be satisfied with 2.
2301da177e4SLinus Torvalds 	 */
2311da177e4SLinus Torvalds 	if (mss > (1<<*rcv_wscale)) {
23201ff367eSDavid S. Miller 		int init_cwnd = 4;
23301ff367eSDavid S. Miller 		if (mss > 1460*3)
2341da177e4SLinus Torvalds 			init_cwnd = 2;
23501ff367eSDavid S. Miller 		else if (mss > 1460)
23601ff367eSDavid S. Miller 			init_cwnd = 3;
2371da177e4SLinus Torvalds 		if (*rcv_wnd > init_cwnd*mss)
2381da177e4SLinus Torvalds 			*rcv_wnd = init_cwnd*mss;
2391da177e4SLinus Torvalds 	}
2401da177e4SLinus Torvalds 
2411da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2421da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2461da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2471da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2481da177e4SLinus Torvalds  * frame.
2491da177e4SLinus Torvalds  */
25040efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2511da177e4SLinus Torvalds {
2521da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2531da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2541da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2551da177e4SLinus Torvalds 
2561da177e4SLinus Torvalds 	/* Never shrink the offered window */
2571da177e4SLinus Torvalds 	if (new_win < cur_win) {
2581da177e4SLinus Torvalds 		/* Danger Will Robinson!
2591da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2601da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2611da177e4SLinus Torvalds 		 * window in time.  --DaveM
2621da177e4SLinus Torvalds 		 *
2631da177e4SLinus Torvalds 		 * Relax Will Robinson.
2641da177e4SLinus Torvalds 		 */
2651da177e4SLinus Torvalds 		new_win = cur_win;
2661da177e4SLinus Torvalds 	}
2671da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2681da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2711da177e4SLinus Torvalds 	 * scaled window.
2721da177e4SLinus Torvalds 	 */
27315d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2741da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2751da177e4SLinus Torvalds 	else
2761da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2771da177e4SLinus Torvalds 
2781da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2791da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2801da177e4SLinus Torvalds 
2811da177e4SLinus Torvalds 	/* If we advertise zero window, disable fast path. */
2821da177e4SLinus Torvalds 	if (new_win == 0)
2831da177e4SLinus Torvalds 		tp->pred_flags = 0;
2841da177e4SLinus Torvalds 
2851da177e4SLinus Torvalds 	return new_win;
2861da177e4SLinus Torvalds }
2871da177e4SLinus Torvalds 
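/* ECN (RFC 3168) helpers: the functions below set and clear the ECE/CWR
 * bits on SYN, SYN-ACK and data segments according to the state that was
 * negotiated and is tracked in tp->ecn_flags.
 */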
288bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
289bdf1ee5dSIlpo Järvinen 				       struct sk_buff *skb)
290bdf1ee5dSIlpo Järvinen {
291bdf1ee5dSIlpo Järvinen 	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
292bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags&TCP_ECN_OK))
293bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
294bdf1ee5dSIlpo Järvinen }
295bdf1ee5dSIlpo Järvinen 
296bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
297bdf1ee5dSIlpo Järvinen {
298bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
299bdf1ee5dSIlpo Järvinen 
300bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
301bdf1ee5dSIlpo Järvinen 	if (sysctl_tcp_ecn) {
302bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
303bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
304bdf1ee5dSIlpo Järvinen 	}
305bdf1ee5dSIlpo Järvinen }
306bdf1ee5dSIlpo Järvinen 
307bdf1ee5dSIlpo Järvinen static __inline__ void
308bdf1ee5dSIlpo Järvinen TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
309bdf1ee5dSIlpo Järvinen {
310bdf1ee5dSIlpo Järvinen 	if (inet_rsk(req)->ecn_ok)
311bdf1ee5dSIlpo Järvinen 		th->ece = 1;
312bdf1ee5dSIlpo Järvinen }
313bdf1ee5dSIlpo Järvinen 
314bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
315bdf1ee5dSIlpo Järvinen 				int tcp_header_len)
316bdf1ee5dSIlpo Järvinen {
317bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
318bdf1ee5dSIlpo Järvinen 
319bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
320bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
321bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
322bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
323bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
324bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
325bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
326bdf1ee5dSIlpo Järvinen 				tcp_hdr(skb)->cwr = 1;
327bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
328bdf1ee5dSIlpo Järvinen 			}
329bdf1ee5dSIlpo Järvinen 		} else {
330bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
331bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
332bdf1ee5dSIlpo Järvinen 		}
333bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
334bdf1ee5dSIlpo Järvinen 			tcp_hdr(skb)->ece = 1;
335bdf1ee5dSIlpo Järvinen 	}
336bdf1ee5dSIlpo Järvinen }
337bdf1ee5dSIlpo Järvinen 
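/* Write the TCP options for an established-state segment: timestamps,
 * any pending (D)SACK blocks, and, when MD5 is configured, a placeholder
 * whose location is passed back through md5_hash for later filling.
 */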
338df7a3b07SAl Viro static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
339cfb6eeb4SYOSHIFUJI Hideaki 					 __u32 tstamp, __u8 **md5_hash)
34040efc6faSStephen Hemminger {
34140efc6faSStephen Hemminger 	if (tp->rx_opt.tstamp_ok) {
342496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
34340efc6faSStephen Hemminger 			       (TCPOPT_NOP << 16) |
34440efc6faSStephen Hemminger 			       (TCPOPT_TIMESTAMP << 8) |
34540efc6faSStephen Hemminger 			       TCPOLEN_TIMESTAMP);
34640efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);
34740efc6faSStephen Hemminger 		*ptr++ = htonl(tp->rx_opt.ts_recent);
34840efc6faSStephen Hemminger 	}
34940efc6faSStephen Hemminger 	if (tp->rx_opt.eff_sacks) {
35040efc6faSStephen Hemminger 		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
35140efc6faSStephen Hemminger 		int this_sack;
35240efc6faSStephen Hemminger 
35340efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
35440efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
35540efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
35640efc6faSStephen Hemminger 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
35740efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
3582de979bdSStephen Hemminger 
35940efc6faSStephen Hemminger 		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
36040efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
36140efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
36240efc6faSStephen Hemminger 		}
3632de979bdSStephen Hemminger 
36440efc6faSStephen Hemminger 		if (tp->rx_opt.dsack) {
36540efc6faSStephen Hemminger 			tp->rx_opt.dsack = 0;
36640efc6faSStephen Hemminger 			tp->rx_opt.eff_sacks--;
36740efc6faSStephen Hemminger 		}
36840efc6faSStephen Hemminger 	}
369cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
370cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
371cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
372cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
373cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
374cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
375cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *)ptr;
376cfb6eeb4SYOSHIFUJI Hideaki 	}
377cfb6eeb4SYOSHIFUJI Hideaki #endif
37840efc6faSStephen Hemminger }
37940efc6faSStephen Hemminger 
38040efc6faSStephen Hemminger /* Construct a tcp options header for a SYN or SYN_ACK packet.
38140efc6faSStephen Hemminger  * If this is ever changed, make sure to change the definition of
38240efc6faSStephen Hemminger  * MAX_SYN_SIZE to match the new maximum number of options that you
38340efc6faSStephen Hemminger  * can generate.
384cfb6eeb4SYOSHIFUJI Hideaki  *
385cfb6eeb4SYOSHIFUJI Hideaki  * Note that with the RFC2385 TCP option, we make room for the
386cfb6eeb4SYOSHIFUJI Hideaki  * 16 byte MD5 hash. This will be filled in later, so the pointer for the
387cfb6eeb4SYOSHIFUJI Hideaki  * location to be filled is passed back up.
38840efc6faSStephen Hemminger  */
389df7a3b07SAl Viro static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
39040efc6faSStephen Hemminger 				  int offer_wscale, int wscale, __u32 tstamp,
391cfb6eeb4SYOSHIFUJI Hideaki 				  __u32 ts_recent, __u8 **md5_hash)
39240efc6faSStephen Hemminger {
39340efc6faSStephen Hemminger 	/* We always get an MSS option.
39440efc6faSStephen Hemminger 	 * The option bytes which will be seen in normal data
39540efc6faSStephen Hemminger 	 * packets (should timestamps be used) must be counted in the MSS
39640efc6faSStephen Hemminger 	 * advertised.  But we subtract them from tp->mss_cache so
39740efc6faSStephen Hemminger 	 * that calculations in tcp_sendmsg are simpler etc.
39840efc6faSStephen Hemminger 	 * So account for this fact here if necessary.  If we
39940efc6faSStephen Hemminger 	 * don't do this correctly, as a receiver we won't
40040efc6faSStephen Hemminger 	 * recognize data packets as being full sized when we
40140efc6faSStephen Hemminger 	 * should, and thus we won't abide by the delayed ACK
40240efc6faSStephen Hemminger 	 * rules correctly.
40340efc6faSStephen Hemminger 	 * SACKs don't matter, we never delay an ACK when we
40440efc6faSStephen Hemminger 	 * have any of those going out.
40540efc6faSStephen Hemminger 	 */
40640efc6faSStephen Hemminger 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
40740efc6faSStephen Hemminger 	if (ts) {
40840efc6faSStephen Hemminger 		if (sack)
409496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
410496c98dfSYOSHIFUJI Hideaki 				       (TCPOLEN_SACK_PERM << 16) |
411496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
412496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
41340efc6faSStephen Hemminger 		else
414496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
415496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_NOP << 16) |
416496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
417496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
41840efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);		/* TSVAL */
41940efc6faSStephen Hemminger 		*ptr++ = htonl(ts_recent);	/* TSECR */
42040efc6faSStephen Hemminger 	} else if (sack)
421496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
422496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
423496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_SACK_PERM << 8) |
424496c98dfSYOSHIFUJI Hideaki 			       TCPOLEN_SACK_PERM);
42540efc6faSStephen Hemminger 	if (offer_wscale)
426496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
427496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_WINDOW << 16) |
428496c98dfSYOSHIFUJI Hideaki 			       (TCPOLEN_WINDOW << 8) |
429496c98dfSYOSHIFUJI Hideaki 			       (wscale));
430cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
431cfb6eeb4SYOSHIFUJI Hideaki 	/*
432cfb6eeb4SYOSHIFUJI Hideaki 	 * If MD5 is enabled, then we set the option, and include the size
433cfb6eeb4SYOSHIFUJI Hideaki 	 * (always 18). The actual MD5 hash is added just before the
434cfb6eeb4SYOSHIFUJI Hideaki 	 * packet is sent.
435cfb6eeb4SYOSHIFUJI Hideaki 	 */
436cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
437cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
438cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
439cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
440cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
441cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *) ptr;
442cfb6eeb4SYOSHIFUJI Hideaki 	}
443cfb6eeb4SYOSHIFUJI Hideaki #endif
44440efc6faSStephen Hemminger }
4451da177e4SLinus Torvalds 
4461da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued up by
4471da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
4481da177e4SLinus Torvalds  * transmission and possible later retransmissions.
4491da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
4501da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
4511da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
4521da177e4SLinus Torvalds  * device.
4531da177e4SLinus Torvalds  *
4541da177e4SLinus Torvalds  * We are working here with either a clone of the original
4551da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
4561da177e4SLinus Torvalds  */
457dfb4b9dcSDavid S. Miller static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
4581da177e4SLinus Torvalds {
4596687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
460dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
461dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
462dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
463dfb4b9dcSDavid S. Miller 	int tcp_header_size;
464cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
465cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
466cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
467cfb6eeb4SYOSHIFUJI Hideaki #endif
4681da177e4SLinus Torvalds 	struct tcphdr *th;
4691da177e4SLinus Torvalds 	int sysctl_flags;
4701da177e4SLinus Torvalds 	int err;
4711da177e4SLinus Torvalds 
472dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
473dfb4b9dcSDavid S. Miller 
474dfb4b9dcSDavid S. Miller 	/* If congestion control is doing timestamping, we must
475dfb4b9dcSDavid S. Miller 	 * take such a timestamp before we potentially clone/copy.
476dfb4b9dcSDavid S. Miller 	 */
477164891aaSStephen Hemminger 	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
478dfb4b9dcSDavid S. Miller 		__net_timestamp(skb);
479dfb4b9dcSDavid S. Miller 
480dfb4b9dcSDavid S. Miller 	if (likely(clone_it)) {
481dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
482dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
483dfb4b9dcSDavid S. Miller 		else
484dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
485dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
486dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
487dfb4b9dcSDavid S. Miller 	}
488dfb4b9dcSDavid S. Miller 
489dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
490dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
491dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
492dfb4b9dcSDavid S. Miller 	tcp_header_size = tp->tcp_header_len;
4931da177e4SLinus Torvalds 
4941da177e4SLinus Torvalds #define SYSCTL_FLAG_TSTAMPS	0x1
4951da177e4SLinus Torvalds #define SYSCTL_FLAG_WSCALE	0x2
4961da177e4SLinus Torvalds #define SYSCTL_FLAG_SACK	0x4
4971da177e4SLinus Torvalds 
4981da177e4SLinus Torvalds 	sysctl_flags = 0;
499dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
5001da177e4SLinus Torvalds 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
5011da177e4SLinus Torvalds 		if (sysctl_tcp_timestamps) {
5021da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
5031da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
5041da177e4SLinus Torvalds 		}
5051da177e4SLinus Torvalds 		if (sysctl_tcp_window_scaling) {
5061da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
5071da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_WSCALE;
5081da177e4SLinus Torvalds 		}
5091da177e4SLinus Torvalds 		if (sysctl_tcp_sack) {
5101da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_SACK;
5111da177e4SLinus Torvalds 			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
5121da177e4SLinus Torvalds 				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
5131da177e4SLinus Torvalds 		}
514dfb4b9dcSDavid S. Miller 	} else if (unlikely(tp->rx_opt.eff_sacks)) {
5151da177e4SLinus Torvalds 		/* A SACK is 2 pad bytes, a 2 byte header, plus
5161da177e4SLinus Torvalds 		 * 2 32-bit sequence numbers for each SACK block.
5171da177e4SLinus Torvalds 		 */
5181da177e4SLinus Torvalds 		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
519dfb4b9dcSDavid S. Miller 				    (tp->rx_opt.eff_sacks *
520dfb4b9dcSDavid S. Miller 				     TCPOLEN_SACK_PERBLOCK));
5211da177e4SLinus Torvalds 	}
5221da177e4SLinus Torvalds 
523317a76f9SStephen Hemminger 	if (tcp_packets_in_flight(tp) == 0)
5246687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
5251da177e4SLinus Torvalds 
526cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
527cfb6eeb4SYOSHIFUJI Hideaki 	/*
528cfb6eeb4SYOSHIFUJI Hideaki 	 * Are we doing MD5 on this segment? If so - make
529cfb6eeb4SYOSHIFUJI Hideaki 	 * room for it.
530cfb6eeb4SYOSHIFUJI Hideaki 	 */
531cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tp->af_specific->md5_lookup(sk, sk);
532cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
533cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
534cfb6eeb4SYOSHIFUJI Hideaki #endif
535cfb6eeb4SYOSHIFUJI Hideaki 
536aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
537aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
538e89862f4SDavid S. Miller 	skb_set_owner_w(skb, sk);
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
541aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
5421da177e4SLinus Torvalds 	th->source		= inet->sport;
5431da177e4SLinus Torvalds 	th->dest		= inet->dport;
5441da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
5451da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
546df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
547dfb4b9dcSDavid S. Miller 					tcb->flags);
548dfb4b9dcSDavid S. Miller 
549dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
5501da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
5511da177e4SLinus Torvalds 		 * is never scaled.
5521da177e4SLinus Torvalds 		 */
553600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
5541da177e4SLinus Torvalds 	} else {
5551da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
5561da177e4SLinus Torvalds 	}
5571da177e4SLinus Torvalds 	th->check		= 0;
5581da177e4SLinus Torvalds 	th->urg_ptr		= 0;
5591da177e4SLinus Torvalds 
560dfb4b9dcSDavid S. Miller 	if (unlikely(tp->urg_mode &&
561dfb4b9dcSDavid S. Miller 		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
5621da177e4SLinus Torvalds 		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
5631da177e4SLinus Torvalds 		th->urg			= 1;
5641da177e4SLinus Torvalds 	}
5651da177e4SLinus Torvalds 
566dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
567df7a3b07SAl Viro 		tcp_syn_build_options((__be32 *)(th + 1),
5681da177e4SLinus Torvalds 				      tcp_advertise_mss(sk),
5691da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
5701da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_SACK),
5711da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
5721da177e4SLinus Torvalds 				      tp->rx_opt.rcv_wscale,
5731da177e4SLinus Torvalds 				      tcb->when,
574cfb6eeb4SYOSHIFUJI Hideaki 				      tp->rx_opt.ts_recent,
575cfb6eeb4SYOSHIFUJI Hideaki 
576cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
577cfb6eeb4SYOSHIFUJI Hideaki 				      md5 ? &md5_hash_location :
578cfb6eeb4SYOSHIFUJI Hideaki #endif
579cfb6eeb4SYOSHIFUJI Hideaki 				      NULL);
5801da177e4SLinus Torvalds 	} else {
581df7a3b07SAl Viro 		tcp_build_and_update_options((__be32 *)(th + 1),
582cfb6eeb4SYOSHIFUJI Hideaki 					     tp, tcb->when,
583cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
584cfb6eeb4SYOSHIFUJI Hideaki 					     md5 ? &md5_hash_location :
585cfb6eeb4SYOSHIFUJI Hideaki #endif
586cfb6eeb4SYOSHIFUJI Hideaki 					     NULL);
5879e412ba7SIlpo Järvinen 		TCP_ECN_send(sk, skb, tcp_header_size);
5881da177e4SLinus Torvalds 	}
589dfb4b9dcSDavid S. Miller 
590cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
591cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
592cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
593cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
594cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
595cfb6eeb4SYOSHIFUJI Hideaki 					       sk, NULL, NULL,
596aa8223c7SArnaldo Carvalho de Melo 					       tcp_hdr(skb),
597cfb6eeb4SYOSHIFUJI Hideaki 					       sk->sk_protocol,
598cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
599cfb6eeb4SYOSHIFUJI Hideaki 	}
600cfb6eeb4SYOSHIFUJI Hideaki #endif
601cfb6eeb4SYOSHIFUJI Hideaki 
6028292a17aSArnaldo Carvalho de Melo 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
6031da177e4SLinus Torvalds 
604dfb4b9dcSDavid S. Miller 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
605fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
6061da177e4SLinus Torvalds 
6071da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
6081da177e4SLinus Torvalds 		tcp_event_data_sent(tp, skb, sk);
6091da177e4SLinus Torvalds 
610bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
6111da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_OUTSEGS);
6121da177e4SLinus Torvalds 
613e89862f4SDavid S. Miller 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
61483de47cdSHua Zhong 	if (likely(err <= 0))
6151da177e4SLinus Torvalds 		return err;
6161da177e4SLinus Torvalds 
6173cfe3baaSIlpo Järvinen 	tcp_enter_cwr(sk, 1);
6181da177e4SLinus Torvalds 
619b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
620dfb4b9dcSDavid S. Miller 
6211da177e4SLinus Torvalds #undef SYSCTL_FLAG_TSTAMPS
6221da177e4SLinus Torvalds #undef SYSCTL_FLAG_WSCALE
6231da177e4SLinus Torvalds #undef SYSCTL_FLAG_SACK
6241da177e4SLinus Torvalds }
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds 
6271da177e4SLinus Torvalds /* This routine just queues the buffer
6281da177e4SLinus Torvalds  *
6291da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
6301da177e4SLinus Torvalds  * otherwise the socket can stall.
6311da177e4SLinus Torvalds  */
6321da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
6331da177e4SLinus Torvalds {
6341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6351da177e4SLinus Torvalds 
6361da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
6371da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
6381da177e4SLinus Torvalds 	skb_header_release(skb);
639fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
640*3ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
641*3ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
6421da177e4SLinus Torvalds }
6431da177e4SLinus Torvalds 
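/* Initialize the GSO segment accounting for this skb: a single segment
 * when it fits in one MSS (or the socket cannot do GSO), otherwise
 * mss_now-sized segments.
 */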
644846998aeSDavid S. Miller static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
645f6302d1dSDavid S. Miller {
646bcd76111SHerbert Xu 	if (skb->len <= mss_now || !sk_can_gso(sk)) {
647f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
648f6302d1dSDavid S. Miller 		 * non-TSO case.
649f6302d1dSDavid S. Miller 		 */
6507967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
6517967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
6527967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
653f6302d1dSDavid S. Miller 	} else {
654356f89e1SIlpo Järvinen 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
6557967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = mss_now;
656bcd76111SHerbert Xu 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
6571da177e4SLinus Torvalds 	}
6581da177e4SLinus Torvalds }
6591da177e4SLinus Torvalds 
66091fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
66168f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
66291fed7a1SIlpo Järvinen  */
663a47e5a98SIlpo Järvinen static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
66491fed7a1SIlpo Järvinen 				   int decr)
66591fed7a1SIlpo Järvinen {
666a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
667a47e5a98SIlpo Järvinen 
668dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
66991fed7a1SIlpo Järvinen 		return;
67091fed7a1SIlpo Järvinen 
6716859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
67291fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
67391fed7a1SIlpo Järvinen }
67491fed7a1SIlpo Järvinen 
6751da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
6761da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
6771da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
6781da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
6791da177e4SLinus Torvalds  */
6806475be16SDavid S. Miller int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
6811da177e4SLinus Torvalds {
6821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6831da177e4SLinus Torvalds 	struct sk_buff *buff;
6846475be16SDavid S. Miller 	int nsize, old_factor;
685b60b49eaSHerbert Xu 	int nlen;
6861da177e4SLinus Torvalds 	u16 flags;
6871da177e4SLinus Torvalds 
688b2cc99f0SHerbert Xu 	BUG_ON(len > skb->len);
6896a438bbeSStephen Hemminger 
690b7689205SIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
6911da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
6921da177e4SLinus Torvalds 	if (nsize < 0)
6931da177e4SLinus Torvalds 		nsize = 0;
6941da177e4SLinus Torvalds 
6951da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
6961da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
6971da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6981da177e4SLinus Torvalds 		return -ENOMEM;
6991da177e4SLinus Torvalds 
7001da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
7011da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
7021da177e4SLinus Torvalds 	if (buff == NULL)
7031da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
704ef5cb973SHerbert Xu 
705*3ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
706*3ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
707b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
708b60b49eaSHerbert Xu 	buff->truesize += nlen;
709b60b49eaSHerbert Xu 	skb->truesize -= nlen;
7101da177e4SLinus Torvalds 
7111da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
7121da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
7131da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
7141da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
7151da177e4SLinus Torvalds 
7161da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
7171da177e4SLinus Torvalds 	flags = TCP_SKB_CB(skb)->flags;
7181da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
7191da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = flags;
720e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
7211da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
7221da177e4SLinus Torvalds 
72384fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
7241da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
7251da177e4SLinus Torvalds 		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
7261da177e4SLinus Torvalds 						       nsize, 0);
7271da177e4SLinus Torvalds 
7281da177e4SLinus Torvalds 		skb_trim(skb, len);
7291da177e4SLinus Torvalds 
7301da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
7311da177e4SLinus Torvalds 	} else {
73284fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
7331da177e4SLinus Torvalds 		skb_split(skb, buff, len);
7341da177e4SLinus Torvalds 	}
7351da177e4SLinus Torvalds 
7361da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
7371da177e4SLinus Torvalds 
7381da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' field of
7391da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
7401da177e4SLinus Torvalds 	 */
7411da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
742a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
7431da177e4SLinus Torvalds 
7446475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
7456475be16SDavid S. Miller 
7461da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
747846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
748846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
7491da177e4SLinus Torvalds 
7506475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
7516475be16SDavid S. Miller 	 * adjust the various packet counters.
7526475be16SDavid S. Miller 	 */
753cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
7546475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
7556475be16SDavid S. Miller 			tcp_skb_pcount(buff);
7561da177e4SLinus Torvalds 
7576475be16SDavid S. Miller 		tp->packets_out -= diff;
758e14c3cafSHerbert Xu 
759e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
760e14c3cafSHerbert Xu 			tp->sacked_out -= diff;
761e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
762e14c3cafSHerbert Xu 			tp->retrans_out -= diff;
763e14c3cafSHerbert Xu 
764b5860bbaSIlpo Järvinen 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
7656475be16SDavid S. Miller 			tp->lost_out -= diff;
76683ca28beSHerbert Xu 
76783ca28beSHerbert Xu 		/* Adjust Reno SACK estimate. */
76891fed7a1SIlpo Järvinen 		if (tcp_is_reno(tp) && diff > 0) {
769af610b4cSIlpo Järvinen 			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
770005903bcSIlpo Järvinen 			tcp_verify_left_out(tp);
77183ca28beSHerbert Xu 		}
772a47e5a98SIlpo Järvinen 		tcp_adjust_fackets_out(sk, skb, diff);
7731da177e4SLinus Torvalds 	}
7741da177e4SLinus Torvalds 
7751da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
776f44b5271SDavid S. Miller 	skb_header_release(buff);
777fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
7781da177e4SLinus Torvalds 
7791da177e4SLinus Torvalds 	return 0;
7801da177e4SLinus Torvalds }
7811da177e4SLinus Torvalds 
7821da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
7831da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied, but
7841da177e4SLinus Torvalds  * immediately discarded.
7851da177e4SLinus Torvalds  */
786f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
7871da177e4SLinus Torvalds {
7881da177e4SLinus Torvalds 	int i, k, eat;
7891da177e4SLinus Torvalds 
7901da177e4SLinus Torvalds 	eat = len;
7911da177e4SLinus Torvalds 	k = 0;
7921da177e4SLinus Torvalds 	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
7931da177e4SLinus Torvalds 		if (skb_shinfo(skb)->frags[i].size <= eat) {
7941da177e4SLinus Torvalds 			put_page(skb_shinfo(skb)->frags[i].page);
7951da177e4SLinus Torvalds 			eat -= skb_shinfo(skb)->frags[i].size;
7961da177e4SLinus Torvalds 		} else {
7971da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
7981da177e4SLinus Torvalds 			if (eat) {
7991da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
8001da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].size -= eat;
8011da177e4SLinus Torvalds 				eat = 0;
8021da177e4SLinus Torvalds 			}
8031da177e4SLinus Torvalds 			k++;
8041da177e4SLinus Torvalds 		}
8051da177e4SLinus Torvalds 	}
8061da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
8071da177e4SLinus Torvalds 
80827a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
8091da177e4SLinus Torvalds 	skb->data_len -= len;
8101da177e4SLinus Torvalds 	skb->len = skb->data_len;
8111da177e4SLinus Torvalds }
8121da177e4SLinus Torvalds 
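/* Trim len bytes from the head of an skb, adjusting its sequence number
 * and the socket's memory accounting, and recompute the TSO factor if
 * the skb carried more than one segment.
 */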
8131da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
8141da177e4SLinus Torvalds {
8151da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
8161da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
8171da177e4SLinus Torvalds 		return -ENOMEM;
8181da177e4SLinus Torvalds 
819f2911969SHerbert Xu 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
820f2911969SHerbert Xu 	if (unlikely(len < skb_headlen(skb)))
8211da177e4SLinus Torvalds 		__skb_pull(skb, len);
822f2911969SHerbert Xu 	else
823f2911969SHerbert Xu 		__pskb_trim_head(skb, len - skb_headlen(skb));
8241da177e4SLinus Torvalds 
8251da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
82684fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
8271da177e4SLinus Torvalds 
8281da177e4SLinus Torvalds 	skb->truesize	     -= len;
8291da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
830*3ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
8311da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 	/* Any change of skb->len requires recalculation of tso
8341da177e4SLinus Torvalds 	 * factor and mss.
8351da177e4SLinus Torvalds 	 */
8361da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
837846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
8381da177e4SLinus Torvalds 
8391da177e4SLinus Torvalds 	return 0;
8401da177e4SLinus Torvalds }
8411da177e4SLinus Torvalds 
8425d424d5aSJohn Heffner /* Not accounting for SACKs here. */
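/* For example, with an IPv4 MTU of 1500 and no IP or TCP options this
 * yields 1500 - 20 (IP) - 20 (TCP) = 1460 bytes of payload per segment.
 */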
8435d424d5aSJohn Heffner int tcp_mtu_to_mss(struct sock *sk, int pmtu)
8445d424d5aSJohn Heffner {
8455d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8465d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8475d424d5aSJohn Heffner 	int mss_now;
8485d424d5aSJohn Heffner 
8495d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
8505d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) from rfc1122
8515d424d5aSJohn Heffner 	 */
8525d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
8535d424d5aSJohn Heffner 
8545d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
8555d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
8565d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
8575d424d5aSJohn Heffner 
8585d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
8595d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
8605d424d5aSJohn Heffner 
8615d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
8625d424d5aSJohn Heffner 	if (mss_now < 48)
8635d424d5aSJohn Heffner 		mss_now = 48;
8645d424d5aSJohn Heffner 
8655d424d5aSJohn Heffner 	/* Now subtract TCP options size, not including SACKs */
8665d424d5aSJohn Heffner 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
8675d424d5aSJohn Heffner 
8685d424d5aSJohn Heffner 	return mss_now;
8695d424d5aSJohn Heffner }
8705d424d5aSJohn Heffner 
8715d424d5aSJohn Heffner /* Inverse of above */
8725d424d5aSJohn Heffner int tcp_mss_to_mtu(struct sock *sk, int mss)
8735d424d5aSJohn Heffner {
8745d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8755d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8765d424d5aSJohn Heffner 	int mtu;
8775d424d5aSJohn Heffner 
8785d424d5aSJohn Heffner 	mtu = mss +
8795d424d5aSJohn Heffner 	      tp->tcp_header_len +
8805d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
8815d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
8825d424d5aSJohn Heffner 
8835d424d5aSJohn Heffner 	return mtu;
8845d424d5aSJohn Heffner }
8855d424d5aSJohn Heffner 
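/* Initialize the MTU probing state (packetization-layer path MTU
 * discovery): probing is enabled by the tcp_mtu_probing sysctl, and the
 * search range is seeded from the mss clamp and tcp_base_mss.
 */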
8865d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
8875d424d5aSJohn Heffner {
8885d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8895d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8905d424d5aSJohn Heffner 
8915d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
8925d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
8935d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
8945d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
8955d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
8965d424d5aSJohn Heffner }
8975d424d5aSJohn Heffner 
8981da177e4SLinus Torvalds /* This function synchronizes the send mss to the current pmtu/exthdr set.
8991da177e4SLinus Torvalds 
9001da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
9011da177e4SLinus Torvalds    account for TCP options, but covers only the bare TCP header.
9021da177e4SLinus Torvalds 
9031da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
904caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
9051da177e4SLinus Torvalds    It also does not include TCP options.
9061da177e4SLinus Torvalds 
907d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
9081da177e4SLinus Torvalds 
9091da177e4SLinus Torvalds    tp->mss_cache is the current effective sending mss, including
9101da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
9111da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
9121da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
9131da177e4SLinus Torvalds 
9141da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
9151da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
9161da177e4SLinus Torvalds 
917d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
918d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
9191da177e4SLinus Torvalds  */
9201da177e4SLinus Torvalds 
9211da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
9221da177e4SLinus Torvalds {
9231da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
924d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
9255d424d5aSJohn Heffner 	int mss_now;
9261da177e4SLinus Torvalds 
9275d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
9285d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
9291da177e4SLinus Torvalds 
9305d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds 	/* Bound mss with half of window */
9331da177e4SLinus Torvalds 	if (tp->max_window && mss_now > (tp->max_window>>1))
9341da177e4SLinus Torvalds 		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
9351da177e4SLinus Torvalds 
9361da177e4SLinus Torvalds 	/* And store cached results */
937d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
9385d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
9395d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
940c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
9411da177e4SLinus Torvalds 
9421da177e4SLinus Torvalds 	return mss_now;
9431da177e4SLinus Torvalds }
9441da177e4SLinus Torvalds 
9451da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
9461da177e4SLinus Torvalds  * and even PMTU discovery events into account.
9471da177e4SLinus Torvalds  *
9481da177e4SLinus Torvalds  * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
9491da177e4SLinus Torvalds  * cannot be large. However, taking into account the rare use of URG, this
9501da177e4SLinus Torvalds  * is not a big flaw.
9511da177e4SLinus Torvalds  */
952c1b4a7e6SDavid S. Miller unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
9531da177e4SLinus Torvalds {
9541da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9551da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
956c1b4a7e6SDavid S. Miller 	u32 mss_now;
957c1b4a7e6SDavid S. Miller 	u16 xmit_size_goal;
958c1b4a7e6SDavid S. Miller 	int doing_tso = 0;
9591da177e4SLinus Torvalds 
960c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
961c1b4a7e6SDavid S. Miller 
962bcd76111SHerbert Xu 	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
963c1b4a7e6SDavid S. Miller 		doing_tso = 1;
964c1b4a7e6SDavid S. Miller 
9651da177e4SLinus Torvalds 	if (dst) {
9661da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
967d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
9681da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
9691da177e4SLinus Torvalds 	}
9701da177e4SLinus Torvalds 
9711da177e4SLinus Torvalds 	if (tp->rx_opt.eff_sacks)
9721da177e4SLinus Torvalds 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
9731da177e4SLinus Torvalds 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
974c1b4a7e6SDavid S. Miller 
975cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
976cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk))
977cfb6eeb4SYOSHIFUJI Hideaki 		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
978cfb6eeb4SYOSHIFUJI Hideaki #endif
979cfb6eeb4SYOSHIFUJI Hideaki 
980c1b4a7e6SDavid S. Miller 	xmit_size_goal = mss_now;
981c1b4a7e6SDavid S. Miller 
982c1b4a7e6SDavid S. Miller 	if (doing_tso) {
9838292a17aSArnaldo Carvalho de Melo 		xmit_size_goal = (65535 -
9848292a17aSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_af_ops->net_header_len -
985d83d8461SArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_ext_hdr_len -
986d83d8461SArnaldo Carvalho de Melo 				  tp->tcp_header_len);
987c1b4a7e6SDavid S. Miller 
988c1b4a7e6SDavid S. Miller 		if (tp->max_window &&
989c1b4a7e6SDavid S. Miller 		    (xmit_size_goal > (tp->max_window >> 1)))
990c1b4a7e6SDavid S. Miller 			xmit_size_goal = max((tp->max_window >> 1),
991c1b4a7e6SDavid S. Miller 					     68U - tp->tcp_header_len);
992c1b4a7e6SDavid S. Miller 
993c1b4a7e6SDavid S. Miller 		xmit_size_goal -= (xmit_size_goal % mss_now);
994c1b4a7e6SDavid S. Miller 	}
995c1b4a7e6SDavid S. Miller 	tp->xmit_size_goal = xmit_size_goal;
996c1b4a7e6SDavid S. Miller 
9971da177e4SLinus Torvalds 	return mss_now;
9981da177e4SLinus Torvalds }
9991da177e4SLinus Torvalds 
1000a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
1001a762a980SDavid S. Miller 
10029e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1003a762a980SDavid S. Miller {
10049e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1005a762a980SDavid S. Miller 	__u32 packets_out = tp->packets_out;
1006a762a980SDavid S. Miller 
1007a762a980SDavid S. Miller 	if (packets_out >= tp->snd_cwnd) {
1008a762a980SDavid S. Miller 		/* Network is fed fully. */
1009a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1010a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1011a762a980SDavid S. Miller 	} else {
1012a762a980SDavid S. Miller 		/* Network starves. */
1013a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1014a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1015a762a980SDavid S. Miller 
101615d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
101715d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1018a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1019a762a980SDavid S. Miller 	}
1020a762a980SDavid S. Miller }
1021a762a980SDavid S. Miller 
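/* The RFC2861 test above reduces to a single predicate: cwnd is trusted
 * only while the network is kept full; once it has sat underused for a
 * whole RTO, it should be decayed toward what was actually used.  A
 * sketch of that predicate, ignoring the sysctl gate (the helper name and
 * plain long timestamps are assumptions):
 */
static inline int cwnd_idle_decay_due_sketch(u32 packets_out, u32 cwnd,
					     long now, long stamp, long rto)
{
	/* Underused window that has been idle for at least one RTO. */
	return packets_out < cwnd && (now - stamp) >= rto;
}
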
10220e3a4803SIlpo Järvinen /* Returns the portion of skb which can be sent right away without
10230e3a4803SIlpo Järvinen  * introducing MSS oddities to segment boundaries. In rare cases where
10240e3a4803SIlpo Järvinen  * mss_now != mss_cache, we will ask the caller to create a small skb
10250e3a4803SIlpo Järvinen  * per input skb, which could mostly be avoided here (if desired).
10260e3a4803SIlpo Järvinen  */
10270e3a4803SIlpo Järvinen static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
10280e3a4803SIlpo Järvinen 					unsigned int mss_now,
10290e3a4803SIlpo Järvinen 					unsigned int cwnd)
1030c1b4a7e6SDavid S. Miller {
10310e3a4803SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
10320e3a4803SIlpo Järvinen 	u32 needed, window, cwnd_len;
1033c1b4a7e6SDavid S. Miller 
1034c1b4a7e6SDavid S. Miller 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
1035c1b4a7e6SDavid S. Miller 	cwnd_len = mss_now * cwnd;
10360e3a4803SIlpo Järvinen 
10370e3a4803SIlpo Järvinen 	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
10380e3a4803SIlpo Järvinen 		return cwnd_len;
10390e3a4803SIlpo Järvinen 
10400e3a4803SIlpo Järvinen 	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len)
10410e3a4803SIlpo Järvinen 		return cwnd_len;
10420e3a4803SIlpo Järvinen 
10430e3a4803SIlpo Järvinen 	needed = min(skb->len, window);
10440e3a4803SIlpo Järvinen 	return needed - needed % mss_now;
1045c1b4a7e6SDavid S. Miller }
1046c1b4a7e6SDavid S. Miller 
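/* What tcp_mss_split_point() computes, reduced to its arithmetic.  A
 * simplified sketch that leaves out the tail-of-queue special case (the
 * helper name is illustrative only):
 */
static inline u32 mss_split_point_sketch(u32 skb_len, u32 window,
					 u32 mss_now, u32 cwnd)
{
	u32 cwnd_len = mss_now * cwnd;
	u32 needed;

	if (cwnd_len <= window)
		return cwnd_len;		/* the whole cwnd quota fits */
	needed = min(skb_len, window);
	return needed - needed % mss_now;	/* e.g. 3000 @ mss 1460 -> 2920 */
}
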
1047c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1048c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1049c1b4a7e6SDavid S. Miller  */
1050c1b4a7e6SDavid S. Miller static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
1051c1b4a7e6SDavid S. Miller {
1052c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1053c1b4a7e6SDavid S. Miller 
1054c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
1055104439a8SJohn Heffner 	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1056104439a8SJohn Heffner 	    tcp_skb_pcount(skb) == 1)
1057c1b4a7e6SDavid S. Miller 		return 1;
1058c1b4a7e6SDavid S. Miller 
1059c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1060c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1061c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1062c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1063c1b4a7e6SDavid S. Miller 
1064c1b4a7e6SDavid S. Miller 	return 0;
1065c1b4a7e6SDavid S. Miller }
1066c1b4a7e6SDavid S. Miller 
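/* The quota is simply the headroom between cwnd and what is already in
 * flight, counted in segments.  A one-line sketch (the name is
 * illustrative only):
 */
static inline u32 cwnd_quota_sketch(u32 cwnd, u32 in_flight)
{
	/* e.g. cwnd 10 with 7 segments in flight allows 3 more */
	return in_flight < cwnd ? cwnd - in_flight : 0;
}
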
1067c1b4a7e6SDavid S. Miller /* This must be invoked the first time we consider transmitting
1068c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1069c1b4a7e6SDavid S. Miller  */
107040efc6faSStephen Hemminger static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
1071c1b4a7e6SDavid S. Miller {
1072c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1073c1b4a7e6SDavid S. Miller 
1074846998aeSDavid S. Miller 	if (!tso_segs ||
1075846998aeSDavid S. Miller 	    (tso_segs > 1 &&
10767967168cSHerbert Xu 	     tcp_skb_mss(skb) != mss_now)) {
1077846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1078c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1079c1b4a7e6SDavid S. Miller 	}
1080c1b4a7e6SDavid S. Miller 	return tso_segs;
1081c1b4a7e6SDavid S. Miller }
1082c1b4a7e6SDavid S. Miller 
1083c1b4a7e6SDavid S. Miller static inline int tcp_minshall_check(const struct tcp_sock *tp)
1084c1b4a7e6SDavid S. Miller {
1085c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml, tp->snd_una) &&
1086c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
1087c1b4a7e6SDavid S. Miller }
1088c1b4a7e6SDavid S. Miller 
1089c1b4a7e6SDavid S. Miller /* Return 0 if the packet can be sent now without violating Nagle's rules:
1090c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1091c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
1092c1b4a7e6SDavid S. Miller  * 3. Or TCP_NODELAY was set.
1093c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1094c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1095c1b4a7e6SDavid S. Miller  */
1096c1b4a7e6SDavid S. Miller 
1097c1b4a7e6SDavid S. Miller static inline int tcp_nagle_check(const struct tcp_sock *tp,
1098c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
1099c1b4a7e6SDavid S. Miller 				  unsigned mss_now, int nonagle)
1100c1b4a7e6SDavid S. Miller {
1101c1b4a7e6SDavid S. Miller 	return (skb->len < mss_now &&
1102c1b4a7e6SDavid S. Miller 		((nonagle&TCP_NAGLE_CORK) ||
1103c1b4a7e6SDavid S. Miller 		 (!nonagle &&
1104c1b4a7e6SDavid S. Miller 		  tp->packets_out &&
1105c1b4a7e6SDavid S. Miller 		  tcp_minshall_check(tp))));
1106c1b4a7e6SDavid S. Miller }
1107c1b4a7e6SDavid S. Miller 
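/* Concretely: with plain Nagle (no TCP_NODELAY, no TCP_CORK), a sub-MSS
 * segment is held back while an earlier small segment is still unACKed
 * (Minshall's refinement).  A sketch of that common case (the helper name
 * and boolean parameters are assumptions):
 */
static inline int nagle_holds_back_sketch(u32 len, u32 mss, int nodelay,
					  int small_segment_unacked)
{
	/* e.g. a 500 byte write at mss 1460 waits for the earlier small
	 * segment's ACK; with TCP_NODELAY it would go out immediately.
	 */
	return len < mss && !nodelay && small_segment_unacked;
}
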
1108c1b4a7e6SDavid S. Miller /* Return non-zero if the Nagle test allows this packet to be
1109c1b4a7e6SDavid S. Miller  * sent now.
1110c1b4a7e6SDavid S. Miller  */
1111c1b4a7e6SDavid S. Miller static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1112c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1113c1b4a7e6SDavid S. Miller {
1114c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
1115c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to be extended with new data).
1116c1b4a7e6SDavid S. Miller 	 *
1117c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1118c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1119c1b4a7e6SDavid S. Miller 	 */
1120c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1121c1b4a7e6SDavid S. Miller 		return 1;
1122c1b4a7e6SDavid S. Miller 
1123d551e454SIlpo Järvinen 	/* Don't use the nagle rule for urgent data (or for the final FIN).
1124d551e454SIlpo Järvinen 	 * Nagle can be ignored during F-RTO too (see RFC4138).
1125d551e454SIlpo Järvinen 	 */
1126d551e454SIlpo Järvinen 	if (tp->urg_mode || (tp->frto_counter == 2) ||
1127c1b4a7e6SDavid S. Miller 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1128c1b4a7e6SDavid S. Miller 		return 1;
1129c1b4a7e6SDavid S. Miller 
1130c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1131c1b4a7e6SDavid S. Miller 		return 1;
1132c1b4a7e6SDavid S. Miller 
1133c1b4a7e6SDavid S. Miller 	return 0;
1134c1b4a7e6SDavid S. Miller }
1135c1b4a7e6SDavid S. Miller 
1136c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1137c1b4a7e6SDavid S. Miller static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1138c1b4a7e6SDavid S. Miller {
1139c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1140c1b4a7e6SDavid S. Miller 
1141c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1142c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1143c1b4a7e6SDavid S. Miller 
1144c1b4a7e6SDavid S. Miller 	return !after(end_seq, tp->snd_una + tp->snd_wnd);
1145c1b4a7e6SDavid S. Miller }
1146c1b4a7e6SDavid S. Miller 
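/* The test above, minus the wrap-safe after() arithmetic: at most one MSS
 * of the skb must fit inside the offered window.  A sketch that ignores
 * sequence-number wrap-around (the name is illustrative only):
 */
static inline int fits_send_window_sketch(u32 seq, u32 len, u32 cur_mss,
					  u32 snd_una, u32 snd_wnd)
{
	u32 end_seq = seq + min(len, cur_mss);

	return end_seq <= snd_una + snd_wnd;
}
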
1147fe067e8aSDavid S. Miller /* This checks if the data-bearing packet SKB (usually tcp_send_head(sk))
1148c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1149c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1150c1b4a7e6SDavid S. Miller  */
1151c1b4a7e6SDavid S. Miller static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1152c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1153c1b4a7e6SDavid S. Miller {
1154c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1155c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1156c1b4a7e6SDavid S. Miller 
1157846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1158c1b4a7e6SDavid S. Miller 
1159c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1160c1b4a7e6SDavid S. Miller 		return 0;
1161c1b4a7e6SDavid S. Miller 
1162c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1163c1b4a7e6SDavid S. Miller 	if (cwnd_quota &&
1164c1b4a7e6SDavid S. Miller 	    !tcp_snd_wnd_test(tp, skb, cur_mss))
1165c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1166c1b4a7e6SDavid S. Miller 
1167c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1168c1b4a7e6SDavid S. Miller }
1169c1b4a7e6SDavid S. Miller 
11709e412ba7SIlpo Järvinen int tcp_may_send_now(struct sock *sk)
1171c1b4a7e6SDavid S. Miller {
11729e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1173fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1174c1b4a7e6SDavid S. Miller 
1175c1b4a7e6SDavid S. Miller 	return (skb &&
1176c1b4a7e6SDavid S. Miller 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1177c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
11784e67d876SIlpo Järvinen 			      tp->nonagle : TCP_NAGLE_PUSH)));
1179c1b4a7e6SDavid S. Miller }
1180c1b4a7e6SDavid S. Miller 
1181c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1182c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1183c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1184c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1185c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1186c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1187c1b4a7e6SDavid S. Miller  */
1188846998aeSDavid S. Miller static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1189c1b4a7e6SDavid S. Miller {
1190c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1191c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
1192c1b4a7e6SDavid S. Miller 	u16 flags;
1193c1b4a7e6SDavid S. Miller 
1194c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1195c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1196c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1197c1b4a7e6SDavid S. Miller 
1198df97c708SPavel Emelyanov 	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
1199c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1200c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1201c1b4a7e6SDavid S. Miller 
1202*3ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
1203*3ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1204b60b49eaSHerbert Xu 	buff->truesize += nlen;
1205c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1206c1b4a7e6SDavid S. Miller 
1207c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1208c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1209c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1210c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1211c1b4a7e6SDavid S. Miller 
1212c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
1213c1b4a7e6SDavid S. Miller 	flags = TCP_SKB_CB(skb)->flags;
1214c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1215c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->flags = flags;
1216c1b4a7e6SDavid S. Miller 
1217c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1218c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1219c1b4a7e6SDavid S. Miller 
122084fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1221c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1222c1b4a7e6SDavid S. Miller 
1223c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1224846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1225846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1226c1b4a7e6SDavid S. Miller 
1227c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1228c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1229fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1230c1b4a7e6SDavid S. Miller 
1231c1b4a7e6SDavid S. Miller 	return 0;
1232c1b4a7e6SDavid S. Miller }
1233c1b4a7e6SDavid S. Miller 
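/* The sequence bookkeeping in tso_fragment() (and tcp_fragment()) follows
 * one pattern: the new buffer starts at the cut point and inherits the old
 * tail.  A sketch of just that step (helper name and out-parameters are
 * assumptions):
 */
static inline void split_seq_sketch(u32 skb_seq, u32 *skb_end_seq, u32 len,
				    u32 *buff_seq, u32 *buff_end_seq)
{
	*buff_seq = skb_seq + len;	/* new skb starts at the cut */
	*buff_end_seq = *skb_end_seq;	/* and inherits the old tail */
	*skb_end_seq = *buff_seq;	/* original now ends at the cut */
}
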
1234c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1235c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1236c1b4a7e6SDavid S. Miller  *
1237c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1238c1b4a7e6SDavid S. Miller  */
12399e412ba7SIlpo Järvinen static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1240c1b4a7e6SDavid S. Miller {
12419e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
12426687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1243c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1244c1b4a7e6SDavid S. Miller 
1245c1b4a7e6SDavid S. Miller 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1246ae8064acSJohn Heffner 		goto send_now;
1247c1b4a7e6SDavid S. Miller 
12486687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1249ae8064acSJohn Heffner 		goto send_now;
1250ae8064acSJohn Heffner 
1251ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1252bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1253bd515c3eSIlpo Järvinen 	    ((jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1254ae8064acSJohn Heffner 		goto send_now;
1255908a75c1SDavid S. Miller 
1256c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1257c1b4a7e6SDavid S. Miller 
1258c1b4a7e6SDavid S. Miller 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1259c1b4a7e6SDavid S. Miller 	       (tp->snd_cwnd <= in_flight));
1260c1b4a7e6SDavid S. Miller 
1261c1b4a7e6SDavid S. Miller 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1262c1b4a7e6SDavid S. Miller 
1263c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1264c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1265c1b4a7e6SDavid S. Miller 
1266c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1267c1b4a7e6SDavid S. Miller 
1268ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1269ba244fe9SDavid S. Miller 	if (limit >= 65536)
1270ae8064acSJohn Heffner 		goto send_now;
1271ba244fe9SDavid S. Miller 
1272c1b4a7e6SDavid S. Miller 	if (sysctl_tcp_tso_win_divisor) {
1273c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1274c1b4a7e6SDavid S. Miller 
1275c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1276c1b4a7e6SDavid S. Miller 		 * just use it.
1277c1b4a7e6SDavid S. Miller 		 */
1278c1b4a7e6SDavid S. Miller 		chunk /= sysctl_tcp_tso_win_divisor;
1279c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1280ae8064acSJohn Heffner 			goto send_now;
1281c1b4a7e6SDavid S. Miller 	} else {
1282c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1283c1b4a7e6SDavid S. Miller 		 * ACK.  The receiver should ACK every other full-sized
1284c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1285c1b4a7e6SDavid S. Miller 		 * then send now.
1286c1b4a7e6SDavid S. Miller 		 */
1287c1b4a7e6SDavid S. Miller 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1288ae8064acSJohn Heffner 			goto send_now;
1289c1b4a7e6SDavid S. Miller 	}
1290c1b4a7e6SDavid S. Miller 
1291c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1292ae8064acSJohn Heffner 	tp->tso_deferred = 1 | (jiffies<<1);
1293ae8064acSJohn Heffner 
1294c1b4a7e6SDavid S. Miller 	return 1;
1295ae8064acSJohn Heffner 
1296ae8064acSJohn Heffner send_now:
1297ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1298ae8064acSJohn Heffner 	return 0;
1299c1b4a7e6SDavid S. Miller }
1300c1b4a7e6SDavid S. Miller 
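/* The deferral decision above condenses to comparing the sendable bytes
 * against a target chunk.  A sketch with everything pre-converted to
 * bytes: limit stands for min(send window room, cwnd room) and window for
 * min(snd_wnd, cwnd * mss); the helper name is illustrative only:
 */
static inline int tso_defer_sketch(u32 limit, u32 window, u32 mss,
				   u32 divisor, u32 max_burst)
{
	if (limit >= 65536)
		return 0;			 /* a full TSO frame fits: send */
	if (divisor)
		return limit < window / divisor; /* e.g. 64K window, divisor 3:
						  * defer until ~21K can go */
	return limit <= max_burst * mss;	 /* else defer past ~3 frames */
}
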
13015d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
13025d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
13035d424d5aSJohn Heffner  *         1 if a probe was sent,
13045d424d5aSJohn Heffner  *         -1 otherwise */
13055d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
13065d424d5aSJohn Heffner {
13075d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
13085d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
13095d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
13105d424d5aSJohn Heffner 	int len;
13115d424d5aSJohn Heffner 	int probe_size;
131291cc17c0SIlpo Järvinen 	int size_needed;
13135d424d5aSJohn Heffner 	int copy;
13145d424d5aSJohn Heffner 	int mss_now;
13155d424d5aSJohn Heffner 
13165d424d5aSJohn Heffner 	/* Not currently probing/verifying,
13175d424d5aSJohn Heffner 	 * not in recovery,
13185d424d5aSJohn Heffner 	 * have enough cwnd, and
13195d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
13205d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
13215d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
13225d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
13235d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
13245d424d5aSJohn Heffner 	    tp->rx_opt.eff_sacks)
13255d424d5aSJohn Heffner 		return -1;
13265d424d5aSJohn Heffner 
13275d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
13285d424d5aSJohn Heffner 	mss_now = tcp_current_mss(sk, 0);
13295d424d5aSJohn Heffner 	probe_size = 2*tp->mss_cache;
133091cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
13315d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
13325d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
13335d424d5aSJohn Heffner 		return -1;
13345d424d5aSJohn Heffner 	}
13355d424d5aSJohn Heffner 
13365d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
13377f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
13385d424d5aSJohn Heffner 		return -1;
13395d424d5aSJohn Heffner 
134091cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
13415d424d5aSJohn Heffner 		return -1;
134291cc17c0SIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd))
13435d424d5aSJohn Heffner 		return 0;
13445d424d5aSJohn Heffner 
1345d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1346d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1347d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
13485d424d5aSJohn Heffner 			return -1;
13495d424d5aSJohn Heffner 		else
13505d424d5aSJohn Heffner 			return 0;
13515d424d5aSJohn Heffner 	}
13525d424d5aSJohn Heffner 
13535d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
13545d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
13555d424d5aSJohn Heffner 		return -1;
1356*3ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
1357*3ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
13585d424d5aSJohn Heffner 
1359fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
13605d424d5aSJohn Heffner 
13615d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
13625d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
13635d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
13645d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
13655d424d5aSJohn Heffner 	nskb->csum = 0;
136684fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
13675d424d5aSJohn Heffner 
136850c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
136950c4817eSIlpo Järvinen 
13705d424d5aSJohn Heffner 	len = 0;
1371234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
13725d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
13735d424d5aSJohn Heffner 		if (nskb->ip_summed)
13745d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
13755d424d5aSJohn Heffner 		else
13765d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
13775d424d5aSJohn Heffner 					 skb_put(nskb, copy), copy, nskb->csum);
13785d424d5aSJohn Heffner 
13795d424d5aSJohn Heffner 		if (skb->len <= copy) {
13805d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
13815d424d5aSJohn Heffner 			 * Throw it away. */
13825d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1383fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
1384*3ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
13855d424d5aSJohn Heffner 		} else {
13865d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
13875d424d5aSJohn Heffner 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
13885d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
13895d424d5aSJohn Heffner 				skb_pull(skb, copy);
139084fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
13915d424d5aSJohn Heffner 					skb->csum = csum_partial(skb->data, skb->len, 0);
13925d424d5aSJohn Heffner 			} else {
13935d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
13945d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
13955d424d5aSJohn Heffner 			}
13965d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
13975d424d5aSJohn Heffner 		}
13985d424d5aSJohn Heffner 
13995d424d5aSJohn Heffner 		len += copy;
1400234b6860SIlpo Järvinen 
1401234b6860SIlpo Järvinen 		if (len >= probe_size)
1402234b6860SIlpo Järvinen 			break;
14035d424d5aSJohn Heffner 	}
14045d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
14055d424d5aSJohn Heffner 
14065d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
14075d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
14085d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
14095d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
14105d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
14115d424d5aSJohn Heffner 		 * effectively two packets. */
14125d424d5aSJohn Heffner 		tp->snd_cwnd--;
14139e412ba7SIlpo Järvinen 		update_send_head(sk, nskb);
14145d424d5aSJohn Heffner 
14155d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
14160e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
14170e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
14185d424d5aSJohn Heffner 
14195d424d5aSJohn Heffner 		return 1;
14205d424d5aSJohn Heffner 	}
14215d424d5aSJohn Heffner 
14225d424d5aSJohn Heffner 	return -1;
14235d424d5aSJohn Heffner }
14245d424d5aSJohn Heffner 
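/* Probe sizing above in one expression: the probe itself is twice the
 * current MSS, and enough ordinary data must follow it so that its loss
 * can be detected by duplicate ACKs.  A sketch (the name is illustrative
 * only):
 */
static inline u32 mtu_probe_size_needed_sketch(u32 mss_cache, u32 reordering)
{
	u32 probe_size = 2 * mss_cache;

	/* e.g. mss 1460, reordering 3 -> 2920 + 4 * 1460 = 8760 bytes */
	return probe_size + (reordering + 1) * mss_cache;
}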
14255d424d5aSJohn Heffner 
14261da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
14271da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
14281da177e4SLinus Torvalds  * window for us.
14291da177e4SLinus Torvalds  *
14301da177e4SLinus Torvalds  * Returns 1 if no segments are in flight, we have queued segments, but we
14311da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
14321da177e4SLinus Torvalds  */
1433a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
14341da177e4SLinus Torvalds {
14351da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
143692df7b51SDavid S. Miller 	struct sk_buff *skb;
1437c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1438c1b4a7e6SDavid S. Miller 	int cwnd_quota;
14395d424d5aSJohn Heffner 	int result;
14401da177e4SLinus Torvalds 
14411da177e4SLinus Torvalds 	/* If we are closed, the bytes will have to remain here.
14421da177e4SLinus Torvalds 	 * In time closedown will finish, we empty the write queue and all
14431da177e4SLinus Torvalds 	 * will be happy.
14441da177e4SLinus Torvalds 	 */
144592df7b51SDavid S. Miller 	if (unlikely(sk->sk_state == TCP_CLOSE))
144692df7b51SDavid S. Miller 		return 0;
144792df7b51SDavid S. Miller 
1448c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
14495d424d5aSJohn Heffner 
14505d424d5aSJohn Heffner 	/* Do MTU probing. */
14515d424d5aSJohn Heffner 	if ((result = tcp_mtu_probe(sk)) == 0) {
14525d424d5aSJohn Heffner 		return 0;
14535d424d5aSJohn Heffner 	} else if (result > 0) {
14545d424d5aSJohn Heffner 		sent_pkts = 1;
14555d424d5aSJohn Heffner 	}
14565d424d5aSJohn Heffner 
1457fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1458c8ac3774SHerbert Xu 		unsigned int limit;
1459c8ac3774SHerbert Xu 
1460b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1461c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1462c1b4a7e6SDavid S. Miller 
1463b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1464b68e9f85SHerbert Xu 		if (!cwnd_quota)
1465b68e9f85SHerbert Xu 			break;
1466b68e9f85SHerbert Xu 
1467b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1468b68e9f85SHerbert Xu 			break;
1469b68e9f85SHerbert Xu 
1470c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1471aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1472aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1473aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1474aa93466bSDavid S. Miller 				break;
1475c1b4a7e6SDavid S. Miller 		} else {
14769e412ba7SIlpo Järvinen 			if (tcp_tso_should_defer(sk, skb))
1477aa93466bSDavid S. Miller 				break;
1478c1b4a7e6SDavid S. Miller 		}
1479aa93466bSDavid S. Miller 
1480c8ac3774SHerbert Xu 		limit = mss_now;
14810e3a4803SIlpo Järvinen 		if (tso_segs > 1)
14820e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
14830e3a4803SIlpo Järvinen 						    cwnd_quota);
1484c8ac3774SHerbert Xu 
1485c8ac3774SHerbert Xu 		if (skb->len > limit &&
1486c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
14871da177e4SLinus Torvalds 			break;
14881da177e4SLinus Torvalds 
14891da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1490c1b4a7e6SDavid S. Miller 
1491dfb4b9dcSDavid S. Miller 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
14921da177e4SLinus Torvalds 			break;
14931da177e4SLinus Torvalds 
14941da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
14951da177e4SLinus Torvalds 		 * This call will increment packets_out.
14961da177e4SLinus Torvalds 		 */
14979e412ba7SIlpo Järvinen 		update_send_head(sk, skb);
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1500aa93466bSDavid S. Miller 		sent_pkts++;
15011da177e4SLinus Torvalds 	}
15021da177e4SLinus Torvalds 
1503aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
15049e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
15051da177e4SLinus Torvalds 		return 0;
15061da177e4SLinus Torvalds 	}
1507fe067e8aSDavid S. Miller 	return !tp->packets_out && tcp_send_head(sk);
15081da177e4SLinus Torvalds }
15091da177e4SLinus Torvalds 
1510a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1511a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
1512a762a980SDavid S. Miller  * The socket must be locked by the caller.
1513a762a980SDavid S. Miller  */
15149e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
15159e412ba7SIlpo Järvinen 			       int nonagle)
1516a762a980SDavid S. Miller {
1517fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1518a762a980SDavid S. Miller 
1519a762a980SDavid S. Miller 	if (skb) {
152055c97f3eSDavid S. Miller 		if (tcp_write_xmit(sk, cur_mss, nonagle))
15219e412ba7SIlpo Järvinen 			tcp_check_probe_timer(sk);
1522a762a980SDavid S. Miller 	}
1523a762a980SDavid S. Miller }
1524a762a980SDavid S. Miller 
1525c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
1526c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
1527c1b4a7e6SDavid S. Miller  */
1528c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1529c1b4a7e6SDavid S. Miller {
1530fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1531c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, cwnd_quota;
1532c1b4a7e6SDavid S. Miller 
1533c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1534c1b4a7e6SDavid S. Miller 
1535846998aeSDavid S. Miller 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1536c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1537c1b4a7e6SDavid S. Miller 
1538c1b4a7e6SDavid S. Miller 	if (likely(cwnd_quota)) {
1539c8ac3774SHerbert Xu 		unsigned int limit;
1540c8ac3774SHerbert Xu 
1541c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1542c1b4a7e6SDavid S. Miller 
1543c8ac3774SHerbert Xu 		limit = mss_now;
15440e3a4803SIlpo Järvinen 		if (tso_segs > 1)
15450e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
15460e3a4803SIlpo Järvinen 						    cwnd_quota);
1547c8ac3774SHerbert Xu 
1548c8ac3774SHerbert Xu 		if (skb->len > limit &&
1549c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1550c1b4a7e6SDavid S. Miller 			return;
1551c1b4a7e6SDavid S. Miller 
1552c1b4a7e6SDavid S. Miller 		/* Send it out now. */
1553c1b4a7e6SDavid S. Miller 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1554c1b4a7e6SDavid S. Miller 
1555dfb4b9dcSDavid S. Miller 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
15569e412ba7SIlpo Järvinen 			update_send_head(sk, skb);
15579e412ba7SIlpo Järvinen 			tcp_cwnd_validate(sk);
1558c1b4a7e6SDavid S. Miller 			return;
1559c1b4a7e6SDavid S. Miller 		}
1560c1b4a7e6SDavid S. Miller 	}
1561c1b4a7e6SDavid S. Miller }
1562c1b4a7e6SDavid S. Miller 
15631da177e4SLinus Torvalds /* This function returns the amount that we can raise the
15641da177e4SLinus Torvalds  * usable window based on the following constraints
15651da177e4SLinus Torvalds  *
15661da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
15671da177e4SLinus Torvalds  * 2. We limit memory per socket
15681da177e4SLinus Torvalds  *
15691da177e4SLinus Torvalds  * RFC 1122:
15701da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
15711da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
15721da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
15731da177e4SLinus Torvalds  *
15741da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
15751da177e4SLinus Torvalds  * it at least MSS bytes.
15761da177e4SLinus Torvalds  *
15771da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
15781da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
15791da177e4SLinus Torvalds  *
15801da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
15811da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
15821da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
15831da177e4SLinus Torvalds  * window to always advance by a single byte.
15841da177e4SLinus Torvalds  *
15851da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
15861da177e4SLinus Torvalds  * then this will not be a problem.
15871da177e4SLinus Torvalds  *
15881da177e4SLinus Torvalds  * BSD seems to make the following compromise:
15891da177e4SLinus Torvalds  *
15901da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
15911da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
15921da177e4SLinus Torvalds  *	then set the window to 0.
15931da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
15941da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
15951da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
15961da177e4SLinus Torvalds  *
15971da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
15981da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
15991da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
16001da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
16011da177e4SLinus Torvalds  * because the pipeline is full.
16021da177e4SLinus Torvalds  *
16031da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
16041da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
16051da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
16061da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
16071da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
16081da177e4SLinus Torvalds  *
16091da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
16101da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
16111da177e4SLinus Torvalds  *
16121da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
16131da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
16141da177e4SLinus Torvalds  */
16151da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
16161da177e4SLinus Torvalds {
1617463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
16181da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1619caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
16201da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
16211da177e4SLinus Torvalds 	 * of the peer's MSS gives better performance.  It's more correct,
16221da177e4SLinus Torvalds 	 * but may perform worse because of rcv_mss
16231da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
16241da177e4SLinus Torvalds 	 */
1625463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
16261da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
16271da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
16281da177e4SLinus Torvalds 	int window;
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 	if (mss > full_space)
16311da177e4SLinus Torvalds 		mss = full_space;
16321da177e4SLinus Torvalds 
1633b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
1634463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds 		if (tcp_memory_pressure)
16371da177e4SLinus Torvalds 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
16381da177e4SLinus Torvalds 
16391da177e4SLinus Torvalds 		if (free_space < mss)
16401da177e4SLinus Torvalds 			return 0;
16411da177e4SLinus Torvalds 	}
16421da177e4SLinus Torvalds 
16431da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
16441da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
16471da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
16481da177e4SLinus Torvalds 	 */
16491da177e4SLinus Torvalds 	window = tp->rcv_wnd;
16501da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
16511da177e4SLinus Torvalds 		window = free_space;
16521da177e4SLinus Torvalds 
16531da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
16541da177e4SLinus Torvalds 		 * Important case: prevent a zero window announcement if
16551da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
16561da177e4SLinus Torvalds 		 */
16571da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
16581da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
16591da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
16601da177e4SLinus Torvalds 	} else {
16611da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
16621da177e4SLinus Torvalds 		 * Window clamp already applied above.
16631da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
16641da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
16651da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
16661da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
16671da177e4SLinus Torvalds 		 * is too small.
16681da177e4SLinus Torvalds 		 */
16691da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
16701da177e4SLinus Torvalds 			window = (free_space/mss)*mss;
167184565070SJohn Heffner 		else if (mss == full_space &&
1672b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
167384565070SJohn Heffner 			window = free_space;
16741da177e4SLinus Torvalds 	}
16751da177e4SLinus Torvalds 
16761da177e4SLinus Torvalds 	return window;
16771da177e4SLinus Torvalds }
16781da177e4SLinus Torvalds 
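/* The non-scaled rounding rule above in isolation: offer the largest
 * multiple of the MSS that fits into the free space.  A sketch (the name
 * is illustrative only):
 */
static inline u32 rounded_window_sketch(u32 free_space, u32 mss)
{
	/* e.g. free_space 10000 @ mss 1460 -> 6 * 1460 = 8760 */
	return (free_space / mss) * mss;
}
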
16791da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */
16801da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
16811da177e4SLinus Torvalds {
16821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1683fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
16841da177e4SLinus Torvalds 
16851da177e4SLinus Torvalds 	/* The first test we must make is that neither of these two
16861da177e4SLinus Torvalds 	 * SKB's are still referenced by someone else.
16871da177e4SLinus Torvalds 	 */
16881da177e4SLinus Torvalds 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
16891da177e4SLinus Torvalds 		int skb_size = skb->len, next_skb_size = next_skb->len;
16901da177e4SLinus Torvalds 		u16 flags = TCP_SKB_CB(skb)->flags;
16911da177e4SLinus Torvalds 
16921da177e4SLinus Torvalds 		/* Also punt if next skb has been SACK'd. */
16931da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
16941da177e4SLinus Torvalds 			return;
16951da177e4SLinus Torvalds 
16961da177e4SLinus Torvalds 		/* Next skb is out of window. */
16971da177e4SLinus Torvalds 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
16981da177e4SLinus Torvalds 			return;
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds 		/* Punt if not enough space exists in the first SKB for
17011da177e4SLinus Torvalds 		 * the data in the second, or the total combined payload
17021da177e4SLinus Torvalds 		 * would exceed the MSS.
17031da177e4SLinus Torvalds 		 */
17041da177e4SLinus Torvalds 		if ((next_skb_size > skb_tailroom(skb)) ||
17051da177e4SLinus Torvalds 		    ((skb_size + next_skb_size) > mss_now))
17061da177e4SLinus Torvalds 			return;
17071da177e4SLinus Torvalds 
17081da177e4SLinus Torvalds 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
17091da177e4SLinus Torvalds 		       tcp_skb_pcount(next_skb) != 1);
17101da177e4SLinus Torvalds 
17116859d494SIlpo Järvinen 		tcp_highest_sack_combine(sk, next_skb, skb);
1712a6963a6bSIlpo Järvinen 
17131da177e4SLinus Torvalds 		/* Ok.	We will be able to collapse the packet. */
1714fe067e8aSDavid S. Miller 		tcp_unlink_write_queue(next_skb, sk);
17151da177e4SLinus Torvalds 
17161a4e2d09SArnaldo Carvalho de Melo 		skb_copy_from_linear_data(next_skb,
17171a4e2d09SArnaldo Carvalho de Melo 					  skb_put(skb, next_skb_size),
17181a4e2d09SArnaldo Carvalho de Melo 					  next_skb_size);
17191da177e4SLinus Torvalds 
172052d570aaSJarek Poplawski 		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
172152d570aaSJarek Poplawski 			skb->ip_summed = CHECKSUM_PARTIAL;
17221da177e4SLinus Torvalds 
172384fa7933SPatrick McHardy 		if (skb->ip_summed != CHECKSUM_PARTIAL)
17241da177e4SLinus Torvalds 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds 		/* Update sequence range on original skb. */
17271da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
17281da177e4SLinus Torvalds 
17291da177e4SLinus Torvalds 		/* Merge over control information. */
17301da177e4SLinus Torvalds 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
17311da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = flags;
17321da177e4SLinus Torvalds 
17331da177e4SLinus Torvalds 		/* All done, get rid of second SKB and account for it so
17341da177e4SLinus Torvalds 		 * packet counting does not break.
17351da177e4SLinus Torvalds 		 */
17361da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
17371da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
17381da177e4SLinus Torvalds 			tp->retrans_out -= tcp_skb_pcount(next_skb);
1739b5860bbaSIlpo Järvinen 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
17401da177e4SLinus Torvalds 			tp->lost_out -= tcp_skb_pcount(next_skb);
17411da177e4SLinus Torvalds 		/* Reno case is special. Sigh... */
1742e60402d0SIlpo Järvinen 		if (tcp_is_reno(tp) && tp->sacked_out)
17431da177e4SLinus Torvalds 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
17441da177e4SLinus Torvalds 
1745a47e5a98SIlpo Järvinen 		tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
1746e9144bd8SIlpo Järvinen 		tp->packets_out -= tcp_skb_pcount(next_skb);
1747b7689205SIlpo Järvinen 
1748b7689205SIlpo Järvinen 		/* changed transmit queue under us so clear hints */
1749b7689205SIlpo Järvinen 		tcp_clear_retrans_hints_partial(tp);
1750b7689205SIlpo Järvinen 
1751*3ab224beSHideo Aoki 		sk_wmem_free_skb(sk, next_skb);
17521da177e4SLinus Torvalds 	}
17531da177e4SLinus Torvalds }
17541da177e4SLinus Torvalds 
17551da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in
17561da177e4SLinus Torvalds  * tcp_timer. This is used for path mtu discovery.
17571da177e4SLinus Torvalds  * The socket is already locked here.
17581da177e4SLinus Torvalds  */
17591da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk)
17601da177e4SLinus Torvalds {
17616687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
17621da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17631da177e4SLinus Torvalds 	struct sk_buff *skb;
17641da177e4SLinus Torvalds 	unsigned int mss = tcp_current_mss(sk, 0);
17651da177e4SLinus Torvalds 	int lost = 0;
17661da177e4SLinus Torvalds 
1767fe067e8aSDavid S. Miller 	tcp_for_write_queue(skb, sk) {
1768fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
1769fe067e8aSDavid S. Miller 			break;
17701da177e4SLinus Torvalds 		if (skb->len > mss &&
17711da177e4SLinus Torvalds 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
17721da177e4SLinus Torvalds 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
17731da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
17741da177e4SLinus Torvalds 				tp->retrans_out -= tcp_skb_pcount(skb);
17751da177e4SLinus Torvalds 			}
17761da177e4SLinus Torvalds 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
17771da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
17781da177e4SLinus Torvalds 				tp->lost_out += tcp_skb_pcount(skb);
17791da177e4SLinus Torvalds 				lost = 1;
17801da177e4SLinus Torvalds 			}
17811da177e4SLinus Torvalds 		}
17821da177e4SLinus Torvalds 	}
17831da177e4SLinus Torvalds 
17845af4ec23SIlpo Järvinen 	tcp_clear_all_retrans_hints(tp);
17856a438bbeSStephen Hemminger 
17861da177e4SLinus Torvalds 	if (!lost)
17871da177e4SLinus Torvalds 		return;
17881da177e4SLinus Torvalds 
1789005903bcSIlpo Järvinen 	tcp_verify_left_out(tp);
17901da177e4SLinus Torvalds 
17911da177e4SLinus Torvalds 	/* Don't muck with the congestion window here.
17921da177e4SLinus Torvalds 	 * Reason is that we do not increase amount of _data_
17931da177e4SLinus Torvalds 	 * in network, but units changed and effective
17941da177e4SLinus Torvalds 	 * cwnd/ssthresh really reduced now.
17951da177e4SLinus Torvalds 	 */
17966687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
17971da177e4SLinus Torvalds 		tp->high_seq = tp->snd_nxt;
17986687e988SArnaldo Carvalho de Melo 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
17991da177e4SLinus Torvalds 		tp->prior_ssthresh = 0;
18001da177e4SLinus Torvalds 		tp->undo_marker = 0;
18016687e988SArnaldo Carvalho de Melo 		tcp_set_ca_state(sk, TCP_CA_Loss);
18021da177e4SLinus Torvalds 	}
18031da177e4SLinus Torvalds 	tcp_xmit_retransmit_queue(sk);
18041da177e4SLinus Torvalds }
18051da177e4SLinus Torvalds 
18061da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
18071da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
18081da177e4SLinus Torvalds  * error occurred which prevented the send.
18091da177e4SLinus Torvalds  */
18101da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
18111da177e4SLinus Torvalds {
18121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18135d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
18141da177e4SLinus Torvalds 	unsigned int cur_mss = tcp_current_mss(sk, 0);
18151da177e4SLinus Torvalds 	int err;
18161da177e4SLinus Torvalds 
18175d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
18185d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
18195d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
18205d424d5aSJohn Heffner 	}
18215d424d5aSJohn Heffner 
18221da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
1823caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
18241da177e4SLinus Torvalds 	 */
18251da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
18261da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
18271da177e4SLinus Torvalds 		return -EAGAIN;
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
18301da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
18311da177e4SLinus Torvalds 			BUG();
18321da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
18331da177e4SLinus Torvalds 			return -ENOMEM;
18341da177e4SLinus Torvalds 	}
18351da177e4SLinus Torvalds 
18361da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
18371da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
18381da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
18391da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
18401da177e4SLinus Torvalds 	 */
18411da177e4SLinus Torvalds 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
18421da177e4SLinus Torvalds 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
18431da177e4SLinus Torvalds 		return -EAGAIN;
18441da177e4SLinus Torvalds 
18451da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
1846846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
18471da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
18481da177e4SLinus Torvalds 	}
18491da177e4SLinus Torvalds 
18501da177e4SLinus Torvalds 	/* Collapse two adjacent packets if worthwhile and we can. */
18511da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
18521da177e4SLinus Torvalds 	    (skb->len < (cur_mss >> 1)) &&
1853fe067e8aSDavid S. Miller 	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1854fe067e8aSDavid S. Miller 	    (!tcp_skb_is_last(sk, skb)) &&
1855fe067e8aSDavid S. Miller 	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1856fe067e8aSDavid S. Miller 	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
18571da177e4SLinus Torvalds 	    (sysctl_tcp_retrans_collapse != 0))
18581da177e4SLinus Torvalds 		tcp_retrans_try_collapse(sk, skb, cur_mss);
18591da177e4SLinus Torvalds 
18608292a17aSArnaldo Carvalho de Melo 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
18611da177e4SLinus Torvalds 		return -EHOSTUNREACH; /* Routing failure or similar. */
18621da177e4SLinus Torvalds 
18631da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
18641da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
18651da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
18661da177e4SLinus Torvalds 	 */
18671da177e4SLinus Torvalds 	if (skb->len > 0 &&
18681da177e4SLinus Torvalds 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
18691da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
18701da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
18711da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
18727967168cSHerbert Xu 			skb_shinfo(skb)->gso_segs = 1;
18737967168cSHerbert Xu 			skb_shinfo(skb)->gso_size = 0;
18747967168cSHerbert Xu 			skb_shinfo(skb)->gso_type = 0;
18751da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
18761da177e4SLinus Torvalds 			skb->csum = 0;
18771da177e4SLinus Torvalds 		}
18781da177e4SLinus Torvalds 	}
18791da177e4SLinus Torvalds 
18801da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
18811da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
18821da177e4SLinus Torvalds 	 */
18831da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
18841da177e4SLinus Torvalds 
1885dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
18861da177e4SLinus Torvalds 
18871da177e4SLinus Torvalds 	if (err == 0) {
18881da177e4SLinus Torvalds 		/* Update global TCP statistics. */
18891da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
18901da177e4SLinus Torvalds 
18911da177e4SLinus Torvalds 		tp->total_retrans++;
18921da177e4SLinus Torvalds 
18931da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
18941da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
18951da177e4SLinus Torvalds 			if (net_ratelimit())
18961da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
18971da177e4SLinus Torvalds 		}
18981da177e4SLinus Torvalds #endif
1899b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
1900b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
19011da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
19021da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
19051da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
19061da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
19071da177e4SLinus Torvalds 
19081da177e4SLinus Torvalds 		tp->undo_retrans++;
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
19111da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
19121da177e4SLinus Torvalds 		 */
19131da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
19141da177e4SLinus Torvalds 	}
19151da177e4SLinus Torvalds 	return err;
19161da177e4SLinus Torvalds }
19171da177e4SLinus Torvalds 
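/* The -EAGAIN guard at the top of tcp_retransmit_skb() in isolation:
 * allow at most the queued bytes plus a 25% copying reserve, clamped by
 * the send buffer.  A sketch (the name is illustrative only):
 */
static inline int retrans_wmem_exceeded_sketch(u32 wmem_alloc,
					       u32 wmem_queued, u32 sndbuf)
{
	u32 limit = min(wmem_queued + (wmem_queued >> 2), sndbuf);

	return wmem_alloc > limit;	/* the caller returns -EAGAIN */
}
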
19181da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
19191da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
19201da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
19211da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
19221da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
19231da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
19241da177e4SLinus Torvalds  * If so, we use it to avoid unnecessarily retransmissions.
19251da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
19261da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
19271da177e4SLinus Torvalds {
19286687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
19291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
19301da177e4SLinus Torvalds 	struct sk_buff *skb;
19316a438bbeSStephen Hemminger 	int packet_cnt;
19326a438bbeSStephen Hemminger 
19336a438bbeSStephen Hemminger 	if (tp->retransmit_skb_hint) {
19346a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
19356a438bbeSStephen Hemminger 		packet_cnt = tp->retransmit_cnt_hint;
19346a438bbeSStephen Hemminger 	} else {
1937fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
19386a438bbeSStephen Hemminger 		packet_cnt = 0;
19396a438bbeSStephen Hemminger 	}
19401da177e4SLinus Torvalds 
19411da177e4SLinus Torvalds 	/* First pass: retransmit lost packets. */
19426a438bbeSStephen Hemminger 	if (tp->lost_out) {
1943fe067e8aSDavid S. Miller 		tcp_for_write_queue_from(skb, sk) {
19441da177e4SLinus Torvalds 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
19451da177e4SLinus Torvalds 
1946fe067e8aSDavid S. Miller 			if (skb == tcp_send_head(sk))
1947fe067e8aSDavid S. Miller 				break;
19486a438bbeSStephen Hemminger 			/* we could do better than to assign each time */
19496a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
19506a438bbeSStephen Hemminger 			tp->retransmit_cnt_hint = packet_cnt;
19516a438bbeSStephen Hemminger 
19521da177e4SLinus Torvalds 			/* Assume this retransmit will generate
19531da177e4SLinus Torvalds 			 * only one packet for congestion window
19541da177e4SLinus Torvalds 			 * calculation purposes.  This works because
19551da177e4SLinus Torvalds 			 * tcp_retransmit_skb() will chop up the
19561da177e4SLinus Torvalds 			 * packet to be MSS sized and all the
19571da177e4SLinus Torvalds 			 * packet counting works out.
19581da177e4SLinus Torvalds 			 */
19591da177e4SLinus Torvalds 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
19601da177e4SLinus Torvalds 				return;
19611da177e4SLinus Torvalds 
19621da177e4SLinus Torvalds 			if (sacked & TCPCB_LOST) {
19631da177e4SLinus Torvalds 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
19646a438bbeSStephen Hemminger 					if (tcp_retransmit_skb(sk, skb)) {
19656a438bbeSStephen Hemminger 						tp->retransmit_skb_hint = NULL;
19661da177e4SLinus Torvalds 						return;
19676a438bbeSStephen Hemminger 					}
19686687e988SArnaldo Carvalho de Melo 					if (icsk->icsk_ca_state != TCP_CA_Loss)
19691da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
19701da177e4SLinus Torvalds 					else
19711da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
19721da177e4SLinus Torvalds 
1973fe067e8aSDavid S. Miller 					if (skb == tcp_write_queue_head(sk))
1974463c84b9SArnaldo Carvalho de Melo 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
19753f421baaSArnaldo Carvalho de Melo 									  inet_csk(sk)->icsk_rto,
19763f421baaSArnaldo Carvalho de Melo 									  TCP_RTO_MAX);
19771da177e4SLinus Torvalds 				}
19781da177e4SLinus Torvalds 
19796a438bbeSStephen Hemminger 				packet_cnt += tcp_skb_pcount(skb);
19806a438bbeSStephen Hemminger 				if (packet_cnt >= tp->lost_out)
19811da177e4SLinus Torvalds 					break;
19821da177e4SLinus Torvalds 			}
19831da177e4SLinus Torvalds 		}
19841da177e4SLinus Torvalds 	}
19851da177e4SLinus Torvalds 
19861da177e4SLinus Torvalds 	/* OK, demanded retransmission is finished. */
19871da177e4SLinus Torvalds 
19881da177e4SLinus Torvalds 	/* Forward retransmissions are possible only during Recovery. */
19896687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
19901da177e4SLinus Torvalds 		return;
19911da177e4SLinus Torvalds 
19921da177e4SLinus Torvalds 	/* No forward retransmissions in Reno are possible. */
1993e60402d0SIlpo Järvinen 	if (tcp_is_reno(tp))
19941da177e4SLinus Torvalds 		return;
19951da177e4SLinus Torvalds 
19961da177e4SLinus Torvalds 	/* Yeah, we have to make a difficult choice between forward transmission
19971da177e4SLinus Torvalds 	 * and retransmission... Both ways have their merits...
19981da177e4SLinus Torvalds 	 *
19991da177e4SLinus Torvalds 	 * For now we do not retransmit anything, while we have some new
2000539d243fSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2001539d243fSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
20021da177e4SLinus Torvalds 	 */
20031da177e4SLinus Torvalds 
20049e412ba7SIlpo Järvinen 	if (tcp_may_send_now(sk))
20051da177e4SLinus Torvalds 		return;
20061da177e4SLinus Torvalds 
2007539d243fSIlpo Järvinen 	/* If nothing is SACKed, highest_sack in the loop won't be valid */
2008539d243fSIlpo Järvinen 	if (!tp->sacked_out)
2009539d243fSIlpo Järvinen 		return;
2010539d243fSIlpo Järvinen 
2011539d243fSIlpo Järvinen 	if (tp->forward_skb_hint)
20126a438bbeSStephen Hemminger 		skb = tp->forward_skb_hint;
2013539d243fSIlpo Järvinen 	else
2014fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
20151da177e4SLinus Torvalds 
2016fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
2017fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2018fe067e8aSDavid S. Miller 			break;
20196a438bbeSStephen Hemminger 		tp->forward_skb_hint = skb;
20206a438bbeSStephen Hemminger 
20216859d494SIlpo Järvinen 		if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
20221da177e4SLinus Torvalds 			break;
20231da177e4SLinus Torvalds 
20241da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
20251da177e4SLinus Torvalds 			break;
20261da177e4SLinus Torvalds 
20271da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
20281da177e4SLinus Torvalds 			continue;
20291da177e4SLinus Torvalds 
20301da177e4SLinus Torvalds 		/* Ok, retransmit it. */
20316a438bbeSStephen Hemminger 		if (tcp_retransmit_skb(sk, skb)) {
20326a438bbeSStephen Hemminger 			tp->forward_skb_hint = NULL;
20331da177e4SLinus Torvalds 			break;
20346a438bbeSStephen Hemminger 		}
20351da177e4SLinus Torvalds 
2036fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
20373f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
20383f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
20393f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
20401da177e4SLinus Torvalds 
20411da177e4SLinus Torvalds 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
20421da177e4SLinus Torvalds 	}
20431da177e4SLinus Torvalds }
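/* A minimal sketch of the forward-retransmit test used in the loop above;
 * ex_may_forward_retrans() is a hypothetical name, and this is rule 3 of
 * NextSeg() from RFC 3517 in condensed form:
 *
 *	static int ex_may_forward_retrans(const struct tcp_sock *tp,
 *					  const struct sk_buff *skb)
 *	{
 *		return before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)) &&
 *		       tcp_packets_in_flight(tp) < tp->snd_cwnd &&
 *		       !(TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS);
 *	}
 */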
20441da177e4SLinus Torvalds 
20451da177e4SLinus Torvalds 
20461da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This must never be
20471da177e4SLinus Torvalds  * allowed to fail to queue a FIN frame under any circumstances.
20481da177e4SLinus Torvalds  */
20491da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
20501da177e4SLinus Torvalds {
20511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2052fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
20531da177e4SLinus Torvalds 	int mss_now;
20541da177e4SLinus Torvalds 
20551da177e4SLinus Torvalds 	/* Optimization: tack the FIN onto the tail skb if we have a queue of
20561da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
20571da177e4SLinus Torvalds 	 * and IP options.
20581da177e4SLinus Torvalds 	 */
20591da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, 1);
20601da177e4SLinus Torvalds 
2061fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
20621da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
20631da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
20641da177e4SLinus Torvalds 		tp->write_seq++;
20651da177e4SLinus Torvalds 	} else {
20661da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
20671da177e4SLinus Torvalds 		for (;;) {
2068d179cd12SDavid S. Miller 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
20691da177e4SLinus Torvalds 			if (skb)
20701da177e4SLinus Torvalds 				break;
20711da177e4SLinus Torvalds 			yield();
20721da177e4SLinus Torvalds 		}
20731da177e4SLinus Torvalds 
20741da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
20751da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
20761da177e4SLinus Torvalds 		skb->csum = 0;
20771da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
20781da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked = 0;
20797967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
20807967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
20817967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
20821da177e4SLinus Torvalds 
20831da177e4SLinus Torvalds 		/* FIN eats a sequence byte; write_seq is advanced by tcp_queue_skb(). */
20841da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->seq = tp->write_seq;
20851da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
20861da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
20871da177e4SLinus Torvalds 	}
20889e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
20891da177e4SLinus Torvalds }
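/* Either branch above costs exactly one unit of sequence space; a rough
 * sketch with illustrative numbers: if tp->write_seq == 1000 and the tail
 * skb covers [900, 1000), piggybacking bumps end_seq and write_seq to
 * 1001, while the standalone path builds an skb with seq == 1000 and
 * end_seq == 1001 and lets tcp_queue_skb() advance write_seq.  In both
 * cases the peer sees the FIN at sequence 1000.
 */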
20901da177e4SLinus Torvalds 
20911da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
20921da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
20931da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
209465bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
20951da177e4SLinus Torvalds  */
2096dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
20971da177e4SLinus Torvalds {
20981da177e4SLinus Torvalds 	struct sk_buff *skb;
20991da177e4SLinus Torvalds 
21001da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
21011da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
21021da177e4SLinus Torvalds 	if (!skb) {
21031da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
21041da177e4SLinus Torvalds 		return;
21051da177e4SLinus Torvalds 	}
21061da177e4SLinus Torvalds 
21071da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
21081da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
21091da177e4SLinus Torvalds 	skb->csum = 0;
21101da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
21111da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
21127967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
21137967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
21147967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
21151da177e4SLinus Torvalds 
21161da177e4SLinus Torvalds 	/* Send it off. */
21179e412ba7SIlpo Järvinen 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
21181da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
21191da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2120dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
21211da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
21221da177e4SLinus Torvalds }
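/* One user-visible way to reach this routine, sketched from user space
 * (illustrative only, error handling omitted): enabling SO_LINGER with a
 * zero timeout makes close() abort the connection with an RST instead of
 * the normal FIN handshake:
 *
 *	struct linger ling = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 *	close(fd);	(RST is emitted via tcp_send_active_reset())
 *
 * The unread-receive-data case described in the comment above takes the
 * same path.
 */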
21231da177e4SLinus Torvalds 
21241da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent
21251da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
21261da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
21271da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
21281da177e4SLinus Torvalds  */
21291da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
21301da177e4SLinus Torvalds {
21311da177e4SLinus Torvalds 	struct sk_buff *skb;
21321da177e4SLinus Torvalds 
2133fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
21341da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
21351da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
21361da177e4SLinus Torvalds 		return -EFAULT;
21371da177e4SLinus Torvalds 	}
21381da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
21391da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
21401da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
21411da177e4SLinus Torvalds 			if (nskb == NULL)
21421da177e4SLinus Torvalds 				return -ENOMEM;
2143fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21441da177e4SLinus Torvalds 			skb_header_release(nskb);
2145fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
2146*3ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
2147*3ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
2148*3ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
21491da177e4SLinus Torvalds 			skb = nskb;
21501da177e4SLinus Torvalds 		}
21511da177e4SLinus Torvalds 
21521da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
21531da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
21541da177e4SLinus Torvalds 	}
21551da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2156dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
21571da177e4SLinus Torvalds }
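/* The skb_cloned() handling above is the usual copy-before-write pattern:
 * a cloned skb shares its data with the retransmit machinery, so the ACK
 * flag must be set on a private copy, which then replaces the original at
 * the head of the write queue, with the memory accounting moved over via
 * sk_wmem_free_skb()/sk_mem_charge().
 */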
21581da177e4SLinus Torvalds 
21591da177e4SLinus Torvalds /*
21601da177e4SLinus Torvalds  * Prepare a SYN-ACK.
21611da177e4SLinus Torvalds  */
21621da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
216360236fddSArnaldo Carvalho de Melo 				 struct request_sock *req)
21641da177e4SLinus Torvalds {
21652e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
21661da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21671da177e4SLinus Torvalds 	struct tcphdr *th;
21681da177e4SLinus Torvalds 	int tcp_header_size;
21691da177e4SLinus Torvalds 	struct sk_buff *skb;
2170cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2171cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2172cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
2173cfb6eeb4SYOSHIFUJI Hideaki #endif
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
21761da177e4SLinus Torvalds 	if (skb == NULL)
21771da177e4SLinus Torvalds 		return NULL;
21781da177e4SLinus Torvalds 
21791da177e4SLinus Torvalds 	/* Reserve space for headers. */
21801da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
21811da177e4SLinus Torvalds 
21821da177e4SLinus Torvalds 	skb->dst = dst_clone(dst);
21831da177e4SLinus Torvalds 
21841da177e4SLinus Torvalds 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
21852e6599cbSArnaldo Carvalho de Melo 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
21862e6599cbSArnaldo Carvalho de Melo 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
21871da177e4SLinus Torvalds 			   /* SACK_PERM takes the place of the NOP,NOP padding of the TS option */
21882e6599cbSArnaldo Carvalho de Melo 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2189cfb6eeb4SYOSHIFUJI Hideaki 
2190cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2191cfb6eeb4SYOSHIFUJI Hideaki 	/* Are we doing MD5 on this segment? If so, make room for it */
2192cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2193cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
2194cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2195cfb6eeb4SYOSHIFUJI Hideaki #endif
2196aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2197aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
21981da177e4SLinus Torvalds 
2199aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
22001da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
22011da177e4SLinus Torvalds 	th->syn = 1;
22021da177e4SLinus Torvalds 	th->ack = 1;
22031da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
22041da177e4SLinus Torvalds 	th->source = inet_sk(sk)->sport;
22052e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
22062e6599cbSArnaldo Carvalho de Melo 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
22071da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
22081da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
22097967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
22107967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
22117967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
22121da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
22132e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
22141da177e4SLinus Torvalds 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
22151da177e4SLinus Torvalds 		__u8 rcv_wscale;
22161da177e4SLinus Torvalds 		/* Set this up on the first call only */
22171da177e4SLinus Torvalds 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
22181da177e4SLinus Torvalds 		/* Use tcp_full_space() because this is guaranteed to be the first packet */
22191da177e4SLinus Torvalds 		tcp_select_initial_window(tcp_full_space(sk),
22202e6599cbSArnaldo Carvalho de Melo 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
22211da177e4SLinus Torvalds 			&req->rcv_wnd,
22221da177e4SLinus Torvalds 			&req->window_clamp,
22232e6599cbSArnaldo Carvalho de Melo 			ireq->wscale_ok,
22241da177e4SLinus Torvalds 			&rcv_wscale);
22252e6599cbSArnaldo Carvalho de Melo 		ireq->rcv_wscale = rcv_wscale;
22261da177e4SLinus Torvalds 	}
22271da177e4SLinus Torvalds 
22281da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2229600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
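	/* The raw window field is only 16 bits wide, hence the 65535 clamp;
	 * a larger initial rcv_wnd becomes visible to the peer only once
	 * window scaling takes effect on post-handshake segments.
	 */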
22301da177e4SLinus Torvalds 
22311da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2232df7a3b07SAl Viro 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
22332e6599cbSArnaldo Carvalho de Melo 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
22341da177e4SLinus Torvalds 			      TCP_SKB_CB(skb)->when,
2235cfb6eeb4SYOSHIFUJI Hideaki 			      req->ts_recent,
2236cfb6eeb4SYOSHIFUJI Hideaki 			      (
2237cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2238cfb6eeb4SYOSHIFUJI Hideaki 			       md5 ? &md5_hash_location :
2239cfb6eeb4SYOSHIFUJI Hideaki #endif
2240cfb6eeb4SYOSHIFUJI Hideaki 			       NULL)
2241cfb6eeb4SYOSHIFUJI Hideaki 			      );
22421da177e4SLinus Torvalds 
22431da177e4SLinus Torvalds 	skb->csum = 0;
22441da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
22451da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
2246cfb6eeb4SYOSHIFUJI Hideaki 
2247cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2248cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2249cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2250cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
2251cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
2252cfb6eeb4SYOSHIFUJI Hideaki 					       NULL, dst, req,
2253aa8223c7SArnaldo Carvalho de Melo 					       tcp_hdr(skb), sk->sk_protocol,
2254cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
2255cfb6eeb4SYOSHIFUJI Hideaki 	}
2256cfb6eeb4SYOSHIFUJI Hideaki #endif
2257cfb6eeb4SYOSHIFUJI Hideaki 
22581da177e4SLinus Torvalds 	return skb;
22591da177e4SLinus Torvalds }
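/* A worked example of the header sizing above, using the option lengths
 * from net/tcp.h: base tcphdr 20, TCPOLEN_MSS 4, TCPOLEN_TSTAMP_ALIGNED
 * 12, TCPOLEN_WSCALE_ALIGNED 4.  A SYN-ACK offering MSS, timestamps and
 * window scaling therefore carries a 40-byte TCP header, and
 * th->doff == 40 >> 2 == 10.  SACK_PERM adds nothing in that case, since
 * it rides in the timestamp option's NOP padding as noted above.
 */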
22601da177e4SLinus Torvalds 
22611da177e4SLinus Torvalds /*
22621da177e4SLinus Torvalds  * Do all connect socket setups that can be done AF independent.
22631da177e4SLinus Torvalds  */
226440efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk)
22651da177e4SLinus Torvalds {
22661da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
22671da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22681da177e4SLinus Torvalds 	__u8 rcv_wscale;
22691da177e4SLinus Torvalds 
22701da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
22711da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
22721da177e4SLinus Torvalds 	 */
22731da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
22741da177e4SLinus Torvalds 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
22751da177e4SLinus Torvalds 
2276cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2277cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2278cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2279cfb6eeb4SYOSHIFUJI Hideaki #endif
2280cfb6eeb4SYOSHIFUJI Hideaki 
22811da177e4SLinus Torvalds 	/* If the user set TCP_MAXSEG, record it as the MSS clamp */
22821da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
22831da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
22841da177e4SLinus Torvalds 	tp->max_window = 0;
22855d424d5aSJohn Heffner 	tcp_mtup_init(sk);
22861da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
22871da177e4SLinus Torvalds 
22881da177e4SLinus Torvalds 	if (!tp->window_clamp)
22891da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
22901da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
22911da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
22921da177e4SLinus Torvalds 
22931da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
22941da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
22951da177e4SLinus Torvalds 				  &tp->rcv_wnd,
22961da177e4SLinus Torvalds 				  &tp->window_clamp,
22971da177e4SLinus Torvalds 				  sysctl_tcp_window_scaling,
22981da177e4SLinus Torvalds 				  &rcv_wscale);
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
23011da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
23021da177e4SLinus Torvalds 
23031da177e4SLinus Torvalds 	sk->sk_err = 0;
23041da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
23051da177e4SLinus Torvalds 	tp->snd_wnd = 0;
23061da177e4SLinus Torvalds 	tcp_init_wl(tp, tp->write_seq, 0);
23071da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
23081da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
23091da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
23101da177e4SLinus Torvalds 	tp->rcv_wup = 0;
23111da177e4SLinus Torvalds 	tp->copied_seq = 0;
23121da177e4SLinus Torvalds 
2313463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2314463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
23151da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
23161da177e4SLinus Torvalds }
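/* A worked example of the initial-window selection above, assuming
 * tcp_select_initial_window() keeps halving the space until it fits the
 * unscaled 16-bit window field: with tcp_full_space(sk) == 262144 and
 * window scaling enabled,
 *
 *	262144 -> 131072 -> 65536 -> 32768
 *
 * takes three shifts, so rcv_wscale == 3 and the receiver can later
 * advertise up to 65535 << 3 bytes.  Figures are illustrative only.
 */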
23171da177e4SLinus Torvalds 
23181da177e4SLinus Torvalds /*
23191da177e4SLinus Torvalds  * Build a SYN and send it off.
23201da177e4SLinus Torvalds  */
23211da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
23221da177e4SLinus Torvalds {
23231da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
23241da177e4SLinus Torvalds 	struct sk_buff *buff;
23251da177e4SLinus Torvalds 
23261da177e4SLinus Torvalds 	tcp_connect_init(sk);
23271da177e4SLinus Torvalds 
2328d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
23291da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
23301da177e4SLinus Torvalds 		return -ENOBUFS;
23311da177e4SLinus Torvalds 
23321da177e4SLinus Torvalds 	/* Reserve space for headers. */
23331da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
23341da177e4SLinus Torvalds 
23351da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
23369e412ba7SIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
23371da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->sacked = 0;
23387967168cSHerbert Xu 	skb_shinfo(buff)->gso_segs = 1;
23397967168cSHerbert Xu 	skb_shinfo(buff)->gso_size = 0;
23407967168cSHerbert Xu 	skb_shinfo(buff)->gso_type = 0;
23411da177e4SLinus Torvalds 	buff->csum = 0;
2342bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
23431da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
23441da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
23451da177e4SLinus Torvalds 
23461da177e4SLinus Torvalds 	/* Send it off. */
23471da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
23481da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
23491da177e4SLinus Torvalds 	skb_header_release(buff);
2350fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, buff);
2351*3ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
2352*3ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
23531da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
2354dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2355bd37a088SWei Yongjun 
2356bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2357bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
2358bd37a088SWei Yongjun 	 */
2359bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2360bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
23611da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
23643f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
23653f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
23661da177e4SLinus Torvalds 	return 0;
23671da177e4SLinus Torvalds }
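/* From user space this is simply the active open; a minimal, hedged
 * illustration with error handling omitted and the address purely for
 * documentation:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sa = { .sin_family = AF_INET,
 *				  .sin_port = htons(80) };
 *
 *	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * connect() lands in tcp_connect(), which queues the SYN and arms the
 * retransmit timer as shown above.
 */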
23681da177e4SLinus Torvalds 
23691da177e4SLinus Torvalds /* Send out a delayed ACK; the caller does the policy checking
23701da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
23711da177e4SLinus Torvalds  * for details.
23721da177e4SLinus Torvalds  */
23731da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
23741da177e4SLinus Torvalds {
2375463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2376463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
23771da177e4SLinus Torvalds 	unsigned long timeout;
23781da177e4SLinus Torvalds 
23791da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2380463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
23811da177e4SLinus Torvalds 		int max_ato = HZ/2;
23821da177e4SLinus Torvalds 
2383463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
23841da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
23851da177e4SLinus Torvalds 
23861da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds 		/* If an rtt estimate is known, use it to bound the delayed ack.
2389463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of rtt
23901da177e4SLinus Torvalds 		 * measurements directly.
23911da177e4SLinus Torvalds 		 */
23921da177e4SLinus Torvalds 		if (tp->srtt) {
23931da177e4SLinus Torvalds 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
23941da177e4SLinus Torvalds 
23951da177e4SLinus Torvalds 			if (rtt < max_ato)
23961da177e4SLinus Torvalds 				max_ato = rtt;
23971da177e4SLinus Torvalds 		}
23981da177e4SLinus Torvalds 
23991da177e4SLinus Torvalds 		ato = min(ato, max_ato);
24001da177e4SLinus Torvalds 	}
24011da177e4SLinus Torvalds 
24021da177e4SLinus Torvalds 	/* Stay within the limit we were given */
24031da177e4SLinus Torvalds 	timeout = jiffies + ato;
24041da177e4SLinus Torvalds 
24051da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already pending. */
2406463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
24071da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
24081da177e4SLinus Torvalds 		 * send ACK now.
24091da177e4SLinus Torvalds 		 */
2410463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
2411463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
24121da177e4SLinus Torvalds 			tcp_send_ack(sk);
24131da177e4SLinus Torvalds 			return;
24141da177e4SLinus Torvalds 		}
24151da177e4SLinus Torvalds 
2416463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2417463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
24181da177e4SLinus Torvalds 	}
2419463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2420463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
2421463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
24221da177e4SLinus Torvalds }
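/* A worked example of the clamping above, with HZ == 1000 and figures
 * that are illustrative only: say ato arrives as 200ms and the smoothed
 * RTT is 20ms (tp->srtt >> 3 == 20 jiffies).  Then rtt is raised to
 * TCP_DELACK_MIN (HZ/25 == 40ms), max_ato drops from 500ms to 40ms, and
 * ato = min(200, 40) == 40ms, so the delayed ACK never lags far behind
 * the measured round trip.
 */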
24231da177e4SLinus Torvalds 
24241da177e4SLinus Torvalds /* This routine sends an ACK and also updates the window. */
24251da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
24261da177e4SLinus Torvalds {
24271da177e4SLinus Torvalds 	/* If we have been reset, we may not send again. */
24281da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
24291da177e4SLinus Torvalds 		struct sk_buff *buff;
24301da177e4SLinus Torvalds 
24311da177e4SLinus Torvalds 		/* We are not putting this on the write queue, so
24321da177e4SLinus Torvalds 		 * tcp_transmit_skb() will set the ownership to this
24331da177e4SLinus Torvalds 		 * sock.
24341da177e4SLinus Torvalds 		 */
24351da177e4SLinus Torvalds 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
24361da177e4SLinus Torvalds 		if (buff == NULL) {
2437463c84b9SArnaldo Carvalho de Melo 			inet_csk_schedule_ack(sk);
2438463c84b9SArnaldo Carvalho de Melo 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
24393f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
24403f421baaSArnaldo Carvalho de Melo 						  TCP_DELACK_MAX, TCP_RTO_MAX);
24411da177e4SLinus Torvalds 			return;
24421da177e4SLinus Torvalds 		}
24431da177e4SLinus Torvalds 
24441da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
24451da177e4SLinus Torvalds 		skb_reserve(buff, MAX_TCP_HEADER);
24461da177e4SLinus Torvalds 		buff->csum = 0;
24471da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
24481da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->sacked = 0;
24497967168cSHerbert Xu 		skb_shinfo(buff)->gso_segs = 1;
24507967168cSHerbert Xu 		skb_shinfo(buff)->gso_size = 0;
24517967168cSHerbert Xu 		skb_shinfo(buff)->gso_type = 0;
24521da177e4SLinus Torvalds 
24531da177e4SLinus Torvalds 		/* Send it off, this clears delayed acks for us. */
24549e412ba7SIlpo Järvinen 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
24551da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2456dfb4b9dcSDavid S. Miller 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
24571da177e4SLinus Torvalds 	}
24581da177e4SLinus Torvalds }
24591da177e4SLinus Torvalds 
24601da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
24611da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
24621da177e4SLinus Torvalds  *
24631da177e4SLinus Torvalds  * Question: what should we do while in urgent mode?
24641da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
24651da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
24661da177e4SLinus Torvalds  *
24671da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
24681da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
24691da177e4SLinus Torvalds  * out-of-date with SND.UNA-1, to probe the window.
24701da177e4SLinus Torvalds  */
24711da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
24721da177e4SLinus Torvalds {
24731da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24741da177e4SLinus Torvalds 	struct sk_buff *skb;
24751da177e4SLinus Torvalds 
24761da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
24771da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
24781da177e4SLinus Torvalds 	if (skb == NULL)
24791da177e4SLinus Torvalds 		return -1;
24801da177e4SLinus Torvalds 
24811da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
24821da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
24831da177e4SLinus Torvalds 	skb->csum = 0;
24841da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
24851da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = urgent;
24867967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
24877967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
24887967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
24891da177e4SLinus Torvalds 
24901da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
24911da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
24921da177e4SLinus Torvalds 	 * send it.
24931da177e4SLinus Torvalds 	 */
24941da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
24951da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
24961da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2497dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
24981da177e4SLinus Torvalds }
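/* Why the SND.UNA-1 trick works, with illustrative numbers: if
 * tp->snd_una == 5000, the probe is a zero-length segment with
 * seq == end_seq == 4999.  That sequence is already acknowledged, so the
 * peer drops it but must reply with a pure ACK advertising its current
 * window -- which is all a zero-window probe needs.  The SND.UNA variant
 * used in urgent mode additionally refreshes the urgent pointer.
 */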
24991da177e4SLinus Torvalds 
25001da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
25011da177e4SLinus Torvalds {
25021da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
25031da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
25041da177e4SLinus Torvalds 		struct sk_buff *skb;
25051da177e4SLinus Torvalds 
2506fe067e8aSDavid S. Miller 		if ((skb = tcp_send_head(sk)) != NULL &&
25071da177e4SLinus Torvalds 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una + tp->snd_wnd)) {
25081da177e4SLinus Torvalds 			int err;
25091da177e4SLinus Torvalds 			unsigned int mss = tcp_current_mss(sk, 0);
25101da177e4SLinus Torvalds 			unsigned int seg_size = tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq;
25111da177e4SLinus Torvalds 
25121da177e4SLinus Torvalds 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
25131da177e4SLinus Torvalds 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds 			/* We are probing the opening of a window
25161da177e4SLinus Torvalds 			 * but the window size is != 0;
25171da177e4SLinus Torvalds 			 * this must have been the result of sender-side SWS avoidance.
25181da177e4SLinus Torvalds 			 */
25191da177e4SLinus Torvalds 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
25201da177e4SLinus Torvalds 			    skb->len > mss) {
25211da177e4SLinus Torvalds 				seg_size = min(seg_size, mss);
25221da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2523846998aeSDavid S. Miller 				if (tcp_fragment(sk, skb, seg_size, mss))
25241da177e4SLinus Torvalds 					return -1;
25251da177e4SLinus Torvalds 			} else if (!tcp_skb_pcount(skb))
2526846998aeSDavid S. Miller 				tcp_set_skb_tso_segs(sk, skb, mss);
25271da177e4SLinus Torvalds 
25281da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
25291da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2530dfb4b9dcSDavid S. Miller 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
25311da177e4SLinus Torvalds 			if (!err) {
25329e412ba7SIlpo Järvinen 				update_send_head(sk, skb);
25331da177e4SLinus Torvalds 			}
25341da177e4SLinus Torvalds 			return err;
25351da177e4SLinus Torvalds 		} else {
25361da177e4SLinus Torvalds 			if (tp->urg_mode &&
25371da177e4SLinus Torvalds 			    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
25381da177e4SLinus Torvalds 				tcp_xmit_probe_skb(sk, TCPCB_URG);
25391da177e4SLinus Torvalds 			return tcp_xmit_probe_skb(sk, 0);
25401da177e4SLinus Torvalds 		}
25411da177e4SLinus Torvalds 	}
25421da177e4SLinus Torvalds 	return -1;
25431da177e4SLinus Torvalds }
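/* A worked example of the window-limited split above, figures
 * illustrative: with tp->snd_una == 1000, tp->snd_wnd == 500 and the head
 * skb covering [1000, 2460) at an mss of 1460, seg_size == 500, so
 * tcp_fragment() carves off a 500-byte piece that fits the offered
 * window and it goes out with PSH set; the remainder waits for the
 * window to open.
 */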
25441da177e4SLinus Torvalds 
25451da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
25461da177e4SLinus Torvalds  * send a partial packet; otherwise send a zero-window probe.
25471da177e4SLinus Torvalds  */
25481da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
25491da177e4SLinus Torvalds {
2550463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
25511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25521da177e4SLinus Torvalds 	int err;
25531da177e4SLinus Torvalds 
25541da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
25551da177e4SLinus Torvalds 
2556fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
25571da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
25586687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2559463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
25601da177e4SLinus Torvalds 		return;
25611da177e4SLinus Torvalds 	}
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds 	if (err <= 0) {
2564463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2565463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
25666687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2567463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
25683f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
25693f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25701da177e4SLinus Torvalds 	} else {
25711da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
25726687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
25731da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
25741da177e4SLinus Torvalds 		 *
25751da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
25761da177e4SLinus Torvalds 		 */
25776687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
25786687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2579463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2580463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
25813f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
25823f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25831da177e4SLinus Torvalds 	}
25841da177e4SLinus Torvalds }
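/* A worked example of the backoff arithmetic above, assuming HZ == 1000,
 * an icsk_rto of 200ms and TCP_RTO_MAX == 120s: consecutive failed
 * probes are spaced min(200ms << backoff, 120s) apart, i.e. 200ms,
 * 400ms, 800ms, ..., hitting the 120s cap once backoff reaches 10.
 * Local-congestion failures instead use TCP_RESOURCE_PROBE_INTERVAL
 * (HZ/2) as the cap and leave the backoff counter alone.
 */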
25851da177e4SLinus Torvalds 
25861da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
25871da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
25881da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
25891da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2590f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
25915d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
2592