xref: /linux/net/ipv4/tcp_output.c (revision 2de979bd7da9c8b39cc0aabb0ab5aa1516d929eb)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  */
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds /*
241da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
251da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
261da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
271da177e4SLinus Torvalds  *				:	AF independence
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
301da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
311da177e4SLinus Torvalds  *					during syn/ack processing.
321da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
331da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
341da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
351da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
361da177e4SLinus Torvalds  *
371da177e4SLinus Torvalds  */
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
421da177e4SLinus Torvalds #include <linux/module.h>
431da177e4SLinus Torvalds #include <linux/smp_lock.h>
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds /* People can turn this off for buggy TCP's found in printers etc. */
46ab32ea5dSBrian Haley int sysctl_tcp_retrans_collapse __read_mostly = 1;
471da177e4SLinus Torvalds 
4815d99e02SRick Jones /* People can turn this on to work with those rare, broken TCPs that
4915d99e02SRick Jones  * interpret the window field as a signed quantity.
5015d99e02SRick Jones  */
51ab32ea5dSBrian Haley int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
5215d99e02SRick Jones 
531da177e4SLinus Torvalds /* This limits the percentage of the congestion window which we
541da177e4SLinus Torvalds  * will allow a single TSO frame to consume.  Building TSO frames
551da177e4SLinus Torvalds  * which are too large can cause TCP streams to be bursty.
561da177e4SLinus Torvalds  */
57ab32ea5dSBrian Haley int sysctl_tcp_tso_win_divisor __read_mostly = 3;
581da177e4SLinus Torvalds 
59ab32ea5dSBrian Haley int sysctl_tcp_mtu_probing __read_mostly = 0;
60ab32ea5dSBrian Haley int sysctl_tcp_base_mss __read_mostly = 512;
615d424d5aSJohn Heffner 
6235089bb2SDavid S. Miller /* By default, RFC2861 behavior.  */
63ab32ea5dSBrian Haley int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
6435089bb2SDavid S. Miller 
6540efc6faSStephen Hemminger static void update_send_head(struct sock *sk, struct tcp_sock *tp,
661da177e4SLinus Torvalds 			     struct sk_buff *skb)
671da177e4SLinus Torvalds {
68fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
691da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
701da177e4SLinus Torvalds 	tcp_packets_out_inc(sk, tp, skb);
711da177e4SLinus Torvalds }
721da177e4SLinus Torvalds 
731da177e4SLinus Torvalds /* Send SND.NXT, if the window was not shrunk.
741da177e4SLinus Torvalds  * If the window has been shrunk, what should we send? It is not clear at all.
751da177e4SLinus Torvalds  * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
761da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
771da177e4SLinus Torvalds  * invalid. OK, let's settle on this for now:
781da177e4SLinus Torvalds  */
791da177e4SLinus Torvalds static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
801da177e4SLinus Torvalds {
811da177e4SLinus Torvalds 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
821da177e4SLinus Torvalds 		return tp->snd_nxt;
831da177e4SLinus Torvalds 	else
841da177e4SLinus Torvalds 		return tp->snd_una+tp->snd_wnd;
851da177e4SLinus Torvalds }
861da177e4SLinus Torvalds 
871da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
881da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
891da177e4SLinus Torvalds  *
901da177e4SLinus Torvalds  * 1. It is independent of path mtu.
911da177e4SLinus Torvalds  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
921da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
931da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
941da177e4SLinus Torvalds  *    large MSS.
951da177e4SLinus Torvalds  * 4. We do not do 3; we advertise an MSS calculated from the first
961da177e4SLinus Torvalds  *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
971da177e4SLinus Torvalds  *    This may be overridden via information stored in routing table.
981da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
991da177e4SLinus Torvalds  *    probably even Jumbo".
1001da177e4SLinus Torvalds  */
1011da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1021da177e4SLinus Torvalds {
1031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1041da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
1051da177e4SLinus Torvalds 	int mss = tp->advmss;
1061da177e4SLinus Torvalds 
1071da177e4SLinus Torvalds 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
1081da177e4SLinus Torvalds 		mss = dst_metric(dst, RTAX_ADVMSS);
1091da177e4SLinus Torvalds 		tp->advmss = mss;
1101da177e4SLinus Torvalds 	}
1111da177e4SLinus Torvalds 
1121da177e4SLinus Torvalds 	return (__u16)mss;
1131da177e4SLinus Torvalds }
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
1161da177e4SLinus Torvalds  * This is the first part of the cwnd validation mechanism. */
117463c84b9SArnaldo Carvalho de Melo static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
1181da177e4SLinus Torvalds {
119463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1201da177e4SLinus Torvalds 	s32 delta = tcp_time_stamp - tp->lsndtime;
1211da177e4SLinus Torvalds 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
1221da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1231da177e4SLinus Torvalds 
1246687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1251da177e4SLinus Torvalds 
1266687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1271da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1281da177e4SLinus Torvalds 
129463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1301da177e4SLinus Torvalds 		cwnd >>= 1;
1311da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
1321da177e4SLinus Torvalds 	tp->snd_cwnd_stamp = tcp_time_stamp;
1331da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1341da177e4SLinus Torvalds }
1351da177e4SLinus Torvalds 
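/*
 * Illustrative userspace sketch (not kernel code) of the decay loop in
 * tcp_cwnd_restart() above: cwnd is halved once per full RTO of idle time,
 * but never drops below the restart window.  The helper name and parameters
 * are made up for the example; they are not kernel symbols.
 */
#include <stdint.h>

static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
				int32_t idle, int32_t rto)
{
	if (restart_cwnd > cwnd)
		restart_cwnd = cwnd;
	while ((idle -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;			/* one halving per idle RTO */
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}
/* e.g. cwnd 32, restart 4: a little over two idle RTOs halves cwnd twice, to 8. */
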
13640efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
1371da177e4SLinus Torvalds 				struct sk_buff *skb, struct sock *sk)
1381da177e4SLinus Torvalds {
139463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
140463c84b9SArnaldo Carvalho de Melo 	const u32 now = tcp_time_stamp;
1411da177e4SLinus Torvalds 
14235089bb2SDavid S. Miller 	if (sysctl_tcp_slow_start_after_idle &&
14335089bb2SDavid S. Miller 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
144463c84b9SArnaldo Carvalho de Melo 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
1451da177e4SLinus Torvalds 
1461da177e4SLinus Torvalds 	tp->lsndtime = now;
1471da177e4SLinus Torvalds 
1481da177e4SLinus Torvalds 	/* If it is a reply within ato after the last received
1491da177e4SLinus Torvalds 	 * packet, enter pingpong mode.
1501da177e4SLinus Torvalds 	 */
151463c84b9SArnaldo Carvalho de Melo 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
152463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1531da177e4SLinus Torvalds }
1541da177e4SLinus Torvalds 
15540efc6faSStephen Hemminger static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
1561da177e4SLinus Torvalds {
157463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
158463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1591da177e4SLinus Torvalds }
1601da177e4SLinus Torvalds 
1611da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1621da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1631da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
1641da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
1651da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
1661da177e4SLinus Torvalds  * This MUST be enforced by all callers.
1671da177e4SLinus Torvalds  */
1681da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
1691da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
1701da177e4SLinus Torvalds 			       int wscale_ok, __u8 *rcv_wscale)
1711da177e4SLinus Torvalds {
1721da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
1731da177e4SLinus Torvalds 
1741da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
1751da177e4SLinus Torvalds 	if (*window_clamp == 0)
1761da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
1771da177e4SLinus Torvalds 	space = min(*window_clamp, space);
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
1801da177e4SLinus Torvalds 	if (space > mss)
1811da177e4SLinus Torvalds 		space = (space / mss) * mss;
1821da177e4SLinus Torvalds 
1831da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
18415d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
18515d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
18615d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
18715d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
18815d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
18915d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
1901da177e4SLinus Torvalds 	 */
19115d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
1921da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
19315d99e02SRick Jones 	else
19415d99e02SRick Jones 		(*rcv_wnd) = space;
19515d99e02SRick Jones 
1961da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
1971da177e4SLinus Torvalds 	if (wscale_ok) {
1981da177e4SLinus Torvalds 		/* Set window scaling on max possible window
1991da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2001da177e4SLinus Torvalds 		 */
2011da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
202316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2031da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2041da177e4SLinus Torvalds 			space >>= 1;
2051da177e4SLinus Torvalds 			(*rcv_wscale)++;
2061da177e4SLinus Torvalds 		}
2071da177e4SLinus Torvalds 	}
2081da177e4SLinus Torvalds 
2091da177e4SLinus Torvalds 	/* Set the initial window to a value large enough for senders
2106b251858SDavid S. Miller 	 * following RFC2414. Senders not following this RFC
2111da177e4SLinus Torvalds 	 * will be satisfied with 2.
2121da177e4SLinus Torvalds 	 */
2131da177e4SLinus Torvalds 	if (mss > (1<<*rcv_wscale)) {
21401ff367eSDavid S. Miller 		int init_cwnd = 4;
21501ff367eSDavid S. Miller 		if (mss > 1460*3)
2161da177e4SLinus Torvalds 			init_cwnd = 2;
21701ff367eSDavid S. Miller 		else if (mss > 1460)
21801ff367eSDavid S. Miller 			init_cwnd = 3;
2191da177e4SLinus Torvalds 		if (*rcv_wnd > init_cwnd*mss)
2201da177e4SLinus Torvalds 			*rcv_wnd = init_cwnd*mss;
2211da177e4SLinus Torvalds 	}
2221da177e4SLinus Torvalds 
2231da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2241da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2251da177e4SLinus Torvalds }
2261da177e4SLinus Torvalds 
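/*
 * Illustrative userspace sketch of the window-scale selection loop in
 * tcp_select_initial_window() above: keep shifting until the buffer fits in
 * the 16-bit window field, capping the shift at 14 per RFC1323.  The helper
 * name is made up for the example.
 */
#include <stdint.h>

static uint8_t select_rcv_wscale(uint32_t space)
{
	uint8_t wscale = 0;

	while (space > 65535 && wscale < 14) {
		space >>= 1;
		wscale++;
	}
	return wscale;
}
/* e.g. a 4 MB receive buffer yields wscale 7, since 4194304 >> 7 == 32768. */
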
2271da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2281da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2291da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2301da177e4SLinus Torvalds  * frame.
2311da177e4SLinus Torvalds  */
23240efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2331da177e4SLinus Torvalds {
2341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2351da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2361da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2371da177e4SLinus Torvalds 
2381da177e4SLinus Torvalds 	/* Never shrink the offered window */
2391da177e4SLinus Torvalds 	if (new_win < cur_win) {
2401da177e4SLinus Torvalds 		/* Danger Will Robinson!
2411da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2421da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2431da177e4SLinus Torvalds 		 * window in time.  --DaveM
2441da177e4SLinus Torvalds 		 *
2451da177e4SLinus Torvalds 		 * Relax Will Robinson.
2461da177e4SLinus Torvalds 		 */
2471da177e4SLinus Torvalds 		new_win = cur_win;
2481da177e4SLinus Torvalds 	}
2491da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2501da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2511da177e4SLinus Torvalds 
2521da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2531da177e4SLinus Torvalds 	 * scaled window.
2541da177e4SLinus Torvalds 	 */
25515d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2561da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2571da177e4SLinus Torvalds 	else
2581da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2591da177e4SLinus Torvalds 
2601da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2611da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds 	/* If we advertise zero window, disable fast path. */
2641da177e4SLinus Torvalds 	if (new_win == 0)
2651da177e4SLinus Torvalds 		tp->pred_flags = 0;
2661da177e4SLinus Torvalds 
2671da177e4SLinus Torvalds 	return new_win;
2681da177e4SLinus Torvalds }
2691da177e4SLinus Torvalds 
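/*
 * Illustrative userspace sketch of what tcp_select_window() above does with
 * the raw window before it is written into th->window: never shrink the
 * offer, clamp to the largest value representable after scaling, then apply
 * the RFC1323 shift.  The helper name is made up for the example, and the
 * signed-window workaround above is omitted for brevity.
 */
#include <stdint.h>

static uint16_t scaled_window_to_advertise(uint32_t new_win, uint32_t cur_win,
					   uint8_t rcv_wscale)
{
	if (new_win < cur_win)
		new_win = cur_win;		/* never shrink the offered window */
	if (new_win > (65535u << rcv_wscale))
		new_win = 65535u << rcv_wscale;	/* keep it representable after scaling */
	return (uint16_t)(new_win >> rcv_wscale);
}
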
270df7a3b07SAl Viro static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
271cfb6eeb4SYOSHIFUJI Hideaki 					 __u32 tstamp, __u8 **md5_hash)
27240efc6faSStephen Hemminger {
27340efc6faSStephen Hemminger 	if (tp->rx_opt.tstamp_ok) {
274496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
27540efc6faSStephen Hemminger 			       (TCPOPT_NOP << 16) |
27640efc6faSStephen Hemminger 			       (TCPOPT_TIMESTAMP << 8) |
27740efc6faSStephen Hemminger 			       TCPOLEN_TIMESTAMP);
27840efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);
27940efc6faSStephen Hemminger 		*ptr++ = htonl(tp->rx_opt.ts_recent);
28040efc6faSStephen Hemminger 	}
28140efc6faSStephen Hemminger 	if (tp->rx_opt.eff_sacks) {
28240efc6faSStephen Hemminger 		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
28340efc6faSStephen Hemminger 		int this_sack;
28440efc6faSStephen Hemminger 
28540efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
28640efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
28740efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
28840efc6faSStephen Hemminger 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
28940efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
290*2de979bdSStephen Hemminger 
29140efc6faSStephen Hemminger 		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
29240efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
29340efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
29440efc6faSStephen Hemminger 		}
295*2de979bdSStephen Hemminger 
29640efc6faSStephen Hemminger 		if (tp->rx_opt.dsack) {
29740efc6faSStephen Hemminger 			tp->rx_opt.dsack = 0;
29840efc6faSStephen Hemminger 			tp->rx_opt.eff_sacks--;
29940efc6faSStephen Hemminger 		}
30040efc6faSStephen Hemminger 	}
301cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
302cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
303cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
304cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
305cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
306cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
307cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *)ptr;
308cfb6eeb4SYOSHIFUJI Hideaki 	}
309cfb6eeb4SYOSHIFUJI Hideaki #endif
31040efc6faSStephen Hemminger }
31140efc6faSStephen Hemminger 
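/*
 * Illustrative userspace sketch of how one 32-bit option word is packed in
 * tcp_build_and_update_options() above: two NOP pad bytes, the option kind
 * and its length share a single network-order word.  The standard values are
 * NOP = 1, TIMESTAMP = 8, and a timestamp option length of 10 bytes.
 */
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t timestamp_option_header(void)
{
	return htonl((1u << 24) |	/* TCPOPT_NOP */
		     (1u << 16) |	/* TCPOPT_NOP */
		     (8u << 8)  |	/* TCPOPT_TIMESTAMP */
		     10u);		/* TCPOLEN_TIMESTAMP */
}
/* The two 32-bit words that follow carry TSVAL and TSECR, as above. */
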
31240efc6faSStephen Hemminger /* Construct a tcp options header for a SYN or SYN_ACK packet.
31340efc6faSStephen Hemminger  * If this is ever changed, make sure to change the definition of
31440efc6faSStephen Hemminger  * MAX_SYN_SIZE to match the new maximum number of options that you
31540efc6faSStephen Hemminger  * can generate.
316cfb6eeb4SYOSHIFUJI Hideaki  *
317cfb6eeb4SYOSHIFUJI Hideaki  * Note that with the RFC2385 TCP option, we make room for the
318cfb6eeb4SYOSHIFUJI Hideaki  * 16 byte MD5 hash. This will be filled in later, so the pointer for the
319cfb6eeb4SYOSHIFUJI Hideaki  * location to be filled is passed back up.
32040efc6faSStephen Hemminger  */
321df7a3b07SAl Viro static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
32240efc6faSStephen Hemminger 				  int offer_wscale, int wscale, __u32 tstamp,
323cfb6eeb4SYOSHIFUJI Hideaki 				  __u32 ts_recent, __u8 **md5_hash)
32440efc6faSStephen Hemminger {
32540efc6faSStephen Hemminger 	/* We always get an MSS option.
32640efc6faSStephen Hemminger 	 * The option bytes which will appear in normal data
32740efc6faSStephen Hemminger 	 * packets, should timestamps be used, must be covered by the MSS
32840efc6faSStephen Hemminger 	 * advertised.  But we subtract them from tp->mss_cache so
32940efc6faSStephen Hemminger 	 * that calculations in tcp_sendmsg are simpler etc.
33040efc6faSStephen Hemminger 	 * So account for this fact here if necessary.  If we
33140efc6faSStephen Hemminger 	 * don't do this correctly, as a receiver we won't
33240efc6faSStephen Hemminger 	 * recognize data packets as being full sized when we
33340efc6faSStephen Hemminger 	 * should, and thus we won't abide by the delayed ACK
33440efc6faSStephen Hemminger 	 * rules correctly.
33540efc6faSStephen Hemminger 	 * SACKs don't matter, we never delay an ACK when we
33640efc6faSStephen Hemminger 	 * have any of those going out.
33740efc6faSStephen Hemminger 	 */
33840efc6faSStephen Hemminger 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
33940efc6faSStephen Hemminger 	if (ts) {
34040efc6faSStephen Hemminger 		if (sack)
341496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
342496c98dfSYOSHIFUJI Hideaki 				       (TCPOLEN_SACK_PERM << 16) |
343496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
344496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
34540efc6faSStephen Hemminger 		else
346496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
347496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_NOP << 16) |
348496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
349496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
35040efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);		/* TSVAL */
35140efc6faSStephen Hemminger 		*ptr++ = htonl(ts_recent);	/* TSECR */
35240efc6faSStephen Hemminger 	} else if (sack)
353496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
354496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
355496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_SACK_PERM << 8) |
356496c98dfSYOSHIFUJI Hideaki 			       TCPOLEN_SACK_PERM);
35740efc6faSStephen Hemminger 	if (offer_wscale)
358496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
359496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_WINDOW << 16) |
360496c98dfSYOSHIFUJI Hideaki 			       (TCPOLEN_WINDOW << 8) |
361496c98dfSYOSHIFUJI Hideaki 			       (wscale));
362cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
363cfb6eeb4SYOSHIFUJI Hideaki 	/*
364cfb6eeb4SYOSHIFUJI Hideaki 	 * If MD5 is enabled, then we set the option, and include the size
365cfb6eeb4SYOSHIFUJI Hideaki 	 * (always 18). The actual MD5 hash is added just before the
366cfb6eeb4SYOSHIFUJI Hideaki 	 * packet is sent.
367cfb6eeb4SYOSHIFUJI Hideaki 	 */
368cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
369cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
370cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
371cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
372cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
373cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *) ptr;
374cfb6eeb4SYOSHIFUJI Hideaki 	}
375cfb6eeb4SYOSHIFUJI Hideaki #endif
37640efc6faSStephen Hemminger }
3771da177e4SLinus Torvalds 
3781da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
3791da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
3801da177e4SLinus Torvalds  * transmission and possible later retransmissions.
3811da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
3821da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
3831da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
3841da177e4SLinus Torvalds  * device.
3851da177e4SLinus Torvalds  *
3861da177e4SLinus Torvalds  * We are working here with either a clone of the original
3871da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
3881da177e4SLinus Torvalds  */
389dfb4b9dcSDavid S. Miller static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
3901da177e4SLinus Torvalds {
3916687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
392dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
393dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
394dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
395dfb4b9dcSDavid S. Miller 	int tcp_header_size;
396cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
397cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
398cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
399cfb6eeb4SYOSHIFUJI Hideaki #endif
4001da177e4SLinus Torvalds 	struct tcphdr *th;
4011da177e4SLinus Torvalds 	int sysctl_flags;
4021da177e4SLinus Torvalds 	int err;
4031da177e4SLinus Torvalds 
404dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
405dfb4b9dcSDavid S. Miller 
406dfb4b9dcSDavid S. Miller 	/* If congestion control is doing timestamping, we must
407dfb4b9dcSDavid S. Miller 	 * take such a timestamp before we potentially clone/copy.
408dfb4b9dcSDavid S. Miller 	 */
409dfb4b9dcSDavid S. Miller 	if (icsk->icsk_ca_ops->rtt_sample)
410dfb4b9dcSDavid S. Miller 		__net_timestamp(skb);
411dfb4b9dcSDavid S. Miller 
412dfb4b9dcSDavid S. Miller 	if (likely(clone_it)) {
413dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
414dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
415dfb4b9dcSDavid S. Miller 		else
416dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
417dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
418dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
419dfb4b9dcSDavid S. Miller 	}
420dfb4b9dcSDavid S. Miller 
421dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
422dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
423dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
424dfb4b9dcSDavid S. Miller 	tcp_header_size = tp->tcp_header_len;
4251da177e4SLinus Torvalds 
4261da177e4SLinus Torvalds #define SYSCTL_FLAG_TSTAMPS	0x1
4271da177e4SLinus Torvalds #define SYSCTL_FLAG_WSCALE	0x2
4281da177e4SLinus Torvalds #define SYSCTL_FLAG_SACK	0x4
4291da177e4SLinus Torvalds 
4301da177e4SLinus Torvalds 	sysctl_flags = 0;
431dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
4321da177e4SLinus Torvalds 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
4331da177e4SLinus Torvalds 		if (sysctl_tcp_timestamps) {
4341da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
4351da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
4361da177e4SLinus Torvalds 		}
4371da177e4SLinus Torvalds 		if (sysctl_tcp_window_scaling) {
4381da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
4391da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_WSCALE;
4401da177e4SLinus Torvalds 		}
4411da177e4SLinus Torvalds 		if (sysctl_tcp_sack) {
4421da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_SACK;
4431da177e4SLinus Torvalds 			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
4441da177e4SLinus Torvalds 				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
4451da177e4SLinus Torvalds 		}
446dfb4b9dcSDavid S. Miller 	} else if (unlikely(tp->rx_opt.eff_sacks)) {
4471da177e4SLinus Torvalds 		/* A SACK is 2 pad bytes, a 2 byte header, plus
4481da177e4SLinus Torvalds 		 * 2 32-bit sequence numbers for each SACK block.
4491da177e4SLinus Torvalds 		 */
4501da177e4SLinus Torvalds 		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
451dfb4b9dcSDavid S. Miller 				    (tp->rx_opt.eff_sacks *
452dfb4b9dcSDavid S. Miller 				     TCPOLEN_SACK_PERBLOCK));
4531da177e4SLinus Torvalds 	}
4541da177e4SLinus Torvalds 
455317a76f9SStephen Hemminger 	if (tcp_packets_in_flight(tp) == 0)
4566687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
4571da177e4SLinus Torvalds 
458cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
459cfb6eeb4SYOSHIFUJI Hideaki 	/*
460cfb6eeb4SYOSHIFUJI Hideaki 	 * Are we doing MD5 on this segment? If so - make
461cfb6eeb4SYOSHIFUJI Hideaki 	 * room for it.
462cfb6eeb4SYOSHIFUJI Hideaki 	 */
463cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tp->af_specific->md5_lookup(sk, sk);
464cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
465cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
466cfb6eeb4SYOSHIFUJI Hideaki #endif
467cfb6eeb4SYOSHIFUJI Hideaki 
4681da177e4SLinus Torvalds 	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
4691da177e4SLinus Torvalds 	skb->h.th = th;
470e89862f4SDavid S. Miller 	skb_set_owner_w(skb, sk);
4711da177e4SLinus Torvalds 
4721da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
4731da177e4SLinus Torvalds 	th->source		= inet->sport;
4741da177e4SLinus Torvalds 	th->dest		= inet->dport;
4751da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
4761da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
477df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
478dfb4b9dcSDavid S. Miller 					tcb->flags);
479dfb4b9dcSDavid S. Miller 
480dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
4811da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
4821da177e4SLinus Torvalds 		 * is never scaled.
4831da177e4SLinus Torvalds 		 */
484600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
4851da177e4SLinus Torvalds 	} else {
4861da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
4871da177e4SLinus Torvalds 	}
4881da177e4SLinus Torvalds 	th->check		= 0;
4891da177e4SLinus Torvalds 	th->urg_ptr		= 0;
4901da177e4SLinus Torvalds 
491dfb4b9dcSDavid S. Miller 	if (unlikely(tp->urg_mode &&
492dfb4b9dcSDavid S. Miller 		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
4931da177e4SLinus Torvalds 		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
4941da177e4SLinus Torvalds 		th->urg			= 1;
4951da177e4SLinus Torvalds 	}
4961da177e4SLinus Torvalds 
497dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
498df7a3b07SAl Viro 		tcp_syn_build_options((__be32 *)(th + 1),
4991da177e4SLinus Torvalds 				      tcp_advertise_mss(sk),
5001da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
5011da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_SACK),
5021da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
5031da177e4SLinus Torvalds 				      tp->rx_opt.rcv_wscale,
5041da177e4SLinus Torvalds 				      tcb->when,
505cfb6eeb4SYOSHIFUJI Hideaki 				      tp->rx_opt.ts_recent,
506cfb6eeb4SYOSHIFUJI Hideaki 
507cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
508cfb6eeb4SYOSHIFUJI Hideaki 				      md5 ? &md5_hash_location :
509cfb6eeb4SYOSHIFUJI Hideaki #endif
510cfb6eeb4SYOSHIFUJI Hideaki 				      NULL);
5111da177e4SLinus Torvalds 	} else {
512df7a3b07SAl Viro 		tcp_build_and_update_options((__be32 *)(th + 1),
513cfb6eeb4SYOSHIFUJI Hideaki 					     tp, tcb->when,
514cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
515cfb6eeb4SYOSHIFUJI Hideaki 					     md5 ? &md5_hash_location :
516cfb6eeb4SYOSHIFUJI Hideaki #endif
517cfb6eeb4SYOSHIFUJI Hideaki 					     NULL);
5181da177e4SLinus Torvalds 		TCP_ECN_send(sk, tp, skb, tcp_header_size);
5191da177e4SLinus Torvalds 	}
520dfb4b9dcSDavid S. Miller 
521cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
522cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
523cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
524cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
525cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
526cfb6eeb4SYOSHIFUJI Hideaki 					       sk, NULL, NULL,
527cfb6eeb4SYOSHIFUJI Hideaki 					       skb->h.th,
528cfb6eeb4SYOSHIFUJI Hideaki 					       sk->sk_protocol,
529cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
530cfb6eeb4SYOSHIFUJI Hideaki 	}
531cfb6eeb4SYOSHIFUJI Hideaki #endif
532cfb6eeb4SYOSHIFUJI Hideaki 
5338292a17aSArnaldo Carvalho de Melo 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
5341da177e4SLinus Torvalds 
535dfb4b9dcSDavid S. Miller 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
536fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
5371da177e4SLinus Torvalds 
5381da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
5391da177e4SLinus Torvalds 		tcp_event_data_sent(tp, skb, sk);
5401da177e4SLinus Torvalds 
541bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
5421da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_OUTSEGS);
5431da177e4SLinus Torvalds 
544e89862f4SDavid S. Miller 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
54583de47cdSHua Zhong 	if (likely(err <= 0))
5461da177e4SLinus Torvalds 		return err;
5471da177e4SLinus Torvalds 
5483cfe3baaSIlpo Järvinen 	tcp_enter_cwr(sk, 1);
5491da177e4SLinus Torvalds 
550b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
551dfb4b9dcSDavid S. Miller 
5521da177e4SLinus Torvalds #undef SYSCTL_FLAG_TSTAMPS
5531da177e4SLinus Torvalds #undef SYSCTL_FLAG_WSCALE
5541da177e4SLinus Torvalds #undef SYSCTL_FLAG_SACK
5551da177e4SLinus Torvalds }
5561da177e4SLinus Torvalds 
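/*
 * Illustrative sketch of the option-space accounting used in
 * tcp_transmit_skb() above for a non-SYN segment carrying SACK blocks:
 * a 4-byte aligned base (two NOP pad bytes plus the 2-byte kind/length
 * header) and 8 bytes of sequence numbers per block.  The helper name is
 * made up for the example.
 */
static int sack_option_bytes(int eff_sacks)
{
	return 4 + eff_sacks * 8;	/* TCPOLEN_SACK_BASE_ALIGNED + n * TCPOLEN_SACK_PERBLOCK */
}
/* e.g. three SACK blocks consume 28 of the 40 bytes of TCP option space. */
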
5571da177e4SLinus Torvalds 
5581da177e4SLinus Torvalds /* This routine just queues the buffer
5591da177e4SLinus Torvalds  *
5601da177e4SLinus Torvalds  * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
5611da177e4SLinus Torvalds  * otherwise the socket can stall.
5621da177e4SLinus Torvalds  */
5631da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
5641da177e4SLinus Torvalds {
5651da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
5661da177e4SLinus Torvalds 
5671da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
5681da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
5691da177e4SLinus Torvalds 	skb_header_release(skb);
570fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
5711da177e4SLinus Torvalds 	sk_charge_skb(sk, skb);
5721da177e4SLinus Torvalds }
5731da177e4SLinus Torvalds 
574846998aeSDavid S. Miller static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
575f6302d1dSDavid S. Miller {
576bcd76111SHerbert Xu 	if (skb->len <= mss_now || !sk_can_gso(sk)) {
577f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
578f6302d1dSDavid S. Miller 		 * non-TSO case.
579f6302d1dSDavid S. Miller 		 */
5807967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
5817967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
5827967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
583f6302d1dSDavid S. Miller 	} else {
584f6302d1dSDavid S. Miller 		unsigned int factor;
585f6302d1dSDavid S. Miller 
586846998aeSDavid S. Miller 		factor = skb->len + (mss_now - 1);
587846998aeSDavid S. Miller 		factor /= mss_now;
5887967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = factor;
5897967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = mss_now;
590bcd76111SHerbert Xu 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
5911da177e4SLinus Torvalds 	}
5921da177e4SLinus Torvalds }
5931da177e4SLinus Torvalds 
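/*
 * Illustrative sketch of the segment-count arithmetic in
 * tcp_set_skb_tso_segs() above: gso_segs is simply the ceiling of
 * len / mss.  The helper name is made up for the example.
 */
static unsigned int tso_segment_count(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* ceiling division, as the factor above */
}
/* e.g. a 40000 byte skb with a 1448 byte mss becomes 28 segments. */
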
5941da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
5951da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
5961da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
5971da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
5981da177e4SLinus Torvalds  */
5996475be16SDavid S. Miller int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
6001da177e4SLinus Torvalds {
6011da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6021da177e4SLinus Torvalds 	struct sk_buff *buff;
6036475be16SDavid S. Miller 	int nsize, old_factor;
604b60b49eaSHerbert Xu 	int nlen;
6051da177e4SLinus Torvalds 	u16 flags;
6061da177e4SLinus Torvalds 
607b2cc99f0SHerbert Xu 	BUG_ON(len > skb->len);
6086a438bbeSStephen Hemminger 
6096a438bbeSStephen Hemminger 	clear_all_retrans_hints(tp);
6101da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
6111da177e4SLinus Torvalds 	if (nsize < 0)
6121da177e4SLinus Torvalds 		nsize = 0;
6131da177e4SLinus Torvalds 
6141da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
6151da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
6161da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6171da177e4SLinus Torvalds 		return -ENOMEM;
6181da177e4SLinus Torvalds 
6191da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
6201da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
6211da177e4SLinus Torvalds 	if (buff == NULL)
6221da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
623ef5cb973SHerbert Xu 
624b60b49eaSHerbert Xu 	sk_charge_skb(sk, buff);
625b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
626b60b49eaSHerbert Xu 	buff->truesize += nlen;
627b60b49eaSHerbert Xu 	skb->truesize -= nlen;
6281da177e4SLinus Torvalds 
6291da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
6301da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
6311da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
6321da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
6331da177e4SLinus Torvalds 
6341da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
6351da177e4SLinus Torvalds 	flags = TCP_SKB_CB(skb)->flags;
6361da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
6371da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = flags;
638e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
6391da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
6401da177e4SLinus Torvalds 
64184fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
6421da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
6431da177e4SLinus Torvalds 		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
6441da177e4SLinus Torvalds 						       nsize, 0);
6451da177e4SLinus Torvalds 
6461da177e4SLinus Torvalds 		skb_trim(skb, len);
6471da177e4SLinus Torvalds 
6481da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
6491da177e4SLinus Torvalds 	} else {
65084fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
6511da177e4SLinus Torvalds 		skb_split(skb, buff, len);
6521da177e4SLinus Torvalds 	}
6531da177e4SLinus Torvalds 
6541da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
6551da177e4SLinus Torvalds 
6561da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' field of
6571da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
6581da177e4SLinus Torvalds 	 */
6591da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
660a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
6611da177e4SLinus Torvalds 
6626475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
6636475be16SDavid S. Miller 
6641da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
665846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
666846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
6671da177e4SLinus Torvalds 
6686475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
6696475be16SDavid S. Miller 	 * adjust the various packet counters.
6706475be16SDavid S. Miller 	 */
671cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
6726475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
6736475be16SDavid S. Miller 			tcp_skb_pcount(buff);
6741da177e4SLinus Torvalds 
6756475be16SDavid S. Miller 		tp->packets_out -= diff;
676e14c3cafSHerbert Xu 
677e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
678e14c3cafSHerbert Xu 			tp->sacked_out -= diff;
679e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
680e14c3cafSHerbert Xu 			tp->retrans_out -= diff;
681e14c3cafSHerbert Xu 
6826475be16SDavid S. Miller 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
6836475be16SDavid S. Miller 			tp->lost_out -= diff;
6846475be16SDavid S. Miller 			tp->left_out -= diff;
6856475be16SDavid S. Miller 		}
68683ca28beSHerbert Xu 
6876475be16SDavid S. Miller 		if (diff > 0) {
68883ca28beSHerbert Xu 			/* Adjust Reno SACK estimate. */
68983ca28beSHerbert Xu 			if (!tp->rx_opt.sack_ok) {
69083ca28beSHerbert Xu 				tp->sacked_out -= diff;
69183ca28beSHerbert Xu 				if ((int)tp->sacked_out < 0)
69283ca28beSHerbert Xu 					tp->sacked_out = 0;
69383ca28beSHerbert Xu 				tcp_sync_left_out(tp);
69483ca28beSHerbert Xu 			}
69583ca28beSHerbert Xu 
6966475be16SDavid S. Miller 			tp->fackets_out -= diff;
6976475be16SDavid S. Miller 			if ((int)tp->fackets_out < 0)
6986475be16SDavid S. Miller 				tp->fackets_out = 0;
6996475be16SDavid S. Miller 		}
7001da177e4SLinus Torvalds 	}
7011da177e4SLinus Torvalds 
7021da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
703f44b5271SDavid S. Miller 	skb_header_release(buff);
704fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
7051da177e4SLinus Torvalds 
7061da177e4SLinus Torvalds 	return 0;
7071da177e4SLinus Torvalds }
7081da177e4SLinus Torvalds 
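/*
 * Illustrative sketch of the sequence bookkeeping in tcp_fragment() above
 * when a segment is split at 'len' bytes: the new tail inherits the old
 * end_seq, and the original is cut back so the two cover the range with no
 * gap or overlap.  The struct and helper are made up for the example.
 */
#include <stdint.h>

struct seq_range { uint32_t seq, end_seq; };

static void split_sequence_space(struct seq_range *orig,
				 struct seq_range *tail, uint32_t len)
{
	tail->seq     = orig->seq + len;	/* tail starts right after the split */
	tail->end_seq = orig->end_seq;		/* and keeps the original end */
	orig->end_seq = tail->seq;		/* original now covers only 'len' bytes */
}
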
7091da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
7101da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied, but
7111da177e4SLinus Torvalds  * immediately discarded.
7121da177e4SLinus Torvalds  */
713f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
7141da177e4SLinus Torvalds {
7151da177e4SLinus Torvalds 	int i, k, eat;
7161da177e4SLinus Torvalds 
7171da177e4SLinus Torvalds 	eat = len;
7181da177e4SLinus Torvalds 	k = 0;
7191da177e4SLinus Torvalds 	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
7201da177e4SLinus Torvalds 		if (skb_shinfo(skb)->frags[i].size <= eat) {
7211da177e4SLinus Torvalds 			put_page(skb_shinfo(skb)->frags[i].page);
7221da177e4SLinus Torvalds 			eat -= skb_shinfo(skb)->frags[i].size;
7231da177e4SLinus Torvalds 		} else {
7241da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
7251da177e4SLinus Torvalds 			if (eat) {
7261da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
7271da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].size -= eat;
7281da177e4SLinus Torvalds 				eat = 0;
7291da177e4SLinus Torvalds 			}
7301da177e4SLinus Torvalds 			k++;
7311da177e4SLinus Torvalds 		}
7321da177e4SLinus Torvalds 	}
7331da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
7341da177e4SLinus Torvalds 
7351da177e4SLinus Torvalds 	skb->tail = skb->data;
7361da177e4SLinus Torvalds 	skb->data_len -= len;
7371da177e4SLinus Torvalds 	skb->len = skb->data_len;
7381da177e4SLinus Torvalds }
7391da177e4SLinus Torvalds 
7401da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
7411da177e4SLinus Torvalds {
7421da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
7431da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7441da177e4SLinus Torvalds 		return -ENOMEM;
7451da177e4SLinus Torvalds 
746f2911969SHerbert Xu 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
747f2911969SHerbert Xu 	if (unlikely(len < skb_headlen(skb)))
7481da177e4SLinus Torvalds 		__skb_pull(skb, len);
749f2911969SHerbert Xu 	else
750f2911969SHerbert Xu 		__pskb_trim_head(skb, len - skb_headlen(skb));
7511da177e4SLinus Torvalds 
7521da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
75384fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
7541da177e4SLinus Torvalds 
7551da177e4SLinus Torvalds 	skb->truesize	     -= len;
7561da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
7571da177e4SLinus Torvalds 	sk->sk_forward_alloc += len;
7581da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
7591da177e4SLinus Torvalds 
7601da177e4SLinus Torvalds 	/* Any change of skb->len requires recalculation of tso
7611da177e4SLinus Torvalds 	 * factor and mss.
7621da177e4SLinus Torvalds 	 */
7631da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
764846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
7651da177e4SLinus Torvalds 
7661da177e4SLinus Torvalds 	return 0;
7671da177e4SLinus Torvalds }
7681da177e4SLinus Torvalds 
7695d424d5aSJohn Heffner /* Not accounting for SACKs here. */
7705d424d5aSJohn Heffner int tcp_mtu_to_mss(struct sock *sk, int pmtu)
7715d424d5aSJohn Heffner {
7725d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
7735d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
7745d424d5aSJohn Heffner 	int mss_now;
7755d424d5aSJohn Heffner 
7765d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
7775d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
7785d424d5aSJohn Heffner 	 */
7795d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
7805d424d5aSJohn Heffner 
7815d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
7825d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
7835d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
7845d424d5aSJohn Heffner 
7855d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
7865d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
7875d424d5aSJohn Heffner 
7885d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
7895d424d5aSJohn Heffner 	if (mss_now < 48)
7905d424d5aSJohn Heffner 		mss_now = 48;
7915d424d5aSJohn Heffner 
7925d424d5aSJohn Heffner 	/* Now subtract TCP options size, not including SACKs */
7935d424d5aSJohn Heffner 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
7945d424d5aSJohn Heffner 
7955d424d5aSJohn Heffner 	return mss_now;
7965d424d5aSJohn Heffner }
7975d424d5aSJohn Heffner 
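/*
 * Illustrative userspace sketch of the arithmetic in tcp_mtu_to_mss() above
 * for a plain IPv4 path: MTU minus the 20-byte IP header minus the 20-byte
 * base TCP header, floored at 48, then minus whatever TCP options the
 * connection carries in every segment.  The helper name and parameters are
 * made up for the example.
 */
static int mss_for_mtu(int pmtu, int net_header_len, int tcp_option_len)
{
	int mss = pmtu - net_header_len - 20;	/* base TCP header is 20 bytes */

	if (mss < 48)
		mss = 48;
	return mss - tcp_option_len;
}
/* e.g. 1500 - 20 - 20 = 1460, and 1448 once 12 bytes of timestamps are in use. */
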
7985d424d5aSJohn Heffner /* Inverse of above */
7995d424d5aSJohn Heffner int tcp_mss_to_mtu(struct sock *sk, int mss)
8005d424d5aSJohn Heffner {
8015d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8025d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8035d424d5aSJohn Heffner 	int mtu;
8045d424d5aSJohn Heffner 
8055d424d5aSJohn Heffner 	mtu = mss +
8065d424d5aSJohn Heffner 	      tp->tcp_header_len +
8075d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
8085d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
8095d424d5aSJohn Heffner 
8105d424d5aSJohn Heffner 	return mtu;
8115d424d5aSJohn Heffner }
8125d424d5aSJohn Heffner 
8135d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
8145d424d5aSJohn Heffner {
8155d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8165d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8175d424d5aSJohn Heffner 
8185d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
8195d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
8205d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
8215d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
8225d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
8235d424d5aSJohn Heffner }
8245d424d5aSJohn Heffner 
8251da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
8261da177e4SLinus Torvalds 
8271da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
8281da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
8291da177e4SLinus Torvalds 
8301da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
831caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
8321da177e4SLinus Torvalds    It also does not include TCP options.
8331da177e4SLinus Torvalds 
834d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
8351da177e4SLinus Torvalds 
8361da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
8371da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
8381da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
8391da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
8401da177e4SLinus Torvalds 
8411da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
8421da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
8431da177e4SLinus Torvalds 
844d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
845d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
8461da177e4SLinus Torvalds  */
8471da177e4SLinus Torvalds 
8481da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
8491da177e4SLinus Torvalds {
8501da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
851d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
8525d424d5aSJohn Heffner 	int mss_now;
8531da177e4SLinus Torvalds 
8545d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
8555d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
8561da177e4SLinus Torvalds 
8575d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
8581da177e4SLinus Torvalds 
8591da177e4SLinus Torvalds 	/* Bound mss with half of window */
8601da177e4SLinus Torvalds 	if (tp->max_window && mss_now > (tp->max_window>>1))
8611da177e4SLinus Torvalds 		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
8621da177e4SLinus Torvalds 
8631da177e4SLinus Torvalds 	/* And store cached results */
864d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
8655d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
8665d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
867c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
8681da177e4SLinus Torvalds 
8691da177e4SLinus Torvalds 	return mss_now;
8701da177e4SLinus Torvalds }
8711da177e4SLinus Torvalds 
8721da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
8731da177e4SLinus Torvalds  * and even PMTU discovery events into account.
8741da177e4SLinus Torvalds  *
8751da177e4SLinus Torvalds  * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
8761da177e4SLinus Torvalds  * cannot be large. However, taking into account rare use of URG, this
8771da177e4SLinus Torvalds  * is not a big flaw.
8781da177e4SLinus Torvalds  */
879c1b4a7e6SDavid S. Miller unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
8801da177e4SLinus Torvalds {
8811da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
8821da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
883c1b4a7e6SDavid S. Miller 	u32 mss_now;
884c1b4a7e6SDavid S. Miller 	u16 xmit_size_goal;
885c1b4a7e6SDavid S. Miller 	int doing_tso = 0;
8861da177e4SLinus Torvalds 
887c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
888c1b4a7e6SDavid S. Miller 
889bcd76111SHerbert Xu 	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
890c1b4a7e6SDavid S. Miller 		doing_tso = 1;
891c1b4a7e6SDavid S. Miller 
8921da177e4SLinus Torvalds 	if (dst) {
8931da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
894d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
8951da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
8961da177e4SLinus Torvalds 	}
8971da177e4SLinus Torvalds 
8981da177e4SLinus Torvalds 	if (tp->rx_opt.eff_sacks)
8991da177e4SLinus Torvalds 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
9001da177e4SLinus Torvalds 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
901c1b4a7e6SDavid S. Miller 
902cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
903cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk))
904cfb6eeb4SYOSHIFUJI Hideaki 		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
905cfb6eeb4SYOSHIFUJI Hideaki #endif
906cfb6eeb4SYOSHIFUJI Hideaki 
907c1b4a7e6SDavid S. Miller 	xmit_size_goal = mss_now;
908c1b4a7e6SDavid S. Miller 
909c1b4a7e6SDavid S. Miller 	if (doing_tso) {
9108292a17aSArnaldo Carvalho de Melo 		xmit_size_goal = (65535 -
9118292a17aSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_af_ops->net_header_len -
912d83d8461SArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_ext_hdr_len -
913d83d8461SArnaldo Carvalho de Melo 				  tp->tcp_header_len);
914c1b4a7e6SDavid S. Miller 
915c1b4a7e6SDavid S. Miller 		if (tp->max_window &&
916c1b4a7e6SDavid S. Miller 		    (xmit_size_goal > (tp->max_window >> 1)))
917c1b4a7e6SDavid S. Miller 			xmit_size_goal = max((tp->max_window >> 1),
918c1b4a7e6SDavid S. Miller 					     68U - tp->tcp_header_len);
919c1b4a7e6SDavid S. Miller 
920c1b4a7e6SDavid S. Miller 		xmit_size_goal -= (xmit_size_goal % mss_now);
921c1b4a7e6SDavid S. Miller 	}
922c1b4a7e6SDavid S. Miller 	tp->xmit_size_goal = xmit_size_goal;
923c1b4a7e6SDavid S. Miller 
9241da177e4SLinus Torvalds 	return mss_now;
9251da177e4SLinus Torvalds }
9261da177e4SLinus Torvalds 
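/*
 * Illustrative sketch of the TSO size goal computed in tcp_current_mss()
 * above: start from the 64K IP datagram limit, subtract the header overhead,
 * then round down to a whole number of MSS-sized segments.  The helper name
 * is made up for the example, and the max_window clamp above is omitted.
 */
static unsigned int tso_xmit_size_goal(unsigned int mss_now,
				       unsigned int header_overhead)
{
	unsigned int goal = 65535 - header_overhead;

	goal -= goal % mss_now;		/* whole segments only */
	return goal;
}
/* e.g. with 52 bytes of headers and a 1448 byte mss the goal is 65160 (45 segments). */
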
927a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
928a762a980SDavid S. Miller 
92940efc6faSStephen Hemminger static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
930a762a980SDavid S. Miller {
931a762a980SDavid S. Miller 	__u32 packets_out = tp->packets_out;
932a762a980SDavid S. Miller 
933a762a980SDavid S. Miller 	if (packets_out >= tp->snd_cwnd) {
934a762a980SDavid S. Miller 		/* Network is fed fully. */
935a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
936a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
937a762a980SDavid S. Miller 	} else {
938a762a980SDavid S. Miller 		/* Network starves. */
939a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
940a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
941a762a980SDavid S. Miller 
94215d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
94315d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
944a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
945a762a980SDavid S. Miller 	}
946a762a980SDavid S. Miller }
947a762a980SDavid S. Miller 
948c1b4a7e6SDavid S. Miller static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
949c1b4a7e6SDavid S. Miller {
950c1b4a7e6SDavid S. Miller 	u32 window, cwnd_len;
951c1b4a7e6SDavid S. Miller 
952c1b4a7e6SDavid S. Miller 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
953c1b4a7e6SDavid S. Miller 	cwnd_len = mss_now * cwnd;
954c1b4a7e6SDavid S. Miller 	return min(window, cwnd_len);
955c1b4a7e6SDavid S. Miller }
956c1b4a7e6SDavid S. Miller 
957c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
958c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
959c1b4a7e6SDavid S. Miller  */
960c1b4a7e6SDavid S. Miller static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
961c1b4a7e6SDavid S. Miller {
962c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
963c1b4a7e6SDavid S. Miller 
964c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
965104439a8SJohn Heffner 	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
966104439a8SJohn Heffner 	    tcp_skb_pcount(skb) == 1)
967c1b4a7e6SDavid S. Miller 		return 1;
968c1b4a7e6SDavid S. Miller 
969c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
970c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
971c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
972c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
973c1b4a7e6SDavid S. Miller 
974c1b4a7e6SDavid S. Miller 	return 0;
975c1b4a7e6SDavid S. Miller }
976c1b4a7e6SDavid S. Miller 
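/*
 * Illustrative sketch of the quota returned by tcp_cwnd_test() above: the
 * congestion window leaves room for (cwnd - in_flight) more segments, or
 * none at all once the window is full.  The helper name is made up for the
 * example.
 */
static unsigned int cwnd_quota(unsigned int in_flight, unsigned int cwnd)
{
	return in_flight < cwnd ? cwnd - in_flight : 0;
}
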
977c1b4a7e6SDavid S. Miller /* This must be invoked the first time we consider transmitting
978c1b4a7e6SDavid S. Miller  * SKB onto the wire.
979c1b4a7e6SDavid S. Miller  */
98040efc6faSStephen Hemminger static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
981c1b4a7e6SDavid S. Miller {
982c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
983c1b4a7e6SDavid S. Miller 
984846998aeSDavid S. Miller 	if (!tso_segs ||
985846998aeSDavid S. Miller 	    (tso_segs > 1 &&
9867967168cSHerbert Xu 	     tcp_skb_mss(skb) != mss_now)) {
987846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
988c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
989c1b4a7e6SDavid S. Miller 	}
990c1b4a7e6SDavid S. Miller 	return tso_segs;
991c1b4a7e6SDavid S. Miller }
992c1b4a7e6SDavid S. Miller 
993c1b4a7e6SDavid S. Miller static inline int tcp_minshall_check(const struct tcp_sock *tp)
994c1b4a7e6SDavid S. Miller {
995c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml,tp->snd_una) &&
996c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
997c1b4a7e6SDavid S. Miller }
998c1b4a7e6SDavid S. Miller 
999c1b4a7e6SDavid S. Miller /* Return 0 if the packet can be sent now without violating Nagle's rules:
1000c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1001c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
1002c1b4a7e6SDavid S. Miller  * 3. Or TCP_NODELAY was set.
1003c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1004c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1005c1b4a7e6SDavid S. Miller  */
1006c1b4a7e6SDavid S. Miller 
1007c1b4a7e6SDavid S. Miller static inline int tcp_nagle_check(const struct tcp_sock *tp,
1008c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
1009c1b4a7e6SDavid S. Miller 				  unsigned mss_now, int nonagle)
1010c1b4a7e6SDavid S. Miller {
1011c1b4a7e6SDavid S. Miller 	return (skb->len < mss_now &&
1012c1b4a7e6SDavid S. Miller 		((nonagle&TCP_NAGLE_CORK) ||
1013c1b4a7e6SDavid S. Miller 		 (!nonagle &&
1014c1b4a7e6SDavid S. Miller 		  tp->packets_out &&
1015c1b4a7e6SDavid S. Miller 		  tcp_minshall_check(tp))));
1016c1b4a7e6SDavid S. Miller }
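/* Illustrative example (hypothetical numbers, editorial note): a
 * 300-byte skb with mss_now = 1460 counts as "small".  It is held back
 * (non-zero return) if TCP_CORK is set, or if Nagle is enabled
 * (nonagle == 0), packets are still in flight and the last small
 * segment sent (snd_sml) has not been ACKed yet, i.e.
 * tcp_minshall_check() is true.  A full-sized 1460-byte skb always
 * returns 0 and may be sent.
 */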
1017c1b4a7e6SDavid S. Miller 
1018c1b4a7e6SDavid S. Miller /* Return non-zero if the Nagle test allows this packet to be
1019c1b4a7e6SDavid S. Miller  * sent now.
1020c1b4a7e6SDavid S. Miller  */
1021c1b4a7e6SDavid S. Miller static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1022c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1023c1b4a7e6SDavid S. Miller {
1024c1b4a7e6SDavid S. Miller 	/* Nagle rule does not apply to frames which sit in the middle of the
1025c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1026c1b4a7e6SDavid S. Miller 	 *
1027c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1028c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1029c1b4a7e6SDavid S. Miller 	 */
1030c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1031c1b4a7e6SDavid S. Miller 		return 1;
1032c1b4a7e6SDavid S. Miller 
1033c1b4a7e6SDavid S. Miller 	/* Don't use the Nagle rule for urgent data (or for the final FIN).  */
1034c1b4a7e6SDavid S. Miller 	if (tp->urg_mode ||
1035c1b4a7e6SDavid S. Miller 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1036c1b4a7e6SDavid S. Miller 		return 1;
1037c1b4a7e6SDavid S. Miller 
1038c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1039c1b4a7e6SDavid S. Miller 		return 1;
1040c1b4a7e6SDavid S. Miller 
1041c1b4a7e6SDavid S. Miller 	return 0;
1042c1b4a7e6SDavid S. Miller }
1043c1b4a7e6SDavid S. Miller 
1044c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1045c1b4a7e6SDavid S. Miller static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1046c1b4a7e6SDavid S. Miller {
1047c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1048c1b4a7e6SDavid S. Miller 
1049c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1050c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1051c1b4a7e6SDavid S. Miller 
1052c1b4a7e6SDavid S. Miller 	return !after(end_seq, tp->snd_una + tp->snd_wnd);
1053c1b4a7e6SDavid S. Miller }
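/* Illustrative example (hypothetical numbers, editorial note): for an
 * skb with seq = 2000 and len = 4000 at cur_mss = 1460, only the first
 * segment is checked, so end_seq = 2000 + 1460 = 3460.  With
 * snd_una = 1000 and snd_wnd = 3000 the right window edge is 4000,
 * hence 3460 fits and the test passes even though the whole skb would
 * not.
 */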
1054c1b4a7e6SDavid S. Miller 
1055fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1056c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1057c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1058c1b4a7e6SDavid S. Miller  */
1059c1b4a7e6SDavid S. Miller static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1060c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1061c1b4a7e6SDavid S. Miller {
1062c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1063c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1064c1b4a7e6SDavid S. Miller 
1065846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1066c1b4a7e6SDavid S. Miller 
1067c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1068c1b4a7e6SDavid S. Miller 		return 0;
1069c1b4a7e6SDavid S. Miller 
1070c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1071c1b4a7e6SDavid S. Miller 	if (cwnd_quota &&
1072c1b4a7e6SDavid S. Miller 	    !tcp_snd_wnd_test(tp, skb, cur_mss))
1073c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1074c1b4a7e6SDavid S. Miller 
1075c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1076c1b4a7e6SDavid S. Miller }
1077c1b4a7e6SDavid S. Miller 
1078c1b4a7e6SDavid S. Miller int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
1079c1b4a7e6SDavid S. Miller {
1080fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1081c1b4a7e6SDavid S. Miller 
1082c1b4a7e6SDavid S. Miller 	return (skb &&
1083c1b4a7e6SDavid S. Miller 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1084c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1085c1b4a7e6SDavid S. Miller 			      TCP_NAGLE_PUSH :
1086c1b4a7e6SDavid S. Miller 			      tp->nonagle)));
1087c1b4a7e6SDavid S. Miller }
1088c1b4a7e6SDavid S. Miller 
1089c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1090c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1091c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1092c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1093c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1094c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1095c1b4a7e6SDavid S. Miller  */
1096846998aeSDavid S. Miller static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1097c1b4a7e6SDavid S. Miller {
1098c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1099c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
1100c1b4a7e6SDavid S. Miller 	u16 flags;
1101c1b4a7e6SDavid S. Miller 
1102c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1103c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1104c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1105c1b4a7e6SDavid S. Miller 
1106c1b4a7e6SDavid S. Miller 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1107c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1108c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1109c1b4a7e6SDavid S. Miller 
1110b60b49eaSHerbert Xu 	sk_charge_skb(sk, buff);
1111b60b49eaSHerbert Xu 	buff->truesize += nlen;
1112c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1113c1b4a7e6SDavid S. Miller 
1114c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1115c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1116c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1117c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1118c1b4a7e6SDavid S. Miller 
1119c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
1120c1b4a7e6SDavid S. Miller 	flags = TCP_SKB_CB(skb)->flags;
1121c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1122c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->flags = flags;
1123c1b4a7e6SDavid S. Miller 
1124c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1125c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1126c1b4a7e6SDavid S. Miller 
112784fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1128c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1129c1b4a7e6SDavid S. Miller 
1130c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1131846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1132846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1133c1b4a7e6SDavid S. Miller 
1134c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1135c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1136fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1137c1b4a7e6SDavid S. Miller 
1138c1b4a7e6SDavid S. Miller 	return 0;
1139c1b4a7e6SDavid S. Miller }
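/* Illustrative example (hypothetical numbers, editorial note):
 * splitting a 5840-byte TSO skb covering seq 1000..6840 at len = 2920
 * leaves the original skb as 1000..3920 and the new buff as
 * 3920..6840.  PSH/FIN are moved to buff, buff starts with no SACK
 * state, and both skbs get their tso_factor recomputed for
 * mss_now = 1460 (two segments each).
 */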
1140c1b4a7e6SDavid S. Miller 
1141c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1142c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1143c1b4a7e6SDavid S. Miller  *
1144c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1145c1b4a7e6SDavid S. Miller  */
1146c1b4a7e6SDavid S. Miller static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1147c1b4a7e6SDavid S. Miller {
11486687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1149c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1150c1b4a7e6SDavid S. Miller 
1151c1b4a7e6SDavid S. Miller 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1152ae8064acSJohn Heffner 		goto send_now;
1153c1b4a7e6SDavid S. Miller 
11546687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1155ae8064acSJohn Heffner 		goto send_now;
1156ae8064acSJohn Heffner 
1157ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1158ae8064acSJohn Heffner 	if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1159ae8064acSJohn Heffner 		goto send_now;
1160908a75c1SDavid S. Miller 
1161c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1162c1b4a7e6SDavid S. Miller 
1163c1b4a7e6SDavid S. Miller 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1164c1b4a7e6SDavid S. Miller 	       (tp->snd_cwnd <= in_flight));
1165c1b4a7e6SDavid S. Miller 
1166c1b4a7e6SDavid S. Miller 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1167c1b4a7e6SDavid S. Miller 
1168c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1169c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1170c1b4a7e6SDavid S. Miller 
1171c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1172c1b4a7e6SDavid S. Miller 
1173ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1174ba244fe9SDavid S. Miller 	if (limit >= 65536)
1175ae8064acSJohn Heffner 		goto send_now;
1176ba244fe9SDavid S. Miller 
1177c1b4a7e6SDavid S. Miller 	if (sysctl_tcp_tso_win_divisor) {
1178c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1179c1b4a7e6SDavid S. Miller 
1180c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1181c1b4a7e6SDavid S. Miller 		 * just use it.
1182c1b4a7e6SDavid S. Miller 		 */
1183c1b4a7e6SDavid S. Miller 		chunk /= sysctl_tcp_tso_win_divisor;
1184c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1185ae8064acSJohn Heffner 			goto send_now;
1186c1b4a7e6SDavid S. Miller 	} else {
1187c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1188c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1189c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1190c1b4a7e6SDavid S. Miller 		 * then send now.
1191c1b4a7e6SDavid S. Miller 		 */
1192c1b4a7e6SDavid S. Miller 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1193ae8064acSJohn Heffner 			goto send_now;
1194c1b4a7e6SDavid S. Miller 	}
1195c1b4a7e6SDavid S. Miller 
1196c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1197ae8064acSJohn Heffner 	tp->tso_deferred = 1 | (jiffies<<1);
1198ae8064acSJohn Heffner 
1199c1b4a7e6SDavid S. Miller 	return 1;
1200ae8064acSJohn Heffner 
1201ae8064acSJohn Heffner send_now:
1202ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1203ae8064acSJohn Heffner 	return 0;
1204c1b4a7e6SDavid S. Miller }
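/* Worked example (illustrative numbers, editorial note): with
 * mss_cache = 1460, snd_cwnd = 20, in_flight = 12 and snd_wnd = 64240,
 * cong_win = (20 - 12) * 1460 = 11680 and, assuming send_win is
 * larger, limit = 11680.  With the default sysctl_tcp_tso_win_divisor
 * of 3, chunk = min(64240, 20 * 1460) / 3 = 29200 / 3 = 9733, so
 * 11680 >= 9733 means "send now"; a smaller limit would defer in the
 * hope of building a larger TSO frame on a later ACK.
 */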
1205c1b4a7e6SDavid S. Miller 
12065d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
12075d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
12085d424d5aSJohn Heffner  *         1 if a probe was sent,
12095d424d5aSJohn Heffner  *         -1 otherwise */
12105d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
12115d424d5aSJohn Heffner {
12125d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
12135d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
12145d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
12155d424d5aSJohn Heffner 	int len;
12165d424d5aSJohn Heffner 	int probe_size;
12175d424d5aSJohn Heffner 	unsigned int pif;
12185d424d5aSJohn Heffner 	int copy;
12195d424d5aSJohn Heffner 	int mss_now;
12205d424d5aSJohn Heffner 
12215d424d5aSJohn Heffner 	/* Not currently probing/verifying,
12225d424d5aSJohn Heffner 	 * not in recovery,
12235d424d5aSJohn Heffner 	 * have enough cwnd, and
12245d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
12255d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
12265d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
12275d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
12285d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
12295d424d5aSJohn Heffner 	    tp->rx_opt.eff_sacks)
12305d424d5aSJohn Heffner 		return -1;
12315d424d5aSJohn Heffner 
12325d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
12335d424d5aSJohn Heffner 	mss_now = tcp_current_mss(sk, 0);
12345d424d5aSJohn Heffner 	probe_size = 2*tp->mss_cache;
12355d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
12365d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
12375d424d5aSJohn Heffner 		return -1;
12385d424d5aSJohn Heffner 	}
12395d424d5aSJohn Heffner 
12405d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
12415d424d5aSJohn Heffner 	len = 0;
1242fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) == NULL)
12435d424d5aSJohn Heffner 		return -1;
12445d424d5aSJohn Heffner 	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1245fe067e8aSDavid S. Miller 		skb = tcp_write_queue_next(sk, skb);
12465d424d5aSJohn Heffner 	if (len < probe_size)
12475d424d5aSJohn Heffner 		return -1;
12485d424d5aSJohn Heffner 
12495d424d5aSJohn Heffner 	/* Receive window check. */
12505d424d5aSJohn Heffner 	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
12515d424d5aSJohn Heffner 		if (tp->snd_wnd < probe_size)
12525d424d5aSJohn Heffner 			return -1;
12535d424d5aSJohn Heffner 		else
12545d424d5aSJohn Heffner 			return 0;
12555d424d5aSJohn Heffner 	}
12565d424d5aSJohn Heffner 
12575d424d5aSJohn Heffner 	/* Do we need to wait to drain cwnd? */
12585d424d5aSJohn Heffner 	pif = tcp_packets_in_flight(tp);
12595d424d5aSJohn Heffner 	if (pif + 2 > tp->snd_cwnd) {
12605d424d5aSJohn Heffner 		/* With no packets in flight, don't stall. */
12615d424d5aSJohn Heffner 		if (pif == 0)
12625d424d5aSJohn Heffner 			return -1;
12635d424d5aSJohn Heffner 		else
12645d424d5aSJohn Heffner 			return 0;
12655d424d5aSJohn Heffner 	}
12665d424d5aSJohn Heffner 
12675d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
12685d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
12695d424d5aSJohn Heffner 		return -1;
12705d424d5aSJohn Heffner 	sk_charge_skb(sk, nskb);
12715d424d5aSJohn Heffner 
1272fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
1273fe067e8aSDavid S. Miller 	tcp_insert_write_queue_before(nskb, skb, sk);
1274fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
12755d424d5aSJohn Heffner 
12765d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
12775d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
12785d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
12795d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
12805d424d5aSJohn Heffner 	nskb->csum = 0;
128184fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
12825d424d5aSJohn Heffner 
12835d424d5aSJohn Heffner 	len = 0;
12845d424d5aSJohn Heffner 	while (len < probe_size) {
1285fe067e8aSDavid S. Miller 		next = tcp_write_queue_next(sk, skb);
12865d424d5aSJohn Heffner 
12875d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
12885d424d5aSJohn Heffner 		if (nskb->ip_summed)
12895d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
12905d424d5aSJohn Heffner 		else
12915d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
12925d424d5aSJohn Heffner 					 skb_put(nskb, copy), copy, nskb->csum);
12935d424d5aSJohn Heffner 
12945d424d5aSJohn Heffner 		if (skb->len <= copy) {
12955d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
12965d424d5aSJohn Heffner 			 * Throw it away. */
12975d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1298fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
12995d424d5aSJohn Heffner 			sk_stream_free_skb(sk, skb);
13005d424d5aSJohn Heffner 		} else {
13015d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
13025d424d5aSJohn Heffner 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
13035d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
13045d424d5aSJohn Heffner 				skb_pull(skb, copy);
130584fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
13065d424d5aSJohn Heffner 					skb->csum = csum_partial(skb->data, skb->len, 0);
13075d424d5aSJohn Heffner 			} else {
13085d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
13095d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
13105d424d5aSJohn Heffner 			}
13115d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
13125d424d5aSJohn Heffner 		}
13135d424d5aSJohn Heffner 
13145d424d5aSJohn Heffner 		len += copy;
13155d424d5aSJohn Heffner 		skb = next;
13165d424d5aSJohn Heffner 	}
13175d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
13185d424d5aSJohn Heffner 
13195d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
13205d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
13215d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
13225d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
13235d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
13245d424d5aSJohn Heffner 		* effectively two packets. */
13255d424d5aSJohn Heffner 		tp->snd_cwnd--;
13265d424d5aSJohn Heffner 		update_send_head(sk, tp, nskb);
13275d424d5aSJohn Heffner 
13285d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
13290e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
13300e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
13315d424d5aSJohn Heffner 
13325d424d5aSJohn Heffner 		return 1;
13335d424d5aSJohn Heffner 	}
13345d424d5aSJohn Heffner 
13355d424d5aSJohn Heffner 	return -1;
13365d424d5aSJohn Heffner }
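/* Worked example (illustrative numbers, editorial note): with
 * mss_cache = 1460 the probe is built for
 * probe_size = 2 * 1460 = 2920 bytes.  It is only attempted when
 * snd_cwnd >= 11, at least 2920 bytes sit in the send queue, the
 * receive window covers the probe and cwnd leaves room for it; the
 * following skbs are then copied (and consumed) into one large nskb
 * which is transmitted as a single segment to test the larger path MTU.
 */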
13375d424d5aSJohn Heffner 
13385d424d5aSJohn Heffner 
13391da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
13401da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
13411da177e4SLinus Torvalds  * window for us.
13421da177e4SLinus Torvalds  *
13431da177e4SLinus Torvalds  * Returns 1, if no segments are in flight and we have queued segments, but
13441da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
13451da177e4SLinus Torvalds  */
1346a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
13471da177e4SLinus Torvalds {
13481da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
134992df7b51SDavid S. Miller 	struct sk_buff *skb;
1350c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1351c1b4a7e6SDavid S. Miller 	int cwnd_quota;
13525d424d5aSJohn Heffner 	int result;
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds 	/* If we are closed, the bytes will have to remain here.
13551da177e4SLinus Torvalds 	 * In time closedown will finish, we empty the write queue and all
13561da177e4SLinus Torvalds 	 * will be happy.
13571da177e4SLinus Torvalds 	 */
135892df7b51SDavid S. Miller 	if (unlikely(sk->sk_state == TCP_CLOSE))
135992df7b51SDavid S. Miller 		return 0;
136092df7b51SDavid S. Miller 
1361c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
13625d424d5aSJohn Heffner 
13635d424d5aSJohn Heffner 	/* Do MTU probing. */
13645d424d5aSJohn Heffner 	if ((result = tcp_mtu_probe(sk)) == 0) {
13655d424d5aSJohn Heffner 		return 0;
13665d424d5aSJohn Heffner 	} else if (result > 0) {
13675d424d5aSJohn Heffner 		sent_pkts = 1;
13685d424d5aSJohn Heffner 	}
13695d424d5aSJohn Heffner 
1370fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1371c8ac3774SHerbert Xu 		unsigned int limit;
1372c8ac3774SHerbert Xu 
1373b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1374c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1375c1b4a7e6SDavid S. Miller 
1376b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1377b68e9f85SHerbert Xu 		if (!cwnd_quota)
1378b68e9f85SHerbert Xu 			break;
1379b68e9f85SHerbert Xu 
1380b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1381b68e9f85SHerbert Xu 			break;
1382b68e9f85SHerbert Xu 
1383c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1384aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1385aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1386aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1387aa93466bSDavid S. Miller 				break;
1388c1b4a7e6SDavid S. Miller 		} else {
1389c1b4a7e6SDavid S. Miller 			if (tcp_tso_should_defer(sk, tp, skb))
1390aa93466bSDavid S. Miller 				break;
1391c1b4a7e6SDavid S. Miller 		}
1392aa93466bSDavid S. Miller 
1393c8ac3774SHerbert Xu 		limit = mss_now;
1394c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1395c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1396c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1397c1b4a7e6SDavid S. Miller 
1398c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1399c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1400c1b4a7e6SDavid S. Miller 
1401c1b4a7e6SDavid S. Miller 				if (trim)
1402c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1403c1b4a7e6SDavid S. Miller 			}
1404c1b4a7e6SDavid S. Miller 		}
1405c8ac3774SHerbert Xu 
1406c8ac3774SHerbert Xu 		if (skb->len > limit &&
1407c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
14081da177e4SLinus Torvalds 			break;
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1411c1b4a7e6SDavid S. Miller 
1412dfb4b9dcSDavid S. Miller 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
14131da177e4SLinus Torvalds 			break;
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
14161da177e4SLinus Torvalds 		 * This call will increment packets_out.
14171da177e4SLinus Torvalds 		 */
14181da177e4SLinus Torvalds 		update_send_head(sk, tp, skb);
14191da177e4SLinus Torvalds 
14201da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1421aa93466bSDavid S. Miller 		sent_pkts++;
14221da177e4SLinus Torvalds 	}
14231da177e4SLinus Torvalds 
1424aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
14251da177e4SLinus Torvalds 		tcp_cwnd_validate(sk, tp);
14261da177e4SLinus Torvalds 		return 0;
14271da177e4SLinus Torvalds 	}
1428fe067e8aSDavid S. Miller 	return !tp->packets_out && tcp_send_head(sk);
14291da177e4SLinus Torvalds }
14301da177e4SLinus Torvalds 
1431a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1432a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
1433a762a980SDavid S. Miller  * The socket must be locked by the caller.
1434a762a980SDavid S. Miller  */
1435a762a980SDavid S. Miller void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1436a2e2a59cSDavid S. Miller 			       unsigned int cur_mss, int nonagle)
1437a762a980SDavid S. Miller {
1438fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1439a762a980SDavid S. Miller 
1440a762a980SDavid S. Miller 	if (skb) {
144155c97f3eSDavid S. Miller 		if (tcp_write_xmit(sk, cur_mss, nonagle))
1442a762a980SDavid S. Miller 			tcp_check_probe_timer(sk, tp);
1443a762a980SDavid S. Miller 	}
1444a762a980SDavid S. Miller }
1445a762a980SDavid S. Miller 
1446c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
1447c1b4a7e6SDavid S. Miller  * a true push-pending-frames call to set up the probe timer etc.
1448c1b4a7e6SDavid S. Miller  */
1449c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1450c1b4a7e6SDavid S. Miller {
1451c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1452fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1453c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, cwnd_quota;
1454c1b4a7e6SDavid S. Miller 
1455c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1456c1b4a7e6SDavid S. Miller 
1457846998aeSDavid S. Miller 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1458c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1459c1b4a7e6SDavid S. Miller 
1460c1b4a7e6SDavid S. Miller 	if (likely(cwnd_quota)) {
1461c8ac3774SHerbert Xu 		unsigned int limit;
1462c8ac3774SHerbert Xu 
1463c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1464c1b4a7e6SDavid S. Miller 
1465c8ac3774SHerbert Xu 		limit = mss_now;
1466c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1467c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1468c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1469c1b4a7e6SDavid S. Miller 
1470c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1471c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1472c1b4a7e6SDavid S. Miller 
1473c1b4a7e6SDavid S. Miller 				if (trim)
1474c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1475c1b4a7e6SDavid S. Miller 			}
1476c1b4a7e6SDavid S. Miller 		}
1477c8ac3774SHerbert Xu 
1478c8ac3774SHerbert Xu 		if (skb->len > limit &&
1479c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1480c1b4a7e6SDavid S. Miller 			return;
1481c1b4a7e6SDavid S. Miller 
1482c1b4a7e6SDavid S. Miller 		/* Send it out now. */
1483c1b4a7e6SDavid S. Miller 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1484c1b4a7e6SDavid S. Miller 
1485dfb4b9dcSDavid S. Miller 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1486c1b4a7e6SDavid S. Miller 			update_send_head(sk, tp, skb);
1487c1b4a7e6SDavid S. Miller 			tcp_cwnd_validate(sk, tp);
1488c1b4a7e6SDavid S. Miller 			return;
1489c1b4a7e6SDavid S. Miller 		}
1490c1b4a7e6SDavid S. Miller 	}
1491c1b4a7e6SDavid S. Miller }
1492c1b4a7e6SDavid S. Miller 
14931da177e4SLinus Torvalds /* This function returns the amount that we can raise the
14941da177e4SLinus Torvalds  * usable window based on the following constraints
14951da177e4SLinus Torvalds  *
14961da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
14971da177e4SLinus Torvalds  * 2. We limit memory per socket
14981da177e4SLinus Torvalds  *
14991da177e4SLinus Torvalds  * RFC 1122:
15001da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
15011da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
15021da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
15031da177e4SLinus Torvalds  *
15041da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
15051da177e4SLinus Torvalds  * it at least MSS bytes.
15061da177e4SLinus Torvalds  *
15071da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
15081da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
15091da177e4SLinus Torvalds  *
15101da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
15111da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
15121da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
15131da177e4SLinus Torvalds  * window to always advance by a single byte.
15141da177e4SLinus Torvalds  *
15151da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
15161da177e4SLinus Torvalds  * then this will not be a problem.
15171da177e4SLinus Torvalds  *
15181da177e4SLinus Torvalds  * BSD seems to make the following compromise:
15191da177e4SLinus Torvalds  *
15201da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
15211da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
15221da177e4SLinus Torvalds  *	then set the window to 0.
15231da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
15241da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
15251da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
15261da177e4SLinus Torvalds  *
15271da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
15281da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
15291da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
15301da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
15311da177e4SLinus Torvalds  * because the pipeline is full.
15321da177e4SLinus Torvalds  *
15331da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
15341da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
15351da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
15361da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
15371da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
15381da177e4SLinus Torvalds  *
15391da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
15401da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
15411da177e4SLinus Torvalds  *
15421da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
15431da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
15441da177e4SLinus Torvalds  */
15451da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
15461da177e4SLinus Torvalds {
1547463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15481da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1549caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
15501da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
15511da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
15521da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
15531da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
15541da177e4SLinus Torvalds 	 */
1555463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
15561da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
15571da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
15581da177e4SLinus Torvalds 	int window;
15591da177e4SLinus Torvalds 
15601da177e4SLinus Torvalds 	if (mss > full_space)
15611da177e4SLinus Torvalds 		mss = full_space;
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	if (free_space < full_space/2) {
1564463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 		if (tcp_memory_pressure)
15671da177e4SLinus Torvalds 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 		if (free_space < mss)
15701da177e4SLinus Torvalds 			return 0;
15711da177e4SLinus Torvalds 	}
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
15741da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
15771da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
15781da177e4SLinus Torvalds 	 */
15791da177e4SLinus Torvalds 	window = tp->rcv_wnd;
15801da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
15811da177e4SLinus Torvalds 		window = free_space;
15821da177e4SLinus Torvalds 
15831da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
15841da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
15851da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
15861da177e4SLinus Torvalds 		 */
15871da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
15881da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
15891da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
15901da177e4SLinus Torvalds 	} else {
15911da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
15921da177e4SLinus Torvalds 		 * Window clamp already applied above.
15931da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
15941da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
15951da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
15961da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
15971da177e4SLinus Torvalds 		 * is too small.
15981da177e4SLinus Torvalds 		 */
15991da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
16001da177e4SLinus Torvalds 			window = (free_space/mss)*mss;
160184565070SJohn Heffner 		else if (mss == full_space &&
160284565070SJohn Heffner 		         free_space > window + full_space/2)
160384565070SJohn Heffner 			window = free_space;
16041da177e4SLinus Torvalds 	}
16051da177e4SLinus Torvalds 
16061da177e4SLinus Torvalds 	return window;
16071da177e4SLinus Torvalds }
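/* Worked example (illustrative numbers, editorial note, no window
 * scaling): with icsk_ack.rcv_mss = 1460, free_space = 17000 and a
 * previously advertised rcv_wnd of 10220, the "nice multiple of mss"
 * branch computes window = (17000 / 1460) * 1460 = 16060, i.e. the
 * offer is rounded down to 11 full segments.  If the current window
 * were already within one MSS below free_space, it would simply be
 * kept to avoid the divide and multiply.
 */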
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */
16101da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
16111da177e4SLinus Torvalds {
16121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1613fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
16141da177e4SLinus Torvalds 
16151da177e4SLinus Torvalds 	/* The first test we must make is that neither of these two
16161da177e4SLinus Torvalds 	 * SKB's are still referenced by someone else.
16171da177e4SLinus Torvalds 	 */
16181da177e4SLinus Torvalds 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
16191da177e4SLinus Torvalds 		int skb_size = skb->len, next_skb_size = next_skb->len;
16201da177e4SLinus Torvalds 		u16 flags = TCP_SKB_CB(skb)->flags;
16211da177e4SLinus Torvalds 
16221da177e4SLinus Torvalds 		/* Also punt if next skb has been SACK'd. */
16231da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
16241da177e4SLinus Torvalds 			return;
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 		/* Next skb is out of window. */
16271da177e4SLinus Torvalds 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
16281da177e4SLinus Torvalds 			return;
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 		/* Punt if not enough space exists in the first SKB for
16311da177e4SLinus Torvalds 		 * the data in the second, or the total combined payload
16321da177e4SLinus Torvalds 		 * would exceed the MSS.
16331da177e4SLinus Torvalds 		 */
16341da177e4SLinus Torvalds 		if ((next_skb_size > skb_tailroom(skb)) ||
16351da177e4SLinus Torvalds 		    ((skb_size + next_skb_size) > mss_now))
16361da177e4SLinus Torvalds 			return;
16371da177e4SLinus Torvalds 
16381da177e4SLinus Torvalds 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
16391da177e4SLinus Torvalds 		       tcp_skb_pcount(next_skb) != 1);
16401da177e4SLinus Torvalds 
16416a438bbeSStephen Hemminger 		/* changing transmit queue under us so clear hints */
16426a438bbeSStephen Hemminger 		clear_all_retrans_hints(tp);
16436a438bbeSStephen Hemminger 
16441da177e4SLinus Torvalds 		/* Ok.	We will be able to collapse the packet. */
1645fe067e8aSDavid S. Miller 		tcp_unlink_write_queue(next_skb, sk);
16461da177e4SLinus Torvalds 
16471da177e4SLinus Torvalds 		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
16481da177e4SLinus Torvalds 
164952d570aaSJarek Poplawski 		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
165052d570aaSJarek Poplawski 			skb->ip_summed = CHECKSUM_PARTIAL;
16511da177e4SLinus Torvalds 
165284fa7933SPatrick McHardy 		if (skb->ip_summed != CHECKSUM_PARTIAL)
16531da177e4SLinus Torvalds 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
16541da177e4SLinus Torvalds 
16551da177e4SLinus Torvalds 		/* Update sequence range on original skb. */
16561da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
16571da177e4SLinus Torvalds 
16581da177e4SLinus Torvalds 		/* Merge over control information. */
16591da177e4SLinus Torvalds 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
16601da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = flags;
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 		/* All done, get rid of second SKB and account for it so
16631da177e4SLinus Torvalds 		 * packet counting does not break.
16641da177e4SLinus Torvalds 		 */
16651da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
16661da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
16671da177e4SLinus Torvalds 			tp->retrans_out -= tcp_skb_pcount(next_skb);
16681da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
16691da177e4SLinus Torvalds 			tp->lost_out -= tcp_skb_pcount(next_skb);
16701da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
16711da177e4SLinus Torvalds 		}
16721da177e4SLinus Torvalds 		/* Reno case is special. Sigh... */
16731da177e4SLinus Torvalds 		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
16741da177e4SLinus Torvalds 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
16751da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
16761da177e4SLinus Torvalds 		}
16771da177e4SLinus Torvalds 
16781da177e4SLinus Torvalds 		/* Not quite right: it can be > snd.fack, but
16791da177e4SLinus Torvalds 		 * it is better to underestimate fackets.
16801da177e4SLinus Torvalds 		 */
16811da177e4SLinus Torvalds 		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
16821da177e4SLinus Torvalds 		tcp_packets_out_dec(tp, next_skb);
16831da177e4SLinus Torvalds 		sk_stream_free_skb(sk, next_skb);
16841da177e4SLinus Torvalds 	}
16851da177e4SLinus Torvalds }
16861da177e4SLinus Torvalds 
16871da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in
16881da177e4SLinus Torvalds  * tcp_timer. This is used for path mtu discovery.
16891da177e4SLinus Torvalds  * The socket is already locked here.
16901da177e4SLinus Torvalds  */
16911da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk)
16921da177e4SLinus Torvalds {
16936687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
16941da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16951da177e4SLinus Torvalds 	struct sk_buff *skb;
16961da177e4SLinus Torvalds 	unsigned int mss = tcp_current_mss(sk, 0);
16971da177e4SLinus Torvalds 	int lost = 0;
16981da177e4SLinus Torvalds 
1699fe067e8aSDavid S. Miller 	tcp_for_write_queue(skb, sk) {
1700fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
1701fe067e8aSDavid S. Miller 			break;
17021da177e4SLinus Torvalds 		if (skb->len > mss &&
17031da177e4SLinus Torvalds 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
17041da177e4SLinus Torvalds 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
17051da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
17061da177e4SLinus Torvalds 				tp->retrans_out -= tcp_skb_pcount(skb);
17071da177e4SLinus Torvalds 			}
17081da177e4SLinus Torvalds 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
17091da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
17101da177e4SLinus Torvalds 				tp->lost_out += tcp_skb_pcount(skb);
17111da177e4SLinus Torvalds 				lost = 1;
17121da177e4SLinus Torvalds 			}
17131da177e4SLinus Torvalds 		}
17141da177e4SLinus Torvalds 	}
17151da177e4SLinus Torvalds 
17166a438bbeSStephen Hemminger 	clear_all_retrans_hints(tp);
17176a438bbeSStephen Hemminger 
17181da177e4SLinus Torvalds 	if (!lost)
17191da177e4SLinus Torvalds 		return;
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds 	tcp_sync_left_out(tp);
17221da177e4SLinus Torvalds 
17231da177e4SLinus Torvalds 	/* Don't muck with the congestion window here.
17241da177e4SLinus Torvalds 	 * Reason is that we do not increase amount of _data_
17251da177e4SLinus Torvalds 	 * in network, but units changed and effective
17261da177e4SLinus Torvalds 	 * cwnd/ssthresh really reduced now.
17271da177e4SLinus Torvalds 	 */
17286687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
17291da177e4SLinus Torvalds 		tp->high_seq = tp->snd_nxt;
17306687e988SArnaldo Carvalho de Melo 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
17311da177e4SLinus Torvalds 		tp->prior_ssthresh = 0;
17321da177e4SLinus Torvalds 		tp->undo_marker = 0;
17336687e988SArnaldo Carvalho de Melo 		tcp_set_ca_state(sk, TCP_CA_Loss);
17341da177e4SLinus Torvalds 	}
17351da177e4SLinus Torvalds 	tcp_xmit_retransmit_queue(sk);
17361da177e4SLinus Torvalds }
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
17391da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
17401da177e4SLinus Torvalds  * error occurred which prevented the send.
17411da177e4SLinus Torvalds  */
17421da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
17431da177e4SLinus Torvalds {
17441da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17455d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
17461da177e4SLinus Torvalds 	unsigned int cur_mss = tcp_current_mss(sk, 0);
17471da177e4SLinus Torvalds 	int err;
17481da177e4SLinus Torvalds 
17495d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
17505d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
17515d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
17525d424d5aSJohn Heffner 	}
17535d424d5aSJohn Heffner 
17541da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
1755caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
17561da177e4SLinus Torvalds 	 */
17571da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
17581da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
17591da177e4SLinus Torvalds 		return -EAGAIN;
17601da177e4SLinus Torvalds 
17611da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
17621da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
17631da177e4SLinus Torvalds 			BUG();
17641da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
17651da177e4SLinus Torvalds 			return -ENOMEM;
17661da177e4SLinus Torvalds 	}
17671da177e4SLinus Torvalds 
17681da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
17691da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
17701da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
17711da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
17721da177e4SLinus Torvalds 	 */
17731da177e4SLinus Torvalds 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
17741da177e4SLinus Torvalds 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
17751da177e4SLinus Torvalds 		return -EAGAIN;
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
1778846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
17791da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
17801da177e4SLinus Torvalds 	}
17811da177e4SLinus Torvalds 
17821da177e4SLinus Torvalds 	/* Collapse two adjacent packets if worthwhile and we can. */
17831da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
17841da177e4SLinus Torvalds 	    (skb->len < (cur_mss >> 1)) &&
1785fe067e8aSDavid S. Miller 	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1786fe067e8aSDavid S. Miller 	    (!tcp_skb_is_last(sk, skb)) &&
1787fe067e8aSDavid S. Miller 	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1788fe067e8aSDavid S. Miller 	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
17891da177e4SLinus Torvalds 	    (sysctl_tcp_retrans_collapse != 0))
17901da177e4SLinus Torvalds 		tcp_retrans_try_collapse(sk, skb, cur_mss);
17911da177e4SLinus Torvalds 
17928292a17aSArnaldo Carvalho de Melo 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
17931da177e4SLinus Torvalds 		return -EHOSTUNREACH; /* Routing failure or similar. */
17941da177e4SLinus Torvalds 
17951da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
17961da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
17971da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
17981da177e4SLinus Torvalds 	 */
17991da177e4SLinus Torvalds 	if (skb->len > 0 &&
18001da177e4SLinus Torvalds 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
18011da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
18021da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
18031da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
18047967168cSHerbert Xu 			skb_shinfo(skb)->gso_segs = 1;
18057967168cSHerbert Xu 			skb_shinfo(skb)->gso_size = 0;
18067967168cSHerbert Xu 			skb_shinfo(skb)->gso_type = 0;
18071da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
18081da177e4SLinus Torvalds 			skb->csum = 0;
18091da177e4SLinus Torvalds 		}
18101da177e4SLinus Torvalds 	}
18111da177e4SLinus Torvalds 
18121da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
18131da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
18141da177e4SLinus Torvalds 	 */
18151da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
18161da177e4SLinus Torvalds 
1817dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
18181da177e4SLinus Torvalds 
18191da177e4SLinus Torvalds 	if (err == 0) {
18201da177e4SLinus Torvalds 		/* Update global TCP statistics. */
18211da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
18221da177e4SLinus Torvalds 
18231da177e4SLinus Torvalds 		tp->total_retrans++;
18241da177e4SLinus Torvalds 
18251da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
18261da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
18271da177e4SLinus Torvalds 			if (net_ratelimit())
18281da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
18291da177e4SLinus Torvalds 		}
18301da177e4SLinus Torvalds #endif
18311da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
18321da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
18331da177e4SLinus Torvalds 
18341da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
18351da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
18361da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
18371da177e4SLinus Torvalds 
18381da177e4SLinus Torvalds 		tp->undo_retrans++;
18391da177e4SLinus Torvalds 
18401da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
18411da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
18421da177e4SLinus Torvalds 		 */
18431da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
18441da177e4SLinus Torvalds 	}
18451da177e4SLinus Torvalds 	return err;
18461da177e4SLinus Torvalds }
18471da177e4SLinus Torvalds 
18481da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
18491da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
18501da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
18511da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
18521da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
18531da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
18541da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
18551da177e4SLinus Torvalds  */
18561da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
18571da177e4SLinus Torvalds {
18586687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
18591da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18601da177e4SLinus Torvalds 	struct sk_buff *skb;
18616a438bbeSStephen Hemminger 	int packet_cnt;
18626a438bbeSStephen Hemminger 
18636a438bbeSStephen Hemminger 	if (tp->retransmit_skb_hint) {
18646a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
18656a438bbeSStephen Hemminger 		packet_cnt = tp->retransmit_cnt_hint;
18666a438bbeSStephen Hemminger 	} else {
1867fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
18686a438bbeSStephen Hemminger 		packet_cnt = 0;
18696a438bbeSStephen Hemminger 	}
18701da177e4SLinus Torvalds 
18711da177e4SLinus Torvalds 	/* First pass: retransmit lost packets. */
18726a438bbeSStephen Hemminger 	if (tp->lost_out) {
1873fe067e8aSDavid S. Miller 		tcp_for_write_queue_from(skb, sk) {
18741da177e4SLinus Torvalds 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
18751da177e4SLinus Torvalds 
1876fe067e8aSDavid S. Miller 			if (skb == tcp_send_head(sk))
1877fe067e8aSDavid S. Miller 				break;
18786a438bbeSStephen Hemminger 			/* we could do better than to assign each time */
18796a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
18806a438bbeSStephen Hemminger 			tp->retransmit_cnt_hint = packet_cnt;
18816a438bbeSStephen Hemminger 
18821da177e4SLinus Torvalds 			/* Assume this retransmit will generate
18831da177e4SLinus Torvalds 			 * only one packet for congestion window
18841da177e4SLinus Torvalds 			 * calculation purposes.  This works because
18851da177e4SLinus Torvalds 			 * tcp_retransmit_skb() will chop up the
18861da177e4SLinus Torvalds 			 * packet to be MSS sized and all the
18871da177e4SLinus Torvalds 			 * packet counting works out.
18881da177e4SLinus Torvalds 			 */
18891da177e4SLinus Torvalds 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
18901da177e4SLinus Torvalds 				return;
18911da177e4SLinus Torvalds 
18921da177e4SLinus Torvalds 			if (sacked & TCPCB_LOST) {
18931da177e4SLinus Torvalds 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
18946a438bbeSStephen Hemminger 					if (tcp_retransmit_skb(sk, skb)) {
18956a438bbeSStephen Hemminger 						tp->retransmit_skb_hint = NULL;
18961da177e4SLinus Torvalds 						return;
18976a438bbeSStephen Hemminger 					}
18986687e988SArnaldo Carvalho de Melo 					if (icsk->icsk_ca_state != TCP_CA_Loss)
18991da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
19001da177e4SLinus Torvalds 					else
19011da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
19021da177e4SLinus Torvalds 
1903fe067e8aSDavid S. Miller 					if (skb == tcp_write_queue_head(sk))
1904463c84b9SArnaldo Carvalho de Melo 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
19053f421baaSArnaldo Carvalho de Melo 									  inet_csk(sk)->icsk_rto,
19063f421baaSArnaldo Carvalho de Melo 									  TCP_RTO_MAX);
19071da177e4SLinus Torvalds 				}
19081da177e4SLinus Torvalds 
19096a438bbeSStephen Hemminger 				packet_cnt += tcp_skb_pcount(skb);
19106a438bbeSStephen Hemminger 				if (packet_cnt >= tp->lost_out)
19111da177e4SLinus Torvalds 					break;
19121da177e4SLinus Torvalds 			}
19131da177e4SLinus Torvalds 		}
19141da177e4SLinus Torvalds 	}
19151da177e4SLinus Torvalds 
19161da177e4SLinus Torvalds 	/* OK, demanded retransmission is finished. */
19171da177e4SLinus Torvalds 
19181da177e4SLinus Torvalds 	/* Forward retransmissions are possible only during Recovery. */
19196687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
19201da177e4SLinus Torvalds 		return;
19211da177e4SLinus Torvalds 
19221da177e4SLinus Torvalds 	/* No forward retransmissions in Reno are possible. */
19231da177e4SLinus Torvalds 	if (!tp->rx_opt.sack_ok)
19241da177e4SLinus Torvalds 		return;
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds 	/* Yeah, we have to make a difficult choice between forward transmission
19271da177e4SLinus Torvalds 	 * and retransmission... Both ways have their merits...
19281da177e4SLinus Torvalds 	 *
19291da177e4SLinus Torvalds 	 * For now we do not retransmit anything, while we have some new
19301da177e4SLinus Torvalds 	 * segments to send.
19311da177e4SLinus Torvalds 	 */
19321da177e4SLinus Torvalds 
19331da177e4SLinus Torvalds 	if (tcp_may_send_now(sk, tp))
19341da177e4SLinus Torvalds 		return;
19351da177e4SLinus Torvalds 
19366a438bbeSStephen Hemminger 	if (tp->forward_skb_hint) {
19376a438bbeSStephen Hemminger 		skb = tp->forward_skb_hint;
19386a438bbeSStephen Hemminger 		packet_cnt = tp->forward_cnt_hint;
19396a438bbeSStephen Hemminger 	} else {
1940fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
19411da177e4SLinus Torvalds 		packet_cnt = 0;
19426a438bbeSStephen Hemminger 	}
19431da177e4SLinus Torvalds 
1944fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
1945fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
1946fe067e8aSDavid S. Miller 			break;
19476a438bbeSStephen Hemminger 		tp->forward_cnt_hint = packet_cnt;
19486a438bbeSStephen Hemminger 		tp->forward_skb_hint = skb;
19496a438bbeSStephen Hemminger 
19501da177e4SLinus Torvalds 		/* Similar to the retransmit loop above we
19511da177e4SLinus Torvalds 		 * can pretend that the retransmitted SKB
19521da177e4SLinus Torvalds 		 * we send out here will be composed of one
19531da177e4SLinus Torvalds 		 * real MSS sized packet because tcp_retransmit_skb()
19541da177e4SLinus Torvalds 		 * will fragment it if necessary.
19551da177e4SLinus Torvalds 		 */
19561da177e4SLinus Torvalds 		if (++packet_cnt > tp->fackets_out)
19571da177e4SLinus Torvalds 			break;
19581da177e4SLinus Torvalds 
19591da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
19601da177e4SLinus Torvalds 			break;
19611da177e4SLinus Torvalds 
19621da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
19631da177e4SLinus Torvalds 			continue;
19641da177e4SLinus Torvalds 
19651da177e4SLinus Torvalds 		/* Ok, retransmit it. */
19666a438bbeSStephen Hemminger 		if (tcp_retransmit_skb(sk, skb)) {
19676a438bbeSStephen Hemminger 			tp->forward_skb_hint = NULL;
19681da177e4SLinus Torvalds 			break;
19696a438bbeSStephen Hemminger 		}
19701da177e4SLinus Torvalds 
1971fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
19723f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
19733f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
19743f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
19751da177e4SLinus Torvalds 
19761da177e4SLinus Torvalds 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
19771da177e4SLinus Torvalds 	}
19781da177e4SLinus Torvalds }
19791da177e4SLinus Torvalds 
19801da177e4SLinus Torvalds 
19811da177e4SLinus Torvalds /* Send a fin.  The caller locks the socket for us.  This cannot be
19821da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
19831da177e4SLinus Torvalds  */
19841da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
19851da177e4SLinus Torvalds {
19861da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1987fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
19881da177e4SLinus Torvalds 	int mss_now;
19891da177e4SLinus Torvalds 
19901da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
19911da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
19921da177e4SLinus Torvalds 	 * and IP options.
19931da177e4SLinus Torvalds 	 */
19941da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, 1);
19951da177e4SLinus Torvalds 
1996fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
19971da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
19981da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
19991da177e4SLinus Torvalds 		tp->write_seq++;
20001da177e4SLinus Torvalds 	} else {
20011da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
20021da177e4SLinus Torvalds 		for (;;) {
2003d179cd12SDavid S. Miller 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
20041da177e4SLinus Torvalds 			if (skb)
20051da177e4SLinus Torvalds 				break;
20061da177e4SLinus Torvalds 			yield();
20071da177e4SLinus Torvalds 		}
20081da177e4SLinus Torvalds 
20091da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
20101da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
20111da177e4SLinus Torvalds 		skb->csum = 0;
20121da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
20131da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked = 0;
20147967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
20157967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
20167967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
20171da177e4SLinus Torvalds 
20181da177e4SLinus Torvalds 		/* FIN consumes a sequence number; write_seq is advanced by tcp_queue_skb(). */
20191da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->seq = tp->write_seq;
20201da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
20211da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
20221da177e4SLinus Torvalds 	}
20231da177e4SLinus Torvalds 	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
20241da177e4SLinus Torvalds }
20251da177e4SLinus Torvalds 
20261da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
20271da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
20281da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
20291da177e4SLinus Torvalds  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
20301da177e4SLinus Torvalds  */
2031dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
20321da177e4SLinus Torvalds {
20331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20341da177e4SLinus Torvalds 	struct sk_buff *skb;
20351da177e4SLinus Torvalds 
20361da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
20371da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
20381da177e4SLinus Torvalds 	if (!skb) {
20391da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
20401da177e4SLinus Torvalds 		return;
20411da177e4SLinus Torvalds 	}
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
20441da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
20451da177e4SLinus Torvalds 	skb->csum = 0;
20461da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
20471da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
20487967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
20497967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
20507967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds 	/* Send it off. */
20531da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
20541da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
20551da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2056dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
20571da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
20581da177e4SLinus Torvalds }
20591da177e4SLinus Torvalds 
20601da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent
20611da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
20621da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
20631da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
20641da177e4SLinus Torvalds  */
20651da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
20661da177e4SLinus Torvalds {
20671da177e4SLinus Torvalds 	struct sk_buff *skb;
20681da177e4SLinus Torvalds 
2069fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
20701da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
20711da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
20721da177e4SLinus Torvalds 		return -EFAULT;
20731da177e4SLinus Torvalds 	}
20741da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
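		/* The queued SYN may still be cloned (e.g. by the transmit
		 * path), so take a private copy before setting the ACK
		 * flag on it, and swap that copy into the write queue.
		 */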
20751da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
20761da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
20771da177e4SLinus Torvalds 			if (nskb == NULL)
20781da177e4SLinus Torvalds 				return -ENOMEM;
2079fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
20801da177e4SLinus Torvalds 			skb_header_release(nskb);
2081fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
20821da177e4SLinus Torvalds 			sk_stream_free_skb(sk, skb);
20831da177e4SLinus Torvalds 			sk_charge_skb(sk, nskb);
20841da177e4SLinus Torvalds 			skb = nskb;
20851da177e4SLinus Torvalds 		}
20861da177e4SLinus Torvalds 
20871da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
20881da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
20891da177e4SLinus Torvalds 	}
20901da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2091dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
20921da177e4SLinus Torvalds }
20931da177e4SLinus Torvalds 
20941da177e4SLinus Torvalds /*
20951da177e4SLinus Torvalds  * Prepare a SYN-ACK.
20961da177e4SLinus Torvalds  */
20971da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
209860236fddSArnaldo Carvalho de Melo 				 struct request_sock *req)
20991da177e4SLinus Torvalds {
21002e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
21011da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21021da177e4SLinus Torvalds 	struct tcphdr *th;
21031da177e4SLinus Torvalds 	int tcp_header_size;
21041da177e4SLinus Torvalds 	struct sk_buff *skb;
2105cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2106cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2107cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
2108cfb6eeb4SYOSHIFUJI Hideaki #endif
21091da177e4SLinus Torvalds 
21101da177e4SLinus Torvalds 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
21111da177e4SLinus Torvalds 	if (skb == NULL)
21121da177e4SLinus Torvalds 		return NULL;
21131da177e4SLinus Torvalds 
21141da177e4SLinus Torvalds 	/* Reserve space for headers. */
21151da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
21161da177e4SLinus Torvalds 
21171da177e4SLinus Torvalds 	skb->dst = dst_clone(dst);
21181da177e4SLinus Torvalds 
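	/* Size the SYN-ACK header: the base header plus MSS, and
	 * optionally timestamps, window scaling and SACK-permitted
	 * (the latter reuses the timestamp's NOP-NOP slots when
	 * timestamps are off).
	 */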
21191da177e4SLinus Torvalds 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
21202e6599cbSArnaldo Carvalho de Melo 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
21212e6599cbSArnaldo Carvalho de Melo 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
21221da177e4SLinus Torvalds 			   /* SACK_PERM is in the place of NOP NOP of TS */
21232e6599cbSArnaldo Carvalho de Melo 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2124cfb6eeb4SYOSHIFUJI Hideaki 
2125cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2126cfb6eeb4SYOSHIFUJI Hideaki 	/* Are we doing MD5 on this segment? If so, make room for it. */
2127cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2128cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
2129cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2130cfb6eeb4SYOSHIFUJI Hideaki #endif
21311da177e4SLinus Torvalds 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
21341da177e4SLinus Torvalds 	th->syn = 1;
21351da177e4SLinus Torvalds 	th->ack = 1;
21361da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
21371da177e4SLinus Torvalds 	th->source = inet_sk(sk)->sport;
21382e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
21392e6599cbSArnaldo Carvalho de Melo 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
21401da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
21411da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
21427967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
21437967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
21447967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
21451da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
21462e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
21471da177e4SLinus Torvalds 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
21481da177e4SLinus Torvalds 		__u8 rcv_wscale;
21491da177e4SLinus Torvalds 		/* Set this up on the first call only */
21501da177e4SLinus Torvalds 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
21511da177e4SLinus Torvalds 		/* tcp_full_space because it is guaranteed to be the first packet */
21521da177e4SLinus Torvalds 		tcp_select_initial_window(tcp_full_space(sk),
21532e6599cbSArnaldo Carvalho de Melo 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
21541da177e4SLinus Torvalds 			&req->rcv_wnd,
21551da177e4SLinus Torvalds 			&req->window_clamp,
21562e6599cbSArnaldo Carvalho de Melo 			ireq->wscale_ok,
21571da177e4SLinus Torvalds 			&rcv_wscale);
21582e6599cbSArnaldo Carvalho de Melo 		ireq->rcv_wscale = rcv_wscale;
21591da177e4SLinus Torvalds 	}
21601da177e4SLinus Torvalds 
21611da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2162600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2165df7a3b07SAl Viro 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
21662e6599cbSArnaldo Carvalho de Melo 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
21671da177e4SLinus Torvalds 			      TCP_SKB_CB(skb)->when,
2168cfb6eeb4SYOSHIFUJI Hideaki 			      req->ts_recent,
2169cfb6eeb4SYOSHIFUJI Hideaki 			      (
2170cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2171cfb6eeb4SYOSHIFUJI Hideaki 			       md5 ? &md5_hash_location :
2172cfb6eeb4SYOSHIFUJI Hideaki #endif
2173cfb6eeb4SYOSHIFUJI Hideaki 			       NULL)
2174cfb6eeb4SYOSHIFUJI Hideaki 			      );
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds 	skb->csum = 0;
21771da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
21781da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
2179cfb6eeb4SYOSHIFUJI Hideaki 
2180cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2181cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2182cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2183cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
2184cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
2185cfb6eeb4SYOSHIFUJI Hideaki 					       NULL, dst, req,
2186cfb6eeb4SYOSHIFUJI Hideaki 					       skb->h.th, sk->sk_protocol,
2187cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
2188cfb6eeb4SYOSHIFUJI Hideaki 	}
2189cfb6eeb4SYOSHIFUJI Hideaki #endif
2190cfb6eeb4SYOSHIFUJI Hideaki 
21911da177e4SLinus Torvalds 	return skb;
21921da177e4SLinus Torvalds }
21931da177e4SLinus Torvalds 
21941da177e4SLinus Torvalds /*
21951da177e4SLinus Torvalds  * Do all connect socket setups that can be done AF independent.
21961da177e4SLinus Torvalds  */
219740efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk)
21981da177e4SLinus Torvalds {
21991da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
22001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22011da177e4SLinus Torvalds 	__u8 rcv_wscale;
22021da177e4SLinus Torvalds 
22031da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
22041da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
22051da177e4SLinus Torvalds 	 */
22061da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
22071da177e4SLinus Torvalds 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
22081da177e4SLinus Torvalds 
2209cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2210cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2211cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2212cfb6eeb4SYOSHIFUJI Hideaki #endif
2213cfb6eeb4SYOSHIFUJI Hideaki 
22141da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
22151da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
22161da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
22171da177e4SLinus Torvalds 	tp->max_window = 0;
22185d424d5aSJohn Heffner 	tcp_mtup_init(sk);
22191da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
22201da177e4SLinus Torvalds 
22211da177e4SLinus Torvalds 	if (!tp->window_clamp)
22221da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
22231da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
22241da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
22251da177e4SLinus Torvalds 
22261da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
22271da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
22281da177e4SLinus Torvalds 				  &tp->rcv_wnd,
22291da177e4SLinus Torvalds 				  &tp->window_clamp,
22301da177e4SLinus Torvalds 				  sysctl_tcp_window_scaling,
22311da177e4SLinus Torvalds 				  &rcv_wscale);
22321da177e4SLinus Torvalds 
22331da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
22341da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
22351da177e4SLinus Torvalds 
22361da177e4SLinus Torvalds 	sk->sk_err = 0;
22371da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
22381da177e4SLinus Torvalds 	tp->snd_wnd = 0;
22391da177e4SLinus Torvalds 	tcp_init_wl(tp, tp->write_seq, 0);
22401da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
22411da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
22421da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
22431da177e4SLinus Torvalds 	tp->rcv_wup = 0;
22441da177e4SLinus Torvalds 	tp->copied_seq = 0;
22451da177e4SLinus Torvalds 
2246463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2247463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
22481da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
22491da177e4SLinus Torvalds }
22501da177e4SLinus Torvalds 
22511da177e4SLinus Torvalds /*
22521da177e4SLinus Torvalds  * Build a SYN and send it off.
22531da177e4SLinus Torvalds  */
22541da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
22551da177e4SLinus Torvalds {
22561da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22571da177e4SLinus Torvalds 	struct sk_buff *buff;
22581da177e4SLinus Torvalds 
22591da177e4SLinus Torvalds 	tcp_connect_init(sk);
22601da177e4SLinus Torvalds 
2261d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
22621da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
22631da177e4SLinus Torvalds 		return -ENOBUFS;
22641da177e4SLinus Torvalds 
22651da177e4SLinus Torvalds 	/* Reserve space for headers. */
22661da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
22671da177e4SLinus Torvalds 
22681da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
22691da177e4SLinus Torvalds 	TCP_ECN_send_syn(sk, tp, buff);
22701da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->sacked = 0;
22717967168cSHerbert Xu 	skb_shinfo(buff)->gso_segs = 1;
22727967168cSHerbert Xu 	skb_shinfo(buff)->gso_size = 0;
22737967168cSHerbert Xu 	skb_shinfo(buff)->gso_type = 0;
22741da177e4SLinus Torvalds 	buff->csum = 0;
2275bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
22761da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
22771da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds 	/* Send it off. */
22801da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
22811da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
22821da177e4SLinus Torvalds 	skb_header_release(buff);
2283fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, buff);
22841da177e4SLinus Torvalds 	sk_charge_skb(sk, buff);
22851da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
2286dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2287bd37a088SWei Yongjun 
2288bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2289bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
2290bd37a088SWei Yongjun 	 */
2291bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2292bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
22931da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
22941da177e4SLinus Torvalds 
22951da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
22963f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
22973f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
22981da177e4SLinus Torvalds 	return 0;
22991da177e4SLinus Torvalds }
23001da177e4SLinus Torvalds 
23011da177e4SLinus Torvalds /* Send out a delayed ACK; the caller does the policy checking
23021da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
23031da177e4SLinus Torvalds  * for details.
23041da177e4SLinus Torvalds  */
23051da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
23061da177e4SLinus Torvalds {
2307463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2308463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
23091da177e4SLinus Torvalds 	unsigned long timeout;
23101da177e4SLinus Torvalds 
23111da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2312463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
23131da177e4SLinus Torvalds 		int max_ato = HZ/2;
23141da177e4SLinus Torvalds 
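		/* In interactive (pingpong) sessions, or when an ACK has
		 * already been pushed, cap the delay at TCP_DELACK_MAX
		 * rather than the half-second default above.
		 */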
2315463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
23161da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
23171da177e4SLinus Torvalds 
23181da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
23191da177e4SLinus Torvalds 
23201da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
2321463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
23221da177e4SLinus Torvalds 		 * directly.
23231da177e4SLinus Torvalds 		 */
23241da177e4SLinus Torvalds 		if (tp->srtt) {
23251da177e4SLinus Torvalds 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
23261da177e4SLinus Torvalds 
23271da177e4SLinus Torvalds 			if (rtt < max_ato)
23281da177e4SLinus Torvalds 				max_ato = rtt;
23291da177e4SLinus Torvalds 		}
23301da177e4SLinus Torvalds 
23311da177e4SLinus Torvalds 		ato = min(ato, max_ato);
23321da177e4SLinus Torvalds 	}
23331da177e4SLinus Torvalds 
23341da177e4SLinus Torvalds 	/* Stay within the limit we were given */
23351da177e4SLinus Torvalds 	timeout = jiffies + ato;
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already pending. */
2338463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
23391da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
23401da177e4SLinus Torvalds 		 * send ACK now.
23411da177e4SLinus Torvalds 		 */
2342463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
2343463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
23441da177e4SLinus Torvalds 			tcp_send_ack(sk);
23451da177e4SLinus Torvalds 			return;
23461da177e4SLinus Torvalds 		}
23471da177e4SLinus Torvalds 
2348463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2349463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
23501da177e4SLinus Torvalds 	}
2351463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2352463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
2353463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
23541da177e4SLinus Torvalds }
23551da177e4SLinus Torvalds 
23561da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
23571da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
23581da177e4SLinus Torvalds {
23591da177e4SLinus Torvalds 	/* If we have been reset, we may not send again. */
23601da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
23611da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
23621da177e4SLinus Torvalds 		struct sk_buff *buff;
23631da177e4SLinus Torvalds 
23641da177e4SLinus Torvalds 		/* We are not putting this on the write queue, so
23651da177e4SLinus Torvalds 		 * tcp_transmit_skb() will set the ownership to this
23661da177e4SLinus Torvalds 		 * sock.
23671da177e4SLinus Torvalds 		 */
23681da177e4SLinus Torvalds 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
23691da177e4SLinus Torvalds 		if (buff == NULL) {
2370463c84b9SArnaldo Carvalho de Melo 			inet_csk_schedule_ack(sk);
2371463c84b9SArnaldo Carvalho de Melo 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
23723f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
23733f421baaSArnaldo Carvalho de Melo 						  TCP_DELACK_MAX, TCP_RTO_MAX);
23741da177e4SLinus Torvalds 			return;
23751da177e4SLinus Torvalds 		}
23761da177e4SLinus Torvalds 
23771da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
23781da177e4SLinus Torvalds 		skb_reserve(buff, MAX_TCP_HEADER);
23791da177e4SLinus Torvalds 		buff->csum = 0;
23801da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
23811da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->sacked = 0;
23827967168cSHerbert Xu 		skb_shinfo(buff)->gso_segs = 1;
23837967168cSHerbert Xu 		skb_shinfo(buff)->gso_size = 0;
23847967168cSHerbert Xu 		skb_shinfo(buff)->gso_type = 0;
23851da177e4SLinus Torvalds 
23861da177e4SLinus Torvalds 		/* Send it off, this clears delayed acks for us. */
23871da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
23881da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2389dfb4b9dcSDavid S. Miller 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
23901da177e4SLinus Torvalds 	}
23911da177e4SLinus Torvalds }
23921da177e4SLinus Torvalds 
23931da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
23941da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
23951da177e4SLinus Torvalds  *
23961da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
23971da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
23981da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
23991da177e4SLinus Torvalds  *
24001da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
24011da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
24021da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
24031da177e4SLinus Torvalds  */
24041da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
24051da177e4SLinus Torvalds {
24061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24071da177e4SLinus Torvalds 	struct sk_buff *skb;
24081da177e4SLinus Torvalds 
24091da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
24101da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
24111da177e4SLinus Torvalds 	if (skb == NULL)
24121da177e4SLinus Torvalds 		return -1;
24131da177e4SLinus Torvalds 
24141da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
24151da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
24161da177e4SLinus Torvalds 	skb->csum = 0;
24171da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
24181da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = urgent;
24197967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
24207967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
24217967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
24221da177e4SLinus Torvalds 
24231da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
24241da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
24251da177e4SLinus Torvalds 	 * send it.
24261da177e4SLinus Torvalds 	 */
24271da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
24281da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
24291da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2430dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
24311da177e4SLinus Torvalds }
24321da177e4SLinus Torvalds 
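/* Window probe helper: if the segment at the send head starts inside
 * the announced window, push it out (fragmenting it to fit if needed);
 * otherwise fall back to a zero-window probe via tcp_xmit_probe_skb().
 */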
24331da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
24341da177e4SLinus Torvalds {
24351da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
24361da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
24371da177e4SLinus Torvalds 		struct sk_buff *skb;
24381da177e4SLinus Torvalds 
2439fe067e8aSDavid S. Miller 		if ((skb = tcp_send_head(sk)) != NULL &&
24401da177e4SLinus Torvalds 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
24411da177e4SLinus Torvalds 			int err;
24421da177e4SLinus Torvalds 			unsigned int mss = tcp_current_mss(sk, 0);
24431da177e4SLinus Torvalds 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
24441da177e4SLinus Torvalds 
24451da177e4SLinus Torvalds 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
24461da177e4SLinus Torvalds 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
24471da177e4SLinus Torvalds 
24481da177e4SLinus Torvalds 			/* We are probing the opening of a window
24491da177e4SLinus Torvalds 			 * but the window size is != 0; this must have
24501da177e4SLinus Torvalds 			 * been the result of sender-side SWS avoidance.
24511da177e4SLinus Torvalds 			 */
24521da177e4SLinus Torvalds 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
24531da177e4SLinus Torvalds 			    skb->len > mss) {
24541da177e4SLinus Torvalds 				seg_size = min(seg_size, mss);
24551da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2456846998aeSDavid S. Miller 				if (tcp_fragment(sk, skb, seg_size, mss))
24571da177e4SLinus Torvalds 					return -1;
24581da177e4SLinus Torvalds 			} else if (!tcp_skb_pcount(skb))
2459846998aeSDavid S. Miller 				tcp_set_skb_tso_segs(sk, skb, mss);
24601da177e4SLinus Torvalds 
24611da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
24621da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2463dfb4b9dcSDavid S. Miller 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
24641da177e4SLinus Torvalds 			if (!err) {
24651da177e4SLinus Torvalds 				update_send_head(sk, tp, skb);
24661da177e4SLinus Torvalds 			}
24671da177e4SLinus Torvalds 			return err;
24681da177e4SLinus Torvalds 		} else {
24691da177e4SLinus Torvalds 			if (tp->urg_mode &&
24701da177e4SLinus Torvalds 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
24711da177e4SLinus Torvalds 				tcp_xmit_probe_skb(sk, TCPCB_URG);
24721da177e4SLinus Torvalds 			return tcp_xmit_probe_skb(sk, 0);
24731da177e4SLinus Torvalds 		}
24741da177e4SLinus Torvalds 	}
24751da177e4SLinus Torvalds 	return -1;
24761da177e4SLinus Torvalds }
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds /* A window probe timeout has occurred.  If window is not closed send
24791da177e4SLinus Torvalds  * a partial packet else a zero probe.
24801da177e4SLinus Torvalds  */
24811da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
24821da177e4SLinus Torvalds {
2483463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
24841da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24851da177e4SLinus Torvalds 	int err;
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
24881da177e4SLinus Torvalds 
2489fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
24901da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
24916687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2492463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
24931da177e4SLinus Torvalds 		return;
24941da177e4SLinus Torvalds 	}
24951da177e4SLinus Torvalds 
24961da177e4SLinus Torvalds 	if (err <= 0) {
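		/* The probe was transmitted (or failed outright, not just
		 * local congestion): count it and back off the probe timer
		 * exponentially, capped at TCP_RTO_MAX.
		 */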
2497463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2498463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
24996687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2500463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
25013f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
25023f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25031da177e4SLinus Torvalds 	} else {
25041da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
25056687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
25061da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
25071da177e4SLinus Torvalds 		 *
25081da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
25091da177e4SLinus Torvalds 		 */
25106687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
25116687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2512463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2513463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
25143f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
25153f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25161da177e4SLinus Torvalds 	}
25171da177e4SLinus Torvalds }
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
25201da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
25211da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
25221da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2523f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
25245d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
2525