xref: /linux/net/ipv4/tcp_output.c (revision c84a57113f59486e6688be1cd443b96e3118efa0)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we send? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than the RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
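
/* Restart example (illustrative numbers, not taken from this code): with
 * snd_cwnd = 40, restart_cwnd = 10 and roughly 2.5 RTOs of idle time,
 * the loop halves cwnd once per full RTO of idle time (40 -> 20 -> 10),
 * and the final max() guarantees we never drop below the restart window.
 */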

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* The initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further, place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
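
/* Worked example: for mss = 1460 this returns TCP_INIT_CWND * 2 = 20
 * segments. For a jumbo mss of 9000 it scales down to
 * max((1460 * 20) / 9000, 2U) = 3 segments, keeping the initial byte
 * budget roughly constant across MSS values.
 */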

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
2624bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds /* Chose a new window to advertise, update state in tcp_sock for the
2651da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2661da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2671da177e4SLinus Torvalds  * frame.
2681da177e4SLinus Torvalds  */
26940efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2701da177e4SLinus Torvalds {
2711da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2721da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2731da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2741da177e4SLinus Torvalds 
2751da177e4SLinus Torvalds 	/* Never shrink the offered window */
2761da177e4SLinus Torvalds 	if (new_win < cur_win) {
2771da177e4SLinus Torvalds 		/* Danger Will Robinson!
2781da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2791da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2801da177e4SLinus Torvalds 		 * window in time.  --DaveM
2811da177e4SLinus Torvalds 		 *
2821da177e4SLinus Torvalds 		 * Relax Will Robinson.
2831da177e4SLinus Torvalds 		 */
284607bfbf2SPatrick McHardy 		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2851da177e4SLinus Torvalds 	}
2861da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2871da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2881da177e4SLinus Torvalds 
2891da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2901da177e4SLinus Torvalds 	 * scaled window.
2911da177e4SLinus Torvalds 	 */
29215d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2931da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2941da177e4SLinus Torvalds 	else
2951da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2961da177e4SLinus Torvalds 
2971da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2981da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2991da177e4SLinus Torvalds 
3001da177e4SLinus Torvalds 	/* If we advertise zero window, disable fast path. */
3011da177e4SLinus Torvalds 	if (new_win == 0)
3021da177e4SLinus Torvalds 		tp->pred_flags = 0;
3031da177e4SLinus Torvalds 
3041da177e4SLinus Torvalds 	return new_win;
3051da177e4SLinus Torvalds }
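
/* Rounding example: if cur_win = 1000 and rcv_wscale = 7, the ALIGN()
 * above keeps the old window but rounds it up to the scaling
 * granularity: ALIGN(1000, 128) = 1024, so the scaled value
 * 1024 >> 7 = 8 is exactly representable in the 16-bit window field.
 */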

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN.  */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	shinfo->gso_segs = 1;
	shinfo->gso_size = 0;
	shinfo->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options. We learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
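
/* Wire-format example: when both timestamps and SACK have been
 * negotiated, the first 32-bit word written for the TS block packs the
 * SACK_PERM option together with the timestamp kind/length bytes, i.e.
 * (4 << 24) | (2 << 16) | (8 << 8) | 10 = 0x0402080a, followed by the
 * tsval and tsecr words; this combined layout saves four bytes of NOP
 * padding over emitting the two options separately.
 */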

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen
	 * in normal data packets (should timestamps be used) must be
	 * included in the MSS advertised. But we subtract them from
	 * tp->mss_cache so that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary. If we don't do this
	 * correctly, as a receiver we won't recognize data packets as being
	 * full sized when we should, and thus we won't abide by the delayed
	 * ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of
	 * those going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
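
/* Space accounting example: on a typical SYN with timestamps, window
 * scaling and SACK enabled (and no MD5), the 40 bytes of option space
 * shrink by 4 (MSS) + 12 (timestamps) + 4 (wscale) = 20; SACK_PERM is
 * folded into the timestamp block, so 20 bytes remain for a Fast Open
 * cookie.
 */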

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned int mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}

/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non-NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}
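
/* Note on the sk_free() above: tcp_wfree() kept one reference on the
 * socket (it subtracted truesize - 1 instead of truesize from
 * sk_wmem_alloc); sk_free() drops that last reference once the socket
 * has been serviced, so the socket cannot vanish while it sits on the
 * tasklet queue.
 */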

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol-dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
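
/* The cmpxchg() loop above claims all deferred-work bits in one atomic
 * step: it snapshots tsq_flags, computes the value with the deferred
 * bits cleared, and retries if a timer or the tasklet set another bit
 * in between. Each handler then runs exactly once for the bits that
 * were claimed.
 */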

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}
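
/* The "truesize - 1" above is deliberate: a full atomic_sub of
 * skb->truesize could drop sk_wmem_alloc to zero and let the socket be
 * freed while it is still queued on the per-cpu tasklet list; leaving
 * one unit accounted keeps the socket alive until tcp_tasklet_func()
 * calls sk_free().
 */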

/* This routine actually transmits TCP packets queued up by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	if (clone_it) {
		const struct sk_buff *fclone = skb + 1;

		/* If congestion control is doing timestamping, we must
		 * take such a timestamp before we potentially clone/copy.
		 */
		if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
			__net_timestamp(skb);

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}
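
	/* Urgent-pointer note: urg_ptr is a 16-bit offset from seq, so when
	 * snd_up lies more than 64K beyond this segment's seq it cannot be
	 * expressed exactly; the code above then advertises the maximum
	 * offset (0xFFFF) so the receiver still sees urgent mode, provided
	 * that offset reaches past snd_nxt.
	 */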

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked, so do not forget
 * tcp_push_pending_frames, otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));

	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		shinfo->gso_segs = 1;
		shinfo->gso_size = 0;
		shinfo->gso_type = 0;
	} else {
		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		shinfo->gso_size = mss_now;
		shinfo->gso_type = sk->sk_gso_type;
	}
}
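
/* Segmentation example: a 4380-byte skb with mss_now = 1460 gets
 * gso_segs = DIV_ROUND_UP(4380, 1460) = 3, i.e. the NIC (or software
 * GSO) will emit three full-sized segments; a 4000-byte skb would
 * likewise count as 3 segments, the last one short.
 */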

/* When a modification to fackets_out becomes necessary, we need to check
 * whether the skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

1026797108d1SIlpo Järvinen /* When the pcount of an skb in the middle of the write queue changes, we
1027797108d1SIlpo Järvinen  * need to apply various tweaks to keep the counters consistent.
1028797108d1SIlpo Järvinen  */
1029cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1030797108d1SIlpo Järvinen {
1031797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1032797108d1SIlpo Järvinen 
1033797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1034797108d1SIlpo Järvinen 
1035797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1036797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1037797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1038797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1039797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1040797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1041797108d1SIlpo Järvinen 
1042797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1043797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1044797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1045797108d1SIlpo Järvinen 
1046797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1047797108d1SIlpo Järvinen 
1048797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1049797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
105052cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1051797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1052797108d1SIlpo Järvinen 
1053797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1054797108d1SIlpo Järvinen }
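
/* Worked example (illustrative): on a SACK connection, if a mid-queue skb
 * that was already SACKed shrinks by decr = 2 segments, packets_out and
 * sacked_out each drop by 2, preserving tcp_verify_left_out()'s invariant
 * that sacked_out + lost_out never exceeds packets_out.
 */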
1055797108d1SIlpo Järvinen 
10561da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
10571da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
10581da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
10591da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
10601da177e4SLinus Torvalds  */
1061056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1062056834d9SIlpo Järvinen 		 unsigned int mss_now)
10631da177e4SLinus Torvalds {
10641da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10651da177e4SLinus Torvalds 	struct sk_buff *buff;
10666475be16SDavid S. Miller 	int nsize, old_factor;
1067b60b49eaSHerbert Xu 	int nlen;
10689ce01461SIlpo Järvinen 	u8 flags;
10691da177e4SLinus Torvalds 
10702fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
10712fceec13SIlpo Järvinen 		return -EINVAL;
10726a438bbeSStephen Hemminger 
10731da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
10741da177e4SLinus Torvalds 	if (nsize < 0)
10751da177e4SLinus Torvalds 		nsize = 0;
10761da177e4SLinus Torvalds 
1077c52e2421SEric Dumazet 	if (skb_unclone(skb, GFP_ATOMIC))
10781da177e4SLinus Torvalds 		return -ENOMEM;
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
10811da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
10821da177e4SLinus Torvalds 	if (buff == NULL)
10831da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1084ef5cb973SHerbert Xu 
10853ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
10863ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1087b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1088b60b49eaSHerbert Xu 	buff->truesize += nlen;
1089b60b49eaSHerbert Xu 	skb->truesize -= nlen;
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
10921da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
10931da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
10941da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
10951da177e4SLinus Torvalds 
10961da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
10974de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
10984de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
10994de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1100e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
11011da177e4SLinus Torvalds 
110284fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
11031da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1104056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1105056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
11061da177e4SLinus Torvalds 						       nsize, 0);
11071da177e4SLinus Torvalds 
11081da177e4SLinus Torvalds 		skb_trim(skb, len);
11091da177e4SLinus Torvalds 
11101da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
11111da177e4SLinus Torvalds 	} else {
111284fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
11131da177e4SLinus Torvalds 		skb_split(skb, buff, len);
11141da177e4SLinus Torvalds 	}
11151da177e4SLinus Torvalds 
11161da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' field
11191da177e4SLinus Torvalds 	 * of skbs which it has never sent before. --ANK
11201da177e4SLinus Torvalds 	 */
11211da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1122a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
11231da177e4SLinus Torvalds 
11246475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
11256475be16SDavid S. Miller 
11261da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1127846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1128846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
11291da177e4SLinus Torvalds 
11306475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
11316475be16SDavid S. Miller 	 * adjust the various packet counters.
11326475be16SDavid S. Miller 	 */
1133cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
11346475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
11356475be16SDavid S. Miller 			tcp_skb_pcount(buff);
11361da177e4SLinus Torvalds 
1137797108d1SIlpo Järvinen 		if (diff)
1138797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
11391da177e4SLinus Torvalds 	}
11401da177e4SLinus Torvalds 
11411da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1142f44b5271SDavid S. Miller 	skb_header_release(buff);
1143fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
11441da177e4SLinus Torvalds 
11451da177e4SLinus Torvalds 	return 0;
11461da177e4SLinus Torvalds }
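
/* Worked example (illustrative): splitting a queued 3000-byte skb at
 * len = 1460 leaves the original skb covering [seq, seq + 1460) and
 * inserts a new buff covering [seq + 1460, seq + 3000); PSH/FIN, if set,
 * move to buff so they still mark the end of the original byte range.
 */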
11471da177e4SLinus Torvalds 
11481da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
11491da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied, but
11501da177e4SLinus Torvalds  * immediately discarded.
11511da177e4SLinus Torvalds  */
1152f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
11531da177e4SLinus Torvalds {
11547b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
11551da177e4SLinus Torvalds 	int i, k, eat;
11561da177e4SLinus Torvalds 
11574fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
11584fa48bf3SEric Dumazet 	if (eat) {
11594fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
11604fa48bf3SEric Dumazet 		len -= eat;
11614fa48bf3SEric Dumazet 		if (!len)
11624fa48bf3SEric Dumazet 			return;
11634fa48bf3SEric Dumazet 	}
11641da177e4SLinus Torvalds 	eat = len;
11651da177e4SLinus Torvalds 	k = 0;
11667b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
11677b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
11687b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
11699e903e08SEric Dumazet 
11709e903e08SEric Dumazet 		if (size <= eat) {
1171aff65da0SIan Campbell 			skb_frag_unref(skb, i);
11729e903e08SEric Dumazet 			eat -= size;
11731da177e4SLinus Torvalds 		} else {
11747b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
11751da177e4SLinus Torvalds 			if (eat) {
11767b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
11777b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
11781da177e4SLinus Torvalds 				eat = 0;
11791da177e4SLinus Torvalds 			}
11801da177e4SLinus Torvalds 			k++;
11811da177e4SLinus Torvalds 		}
11821da177e4SLinus Torvalds 	}
11837b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
11841da177e4SLinus Torvalds 
118527a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
11861da177e4SLinus Torvalds 	skb->data_len -= len;
11871da177e4SLinus Torvalds 	skb->len = skb->data_len;
11881da177e4SLinus Torvalds }
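
/* Worked example (illustrative): an skb with no linear data and two
 * 1000-byte page frags trimmed by len = 1500 drops the first frag
 * entirely (skb_frag_unref) and advances the second frag's page_offset
 * by 500, leaving 500 bytes of paged data.
 */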
11891da177e4SLinus Torvalds 
119067edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
11911da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
11921da177e4SLinus Torvalds {
119314bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
11941da177e4SLinus Torvalds 		return -ENOMEM;
11951da177e4SLinus Torvalds 
11964fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
11971da177e4SLinus Torvalds 
11981da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
119984fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
12001da177e4SLinus Torvalds 
12011da177e4SLinus Torvalds 	skb->truesize	     -= len;
12021da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
12033ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
12041da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
12051da177e4SLinus Torvalds 
12065b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
12071da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
12085b35e1e6SNeal Cardwell 		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds 	return 0;
12111da177e4SLinus Torvalds }
12121da177e4SLinus Torvalds 
12131b63edd6SYuchung Cheng /* Calculate MSS, not accounting for any TCP options.  */
12141b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
12155d424d5aSJohn Heffner {
1216cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1217cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12185d424d5aSJohn Heffner 	int mss_now;
12195d424d5aSJohn Heffner 
12205d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
12215d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) from RFC 1122.
12225d424d5aSJohn Heffner 	 */
12235d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
12245d424d5aSJohn Heffner 
122567469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
122667469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
122767469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
122867469601SEric Dumazet 
122967469601SEric Dumazet 		if (dst && dst_allfrag(dst))
123067469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
123167469601SEric Dumazet 	}
123267469601SEric Dumazet 
12335d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
12345d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
12355d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
12365d424d5aSJohn Heffner 
12375d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
12385d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
12395d424d5aSJohn Heffner 
12405d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
12415d424d5aSJohn Heffner 	if (mss_now < 48)
12425d424d5aSJohn Heffner 		mss_now = 48;
12435d424d5aSJohn Heffner 	return mss_now;
12445d424d5aSJohn Heffner }
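
/* Worked example (illustrative): for IPv4 with pmtu = 1500,
 * net_header_len = 20 and sizeof(struct tcphdr) = 20, so the base mss
 * is 1460 (before the mss_clamp and ext header adjustments above).
 */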
12455d424d5aSJohn Heffner 
12461b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
12471b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
12481b63edd6SYuchung Cheng {
12491b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
12501b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
12511b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
12521b63edd6SYuchung Cheng }
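
/* Worked example (illustrative): with timestamps enabled,
 * tp->tcp_header_len = 32, so tcp_mtu_to_mss(sk, 1500) on IPv4 yields
 * 1460 - 12 = 1448 bytes of payload per segment.
 */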
12531b63edd6SYuchung Cheng 
12545d424d5aSJohn Heffner /* Inverse of above */
125567469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
12565d424d5aSJohn Heffner {
1257cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1258cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12595d424d5aSJohn Heffner 	int mtu;
12605d424d5aSJohn Heffner 
12615d424d5aSJohn Heffner 	mtu = mss +
12625d424d5aSJohn Heffner 	      tp->tcp_header_len +
12635d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
12645d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
12655d424d5aSJohn Heffner 
126667469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
126767469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
126867469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
126967469601SEric Dumazet 
127067469601SEric Dumazet 		if (dst && dst_allfrag(dst))
127167469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
127267469601SEric Dumazet 	}
12735d424d5aSJohn Heffner 	return mtu;
12745d424d5aSJohn Heffner }
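
/* Worked example (illustrative): the inverse of the above; for an IPv4
 * socket with no TCP options or extension headers,
 * tcp_mss_to_mtu(sk, 1460) = 1460 + 20 + 0 + 20 = 1500.
 */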
12755d424d5aSJohn Heffner 
127667edfef7SAndi Kleen /* MTU probing init per socket */
12775d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
12785d424d5aSJohn Heffner {
12795d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
12805d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
12815d424d5aSJohn Heffner 
12825d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
12835d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
12845d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
12855d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
12865d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
12875d424d5aSJohn Heffner }
12884bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
12895d424d5aSJohn Heffner 
12901da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
12931da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1296caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
12971da177e4SLinus Torvalds    It also does not include TCP options.
12981da177e4SLinus Torvalds 
1299d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
13001da177e4SLinus Torvalds 
13011da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
13021da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
13031da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
13041da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
13071da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
13081da177e4SLinus Torvalds 
1309d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1310d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
13111da177e4SLinus Torvalds  */
13121da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
13131da177e4SLinus Torvalds {
13141da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1315d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
13165d424d5aSJohn Heffner 	int mss_now;
13171da177e4SLinus Torvalds 
13185d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
13195d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
13201da177e4SLinus Torvalds 
13215d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1322409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
13231da177e4SLinus Torvalds 
13241da177e4SLinus Torvalds 	/* And store cached results */
1325d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
13265d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
13275d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1328c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	return mss_now;
13311da177e4SLinus Torvalds }
13324bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
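
/* Illustrative note (not part of the original source): when MTU probing
 * is enabled, the cached tp->mss_cache is additionally bounded by
 * tcp_mtu_to_mss(sk, icsk_mtup.search_low), so sending stays at the
 * conservative end of the search range until a probe succeeds.
 */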
13331da177e4SLinus Torvalds 
13341da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
13351da177e4SLinus Torvalds  * and even PMTU discovery events into account.
13361da177e4SLinus Torvalds  */
13370c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
13381da177e4SLinus Torvalds {
1339cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1340cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1341c1b4a7e6SDavid S. Miller 	u32 mss_now;
134295c96174SEric Dumazet 	unsigned int header_len;
134333ad798cSAdam Langley 	struct tcp_out_options opts;
134433ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
13451da177e4SLinus Torvalds 
1346c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1347c1b4a7e6SDavid S. Miller 
13481da177e4SLinus Torvalds 	if (dst) {
13491da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1350d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
13511da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
13521da177e4SLinus Torvalds 	}
13531da177e4SLinus Torvalds 
135433ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
135533ad798cSAdam Langley 		     sizeof(struct tcphdr);
135633ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
135733ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
135833ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
135933ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
136033ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
136133ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
136233ad798cSAdam Langley 		mss_now -= delta;
136333ad798cSAdam Langley 	}
1364cfb6eeb4SYOSHIFUJI Hideaki 
13651da177e4SLinus Torvalds 	return mss_now;
13661da177e4SLinus Torvalds }
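
/* Worked example (illustrative): with timestamps (tcp_header_len = 32)
 * and one pending SACK block, tcp_established_options() reports
 * 12 + 4 + 8 = 24 option bytes, so header_len = 44, delta = 12, and
 * mss_now shrinks by 12 bytes for this packet.
 */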
13671da177e4SLinus Torvalds 
1368a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
13699e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1370a762a980SDavid S. Miller {
13719e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1372a762a980SDavid S. Miller 
1373d436d686SIlpo Järvinen 	if (tp->packets_out >= tp->snd_cwnd) {
1374a762a980SDavid S. Miller 		/* Network is fully fed. */
1375a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1376a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1377a762a980SDavid S. Miller 	} else {
1378a762a980SDavid S. Miller 		/* Network starves. */
1379a762a980SDavid S. Miller 		/* Network is starved. */
1380a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1381a762a980SDavid S. Miller 
138215d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
138315d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1384a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1385a762a980SDavid S. Miller 	}
1386a762a980SDavid S. Miller }
1387a762a980SDavid S. Miller 
1388d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1389d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1390d4589926SEric Dumazet {
1391d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1392d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1393d4589926SEric Dumazet }
1394d4589926SEric Dumazet 
1395d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1396d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1397d4589926SEric Dumazet  * The test is really :
1398d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1399d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1400d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1401d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
14020e3a4803SIlpo Järvinen  */
1403d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1404d4589926SEric Dumazet 				const struct sk_buff *skb)
1405d4589926SEric Dumazet {
1406d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1407d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1408d4589926SEric Dumazet }
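
/* Worked example (illustrative): for skb->len = 2000 and mss_now = 1460,
 * tcp_skb_pcount(skb) = 2 and 2000 < 2 * 1460, so the skb ends in a
 * sub-mss segment and snd_sml is advanced to end_seq.
 */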
1409d4589926SEric Dumazet 
1410d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1411d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1412d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1413d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1414d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1415d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1416d4589926SEric Dumazet  */
1417d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1418d4589926SEric Dumazet 			    unsigned int mss_now, int nonagle)
1419d4589926SEric Dumazet {
1420d4589926SEric Dumazet 	return partial &&
1421d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1422d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1423d4589926SEric Dumazet }
1424d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1425d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1426d4589926SEric Dumazet 					const struct sk_buff *skb,
1427d4589926SEric Dumazet 					unsigned int mss_now,
1428d4589926SEric Dumazet 					unsigned int max_segs,
1429d4589926SEric Dumazet 					int nonagle)
1430c1b4a7e6SDavid S. Miller {
1431cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1432d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1433c1b4a7e6SDavid S. Miller 
143490840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
14351485348dSBen Hutchings 	max_len = mss_now * max_segs;
14360e3a4803SIlpo Järvinen 
14371485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
14381485348dSBen Hutchings 		return max_len;
14390e3a4803SIlpo Järvinen 
14405ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
14415ea3a748SIlpo Järvinen 
14421485348dSBen Hutchings 	if (max_len <= needed)
14431485348dSBen Hutchings 		return max_len;
14440e3a4803SIlpo Järvinen 
1445d4589926SEric Dumazet 	partial = needed % mss_now;
1446d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1447d4589926SEric Dumazet 	 * to include this last segment in this skb.
1448d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1449d4589926SEric Dumazet 	 */
1450d4589926SEric Dumazet 	if (tcp_nagle_check(partial != 0, tp, mss_now, nonagle))
1451d4589926SEric Dumazet 		return needed - partial;
1452d4589926SEric Dumazet 
1453d4589926SEric Dumazet 	return needed;
1454c1b4a7e6SDavid S. Miller }
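
/* Worked example (illustrative): with a 5000-byte tail skb, mss_now = 1460
 * and a 4000-byte window, needed = 4000 and partial = 4000 % 1460 = 1080;
 * if the Nagle test disallows the partial segment we return 2920 (two
 * full MSS), otherwise 4000.
 */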
1455c1b4a7e6SDavid S. Miller 
1456c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1457c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1458c1b4a7e6SDavid S. Miller  */
1459cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1460cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1461c1b4a7e6SDavid S. Miller {
1462c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1463c1b4a7e6SDavid S. Miller 
1464c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
14654de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
14664de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1467c1b4a7e6SDavid S. Miller 		return 1;
1468c1b4a7e6SDavid S. Miller 
1469c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1470c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1471c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1472c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1473c1b4a7e6SDavid S. Miller 
1474c1b4a7e6SDavid S. Miller 	return 0;
1475c1b4a7e6SDavid S. Miller }
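
/* Worked example (illustrative): with snd_cwnd = 10 and in_flight = 7,
 * up to 3 more segments may be sent; a single-segment FIN is always
 * allowed regardless of the congestion window.
 */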
1476c1b4a7e6SDavid S. Miller 
1477b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
147867edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1479c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1480c1b4a7e6SDavid S. Miller  */
1481cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1482056834d9SIlpo Järvinen 			     unsigned int mss_now)
1483c1b4a7e6SDavid S. Miller {
1484c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1485c1b4a7e6SDavid S. Miller 
1486f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1487846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1488c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1489c1b4a7e6SDavid S. Miller 	}
1490c1b4a7e6SDavid S. Miller 	return tso_segs;
1491c1b4a7e6SDavid S. Miller }
1492c1b4a7e6SDavid S. Miller 
1493c1b4a7e6SDavid S. Miller 
1494a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1495c1b4a7e6SDavid S. Miller  * sent now.
1496c1b4a7e6SDavid S. Miller  */
1497a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1498c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1499c1b4a7e6SDavid S. Miller {
1500c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of
1501c1b4a7e6SDavid S. Miller 	 * the write_queue (they have no chance to get new data).
1502c1b4a7e6SDavid S. Miller 	 *
1503c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1504c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1505c1b4a7e6SDavid S. Miller 	 */
1506c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1507a2a385d6SEric Dumazet 		return true;
1508c1b4a7e6SDavid S. Miller 
15099b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
15109b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1511a2a385d6SEric Dumazet 		return true;
1512c1b4a7e6SDavid S. Miller 
1513d4589926SEric Dumazet 	if (!tcp_nagle_check(skb->len < cur_mss, tp, cur_mss, nonagle))
1514a2a385d6SEric Dumazet 		return true;
1515c1b4a7e6SDavid S. Miller 
1516a2a385d6SEric Dumazet 	return false;
1517c1b4a7e6SDavid S. Miller }
1518c1b4a7e6SDavid S. Miller 
1519c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1520a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1521a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1522056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1523c1b4a7e6SDavid S. Miller {
1524c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1525c1b4a7e6SDavid S. Miller 
1526c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1527c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1528c1b4a7e6SDavid S. Miller 
152990840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1530c1b4a7e6SDavid S. Miller }
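
/* Worked example (illustrative): only the first cur_mss bytes must fit:
 * if tcp_wnd_end(tp) - seq = 1460 = cur_mss, even a 4380-byte skb passes,
 * because end_seq is capped at seq + cur_mss for this test.
 */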
1531c1b4a7e6SDavid S. Miller 
1532fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1533c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1534c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1535c1b4a7e6SDavid S. Miller  */
1536cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1537c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1538c1b4a7e6SDavid S. Miller {
1539cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1540c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1541c1b4a7e6SDavid S. Miller 
1542846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1543c1b4a7e6SDavid S. Miller 
1544c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1545c1b4a7e6SDavid S. Miller 		return 0;
1546c1b4a7e6SDavid S. Miller 
1547c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1548056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1549c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1550c1b4a7e6SDavid S. Miller 
1551c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1552c1b4a7e6SDavid S. Miller }
1553c1b4a7e6SDavid S. Miller 
155467edfef7SAndi Kleen /* Test if sending is allowed right now. */
1555a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1556c1b4a7e6SDavid S. Miller {
1557cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1558fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1559c1b4a7e6SDavid S. Miller 
1560a02cec21SEric Dumazet 	return skb &&
15610c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1562c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1563a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1564c1b4a7e6SDavid S. Miller }
1565c1b4a7e6SDavid S. Miller 
1566c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1567c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1568c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1569c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1570c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1571c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1572c1b4a7e6SDavid S. Miller  */
1573056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1574c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1575c1b4a7e6SDavid S. Miller {
1576c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1577c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
15789ce01461SIlpo Järvinen 	u8 flags;
1579c1b4a7e6SDavid S. Miller 
1580c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1581c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1582c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1583c1b4a7e6SDavid S. Miller 
1584c4ead4c5SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp);
1585c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1586c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1587c1b4a7e6SDavid S. Miller 
15883ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
15893ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1590b60b49eaSHerbert Xu 	buff->truesize += nlen;
1591c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1592c1b4a7e6SDavid S. Miller 
1593c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1594c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1595c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1596c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1597c1b4a7e6SDavid S. Miller 
1598c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
15994de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
16004de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
16014de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1602c1b4a7e6SDavid S. Miller 
1603c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1604c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1605c1b4a7e6SDavid S. Miller 
160684fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1607c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1608c1b4a7e6SDavid S. Miller 
1609c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1610846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1611846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1612c1b4a7e6SDavid S. Miller 
1613c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1614c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1615fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1616c1b4a7e6SDavid S. Miller 
1617c1b4a7e6SDavid S. Miller 	return 0;
1618c1b4a7e6SDavid S. Miller }
1619c1b4a7e6SDavid S. Miller 
1620c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1621c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1622c1b4a7e6SDavid S. Miller  *
1623c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1624c1b4a7e6SDavid S. Miller  */
1625a2a385d6SEric Dumazet static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1626c1b4a7e6SDavid S. Miller {
16279e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
16286687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1629c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1630ad9f4f50SEric Dumazet 	int win_divisor;
1631c1b4a7e6SDavid S. Miller 
16324de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1633ae8064acSJohn Heffner 		goto send_now;
1634c1b4a7e6SDavid S. Miller 
16356687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1636ae8064acSJohn Heffner 		goto send_now;
1637ae8064acSJohn Heffner 
1638ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1639bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1640a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1641ae8064acSJohn Heffner 		goto send_now;
1642908a75c1SDavid S. Miller 
1643c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1644c1b4a7e6SDavid S. Miller 
1645056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1646c1b4a7e6SDavid S. Miller 
164790840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1648c1b4a7e6SDavid S. Miller 
1649c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1650c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1651c1b4a7e6SDavid S. Miller 
1652c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1653c1b4a7e6SDavid S. Miller 
1654ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
16551485348dSBen Hutchings 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
165695bd09ebSEric Dumazet 			   tp->xmit_size_goal_segs * tp->mss_cache))
1657ae8064acSJohn Heffner 		goto send_now;
1658ba244fe9SDavid S. Miller 
165962ad2761SIlpo Järvinen 	/* An skb in the middle of the queue won't get more data; is it fully sendable already? */
166062ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
166162ad2761SIlpo Järvinen 		goto send_now;
166262ad2761SIlpo Järvinen 
1663ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1664ad9f4f50SEric Dumazet 	if (win_divisor) {
1665c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1666c1b4a7e6SDavid S. Miller 
1667c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1668c1b4a7e6SDavid S. Miller 		 * just use it.
1669c1b4a7e6SDavid S. Miller 		 */
1670ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1671c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1672ae8064acSJohn Heffner 			goto send_now;
1673c1b4a7e6SDavid S. Miller 	} else {
1674c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1675c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1676c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1677c1b4a7e6SDavid S. Miller 		 * then send now.
1678c1b4a7e6SDavid S. Miller 		 */
16796b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1680ae8064acSJohn Heffner 			goto send_now;
1681c1b4a7e6SDavid S. Miller 	}
1682c1b4a7e6SDavid S. Miller 
1683f4541d60SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1684f4541d60SEric Dumazet 	 * Do not rearm the timer if already set to not break TCP ACK clocking.
1685f4541d60SEric Dumazet 	 */
1686f4541d60SEric Dumazet 	if (!tp->tso_deferred)
1687ae8064acSJohn Heffner 		tp->tso_deferred = 1 | (jiffies << 1);
1688ae8064acSJohn Heffner 
1689a2a385d6SEric Dumazet 	return true;
1690ae8064acSJohn Heffner 
1691ae8064acSJohn Heffner send_now:
1692ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1693a2a385d6SEric Dumazet 	return false;
1694c1b4a7e6SDavid S. Miller }
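
/* Worked example (illustrative): with tcp_tso_win_divisor = 3 (the
 * default), snd_wnd = 60000 and snd_cwnd * mss_cache = 30000 give
 * chunk = 10000; once min(send_win, cong_win) reaches 10000 bytes we
 * stop deferring and send.
 */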
1695c1b4a7e6SDavid S. Miller 
16965d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
169767edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
169867edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
169967edfef7SAndi Kleen  * changes resulting in larger path MTUs.
170067edfef7SAndi Kleen  *
17015d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
17025d424d5aSJohn Heffner  *         1 if a probe was sent,
1703056834d9SIlpo Järvinen  *         -1 otherwise
1704056834d9SIlpo Järvinen  */
17055d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
17065d424d5aSJohn Heffner {
17075d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
17085d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
17095d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
17105d424d5aSJohn Heffner 	int len;
17115d424d5aSJohn Heffner 	int probe_size;
171291cc17c0SIlpo Järvinen 	int size_needed;
17135d424d5aSJohn Heffner 	int copy;
17145d424d5aSJohn Heffner 	int mss_now;
17155d424d5aSJohn Heffner 
17165d424d5aSJohn Heffner 	/* Not currently probing/verifying,
17175d424d5aSJohn Heffner 	 * not in recovery,
17185d424d5aSJohn Heffner 	 * have enough cwnd, and
17195d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
17205d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
17215d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
17225d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
17235d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1724cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
17255d424d5aSJohn Heffner 		return -1;
17265d424d5aSJohn Heffner 
17275d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
17280c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
17295d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
173091cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
17315d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
17325d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
17335d424d5aSJohn Heffner 		return -1;
17345d424d5aSJohn Heffner 	}
17355d424d5aSJohn Heffner 
17365d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
17377f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
17385d424d5aSJohn Heffner 		return -1;
17395d424d5aSJohn Heffner 
174091cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
17415d424d5aSJohn Heffner 		return -1;
174290840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
17435d424d5aSJohn Heffner 		return 0;
17445d424d5aSJohn Heffner 
1745d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1746d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1747d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
17485d424d5aSJohn Heffner 			return -1;
17495d424d5aSJohn Heffner 		else
17505d424d5aSJohn Heffner 			return 0;
17515d424d5aSJohn Heffner 	}
17525d424d5aSJohn Heffner 
17535d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
17545d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
17555d424d5aSJohn Heffner 		return -1;
17563ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
17573ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
17585d424d5aSJohn Heffner 
1759fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
17605d424d5aSJohn Heffner 
17615d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
17625d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
17634de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
17645d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
17655d424d5aSJohn Heffner 	nskb->csum = 0;
176684fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
17675d424d5aSJohn Heffner 
176850c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
176950c4817eSIlpo Järvinen 
17705d424d5aSJohn Heffner 	len = 0;
1771234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
17725d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
17735d424d5aSJohn Heffner 		if (nskb->ip_summed)
17745d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
17755d424d5aSJohn Heffner 		else
17765d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1777056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1778056834d9SIlpo Järvinen 							    copy, nskb->csum);
17795d424d5aSJohn Heffner 
17805d424d5aSJohn Heffner 		if (skb->len <= copy) {
17815d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
17825d424d5aSJohn Heffner 			 * Throw it away. */
17834de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1784fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
17853ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
17865d424d5aSJohn Heffner 		} else {
17874de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1788a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
17895d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
17905d424d5aSJohn Heffner 				skb_pull(skb, copy);
179184fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1792056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1793056834d9SIlpo Järvinen 								 skb->len, 0);
17945d424d5aSJohn Heffner 			} else {
17955d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
17965d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
17975d424d5aSJohn Heffner 			}
17985d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
17995d424d5aSJohn Heffner 		}
18005d424d5aSJohn Heffner 
18015d424d5aSJohn Heffner 		len += copy;
1802234b6860SIlpo Järvinen 
1803234b6860SIlpo Järvinen 		if (len >= probe_size)
1804234b6860SIlpo Järvinen 			break;
18055d424d5aSJohn Heffner 	}
18065d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
18075d424d5aSJohn Heffner 
18085d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
18095d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
18105d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
18115d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
18125d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
18135d424d5aSJohn Heffner 		 * effectively two packets. */
18145d424d5aSJohn Heffner 		tp->snd_cwnd--;
181566f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
18165d424d5aSJohn Heffner 
18175d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
18180e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
18190e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
18205d424d5aSJohn Heffner 
18215d424d5aSJohn Heffner 		return 1;
18225d424d5aSJohn Heffner 	}
18235d424d5aSJohn Heffner 
18245d424d5aSJohn Heffner 	return -1;
18255d424d5aSJohn Heffner }
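
/* Worked example (illustrative): with mss_cache = 1400 and
 * reordering = 3, probe_size = 2800 and size_needed = 2800 + 4 * 1400 =
 * 8400, so both the unsent backlog and snd_wnd must reach 8400 bytes
 * before a probe is attempted.
 */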
18265d424d5aSJohn Heffner 
18271da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
18281da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
18291da177e4SLinus Torvalds  * window for us.
18301da177e4SLinus Torvalds  *
1831f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1832f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1833f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1834f8269a49SIlpo Järvinen  *
18356ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
18366ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
18376ba8a3b1SNandita Dukkipati  *
1838a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
1839a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
18401da177e4SLinus Torvalds  */
1841a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1842d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
18431da177e4SLinus Torvalds {
18441da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
184592df7b51SDavid S. Miller 	struct sk_buff *skb;
1846c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1847c1b4a7e6SDavid S. Miller 	int cwnd_quota;
18485d424d5aSJohn Heffner 	int result;
18491da177e4SLinus Torvalds 
1850c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
18515d424d5aSJohn Heffner 
1852d5dd9175SIlpo Järvinen 	if (!push_one) {
18535d424d5aSJohn Heffner 		/* Do MTU probing. */
1854d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1855d5dd9175SIlpo Järvinen 		if (!result) {
1856a2a385d6SEric Dumazet 			return false;
18575d424d5aSJohn Heffner 		} else if (result > 0) {
18585d424d5aSJohn Heffner 			sent_pkts = 1;
18595d424d5aSJohn Heffner 		}
1860d5dd9175SIlpo Järvinen 	}
18615d424d5aSJohn Heffner 
1862fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1863c8ac3774SHerbert Xu 		unsigned int limit;
1864c8ac3774SHerbert Xu 
1865b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1866c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1867c1b4a7e6SDavid S. Miller 
1868ec342325SAndrew Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1869ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
1870ec342325SAndrew Vagin 
1871b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
18726ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
18736ba8a3b1SNandita Dukkipati 			if (push_one == 2)
18746ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
18756ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
18766ba8a3b1SNandita Dukkipati 			else
1877b68e9f85SHerbert Xu 				break;
18786ba8a3b1SNandita Dukkipati 		}
1879b68e9f85SHerbert Xu 
1880b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1881b68e9f85SHerbert Xu 			break;
1882b68e9f85SHerbert Xu 
1883c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1884aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1885aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1886aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1887aa93466bSDavid S. Miller 				break;
1888c1b4a7e6SDavid S. Miller 		} else {
1889d5dd9175SIlpo Järvinen 			if (!push_one && tcp_tso_should_defer(sk, skb))
1890aa93466bSDavid S. Miller 				break;
1891c1b4a7e6SDavid S. Miller 		}
1892aa93466bSDavid S. Miller 
1893c9eeec26SEric Dumazet 		/* TCP Small Queues :
1894c9eeec26SEric Dumazet 		 * Limit queued data in qdisc/devices to two packets or ~1 ms worth.
1895c9eeec26SEric Dumazet 		 * This allows for :
1896c9eeec26SEric Dumazet 		 *  - better RTT estimation and ACK scheduling
1897c9eeec26SEric Dumazet 		 *  - faster recovery
1898c9eeec26SEric Dumazet 		 *  - high rates
189998e09386SEric Dumazet 		 * Alas, some drivers / subsystems require a fair amount
190098e09386SEric Dumazet 		 * of queued bytes to ensure line rate.
190198e09386SEric Dumazet 		 * One example is wifi aggregation (802.11 AMPDU)
190246d3ceabSEric Dumazet 		 */
190398e09386SEric Dumazet 		limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
190498e09386SEric Dumazet 			      sk->sk_pacing_rate >> 10);
1905c9eeec26SEric Dumazet 
1906c9eeec26SEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
190746d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1908bf06200eSJohn Ogness 			/* It is possible TX completion already happened
1909bf06200eSJohn Ogness 			 * before we set TSQ_THROTTLED, so we must
1910bf06200eSJohn Ogness 			 * test the condition again.
1911bf06200eSJohn Ogness 			 * We abuse smp_mb__after_clear_bit() because
1912bf06200eSJohn Ogness 			 * there is no smp_mb__after_set_bit() yet
1913bf06200eSJohn Ogness 			 */
1914bf06200eSJohn Ogness 			smp_mb__after_clear_bit();
1915bf06200eSJohn Ogness 			if (atomic_read(&sk->sk_wmem_alloc) > limit)
191646d3ceabSEric Dumazet 				break;
191746d3ceabSEric Dumazet 		}
1918c9eeec26SEric Dumazet 
1919c8ac3774SHerbert Xu 		limit = mss_now;
1920f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
19210e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
19221485348dSBen Hutchings 						    min_t(unsigned int,
19231485348dSBen Hutchings 							  cwnd_quota,
1924d4589926SEric Dumazet 							  sk->sk_gso_max_segs),
1925d4589926SEric Dumazet 						    nonagle);
1926c8ac3774SHerbert Xu 
1927c8ac3774SHerbert Xu 		if (skb->len > limit &&
1928c4ead4c5SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
19291da177e4SLinus Torvalds 			break;
19301da177e4SLinus Torvalds 
19311da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1932c1b4a7e6SDavid S. Miller 
1933d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
19341da177e4SLinus Torvalds 			break;
19351da177e4SLinus Torvalds 
1936ec342325SAndrew Vagin repair:
19371da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
19381da177e4SLinus Torvalds 		 * This call will increment packets_out.
19391da177e4SLinus Torvalds 		 */
194066f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
19411da177e4SLinus Torvalds 
19421da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1943a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
1944d5dd9175SIlpo Järvinen 
1945d5dd9175SIlpo Järvinen 		if (push_one)
1946d5dd9175SIlpo Järvinen 			break;
19471da177e4SLinus Torvalds 	}
19481da177e4SLinus Torvalds 
1949aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
1950684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
1951684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
19526ba8a3b1SNandita Dukkipati 
19536ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
19546ba8a3b1SNandita Dukkipati 		if (push_one != 2)
19556ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
19569e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
1957a2a385d6SEric Dumazet 		return false;
19581da177e4SLinus Torvalds 	}
19596ba8a3b1SNandita Dukkipati 	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
19606ba8a3b1SNandita Dukkipati }
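
/* Worked example (illustrative) for the TSQ limit above: at a ~10 Gbit/s
 * pacing rate (~1.25 GB/s), sk_pacing_rate >> 10 is roughly 1.2 MB,
 * about 1 ms worth of data, so that bounds the per-socket qdisc backlog;
 * at lower rates the sysctl_tcp_limit_output_bytes floor applies instead.
 */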
19616ba8a3b1SNandita Dukkipati 
19626ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
19636ba8a3b1SNandita Dukkipati {
19646ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
19656ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
19666ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
19676ba8a3b1SNandita Dukkipati 	u32 rtt = tp->srtt >> 3;
19686ba8a3b1SNandita Dukkipati 
19696ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
19706ba8a3b1SNandita Dukkipati 		return false;
19716ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
19726ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
19736ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
19746ba8a3b1SNandita Dukkipati 		return false;
19756ba8a3b1SNandita Dukkipati 	}
19766ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
19776ba8a3b1SNandita Dukkipati 	 * finishes.
19786ba8a3b1SNandita Dukkipati 	 */
19796ba8a3b1SNandita Dukkipati 	if (sk->sk_state == TCP_SYN_RECV)
19806ba8a3b1SNandita Dukkipati 		return false;
19816ba8a3b1SNandita Dukkipati 
19826ba8a3b1SNandita Dukkipati 	/* TLP is only scheduled when next timer event is RTO. */
19836ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
19846ba8a3b1SNandita Dukkipati 		return false;
19856ba8a3b1SNandita Dukkipati 
19866ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
19876ba8a3b1SNandita Dukkipati 	 * in Open state that are limited by either cwnd or the application.
19886ba8a3b1SNandita Dukkipati 	 */
19894a5ab4e2SEric Dumazet 	if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
19906ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
19916ba8a3b1SNandita Dukkipati 		return false;
19926ba8a3b1SNandita Dukkipati 
19936ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
19946ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
19956ba8a3b1SNandita Dukkipati 		return false;
19966ba8a3b1SNandita Dukkipati 
19976ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
19986ba8a3b1SNandita Dukkipati 	 * for delayed ack when there's one outstanding packet.
19996ba8a3b1SNandita Dukkipati 	 */
20006ba8a3b1SNandita Dukkipati 	timeout = rtt << 1;
20016ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
20026ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
20036ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
20046ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
20056ba8a3b1SNandita Dukkipati 
20066ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
20076ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
20086ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
20096ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
20106ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
20116ba8a3b1SNandita Dukkipati 		if (delta > 0)
20126ba8a3b1SNandita Dukkipati 			timeout = delta;
20136ba8a3b1SNandita Dukkipati 	}
20146ba8a3b1SNandita Dukkipati 
20156ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
20166ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
20176ba8a3b1SNandita Dukkipati 	return true;
20186ba8a3b1SNandita Dukkipati }
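
/* Worked example (illustrative): with srtt corresponding to a 100 ms RTT,
 * timeout starts at 2 * rtt = 200 ms; with exactly one packet outstanding
 * it is raised to max(200 ms, 150 ms + TCP_DELACK_MAX) = 350 ms, and it
 * is never allowed below 10 ms.
 */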
20196ba8a3b1SNandita Dukkipati 
20206ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else
20216ba8a3b1SNandita Dukkipati  * retransmit the last segment.
20226ba8a3b1SNandita Dukkipati  */
20236ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
20246ba8a3b1SNandita Dukkipati {
20259b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
20266ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
20276ba8a3b1SNandita Dukkipati 	int pcount;
20286ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
20296ba8a3b1SNandita Dukkipati 	int err = -1;
20306ba8a3b1SNandita Dukkipati 
20316ba8a3b1SNandita Dukkipati 	if (tcp_send_head(sk) != NULL) {
20326ba8a3b1SNandita Dukkipati 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
20336ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20346ba8a3b1SNandita Dukkipati 	}
20356ba8a3b1SNandita Dukkipati 
20369b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
20379b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
20389b717a8dSNandita Dukkipati 		goto rearm_timer;
20399b717a8dSNandita Dukkipati 
20406ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
20416ba8a3b1SNandita Dukkipati 	skb = tcp_write_queue_tail(sk);
20426ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
20436ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20446ba8a3b1SNandita Dukkipati 
20456ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
20466ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
20476ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20486ba8a3b1SNandita Dukkipati 
20496ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
20506ba8a3b1SNandita Dukkipati 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
20516ba8a3b1SNandita Dukkipati 			goto rearm_timer;
20526ba8a3b1SNandita Dukkipati 		skb = tcp_write_queue_tail(sk);
20536ba8a3b1SNandita Dukkipati 	}
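	/* Worked example: a 3100-byte tail skb with mss = 1460 has
	 * pcount = 3 and 3100 > 2 * 1460, so it is split at offset 2920;
	 * the new 180-byte tail is what the probe retransmits below.
	 */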
20546ba8a3b1SNandita Dukkipati 
20556ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
20566ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20576ba8a3b1SNandita Dukkipati 
20586ba8a3b1SNandita Dukkipati 	/* Probe with zero data doesn't trigger fast recovery. */
20596ba8a3b1SNandita Dukkipati 	if (skb->len > 0)
20606ba8a3b1SNandita Dukkipati 		err = __tcp_retransmit_skb(sk, skb);
20616ba8a3b1SNandita Dukkipati 
20629b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
20639b717a8dSNandita Dukkipati 	if (likely(!err))
20649b717a8dSNandita Dukkipati 		tp->tlp_high_seq = tp->snd_nxt;
20659b717a8dSNandita Dukkipati 
20666ba8a3b1SNandita Dukkipati rearm_timer:
20676ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
20686ba8a3b1SNandita Dukkipati 				  inet_csk(sk)->icsk_rto,
20696ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
20706ba8a3b1SNandita Dukkipati 
20716ba8a3b1SNandita Dukkipati 	if (likely(!err))
20726ba8a3b1SNandita Dukkipati 		NET_INC_STATS_BH(sock_net(sk),
20736ba8a3b1SNandita Dukkipati 				 LINUX_MIB_TCPLOSSPROBES);
20746ba8a3b1SNandita Dukkipati 	return;
20751da177e4SLinus Torvalds }
20761da177e4SLinus Torvalds 
2077a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2078a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2079a762a980SDavid S. Miller  * The socket must be locked by the caller.
2080a762a980SDavid S. Miller  */
20819e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
20829e412ba7SIlpo Järvinen 			       int nonagle)
2083a762a980SDavid S. Miller {
2084726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2085726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2086726e07a8SIlpo Järvinen 	 * all will be happy.
2087726e07a8SIlpo Järvinen 	 */
2088726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2089726e07a8SIlpo Järvinen 		return;
2090726e07a8SIlpo Järvinen 
209199a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
209299a1dec7SMel Gorman 			   sk_gfp_atomic(sk, GFP_ATOMIC)))
20939e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2094a762a980SDavid S. Miller }
2095a762a980SDavid S. Miller 
2096c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head. A true push of
2097c1b4a7e6SDavid S. Miller  * pending frames is still required to set up the probe timer etc.
2098c1b4a7e6SDavid S. Miller  */
2099c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2100c1b4a7e6SDavid S. Miller {
2101fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2102c1b4a7e6SDavid S. Miller 
2103c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2104c1b4a7e6SDavid S. Miller 
2105d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2106c1b4a7e6SDavid S. Miller }
2107c1b4a7e6SDavid S. Miller 
21081da177e4SLinus Torvalds /* This function returns the amount that we can raise the
21091da177e4SLinus Torvalds  * usable window based on the following constraints
21101da177e4SLinus Torvalds  *
21111da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
21121da177e4SLinus Torvalds  * 2. We limit memory per socket
21131da177e4SLinus Torvalds  *
21141da177e4SLinus Torvalds  * RFC 1122:
21151da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
21161da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
21171da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
21181da177e4SLinus Torvalds  *
21191da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
21201da177e4SLinus Torvalds  * it at least MSS bytes.
21211da177e4SLinus Torvalds  *
21221da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
21231da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
21241da177e4SLinus Torvalds  *
21251da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
21261da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
21271da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
21281da177e4SLinus Torvalds  * window to always advance by a single byte.
21291da177e4SLinus Torvalds  *
21301da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
21311da177e4SLinus Torvalds  * then this will not be a problem.
21321da177e4SLinus Torvalds  *
21331da177e4SLinus Torvalds  * BSD seems to make the following compromise:
21341da177e4SLinus Torvalds  *
21351da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
21361da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
21371da177e4SLinus Torvalds  *	then set the window to 0.
21381da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
21391da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
21401da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
21411da177e4SLinus Torvalds  *
21421da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
21431da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
21441da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
21451da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
21461da177e4SLinus Torvalds  * because the pipeline is full.
21471da177e4SLinus Torvalds  *
21481da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
21491da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
21501da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
21511da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
21521da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
21531da177e4SLinus Torvalds  *
21541da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
21551da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
21561da177e4SLinus Torvalds  *
21571da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
21581da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
21591da177e4SLinus Torvalds  */
21601da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
21611da177e4SLinus Torvalds {
2162463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
21631da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2164caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
21651da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
21661da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
21671da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
21681da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
21691da177e4SLinus Torvalds 	 */
2170463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
21711da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
21721da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
21731da177e4SLinus Torvalds 	int window;
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds 	if (mss > full_space)
21761da177e4SLinus Torvalds 		mss = full_space;
21771da177e4SLinus Torvalds 
2178b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2179463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
21801da177e4SLinus Torvalds 
2181180d8cd9SGlauber Costa 		if (sk_under_memory_pressure(sk))
2182056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2183056834d9SIlpo Järvinen 					       4U * tp->advmss);
21841da177e4SLinus Torvalds 
21851da177e4SLinus Torvalds 		if (free_space < mss)
21861da177e4SLinus Torvalds 			return 0;
21871da177e4SLinus Torvalds 	}
21881da177e4SLinus Torvalds 
21891da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
21901da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
21931da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
21941da177e4SLinus Torvalds 	 */
21951da177e4SLinus Torvalds 	window = tp->rcv_wnd;
21961da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
21971da177e4SLinus Torvalds 		window = free_space;
21981da177e4SLinus Torvalds 
21991da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
22001da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
22011da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
22021da177e4SLinus Torvalds 		 */
22031da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
22041da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
22051da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
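		/* Worked example: with rcv_wscale = 7 (128-byte units) and
		 * window = 100, plain truncation would advertise 100 >> 7 = 0,
		 * a bogus zero window; rounding up yields 1 << 7 = 128 instead.
		 */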
22061da177e4SLinus Torvalds 	} else {
22071da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
22081da177e4SLinus Torvalds 		 * Window clamp already applied above.
22091da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
22101da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
22111da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
22121da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
22131da177e4SLinus Torvalds 		 * is too small.
22141da177e4SLinus Torvalds 		 */
22151da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
22161da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
221784565070SJohn Heffner 		else if (mss == full_space &&
2218b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
221984565070SJohn Heffner 			window = free_space;
22201da177e4SLinus Torvalds 	}
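	/* Worked example for the unscaled branch, assuming mss = 1460:
	 * with free_space = 10000, a stale window of 2920 is rounded to
	 * (10000 / 1460) * 1460 = 8760, while a window of 9000 is already
	 * within one mss of free_space and is kept, skipping the divide.
	 */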
22211da177e4SLinus Torvalds 
22221da177e4SLinus Torvalds 	return window;
22231da177e4SLinus Torvalds }
22241da177e4SLinus Torvalds 
22254a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
22264a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
22271da177e4SLinus Torvalds {
22281da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2229fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2230058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
22311da177e4SLinus Torvalds 
2232058dc334SIlpo Järvinen 	skb_size = skb->len;
2233058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
22341da177e4SLinus Torvalds 
2235058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
22361da177e4SLinus Torvalds 
22376859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2238a6963a6bSIlpo Järvinen 
2239fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
22401da177e4SLinus Torvalds 
2241058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
22421a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
22431da177e4SLinus Torvalds 
224452d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
224552d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
22461da177e4SLinus Torvalds 
224784fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
22481da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
22491da177e4SLinus Torvalds 
22501da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
22511da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
22521da177e4SLinus Torvalds 
2253e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
22544de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
22571da177e4SLinus Torvalds 	 * packet counting does not break.
22581da177e4SLinus Torvalds 	 */
22594828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2260b7689205SIlpo Järvinen 
2261b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2262ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2263ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2264ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2265b7689205SIlpo Järvinen 
2266797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2267797108d1SIlpo Järvinen 
22683ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
22691da177e4SLinus Torvalds }
22701da177e4SLinus Torvalds 
227167edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2272a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
22734a17fc3aSIlpo Järvinen {
22744a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2275a2a385d6SEric Dumazet 		return false;
22764a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
22774a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2278a2a385d6SEric Dumazet 		return false;
22794a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2280a2a385d6SEric Dumazet 		return false;
22814a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2282a2a385d6SEric Dumazet 		return false;
22834a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd data could be invented */
22844a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2285a2a385d6SEric Dumazet 		return false;
22864a17fc3aSIlpo Järvinen 
2287a2a385d6SEric Dumazet 	return true;
22884a17fc3aSIlpo Järvinen }
22894a17fc3aSIlpo Järvinen 
229067edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
229167edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
229267edfef7SAndi Kleen  */
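/* Worked example: two queued 300-byte retransmit candidates easily fit
 * within one cur_mss of 1460, so the second is merged into the first by
 * tcp_collapse_retrans() and a single 600-byte segment goes out.
 */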
22934a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
22944a17fc3aSIlpo Järvinen 				     int space)
22954a17fc3aSIlpo Järvinen {
22964a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
22974a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2298a2a385d6SEric Dumazet 	bool first = true;
22994a17fc3aSIlpo Järvinen 
23004a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
23014a17fc3aSIlpo Järvinen 		return;
23024de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
23034a17fc3aSIlpo Järvinen 		return;
23044a17fc3aSIlpo Järvinen 
23054a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
23064a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
23074a17fc3aSIlpo Järvinen 			break;
23084a17fc3aSIlpo Järvinen 
23094a17fc3aSIlpo Järvinen 		space -= skb->len;
23104a17fc3aSIlpo Järvinen 
23114a17fc3aSIlpo Järvinen 		if (first) {
2312a2a385d6SEric Dumazet 			first = false;
23134a17fc3aSIlpo Järvinen 			continue;
23144a17fc3aSIlpo Järvinen 		}
23154a17fc3aSIlpo Järvinen 
23164a17fc3aSIlpo Järvinen 		if (space < 0)
23174a17fc3aSIlpo Järvinen 			break;
23184a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
23194a17fc3aSIlpo Järvinen 		 * the data in the second
23204a17fc3aSIlpo Järvinen 		 */
2321a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
23224a17fc3aSIlpo Järvinen 			break;
23234a17fc3aSIlpo Järvinen 
23244a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
23254a17fc3aSIlpo Järvinen 			break;
23264a17fc3aSIlpo Järvinen 
23274a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
23284a17fc3aSIlpo Järvinen 	}
23294a17fc3aSIlpo Järvinen }
23304a17fc3aSIlpo Järvinen 
23311da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
23321da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
23331da177e4SLinus Torvalds  * error occurred which prevented the send.
23341da177e4SLinus Torvalds  */
233593b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
23361da177e4SLinus Torvalds {
23371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
23385d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
23397d227cd2SSridhar Samudrala 	unsigned int cur_mss;
2340*c84a5711SYuchung Cheng 	int err;
23411da177e4SLinus Torvalds 
23425d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
23435d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
23445d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
23455d424d5aSJohn Heffner 	}
23465d424d5aSJohn Heffner 
23471da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2348caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
23491da177e4SLinus Torvalds 	 */
23501da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
23511da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
23521da177e4SLinus Torvalds 		return -EAGAIN;
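	/* Worked example: with sk_wmem_queued = 64 KB and a larger sndbuf,
	 * the retransmit is deferred with -EAGAIN once allocated skb
	 * memory exceeds 64 + 16 = 80 KB.
	 */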
23531da177e4SLinus Torvalds 
23541da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
23551da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
23561da177e4SLinus Torvalds 			BUG();
23571da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
23581da177e4SLinus Torvalds 			return -ENOMEM;
23591da177e4SLinus Torvalds 	}
23601da177e4SLinus Torvalds 
23617d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
23627d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
23637d227cd2SSridhar Samudrala 
23640c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
23657d227cd2SSridhar Samudrala 
23661da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
23671da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
23681da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
23691da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
23701da177e4SLinus Torvalds 	 */
23719d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
23729d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
23731da177e4SLinus Torvalds 		return -EAGAIN;
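	/* Worked example: if snd_una = 1000 and the peer shrank the window
	 * so that tcp_wnd_end() = 1000, the skb at seq 1000 may still be
	 * retransmitted as a zero window probe, while an skb at seq 2000
	 * gets -EAGAIN until the window reopens.
	 */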
23741da177e4SLinus Torvalds 
23751da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
2376846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
23771da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
237802276f3cSIlpo Järvinen 	} else {
23799eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
23809eb9362eSIlpo Järvinen 
23819eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
2382c52e2421SEric Dumazet 			if (skb_unclone(skb, GFP_ATOMIC))
2383c52e2421SEric Dumazet 				return -ENOMEM;
238402276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
23859eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
23869eb9362eSIlpo Järvinen 		}
23871da177e4SLinus Torvalds 	}
23881da177e4SLinus Torvalds 
23891da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
23901da177e4SLinus Torvalds 
23911da177e4SLinus Torvalds 	/* Make a copy if the SKB clone we made for the first transmission
23921da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
23931da177e4SLinus Torvalds 	 */
23941da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
23951da177e4SLinus Torvalds 
239650bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
239750bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
239850bceae9SThomas Graf 	 * beyond what csum_start can cover.
239950bceae9SThomas Graf 	 */
240050bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
240150bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
2402117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2403117632e6SEric Dumazet 						   GFP_ATOMIC);
2404*c84a5711SYuchung Cheng 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2405117632e6SEric Dumazet 			     -ENOBUFS;
2406117632e6SEric Dumazet 	} else {
2407*c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2408117632e6SEric Dumazet 	}
2409*c84a5711SYuchung Cheng 
2410*c84a5711SYuchung Cheng 	if (likely(!err))
2411*c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2412*c84a5711SYuchung Cheng 	return err;
241393b174adSYuchung Cheng }
241493b174adSYuchung Cheng 
241593b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
241693b174adSYuchung Cheng {
241793b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
241893b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
24191da177e4SLinus Torvalds 
24201da177e4SLinus Torvalds 	if (err == 0) {
24211da177e4SLinus Torvalds 		/* Update global TCP statistics. */
242281cc8a75SPavel Emelyanov 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
24231da177e4SLinus Torvalds 
24241da177e4SLinus Torvalds 		tp->total_retrans++;
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
24271da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2428e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
24291da177e4SLinus Torvalds 		}
24301da177e4SLinus Torvalds #endif
2431b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2432b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
24331da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
24341da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
24351da177e4SLinus Torvalds 
24361da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
24371da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
24381da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
24391da177e4SLinus Torvalds 
2440c24f691bSYuchung Cheng 		tp->undo_retrans += tcp_skb_pcount(skb);
24411da177e4SLinus Torvalds 
24421da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
24431da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
24441da177e4SLinus Torvalds 		 */
24451da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
244624ab6becSYuchung Cheng 	} else {
244724ab6becSYuchung Cheng 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
24481da177e4SLinus Torvalds 	}
24491da177e4SLinus Torvalds 	return err;
24501da177e4SLinus Torvalds }
24511da177e4SLinus Torvalds 
245267edfef7SAndi Kleen /* Check if forward retransmits are possible in the current
245367edfef7SAndi Kleen  * window/congestion state.
245467edfef7SAndi Kleen  */
2455a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2456b5afe7bcSIlpo Järvinen {
2457b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2458cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2459b5afe7bcSIlpo Järvinen 
2460b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2461b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2462a2a385d6SEric Dumazet 		return false;
2463b5afe7bcSIlpo Järvinen 
2464b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2465b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2466a2a385d6SEric Dumazet 		return false;
2467b5afe7bcSIlpo Järvinen 
2468b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward transmission
2469b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2470b5afe7bcSIlpo Järvinen 	 *
2471b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything while we have some new
2472b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2473b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2474b5afe7bcSIlpo Järvinen 	 */
2475b5afe7bcSIlpo Järvinen 
2476b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2477a2a385d6SEric Dumazet 		return false;
2478b5afe7bcSIlpo Järvinen 
2479a2a385d6SEric Dumazet 	return true;
2480b5afe7bcSIlpo Järvinen }
2481b5afe7bcSIlpo Järvinen 
24821da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
24831da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
24841da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
24851da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
24861da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a
24871da177e4SLinus Torvalds  * timeout-based retransmit packet might feed us FACK information
24881da177e4SLinus Torvalds  * again. If so, we use it to avoid unnecessary retransmissions.
24891da177e4SLinus Torvalds  */
24901da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
24911da177e4SLinus Torvalds {
24926687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
24931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24941da177e4SLinus Torvalds 	struct sk_buff *skb;
24950e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2496618d9f25SIlpo Järvinen 	u32 last_lost;
249761eb55f4SIlpo Järvinen 	int mib_idx;
24980e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
24996a438bbeSStephen Hemminger 
250045e77d31SIlpo Järvinen 	if (!tp->packets_out)
250145e77d31SIlpo Järvinen 		return;
250245e77d31SIlpo Järvinen 
250308ebd172SIlpo Järvinen 	if (!tp->lost_out)
250408ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
250508ebd172SIlpo Järvinen 
2506618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
25076a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2508618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2509618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2510618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2511618d9f25SIlpo Järvinen 	} else {
2512fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2513618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2514618d9f25SIlpo Järvinen 	}
25151da177e4SLinus Torvalds 
2516fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
25171da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
25181da177e4SLinus Torvalds 
2519fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2520fe067e8aSDavid S. Miller 			break;
25216a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
25220e1c54c2SIlpo Järvinen 		if (hole == NULL)
25236a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
25246a438bbeSStephen Hemminger 
25251da177e4SLinus Torvalds 		/* Assume this retransmit will generate
25261da177e4SLinus Torvalds 		 * only one packet for congestion window
25271da177e4SLinus Torvalds 		 * calculation purposes.  This works because
25281da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
25291da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
25301da177e4SLinus Torvalds 		 * packet counting works out.
25311da177e4SLinus Torvalds 		 */
25321da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
25331da177e4SLinus Torvalds 			return;
25340e1c54c2SIlpo Järvinen 
25350e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
25360e1c54c2SIlpo Järvinen begin_fwd:
25370e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2538006f582cSIlpo Järvinen 				break;
25390e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
25400e1c54c2SIlpo Järvinen 
25410e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2542618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
25430e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
25440e1c54c2SIlpo Järvinen 				break;
25450e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
25460e1c54c2SIlpo Järvinen 			if (hole != NULL) {
25470e1c54c2SIlpo Järvinen 				skb = hole;
25480e1c54c2SIlpo Järvinen 				hole = NULL;
25490e1c54c2SIlpo Järvinen 			}
25500e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
25510e1c54c2SIlpo Järvinen 			goto begin_fwd;
25520e1c54c2SIlpo Järvinen 
25530e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2554ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
25550e1c54c2SIlpo Järvinen 				hole = skb;
255661eb55f4SIlpo Järvinen 			continue;
25571da177e4SLinus Torvalds 
25580e1c54c2SIlpo Järvinen 		} else {
2559618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
25600e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
25610e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
25620e1c54c2SIlpo Järvinen 			else
25630e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
25640e1c54c2SIlpo Järvinen 		}
25650e1c54c2SIlpo Järvinen 
25660e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
256761eb55f4SIlpo Järvinen 			continue;
256840b215e5SPavel Emelyanov 
256924ab6becSYuchung Cheng 		if (tcp_retransmit_skb(sk, skb))
25701da177e4SLinus Torvalds 			return;
257124ab6becSYuchung Cheng 
2572de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
25731da177e4SLinus Torvalds 
2574684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2575a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2576a262f0cdSNandita Dukkipati 
2577fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2578463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
25793f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
25803f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
25811da177e4SLinus Torvalds 	}
25821da177e4SLinus Torvalds }
25831da177e4SLinus Torvalds 
25841da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This cannot be
25851da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
25861da177e4SLinus Torvalds  */
25871da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
25881da177e4SLinus Torvalds {
25891da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2590fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
25911da177e4SLinus Torvalds 	int mss_now;
25921da177e4SLinus Torvalds 
25931da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
25941da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
25951da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
25961da177e4SLinus Torvalds 	 */
25970c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
25981da177e4SLinus Torvalds 
2599fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
26004de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
26011da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
26021da177e4SLinus Torvalds 		tp->write_seq++;
26031da177e4SLinus Torvalds 	} else {
26041da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
26051da177e4SLinus Torvalds 		for (;;) {
2606aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2607aa133076SWu Fengguang 					       sk->sk_allocation);
26081da177e4SLinus Torvalds 			if (skb)
26091da177e4SLinus Torvalds 				break;
26101da177e4SLinus Torvalds 			yield();
26111da177e4SLinus Torvalds 		}
26121da177e4SLinus Torvalds 
26131da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
26141da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
26151da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2616e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2617a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
26181da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
26191da177e4SLinus Torvalds 	}
26209e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
26211da177e4SLinus Torvalds }
26221da177e4SLinus Torvalds 
26231da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
26241da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
26251da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
262665bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
26271da177e4SLinus Torvalds  */
2628dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
26291da177e4SLinus Torvalds {
26301da177e4SLinus Torvalds 	struct sk_buff *skb;
26311da177e4SLinus Torvalds 
26321da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
26331da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
26341da177e4SLinus Torvalds 	if (!skb) {
26354e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
26361da177e4SLinus Torvalds 		return;
26371da177e4SLinus Torvalds 	}
26381da177e4SLinus Torvalds 
26391da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
26401da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2641e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2642a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
26431da177e4SLinus Torvalds 	/* Send it off. */
26441da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2645dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
26464e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
264726af65cbSSridhar Samudrala 
264881cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
26491da177e4SLinus Torvalds }
26501da177e4SLinus Torvalds 
265167edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
265267edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
26531da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
26541da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
26551da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
26561da177e4SLinus Torvalds  */
26571da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
26581da177e4SLinus Torvalds {
26591da177e4SLinus Torvalds 	struct sk_buff *skb;
26601da177e4SLinus Torvalds 
2661fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
26624de075e0SEric Dumazet 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
266391df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
26641da177e4SLinus Torvalds 		return -EFAULT;
26651da177e4SLinus Torvalds 	}
26664de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
26671da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
26681da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
26691da177e4SLinus Torvalds 			if (nskb == NULL)
26701da177e4SLinus Torvalds 				return -ENOMEM;
2671fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
26721da177e4SLinus Torvalds 			skb_header_release(nskb);
2673fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
26743ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
26753ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
26763ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
26771da177e4SLinus Torvalds 			skb = nskb;
26781da177e4SLinus Torvalds 		}
26791da177e4SLinus Torvalds 
26804de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
26811da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
26821da177e4SLinus Torvalds 	}
26831da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2684dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
26851da177e4SLinus Torvalds }
26861da177e4SLinus Torvalds 
26874aea39c1SEric Dumazet /**
26884aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
26894aea39c1SEric Dumazet  * @sk: listener socket
26904aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
26914aea39c1SEric Dumazet  * @req: request_sock pointer
26924aea39c1SEric Dumazet  *
26934aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
26944aea39c1SEric Dumazet  * @dst is consumed : Caller should not use it again.
26954aea39c1SEric Dumazet  */
26961da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2697e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
26988336886fSJerry Chu 				struct tcp_fastopen_cookie *foc)
26991da177e4SLinus Torvalds {
2700bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
27012e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
27021da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27031da177e4SLinus Torvalds 	struct tcphdr *th;
27041da177e4SLinus Torvalds 	struct sk_buff *skb;
2705cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2706bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2707f5fff5dcSTom Quetchenbach 	int mss;
27081da177e4SLinus Torvalds 
2709eb8895deSPhil Oester 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
27104aea39c1SEric Dumazet 	if (unlikely(!skb)) {
27114aea39c1SEric Dumazet 		dst_release(dst);
27121da177e4SLinus Torvalds 		return NULL;
27134aea39c1SEric Dumazet 	}
27141da177e4SLinus Torvalds 	/* Reserve space for headers. */
27151da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
27161da177e4SLinus Torvalds 
27174aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
2718ca10b9e9SEric Dumazet 	security_skb_owned_by(skb, sk);
27191da177e4SLinus Torvalds 
27200dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
2721f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2722f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2723f5fff5dcSTom Quetchenbach 
272433ad798cSAdam Langley 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
272533ad798cSAdam Langley 		__u8 rcv_wscale;
272633ad798cSAdam Langley 		/* Set this up on the first call only */
272733ad798cSAdam Langley 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2728e88c64f0SHagen Paul Pfeifer 
2729e88c64f0SHagen Paul Pfeifer 		/* limit the window selection if the user enforce a smaller rx buffer */
2730e88c64f0SHagen Paul Pfeifer 		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2731e88c64f0SHagen Paul Pfeifer 		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2732e88c64f0SHagen Paul Pfeifer 			req->window_clamp = tcp_full_space(sk);
2733e88c64f0SHagen Paul Pfeifer 
273433ad798cSAdam Langley 		/* tcp_full_space because it is guaranteed to be the first packet */
273533ad798cSAdam Langley 		tcp_select_initial_window(tcp_full_space(sk),
2736f5fff5dcSTom Quetchenbach 			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
273733ad798cSAdam Langley 			&req->rcv_wnd,
273833ad798cSAdam Langley 			&req->window_clamp,
273933ad798cSAdam Langley 			ireq->wscale_ok,
274031d12926Slaurent chavey 			&rcv_wscale,
274131d12926Slaurent chavey 			dst_metric(dst, RTAX_INITRWND));
274233ad798cSAdam Langley 		ireq->rcv_wscale = rcv_wscale;
274333ad798cSAdam Langley 	}
2744cfb6eeb4SYOSHIFUJI Hideaki 
274533ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
27468b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
27478b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
27488b5f12d0SFlorian Westphal 		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
27498b5f12d0SFlorian Westphal 	else
27508b5f12d0SFlorian Westphal #endif
275133ad798cSAdam Langley 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
27521a2c6181SChristoph Paasch 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
27531a2c6181SChristoph Paasch 					     foc) + sizeof(*th);
275433ad798cSAdam Langley 
2755aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2756aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
27571da177e4SLinus Torvalds 
2758aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
27591da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
27601da177e4SLinus Torvalds 	th->syn = 1;
27611da177e4SLinus Torvalds 	th->ack = 1;
27621da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
2763b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
2764634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
2765e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
2766e870a8efSIlpo Järvinen 	 * not even correctly set)
2767e870a8efSIlpo Järvinen 	 */
2768e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2769a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
27704957faadSWilliam Allen Simpson 
27711da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
27728336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
27738336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
27741da177e4SLinus Torvalds 
27751da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2776600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2777bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
27781da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
2779aa2ea058STom Herbert 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2780cfb6eeb4SYOSHIFUJI Hideaki 
2781cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2782cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2783cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2784bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
278549a72dfbSAdam Langley 					       md5, NULL, req, skb);
2786cfb6eeb4SYOSHIFUJI Hideaki 	}
2787cfb6eeb4SYOSHIFUJI Hideaki #endif
2788cfb6eeb4SYOSHIFUJI Hideaki 
27891da177e4SLinus Torvalds 	return skb;
27901da177e4SLinus Torvalds }
27914bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
27921da177e4SLinus Torvalds 
279367edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
2794f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
27951da177e4SLinus Torvalds {
2796cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
27971da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27981da177e4SLinus Torvalds 	__u8 rcv_wscale;
27991da177e4SLinus Torvalds 
28001da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
28011da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
28021da177e4SLinus Torvalds 	 */
28031da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2804bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
28051da177e4SLinus Torvalds 
2806cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2807cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2808cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2809cfb6eeb4SYOSHIFUJI Hideaki #endif
2810cfb6eeb4SYOSHIFUJI Hideaki 
28111da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it to clamp */
28121da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
28131da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
28141da177e4SLinus Torvalds 	tp->max_window = 0;
28155d424d5aSJohn Heffner 	tcp_mtup_init(sk);
28161da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
28171da177e4SLinus Torvalds 
28181da177e4SLinus Torvalds 	if (!tp->window_clamp)
28191da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
28200dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
2821f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2822f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2823f5fff5dcSTom Quetchenbach 
28241da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
28251da177e4SLinus Torvalds 
2826e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforce a smaller rx buffer */
2827e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2828e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2829e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
2830e88c64f0SHagen Paul Pfeifer 
28311da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
28321da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
28331da177e4SLinus Torvalds 				  &tp->rcv_wnd,
28341da177e4SLinus Torvalds 				  &tp->window_clamp,
2835bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
283631d12926Slaurent chavey 				  &rcv_wscale,
283731d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
28381da177e4SLinus Torvalds 
28391da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
28401da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
28411da177e4SLinus Torvalds 
28421da177e4SLinus Torvalds 	sk->sk_err = 0;
28431da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
28441da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2845ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
28461da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
28471da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
284833f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
2849370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
2850ee995283SPavel Emelyanov 
2851ee995283SPavel Emelyanov 	if (likely(!tp->repair))
28521da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
2853c7781a6eSAndrew Vagin 	else
2854c7781a6eSAndrew Vagin 		tp->rcv_tstamp = tcp_time_stamp;
2855ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
2856ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
28571da177e4SLinus Torvalds 
2858463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2859463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
28601da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
28611da177e4SLinus Torvalds }
28621da177e4SLinus Torvalds 
2863783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2864783237e8SYuchung Cheng {
2865783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2866783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2867783237e8SYuchung Cheng 
2868783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
2869783237e8SYuchung Cheng 	skb_header_release(skb);
2870783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
2871783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
2872783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
2873783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
2874783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
2875783237e8SYuchung Cheng }
2876783237e8SYuchung Cheng 
2877783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2878783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
2879783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2880783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
2881783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to
2882783237e8SYuchung Cheng  * sending a regular SYN with the Fast Open cookie request option.
2883783237e8SYuchung Cheng  */
2884783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2885783237e8SYuchung Cheng {
2886783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2887783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
2888aab48743SYuchung Cheng 	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2889783237e8SYuchung Cheng 	struct sk_buff *syn_data = NULL, *data;
2890aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
2891783237e8SYuchung Cheng 
289267da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
2893aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2894aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
2895aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
2896aab48743SYuchung Cheng 	if (syn_loss > 1 &&
2897aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2898aab48743SYuchung Cheng 		fo->cookie.len = -1;
2899aab48743SYuchung Cheng 		goto fallback;
2900aab48743SYuchung Cheng 	}
2901aab48743SYuchung Cheng 
290267da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
290367da22d2SYuchung Cheng 		fo->cookie.len = -1;
290467da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
2905783237e8SYuchung Cheng 		goto fallback;
2906783237e8SYuchung Cheng 
2907783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2908783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
2909783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
2910783237e8SYuchung Cheng 	 */
2911783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2912783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
29131b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2914783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
2915783237e8SYuchung Cheng 
2916f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
2917f5ddcbbbSEric Dumazet 
2918f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
2919f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
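	/* Worked example (illustrative): with a 1500-byte PMTU the mss is
	 * roughly 1460, leaving about 1460 - 40 = 1420 bytes for SYN data
	 * after MAX_TCP_OPTION_SPACE; the smallest of the three bounds
	 * above wins.
	 */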
2920f5ddcbbbSEric Dumazet 
2921f5ddcbbbSEric Dumazet 	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
2922783237e8SYuchung Cheng 				   sk->sk_allocation);
2923783237e8SYuchung Cheng 	if (syn_data == NULL)
2924783237e8SYuchung Cheng 		goto fallback;
2925783237e8SYuchung Cheng 
2926783237e8SYuchung Cheng 	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2927783237e8SYuchung Cheng 		struct iovec *iov = &fo->data->msg_iov[i];
2928783237e8SYuchung Cheng 		unsigned char __user *from = iov->iov_base;
2929783237e8SYuchung Cheng 		int len = iov->iov_len;
2930783237e8SYuchung Cheng 
2931783237e8SYuchung Cheng 		if (syn_data->len + len > space)
2932783237e8SYuchung Cheng 			len = space - syn_data->len;
2933783237e8SYuchung Cheng 		else if (i + 1 == iovlen)
2934783237e8SYuchung Cheng 			/* No more data pending in inet_wait_for_connect() */
2935783237e8SYuchung Cheng 			fo->data = NULL;
2936783237e8SYuchung Cheng 
2937783237e8SYuchung Cheng 		if (skb_add_data(syn_data, from, len))
2938783237e8SYuchung Cheng 			goto fallback;
2939783237e8SYuchung Cheng 	}
2940783237e8SYuchung Cheng 
2941783237e8SYuchung Cheng 	/* Queue a data-only packet after the regular SYN for retransmission */
2942783237e8SYuchung Cheng 	data = pskb_copy(syn_data, sk->sk_allocation);
2943783237e8SYuchung Cheng 	if (data == NULL)
2944783237e8SYuchung Cheng 		goto fallback;
2945783237e8SYuchung Cheng 	TCP_SKB_CB(data)->seq++;
2946783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2947783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2948783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, data);
2949783237e8SYuchung Cheng 	fo->copied = data->len;
2950783237e8SYuchung Cheng 
2951783237e8SYuchung Cheng 	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
295267da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
2953783237e8SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2954783237e8SYuchung Cheng 		goto done;
2955783237e8SYuchung Cheng 	}
2956783237e8SYuchung Cheng 	syn_data = NULL;
2957783237e8SYuchung Cheng 
2958783237e8SYuchung Cheng fallback:
2959783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
2960783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
2961783237e8SYuchung Cheng 		fo->cookie.len = 0;
2962783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2963783237e8SYuchung Cheng 	if (err)
2964783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
2965783237e8SYuchung Cheng 	kfree_skb(syn_data);
2966783237e8SYuchung Cheng done:
2967783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
2968783237e8SYuchung Cheng 	return err;
2969783237e8SYuchung Cheng }
2970783237e8SYuchung Cheng 
297167edfef7SAndi Kleen /* Build a SYN and send it off. */
29721da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
29731da177e4SLinus Torvalds {
29741da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
29751da177e4SLinus Torvalds 	struct sk_buff *buff;
2976ee586811SEric Paris 	int err;
29771da177e4SLinus Torvalds 
29781da177e4SLinus Torvalds 	tcp_connect_init(sk);
29791da177e4SLinus Torvalds 
29802b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
29812b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
29822b916477SAndrey Vagin 		return 0;
29832b916477SAndrey Vagin 	}
29842b916477SAndrey Vagin 
2985d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
29861da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
29871da177e4SLinus Torvalds 		return -ENOBUFS;
29881da177e4SLinus Torvalds 
29891da177e4SLinus Torvalds 	/* Reserve space for headers. */
29901da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
29911da177e4SLinus Torvalds 
2992a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2993783237e8SYuchung Cheng 	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2994783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
2995e870a8efSIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
29961da177e4SLinus Torvalds 
2997783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
2998783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2999783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3000ee586811SEric Paris 	if (err == -ECONNREFUSED)
3001ee586811SEric Paris 		return err;
3002bd37a088SWei Yongjun 
3003bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3004bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3005bd37a088SWei Yongjun 	 */
3006bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3007bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
300881cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
30113f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30123f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
30131da177e4SLinus Torvalds 	return 0;
30141da177e4SLinus Torvalds }
30154bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
30161da177e4SLinus Torvalds 
30171da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
30181da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
30191da177e4SLinus Torvalds  * for details.
30201da177e4SLinus Torvalds  */
30211da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
30221da177e4SLinus Torvalds {
3023463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3024463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
30251da177e4SLinus Torvalds 	unsigned long timeout;
30261da177e4SLinus Torvalds 
30271da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3028463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
30291da177e4SLinus Torvalds 		int max_ato = HZ / 2;
30301da177e4SLinus Torvalds 
3031056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3032056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
30331da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
30341da177e4SLinus Torvalds 
30351da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
30361da177e4SLinus Torvalds 
30371da177e4SLinus Torvalds 		/* If an rtt estimate is known, use it to bound the delayed ack.
3038463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of rtt
30391da177e4SLinus Torvalds 		 * measurements directly.
30401da177e4SLinus Torvalds 		 */
30411da177e4SLinus Torvalds 		if (tp->srtt) {
30421da177e4SLinus Torvalds 			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
30431da177e4SLinus Torvalds 
30441da177e4SLinus Torvalds 			if (rtt < max_ato)
30451da177e4SLinus Torvalds 				max_ato = rtt;
30461da177e4SLinus Torvalds 		}
30471da177e4SLinus Torvalds 
30481da177e4SLinus Torvalds 		ato = min(ato, max_ato);
30491da177e4SLinus Torvalds 	}
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	/* Stay within the limit we were given */
30521da177e4SLinus Torvalds 	timeout = jiffies + ato;
30531da177e4SLinus Torvalds 
30541da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already pending. */
3055463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
30561da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
30571da177e4SLinus Torvalds 		 * send ACK now.
30581da177e4SLinus Torvalds 		 */
3059463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3060463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
30611da177e4SLinus Torvalds 			tcp_send_ack(sk);
30621da177e4SLinus Torvalds 			return;
30631da177e4SLinus Torvalds 		}
30641da177e4SLinus Torvalds 
3065463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3066463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
30671da177e4SLinus Torvalds 	}
3068463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3069463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3070463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
30711da177e4SLinus Torvalds }
30721da177e4SLinus Torvalds 
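/* Worked sketch of the ato clamping above (illustrative only; assumes
 * HZ = 1000, so TCP_DELACK_MIN = 40 ms and HZ / 2 = 500 ms): with a smoothed
 * rtt of 24 ms stored shifted as srtt = 8 * 24 = 192, rtt = max(192 >> 3, 40)
 * = 40, so max_ato drops from 500 to 40 and a large ato is clamped to 40
 * jiffies. The same arithmetic, written out:
 */
static int model_delack_ato(int ato, u32 srtt, bool pingpong)
{
	int max_ato = HZ / 2;			/* slow-path ceiling */

	if (pingpong)
		max_ato = TCP_DELACK_MAX;	/* interactive traffic */
	if (srtt) {
		int rtt = max_t(int, srtt >> 3, TCP_DELACK_MIN);

		if (rtt < max_ato)
			max_ato = rtt;		/* bound by measured rtt */
	}
	return min(ato, max_ato);
}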
30731da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
30741da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
30751da177e4SLinus Torvalds {
30761da177e4SLinus Torvalds 	struct sk_buff *buff;
30771da177e4SLinus Torvalds 
3078058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3079058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3080058dc334SIlpo Järvinen 		return;
3081058dc334SIlpo Järvinen 
30821da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
30831da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
30841da177e4SLinus Torvalds 	 * sock.
30851da177e4SLinus Torvalds 	 */
308699a1dec7SMel Gorman 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
30871da177e4SLinus Torvalds 	if (buff == NULL) {
3088463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3089463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
30903f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
30913f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
30921da177e4SLinus Torvalds 		return;
30931da177e4SLinus Torvalds 	}
30941da177e4SLinus Torvalds 
30951da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
30961da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3097a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
30981da177e4SLinus Torvalds 
30991da177e4SLinus Torvalds 	/* Send it off; this clears delayed acks for us. */
31001da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
310199a1dec7SMel Gorman 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
31021da177e4SLinus Torvalds }
31031da177e4SLinus Torvalds 
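/* A note on the allocation-failure path above (inferred from the code, not
 * stated in the original comments): if no skb can be allocated for the ACK,
 * the ACK is not simply dropped; the delayed-ACK machinery is re-armed with
 * an aggressive ato of TCP_ATO_MIN, so the timer retries the ACK shortly
 * and no later than TCP_DELACK_MAX.
 */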
31041da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
31051da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
31061da177e4SLinus Torvalds  *
31071da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
31081da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
31091da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
31101da177e4SLinus Torvalds  *
31111da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
31121da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
31131da177e4SLinus Torvalds  * out-of-date with SEG.SEQ=SND.UNA-1, to probe the window.
31141da177e4SLinus Torvalds  */
31151da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
31161da177e4SLinus Torvalds {
31171da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31181da177e4SLinus Torvalds 	struct sk_buff *skb;
31191da177e4SLinus Torvalds 
31201da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
312199a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
31221da177e4SLinus Torvalds 	if (skb == NULL)
31231da177e4SLinus Torvalds 		return -1;
31241da177e4SLinus Torvalds 
31251da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
31261da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
31271da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
31281da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
31291da177e4SLinus Torvalds 	 * send it.
31301da177e4SLinus Torvalds 	 */
3131a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
31321da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
3133dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
31341da177e4SLinus Torvalds }
31351da177e4SLinus Torvalds 
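/* Worked example of the probe sequence choice above (illustrative): with
 * SND.UNA = 1000, a plain window probe uses seq 999 (already acknowledged,
 * so the peer discards it but answers with an ACK), while an urgent-mode
 * probe uses seq 1000 to deliver the urgent pointer. That is all the
 * expression tp->snd_una - !urgent computes:
 */
static u32 model_probe_seq(u32 snd_una, int urgent)
{
	return snd_una - !urgent;	/* 999 vs. 1000 in the example */
}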
3136ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3137ee995283SPavel Emelyanov {
3138ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3139ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3140ee995283SPavel Emelyanov 		tcp_xmit_probe_skb(sk, 0);
3141ee995283SPavel Emelyanov 	}
3142ee995283SPavel Emelyanov }
3143ee995283SPavel Emelyanov 
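/* A note on the snd_wl1 rewind above (inferred from the code, not stated in
 * the original comments): snd_wl1 caches the sequence number of the segment
 * that last updated the send window. Winding it back to rcv_nxt - 1 makes
 * the next incoming segment look newer than the last window update, so the
 * ACK elicited by the probe is guaranteed to refresh snd_wnd.
 */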
314467edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
31451da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
31461da177e4SLinus Torvalds {
31471da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31481da177e4SLinus Torvalds 	struct sk_buff *skb;
31491da177e4SLinus Torvalds 
3150058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3151058dc334SIlpo Järvinen 		return -1;
3152058dc334SIlpo Järvinen 
3153fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
315490840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
31551da177e4SLinus Torvalds 		int err;
31560c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
315790840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
31581da177e4SLinus Torvalds 
31591da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
31601da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
31611da177e4SLinus Torvalds 
31621da177e4SLinus Torvalds 		/* We are probing the opening of a window
31631da177e4SLinus Torvalds 		 * but the window size is != 0;
31641da177e4SLinus Torvalds 		 * this must have been the result of sender-side SWS avoidance.
31651da177e4SLinus Torvalds 		 */
31661da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
31671da177e4SLinus Torvalds 		    skb->len > mss) {
31681da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
31694de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3170846998aeSDavid S. Miller 			if (tcp_fragment(sk, skb, seg_size, mss))
31711da177e4SLinus Torvalds 				return -1;
31721da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
3173846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
31741da177e4SLinus Torvalds 
31754de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
31761da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
3177dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
317866f5fe62SIlpo Järvinen 		if (!err)
317966f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
31801da177e4SLinus Torvalds 		return err;
31811da177e4SLinus Torvalds 	} else {
318233f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
31834828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
31841da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
31851da177e4SLinus Torvalds 	}
31861da177e4SLinus Torvalds }
31871da177e4SLinus Torvalds 
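/* Sketch of the sizing rule in tcp_write_wakeup() (illustrative helper, not
 * kernel code): if the peer's window cannot hold the whole skb, or the skb
 * exceeds one MSS, only min(window space, mss) bytes are pushed; otherwise
 * the skb goes out as-is.
 */
static unsigned int model_wakeup_len(unsigned int window_space,
				     unsigned int skb_len,
				     unsigned int mss)
{
	if (window_space < skb_len || skb_len > mss)
		return min(window_space, mss);	/* fragment before sending */
	return skb_len;				/* whole skb fits */
}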
31881da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
31891da177e4SLinus Torvalds  * a partial packet, else a zero-window probe.
31901da177e4SLinus Torvalds  */
31911da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
31921da177e4SLinus Torvalds {
3193463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
31941da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31951da177e4SLinus Torvalds 	int err;
31961da177e4SLinus Torvalds 
31971da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
31981da177e4SLinus Torvalds 
3199fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
32001da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
32016687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3202463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
32031da177e4SLinus Torvalds 		return;
32041da177e4SLinus Torvalds 	}
32051da177e4SLinus Torvalds 
32061da177e4SLinus Torvalds 	if (err <= 0) {
3207463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3208463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
32096687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3210463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
32113f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
32123f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
32131da177e4SLinus Torvalds 	} else {
32141da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
32156687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
32161da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
32171da177e4SLinus Torvalds 		 *
32181da177e4SLinus Torvalds 		 * But still use the accumulated backoff.
32191da177e4SLinus Torvalds 		 */
32206687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
32216687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3222463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3223463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
32243f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
32253f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
32261da177e4SLinus Torvalds 	}
32271da177e4SLinus Torvalds }
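
/* Worked example of the probe0 backoff above (illustrative; assumes
 * icsk_rto starts at 200 ms): successive unanswered probes fire after
 * 200 ms, 400 ms, 800 ms, ... doubling until the TCP_RTO_MAX cap. The
 * timeout computation reduces to:
 */
static unsigned long model_probe0_timeout(unsigned long rto, u8 backoff)
{
	return min_t(unsigned long, rto << backoff, TCP_RTO_MAX);
}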
3228