/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
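
/* Worked example of the halving loop above (hypothetical numbers, for
 * illustration only): suppose snd_cwnd = 40, restart_cwnd = 10 and the
 * connection was idle for 2.5 * RTO.  The loop halves cwnd once per full
 * RTO of idle time beyond the first: 40 -> 20 (delta now 1.5*RTO > 0),
 * 20 -> 10 (delta 0.5*RTO > 0, but 10 > restart_cwnd is false), so the
 * loop stops and snd_cwnd = max(10, 10) = 10, i.e. we resume from the
 * restart window rather than the stale pre-idle cwnd.
 */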

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
			icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
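
/* Worked example (hypothetical mss, for illustration only): with jumbo
 * frames and mss = 8960, init_rwnd = max((1460 * 20) / 8960, 2U) =
 * max(3, 2) = 3 segments, i.e. roughly the same byte budget as
 * 20 segments of 1460 bytes, instead of 20 jumbo-sized segments.
 */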

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
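
/* Worked example of the scaling loop (hypothetical sizes, illustration
 * only): with space = 4 MB (4194304 bytes), the loop shifts until the
 * value fits in the 16-bit window field: seven halvings give
 * 4194304 >> 7 = 32768 <= 65535, so rcv_wscale = 7 and windows up to
 * 65535 << 7 bytes (about 8 MB) can later be advertised.
 */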

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
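
/* Example of the never-shrink rounding above (hypothetical numbers):
 * with rcv_wscale = 7 and cur_win = 10000, ALIGN(10000, 128) = 10112,
 * so the final >> 7 yields 79 rather than 78; without the round-up,
 * 78 << 7 = 9984 < 10000 and scaling alone would have shrunk the
 * offered window.
 */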

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN.  */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
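
/* Usage sketch (illustrative only): callers zero the struct and let the
 * tcp_*_options() helpers below fill it in before it is serialized, e.g.
 *
 *	struct tcp_out_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	size = tcp_syn_options(sk, skb, &opts, &md5);
 *
 * followed by tcp_options_write(); see tcp_transmit_skb() further down
 * for the real call sequence.
 */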

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
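
/* Wire-format example (illustrative): a timestamp-only data segment
 * emits the bytes 01 01 08 0a (NOP, NOP, TIMESTAMP, length 10) in one
 * 32-bit word, followed by the two 32-bit timestamps, for 12 bytes
 * total -- which is exactly TCPOLEN_TSTAMP_ALIGNED, keeping the option
 * area a multiple of 4 bytes as the data offset field requires.
 */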

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets (should timestamps be used) must be included in
	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
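
/* Space-accounting illustration (hypothetical SYN, no MD5): remaining
 * starts at MAX_TCP_OPTION_SPACE (40); MSS costs 4, timestamps 12 and
 * window scaling 4, while SACK_PERM rides inside the timestamp word for
 * free.  The function then reports 20 option bytes used, leaving 20 of
 * the 40 available for a Fast Open cookie if one was requested.
 */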

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned int mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
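
/* Why the tstamp_ok &= !sack_ok trick above (illustrative arithmetic):
 * on an established connection MD5 (20 aligned bytes) plus timestamps
 * (12) leaves 40 - 32 = 8 option bytes, less than the 4 + 8 needed for
 * even a single SACK block, so SACK blocks can never coexist with
 * MD5 + TS; one of the two option sets has to be dropped at SYN time.
 */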

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
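
/* Worked example (hypothetical, no MD5): with timestamps on, size = 12,
 * so remaining = 40 - 12 = 28 and at most (28 - 4) / 8 = 3 SACK blocks
 * fit; four queued SACK ranges would be trimmed down to three, filling
 * the option space completely (12 + 4 + 3 * 8 = 40 bytes).
 */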

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc+dev), to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It is important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
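
/* Illustration of the cmpxchg loop above: if both the write timer and
 * the delack timer fired while the socket was owned by user context,
 * the single successful cmpxchg claims both deferred bits at once and
 * the two handlers then run back to back here; each __sock_put() is
 * assumed to balance the reference the deferring path took (the timer
 * code holds the socket when it sets a *_DEFERRED bit).
 */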

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on the socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to the tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}
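
/* Accounting note on the branch above: subtracting truesize - 1 rather
 * than truesize leaves exactly one unit charged to sk_wmem_alloc; that
 * residual charge is the reference that keeps the socket alive until
 * tcp_tasklet_func() calls sk_free() and drops it.  The plain
 * sock_wfree() path instead uncharges the full truesize immediately.
 */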

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		const struct sk_buff *fclone = skb + 1;

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
			  tcp_wfree : sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a window probe below snd_una */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}
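
/* Urgent-pointer illustration (hypothetical sequence numbers): with
 * tcb->seq = 1000 and tp->snd_up = 2000 the first branch above encodes
 * urg_ptr = 1000; had snd_up been 1000 + 0x20000, the offset cannot be
 * represented in the 16-bit field, so we advertise the maximum 0xFFFF
 * instead (a BSD-compatible approximation), provided seq + 0xFFFF is
 * still after snd_nxt.
 */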

/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}
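
/* Worked example (hypothetical lengths): skb->len = 4000 with
 * mss_now = 1448 yields gso_segs = DIV_ROUND_UP(4000, 1448) = 3 and
 * gso_size = 1448, so the NIC would emit two full-sized segments plus
 * a 1104-byte tail from this single skb.
 */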
9991da177e4SLinus Torvalds 
100091fed7a1SIlpo Järvinen /* When a modification to fackets out becomes necessary, we need to check
100168f8353bSIlpo Järvinen  * skb is counted to fackets_out or not.
100291fed7a1SIlpo Järvinen  */
1003cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
100491fed7a1SIlpo Järvinen 				   int decr)
100591fed7a1SIlpo Järvinen {
1006a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1007a47e5a98SIlpo Järvinen 
1008dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
100991fed7a1SIlpo Järvinen 		return;
101091fed7a1SIlpo Järvinen 
10116859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
101291fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
101391fed7a1SIlpo Järvinen }
101491fed7a1SIlpo Järvinen 
1015797108d1SIlpo Järvinen /* When the pcount of an skb in the middle of the write queue changes,
1016797108d1SIlpo Järvinen  * we need to tweak various counters to keep them consistent.
1017797108d1SIlpo Järvinen  */
1018cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1019797108d1SIlpo Järvinen {
1020797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1021797108d1SIlpo Järvinen 
1022797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1023797108d1SIlpo Järvinen 
1024797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1025797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1026797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1027797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1028797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1029797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1030797108d1SIlpo Järvinen 
1031797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1032797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1033797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1034797108d1SIlpo Järvinen 
1035797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1036797108d1SIlpo Järvinen 
1037797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1038797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
103952cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1040797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1041797108d1SIlpo Järvinen 
1042797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1043797108d1SIlpo Järvinen }
1044797108d1SIlpo Järvinen 
10451da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
10461da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
10471da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
10481da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
10491da177e4SLinus Torvalds  */
1050056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1051056834d9SIlpo Järvinen 		 unsigned int mss_now)
10521da177e4SLinus Torvalds {
10531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10541da177e4SLinus Torvalds 	struct sk_buff *buff;
10556475be16SDavid S. Miller 	int nsize, old_factor;
1056b60b49eaSHerbert Xu 	int nlen;
10579ce01461SIlpo Järvinen 	u8 flags;
10581da177e4SLinus Torvalds 
10592fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
10602fceec13SIlpo Järvinen 		return -EINVAL;
10616a438bbeSStephen Hemminger 
10621da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
10631da177e4SLinus Torvalds 	if (nsize < 0)
10641da177e4SLinus Torvalds 		nsize = 0;
10651da177e4SLinus Torvalds 
10661da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
10671da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
10681da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
10691da177e4SLinus Torvalds 		return -ENOMEM;
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
10721da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
10731da177e4SLinus Torvalds 	if (buff == NULL)
10741da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1075ef5cb973SHerbert Xu 
10763ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
10773ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1078b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1079b60b49eaSHerbert Xu 	buff->truesize += nlen;
1080b60b49eaSHerbert Xu 	skb->truesize -= nlen;
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
10831da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
10841da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
10851da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
10861da177e4SLinus Torvalds 
10871da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
10884de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
10894de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
10904de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1091e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
10921da177e4SLinus Torvalds 
109384fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
10941da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1095056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1096056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
10971da177e4SLinus Torvalds 						       nsize, 0);
10981da177e4SLinus Torvalds 
10991da177e4SLinus Torvalds 		skb_trim(skb, len);
11001da177e4SLinus Torvalds 
11011da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
11021da177e4SLinus Torvalds 	} else {
110384fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
11041da177e4SLinus Torvalds 		skb_split(skb, buff, len);
11051da177e4SLinus Torvalds 	}
11061da177e4SLinus Torvalds 
11071da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
11081da177e4SLinus Torvalds 
11091da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the "when" field of
11101da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
11111da177e4SLinus Torvalds 	 */
11121da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1113a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
11141da177e4SLinus Torvalds 
11156475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
11166475be16SDavid S. Miller 
11171da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1118846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1119846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
11201da177e4SLinus Torvalds 
11216475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
11226475be16SDavid S. Miller 	 * adjust the various packet counters.
11236475be16SDavid S. Miller 	 */
1124cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
11256475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
11266475be16SDavid S. Miller 			tcp_skb_pcount(buff);
11271da177e4SLinus Torvalds 
1128797108d1SIlpo Järvinen 		if (diff)
1129797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
11301da177e4SLinus Torvalds 	}
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1133f44b5271SDavid S. Miller 	skb_header_release(buff);
1134fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
11351da177e4SLinus Torvalds 
11361da177e4SLinus Torvalds 	return 0;
11371da177e4SLinus Torvalds }
11381da177e4SLinus Torvalds 
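/* A sketch of the sequence bookkeeping in tcp_fragment() above,
 * assuming an skb covering [1000, 4000) split at len = 1000:
 *
 *	TCP_SKB_CB(buff)->seq     = 1000 + 1000 = 2000
 *	TCP_SKB_CB(buff)->end_seq = 4000
 *	TCP_SKB_CB(skb)->end_seq  = 2000
 *
 * so skb keeps [1000, 2000), buff takes [2000, 4000), and PSH/FIN,
 * if set, travel only with the second half.
 */
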
11391da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
11401da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied,
11411da177e4SLinus Torvalds  * but immediately discarded.
11421da177e4SLinus Torvalds  */
1143f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
11441da177e4SLinus Torvalds {
11451da177e4SLinus Torvalds 	int i, k, eat;
11461da177e4SLinus Torvalds 
11474fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
11484fa48bf3SEric Dumazet 	if (eat) {
11494fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
11504fa48bf3SEric Dumazet 		len -= eat;
11514fa48bf3SEric Dumazet 		if (!len)
11524fa48bf3SEric Dumazet 			return;
11534fa48bf3SEric Dumazet 	}
11541da177e4SLinus Torvalds 	eat = len;
11551da177e4SLinus Torvalds 	k = 0;
11561da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11579e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
11589e903e08SEric Dumazet 
11599e903e08SEric Dumazet 		if (size <= eat) {
1160aff65da0SIan Campbell 			skb_frag_unref(skb, i);
11619e903e08SEric Dumazet 			eat -= size;
11621da177e4SLinus Torvalds 		} else {
11631da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
11641da177e4SLinus Torvalds 			if (eat) {
11651da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
11669e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
11671da177e4SLinus Torvalds 				eat = 0;
11681da177e4SLinus Torvalds 			}
11691da177e4SLinus Torvalds 			k++;
11701da177e4SLinus Torvalds 		}
11711da177e4SLinus Torvalds 	}
11721da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
11731da177e4SLinus Torvalds 
117427a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
11751da177e4SLinus Torvalds 	skb->data_len -= len;
11761da177e4SLinus Torvalds 	skb->len = skb->data_len;
11771da177e4SLinus Torvalds }
11781da177e4SLinus Torvalds 
117967edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
11801da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
11811da177e4SLinus Torvalds {
118214bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
11831da177e4SLinus Torvalds 		return -ENOMEM;
11841da177e4SLinus Torvalds 
11854fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
11861da177e4SLinus Torvalds 
11871da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
118884fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
11891da177e4SLinus Torvalds 
11901da177e4SLinus Torvalds 	skb->truesize	     -= len;
11911da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
11923ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
11931da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
11941da177e4SLinus Torvalds 
11955b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
11961da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
11975b35e1e6SNeal Cardwell 		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
11981da177e4SLinus Torvalds 
11991da177e4SLinus Torvalds 	return 0;
12001da177e4SLinus Torvalds }
12011da177e4SLinus Torvalds 
12021b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
12031b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
12045d424d5aSJohn Heffner {
1205cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1206cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12075d424d5aSJohn Heffner 	int mss_now;
12085d424d5aSJohn Heffner 
12095d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
12105d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
12115d424d5aSJohn Heffner 	 */
12125d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
12135d424d5aSJohn Heffner 
121467469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
121567469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
121667469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
121767469601SEric Dumazet 
121867469601SEric Dumazet 		if (dst && dst_allfrag(dst))
121967469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
122067469601SEric Dumazet 	}
122167469601SEric Dumazet 
12225d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
12235d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
12245d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
12255d424d5aSJohn Heffner 
12265d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
12275d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
12285d424d5aSJohn Heffner 
12295d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
12305d424d5aSJohn Heffner 	if (mss_now < 48)
12315d424d5aSJohn Heffner 		mss_now = 48;
12325d424d5aSJohn Heffner 	return mss_now;
12335d424d5aSJohn Heffner }
12345d424d5aSJohn Heffner 
12351b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
12361b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
12371b63edd6SYuchung Cheng {
12381b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
12391b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
12401b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
12411b63edd6SYuchung Cheng }
12421b63edd6SYuchung Cheng 
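/* A worked example, assuming IPv4 with pmtu = 1500, no extension
 * headers and timestamps negotiated (tcp_header_len = 20 + 12):
 *
 *	__tcp_mtu_to_mss(sk, 1500) = 1500 - 20 - 20 = 1460
 *	tcp_mtu_to_mss(sk, 1500)   = 1460 - 12     = 1448
 *
 * which is the familiar mss_cache seen on Ethernet paths.
 */
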
12435d424d5aSJohn Heffner /* Inverse of above */
124467469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
12455d424d5aSJohn Heffner {
1246cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1247cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12485d424d5aSJohn Heffner 	int mtu;
12495d424d5aSJohn Heffner 
12505d424d5aSJohn Heffner 	mtu = mss +
12515d424d5aSJohn Heffner 	      tp->tcp_header_len +
12525d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
12535d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
12545d424d5aSJohn Heffner 
125567469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
125667469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
125767469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
125867469601SEric Dumazet 
125967469601SEric Dumazet 		if (dst && dst_allfrag(dst))
126067469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
126167469601SEric Dumazet 	}
12625d424d5aSJohn Heffner 	return mtu;
12635d424d5aSJohn Heffner }
12645d424d5aSJohn Heffner 
126567edfef7SAndi Kleen /* MTU probing init per socket */
12665d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
12675d424d5aSJohn Heffner {
12685d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
12695d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
12705d424d5aSJohn Heffner 
12715d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
12725d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
12735d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
12745d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
12755d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
12765d424d5aSJohn Heffner }
12774bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
12785d424d5aSJohn Heffner 
12791da177e4SLinus Torvalds /* This function synchronize snd mss to current pmtu/exthdr set.
12801da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
12811da177e4SLinus Torvalds 
12821da177e4SLinus Torvalds    tp->rx_opt.user_mss is mss set by user via TCP_MAXSEG. It does NOT account
12831da177e4SLinus Torvalds    for TCP options, but includes only the bare TCP header.
12841da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1285caa20d9aSStephen Hemminger    It is minimum of user_mss and mss received with SYN.
12861da177e4SLinus Torvalds    It also does not include TCP options.
12871da177e4SLinus Torvalds 
1288d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
12891da177e4SLinus Torvalds 
12901da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
12911da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
12921da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
12931da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
12961da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
12971da177e4SLinus Torvalds 
1298d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1299d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
13001da177e4SLinus Torvalds  */
13011da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
13021da177e4SLinus Torvalds {
13031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1304d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
13055d424d5aSJohn Heffner 	int mss_now;
13061da177e4SLinus Torvalds 
13075d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
13085d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
13091da177e4SLinus Torvalds 
13105d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1311409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds 	/* And store cached results */
1314d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
13155d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
13165d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1317c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	return mss_now;
13201da177e4SLinus Torvalds }
13214bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
13221da177e4SLinus Torvalds 
13231da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
13241da177e4SLinus Torvalds  * and even PMTU discovery events into account.
13251da177e4SLinus Torvalds  */
13260c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
13271da177e4SLinus Torvalds {
1328cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1329cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1330c1b4a7e6SDavid S. Miller 	u32 mss_now;
133195c96174SEric Dumazet 	unsigned int header_len;
133233ad798cSAdam Langley 	struct tcp_out_options opts;
133333ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
13341da177e4SLinus Torvalds 
1335c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1336c1b4a7e6SDavid S. Miller 
13371da177e4SLinus Torvalds 	if (dst) {
13381da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1339d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
13401da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
13411da177e4SLinus Torvalds 	}
13421da177e4SLinus Torvalds 
134333ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
134433ad798cSAdam Langley 		     sizeof(struct tcphdr);
134533ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
134633ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
134733ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
134833ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
134933ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
135033ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
135133ad798cSAdam Langley 		mss_now -= delta;
135233ad798cSAdam Langley 	}
1353cfb6eeb4SYOSHIFUJI Hideaki 
13541da177e4SLinus Torvalds 	return mss_now;
13551da177e4SLinus Torvalds }
13561da177e4SLinus Torvalds 
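/* Example: with timestamps only, tp->tcp_header_len = 20 + 12 and
 * mss_cache already reflects it.  If a SACK block must now be sent,
 * tcp_established_options() adds 4 bytes of base/padding plus 8 per
 * block, so header_len becomes 20 + 12 + 12 = 44, delta = 12, and
 * mss_now shrinks by 12 for this packet only.
 */
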
1357a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
13589e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1359a762a980SDavid S. Miller {
13609e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1361a762a980SDavid S. Miller 
1362d436d686SIlpo Järvinen 	if (tp->packets_out >= tp->snd_cwnd) {
1363a762a980SDavid S. Miller 		/* Network is fully fed. */
1364a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1365a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1366a762a980SDavid S. Miller 	} else {
1367a762a980SDavid S. Miller 		/* Network starves. */
1368a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1369a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1370a762a980SDavid S. Miller 
137115d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
137215d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1373a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1374a762a980SDavid S. Miller 	}
1375a762a980SDavid S. Miller }
1376a762a980SDavid S. Miller 
13770e3a4803SIlpo Järvinen /* Returns the portion of skb which can be sent right away without
13780e3a4803SIlpo Järvinen  * introducing MSS oddities to segment boundaries. In rare cases where
13790e3a4803SIlpo Järvinen  * mss_now != mss_cache, we will request caller to create a small skb
13800e3a4803SIlpo Järvinen  * per input skb which could be mostly avoided here (if desired).
13815ea3a748SIlpo Järvinen  *
13825ea3a748SIlpo Järvinen  * We explicitly want to create a request for splitting write queue tail
13835ea3a748SIlpo Järvinen  * to a small skb for Nagle purposes while avoiding unnecessary modulos,
13845ea3a748SIlpo Järvinen  * thus all the complexity (cwnd_len is always MSS multiple which we
13855ea3a748SIlpo Järvinen  * return whenever allowed by the other factors). Basically we need the
13865ea3a748SIlpo Järvinen  * modulo only when the receiver window alone is the limiting factor or
13875ea3a748SIlpo Järvinen  * when we would be allowed to send the split-due-to-Nagle skb fully.
13880e3a4803SIlpo Järvinen  */
1389cf533ea5SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
13901485348dSBen Hutchings 					unsigned int mss_now, unsigned int max_segs)
1391c1b4a7e6SDavid S. Miller {
1392cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
13931485348dSBen Hutchings 	u32 needed, window, max_len;
1394c1b4a7e6SDavid S. Miller 
139590840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
13961485348dSBen Hutchings 	max_len = mss_now * max_segs;
13970e3a4803SIlpo Järvinen 
13981485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
13991485348dSBen Hutchings 		return max_len;
14000e3a4803SIlpo Järvinen 
14015ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
14025ea3a748SIlpo Järvinen 
14031485348dSBen Hutchings 	if (max_len <= needed)
14041485348dSBen Hutchings 		return max_len;
14050e3a4803SIlpo Järvinen 
14060e3a4803SIlpo Järvinen 	return needed - needed % mss_now;
1407c1b4a7e6SDavid S. Miller }
1408c1b4a7e6SDavid S. Miller 
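/* A worked example, assuming mss_now = 1000, max_segs large, a 6000
 * byte skb at the tail of the write queue and 4500 bytes of send
 * window remaining: needed = min(6000, 4500) = 4500, and the function
 * returns 4500 - 4500 % 1000 = 4000, keeping the split on an MSS
 * boundary.
 */
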
1409c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1410c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1411c1b4a7e6SDavid S. Miller  */
1412cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1413cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1414c1b4a7e6SDavid S. Miller {
1415c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1416c1b4a7e6SDavid S. Miller 
1417c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
14184de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
14194de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1420c1b4a7e6SDavid S. Miller 		return 1;
1421c1b4a7e6SDavid S. Miller 
1422c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1423c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1424c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1425c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1426c1b4a7e6SDavid S. Miller 
1427c1b4a7e6SDavid S. Miller 	return 0;
1428c1b4a7e6SDavid S. Miller }
1429c1b4a7e6SDavid S. Miller 
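/* Example: with snd_cwnd = 10 and tcp_packets_in_flight(tp) = 7,
 * tcp_cwnd_test() returns a quota of 3 segments; at 10 in flight it
 * returns 0 and the sender is cwnd limited.  A one-segment FIN is
 * exempt and always yields 1.
 */
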
1430b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
143167edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1432c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1433c1b4a7e6SDavid S. Miller  */
1434cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1435056834d9SIlpo Järvinen 			     unsigned int mss_now)
1436c1b4a7e6SDavid S. Miller {
1437c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1438c1b4a7e6SDavid S. Miller 
1439f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1440846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1441c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1442c1b4a7e6SDavid S. Miller 	}
1443c1b4a7e6SDavid S. Miller 	return tso_segs;
1444c1b4a7e6SDavid S. Miller }
1445c1b4a7e6SDavid S. Miller 
144667edfef7SAndi Kleen /* Minshall's variant of the Nagle send check. */
1447a2a385d6SEric Dumazet static inline bool tcp_minshall_check(const struct tcp_sock *tp)
1448c1b4a7e6SDavid S. Miller {
1449c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml, tp->snd_una) &&
1450c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
1451c1b4a7e6SDavid S. Miller }
1452c1b4a7e6SDavid S. Miller 
1453a2a385d6SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1454c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1455c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
14566d67e9beSFeng King  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1457c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1458c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1459c1b4a7e6SDavid S. Miller  */
1460a2a385d6SEric Dumazet static inline bool tcp_nagle_check(const struct tcp_sock *tp,
1461c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
146295c96174SEric Dumazet 				  unsigned int mss_now, int nonagle)
1463c1b4a7e6SDavid S. Miller {
1464a02cec21SEric Dumazet 	return skb->len < mss_now &&
1465c1b4a7e6SDavid S. Miller 		((nonagle & TCP_NAGLE_CORK) ||
1466a02cec21SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1467c1b4a7e6SDavid S. Miller }
1468c1b4a7e6SDavid S. Miller 
1469a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1470c1b4a7e6SDavid S. Miller  * sent now.
1471c1b4a7e6SDavid S. Miller  */
1472a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1473c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1474c1b4a7e6SDavid S. Miller {
1475c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1476c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1477c1b4a7e6SDavid S. Miller 	 *
1478c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1479c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1480c1b4a7e6SDavid S. Miller 	 */
1481c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1482a2a385d6SEric Dumazet 		return true;
1483c1b4a7e6SDavid S. Miller 
14849b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
14859b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1486a2a385d6SEric Dumazet 		return true;
1487c1b4a7e6SDavid S. Miller 
1488c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1489a2a385d6SEric Dumazet 		return true;
1490c1b4a7e6SDavid S. Miller 
1491a2a385d6SEric Dumazet 	return false;
1492c1b4a7e6SDavid S. Miller }
1493c1b4a7e6SDavid S. Miller 
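/* Example: a 100 byte segment at the queue tail with mss_now = 1448,
 * TCP_NODELAY off and another small packet still unacked makes
 * tcp_minshall_check() true, tcp_nagle_check() true, and so
 * tcp_nagle_test() returns false: the segment waits.  The same
 * segment under TCP_NAGLE_PUSH, in urgent mode or carrying FIN is
 * sent immediately.
 */
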
1494c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1495a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1496a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1497056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1498c1b4a7e6SDavid S. Miller {
1499c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1500c1b4a7e6SDavid S. Miller 
1501c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1502c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1503c1b4a7e6SDavid S. Miller 
150490840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1505c1b4a7e6SDavid S. Miller }
1506c1b4a7e6SDavid S. Miller 
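/* Example: an skb covering [seq, seq + 3000) with cur_mss = 1448 is
 * tested as if it ended at seq + 1448, so it passes as long as one
 * full MSS fits before tcp_wnd_end(tp) even when the whole skb does
 * not.
 */
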
1507fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1508c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1509c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1510c1b4a7e6SDavid S. Miller  */
1511cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1512c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1513c1b4a7e6SDavid S. Miller {
1514cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1515c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1516c1b4a7e6SDavid S. Miller 
1517846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1518c1b4a7e6SDavid S. Miller 
1519c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1520c1b4a7e6SDavid S. Miller 		return 0;
1521c1b4a7e6SDavid S. Miller 
1522c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1523056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1524c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1525c1b4a7e6SDavid S. Miller 
1526c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1527c1b4a7e6SDavid S. Miller }
1528c1b4a7e6SDavid S. Miller 
152967edfef7SAndi Kleen /* Test if sending is allowed right now. */
1530a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1531c1b4a7e6SDavid S. Miller {
1532cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1533fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1534c1b4a7e6SDavid S. Miller 
1535a02cec21SEric Dumazet 	return skb &&
15360c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1537c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1538a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1539c1b4a7e6SDavid S. Miller }
1540c1b4a7e6SDavid S. Miller 
1541c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1542c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1543c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1544c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1545c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1546c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1547c1b4a7e6SDavid S. Miller  */
1548056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1549c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1550c1b4a7e6SDavid S. Miller {
1551c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1552c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
15539ce01461SIlpo Järvinen 	u8 flags;
1554c1b4a7e6SDavid S. Miller 
1555c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1556c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1557c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1558c1b4a7e6SDavid S. Miller 
1559c4ead4c5SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp);
1560c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1561c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1562c1b4a7e6SDavid S. Miller 
15633ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
15643ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1565b60b49eaSHerbert Xu 	buff->truesize += nlen;
1566c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1567c1b4a7e6SDavid S. Miller 
1568c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1569c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1570c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1571c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1572c1b4a7e6SDavid S. Miller 
1573c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
15744de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
15754de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
15764de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1577c1b4a7e6SDavid S. Miller 
1578c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1579c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1580c1b4a7e6SDavid S. Miller 
158184fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1582c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1583c1b4a7e6SDavid S. Miller 
1584c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1585846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1586846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1587c1b4a7e6SDavid S. Miller 
1588c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1589c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1590fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1591c1b4a7e6SDavid S. Miller 
1592c1b4a7e6SDavid S. Miller 	return 0;
1593c1b4a7e6SDavid S. Miller }
1594c1b4a7e6SDavid S. Miller 
1595c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1596c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1597c1b4a7e6SDavid S. Miller  *
1598c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1599c1b4a7e6SDavid S. Miller  */
1600a2a385d6SEric Dumazet static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1601c1b4a7e6SDavid S. Miller {
16029e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
16036687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1604c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1605ad9f4f50SEric Dumazet 	int win_divisor;
1606c1b4a7e6SDavid S. Miller 
16074de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1608ae8064acSJohn Heffner 		goto send_now;
1609c1b4a7e6SDavid S. Miller 
16106687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1611ae8064acSJohn Heffner 		goto send_now;
1612ae8064acSJohn Heffner 
1613ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1614bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1615a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1616ae8064acSJohn Heffner 		goto send_now;
1617908a75c1SDavid S. Miller 
1618c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1619c1b4a7e6SDavid S. Miller 
1620056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1621c1b4a7e6SDavid S. Miller 
162290840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1623c1b4a7e6SDavid S. Miller 
1624c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1625c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1626c1b4a7e6SDavid S. Miller 
1627c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1628c1b4a7e6SDavid S. Miller 
1629ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
16301485348dSBen Hutchings 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
16311485348dSBen Hutchings 			   sk->sk_gso_max_segs * tp->mss_cache))
1632ae8064acSJohn Heffner 		goto send_now;
1633ba244fe9SDavid S. Miller 
163462ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
163562ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
163662ad2761SIlpo Järvinen 		goto send_now;
163762ad2761SIlpo Järvinen 
1638ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1639ad9f4f50SEric Dumazet 	if (win_divisor) {
1640c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1641c1b4a7e6SDavid S. Miller 
1642c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1643c1b4a7e6SDavid S. Miller 		 * just use it.
1644c1b4a7e6SDavid S. Miller 		 */
1645ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1646c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1647ae8064acSJohn Heffner 			goto send_now;
1648c1b4a7e6SDavid S. Miller 	} else {
1649c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1650c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1651c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1652c1b4a7e6SDavid S. Miller 		 * then send now.
1653c1b4a7e6SDavid S. Miller 		 */
16546b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1655ae8064acSJohn Heffner 			goto send_now;
1656c1b4a7e6SDavid S. Miller 	}
1657c1b4a7e6SDavid S. Miller 
1658f4541d60SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1659f4541d60SEric Dumazet 	 * Do not rearm the timer if already set to not break TCP ACK clocking.
1660f4541d60SEric Dumazet 	 */
1661f4541d60SEric Dumazet 	if (!tp->tso_deferred)
1662ae8064acSJohn Heffner 		tp->tso_deferred = 1 | (jiffies << 1);
1663ae8064acSJohn Heffner 
1664a2a385d6SEric Dumazet 	return true;
1665ae8064acSJohn Heffner 
1666ae8064acSJohn Heffner send_now:
1667ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1668a2a385d6SEric Dumazet 	return false;
1669c1b4a7e6SDavid S. Miller }
1670c1b4a7e6SDavid S. Miller 
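/* A worked example of the arithmetic above, assuming mss_cache = 1448,
 * snd_cwnd = 20, in_flight = 12 and ample send window:
 *
 *	cong_win = (20 - 12) * 1448 = 11584
 *	limit    = min(send_win, 11584)
 *
 * With the default tcp_tso_win_divisor of 3, the skb is sent rather
 * than deferred once limit reaches a third of
 * min(snd_wnd, snd_cwnd * mss_cache).
 */
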
16715d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
167267edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
167367edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
167467edfef7SAndi Kleen  * changes resulting in larger path MTUs.
167567edfef7SAndi Kleen  *
16765d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
16775d424d5aSJohn Heffner  *         1 if a probe was sent,
1678056834d9SIlpo Järvinen  *         -1 otherwise
1679056834d9SIlpo Järvinen  */
16805d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
16815d424d5aSJohn Heffner {
16825d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
16835d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
16845d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
16855d424d5aSJohn Heffner 	int len;
16865d424d5aSJohn Heffner 	int probe_size;
168791cc17c0SIlpo Järvinen 	int size_needed;
16885d424d5aSJohn Heffner 	int copy;
16895d424d5aSJohn Heffner 	int mss_now;
16905d424d5aSJohn Heffner 
16915d424d5aSJohn Heffner 	/* Not currently probing/verifying,
16925d424d5aSJohn Heffner 	 * not in recovery,
16935d424d5aSJohn Heffner 	 * have enough cwnd, and
16945d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
16955d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
16965d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
16975d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
16985d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1699cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
17005d424d5aSJohn Heffner 		return -1;
17015d424d5aSJohn Heffner 
17025d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
17030c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
17045d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
170591cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
17065d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
17075d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
17085d424d5aSJohn Heffner 		return -1;
17095d424d5aSJohn Heffner 	}
17105d424d5aSJohn Heffner 
17115d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
17127f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
17135d424d5aSJohn Heffner 		return -1;
17145d424d5aSJohn Heffner 
171591cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
17165d424d5aSJohn Heffner 		return -1;
171790840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
17185d424d5aSJohn Heffner 		return 0;
17195d424d5aSJohn Heffner 
1720d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1721d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1722d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
17235d424d5aSJohn Heffner 			return -1;
17245d424d5aSJohn Heffner 		else
17255d424d5aSJohn Heffner 			return 0;
17265d424d5aSJohn Heffner 	}
17275d424d5aSJohn Heffner 
17285d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
17295d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
17305d424d5aSJohn Heffner 		return -1;
17313ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
17323ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
17335d424d5aSJohn Heffner 
1734fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
17355d424d5aSJohn Heffner 
17365d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
17375d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
17384de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
17395d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
17405d424d5aSJohn Heffner 	nskb->csum = 0;
174184fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
17425d424d5aSJohn Heffner 
174350c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
174450c4817eSIlpo Järvinen 
17455d424d5aSJohn Heffner 	len = 0;
1746234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
17475d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
17485d424d5aSJohn Heffner 		if (nskb->ip_summed)
17495d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
17505d424d5aSJohn Heffner 		else
17515d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1752056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1753056834d9SIlpo Järvinen 							    copy, nskb->csum);
17545d424d5aSJohn Heffner 
17555d424d5aSJohn Heffner 		if (skb->len <= copy) {
17565d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
17575d424d5aSJohn Heffner 			 * Throw it away. */
17584de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1759fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
17603ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
17615d424d5aSJohn Heffner 		} else {
17624de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1763a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
17645d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
17655d424d5aSJohn Heffner 				skb_pull(skb, copy);
176684fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1767056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1768056834d9SIlpo Järvinen 								 skb->len, 0);
17695d424d5aSJohn Heffner 			} else {
17705d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
17715d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
17725d424d5aSJohn Heffner 			}
17735d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
17745d424d5aSJohn Heffner 		}
17755d424d5aSJohn Heffner 
17765d424d5aSJohn Heffner 		len += copy;
1777234b6860SIlpo Järvinen 
1778234b6860SIlpo Järvinen 		if (len >= probe_size)
1779234b6860SIlpo Järvinen 			break;
17805d424d5aSJohn Heffner 	}
17815d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
17825d424d5aSJohn Heffner 
17835d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
17845d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
17855d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
17865d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
17875d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
17885d424d5aSJohn Heffner 		 * effectively two packets. */
17895d424d5aSJohn Heffner 		tp->snd_cwnd--;
179066f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
17915d424d5aSJohn Heffner 
17925d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
17930e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
17940e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
17955d424d5aSJohn Heffner 
17965d424d5aSJohn Heffner 		return 1;
17975d424d5aSJohn Heffner 	}
17985d424d5aSJohn Heffner 
17995d424d5aSJohn Heffner 	return -1;
18005d424d5aSJohn Heffner }
18015d424d5aSJohn Heffner 
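/* A worked example, assuming mss_cache = 1400 and the default
 * reordering of 3: tcp_mtu_probe() builds probe_size = 2 * 1400 = 2800
 * and demands size_needed = 2800 + 4 * 1400 = 8400 bytes both queued
 * and inside the send window before probing, so lightly loaded
 * connections simply return -1 and keep the current MSS.
 */
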
18021da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
18031da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
18041da177e4SLinus Torvalds  * window for us.
18051da177e4SLinus Torvalds  *
1806f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1807f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1808f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1809f8269a49SIlpo Järvinen  *
18106ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
18116ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
18126ba8a3b1SNandita Dukkipati  *
1813a2a385d6SEric Dumazet  * Returns true, if no segments are in flight and we have queued segments,
1814a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
18151da177e4SLinus Torvalds  */
1816a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1817d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
18181da177e4SLinus Torvalds {
18191da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
182092df7b51SDavid S. Miller 	struct sk_buff *skb;
1821c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1822c1b4a7e6SDavid S. Miller 	int cwnd_quota;
18235d424d5aSJohn Heffner 	int result;
18241da177e4SLinus Torvalds 
1825c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
18265d424d5aSJohn Heffner 
1827d5dd9175SIlpo Järvinen 	if (!push_one) {
18285d424d5aSJohn Heffner 		/* Do MTU probing. */
1829d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1830d5dd9175SIlpo Järvinen 		if (!result) {
1831a2a385d6SEric Dumazet 			return false;
18325d424d5aSJohn Heffner 		} else if (result > 0) {
18335d424d5aSJohn Heffner 			sent_pkts = 1;
18345d424d5aSJohn Heffner 		}
1835d5dd9175SIlpo Järvinen 	}
18365d424d5aSJohn Heffner 
1837fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1838c8ac3774SHerbert Xu 		unsigned int limit;
1839c8ac3774SHerbert Xu 
1841b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1842c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1843c1b4a7e6SDavid S. Miller 
1844ec342325SAndrew Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1845ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
1846ec342325SAndrew Vagin 
1847b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
18486ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
18496ba8a3b1SNandita Dukkipati 			if (push_one == 2)
18506ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
18516ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
18526ba8a3b1SNandita Dukkipati 			else
1853b68e9f85SHerbert Xu 				break;
18546ba8a3b1SNandita Dukkipati 		}
1855b68e9f85SHerbert Xu 
1856b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1857b68e9f85SHerbert Xu 			break;
1858b68e9f85SHerbert Xu 
1859c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1860aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1861aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1862aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1863aa93466bSDavid S. Miller 				break;
1864c1b4a7e6SDavid S. Miller 		} else {
1865d5dd9175SIlpo Järvinen 			if (!push_one && tcp_tso_should_defer(sk, skb))
1866aa93466bSDavid S. Miller 				break;
1867c1b4a7e6SDavid S. Miller 		}
1868aa93466bSDavid S. Miller 
186946d3ceabSEric Dumazet 		/* TSQ : sk_wmem_alloc accounts skb truesize,
187046d3ceabSEric Dumazet 		 * including skb overhead. But thats OK.
187146d3ceabSEric Dumazet 		 * including skb overhead. But that's OK.
187246d3ceabSEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
187346d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
187446d3ceabSEric Dumazet 			break;
187546d3ceabSEric Dumazet 		}
1876c8ac3774SHerbert Xu 		limit = mss_now;
1877f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
18780e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
18791485348dSBen Hutchings 						    min_t(unsigned int,
18801485348dSBen Hutchings 							  cwnd_quota,
18811485348dSBen Hutchings 							  sk->sk_gso_max_segs));
1882c8ac3774SHerbert Xu 
1883c8ac3774SHerbert Xu 		if (skb->len > limit &&
1884c4ead4c5SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
18851da177e4SLinus Torvalds 			break;
18861da177e4SLinus Torvalds 
18871da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1888c1b4a7e6SDavid S. Miller 
1889d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
18901da177e4SLinus Torvalds 			break;
18911da177e4SLinus Torvalds 
1892ec342325SAndrew Vagin repair:
18931da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
18941da177e4SLinus Torvalds 		 * This call will increment packets_out.
18951da177e4SLinus Torvalds 		 */
189666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
18971da177e4SLinus Torvalds 
18981da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1899a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
1900d5dd9175SIlpo Järvinen 
1901d5dd9175SIlpo Järvinen 		if (push_one)
1902d5dd9175SIlpo Järvinen 			break;
19031da177e4SLinus Torvalds 	}
19041da177e4SLinus Torvalds 
1905aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
1906684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
1907684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
19086ba8a3b1SNandita Dukkipati 
19096ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
19106ba8a3b1SNandita Dukkipati 		if (push_one != 2)
19116ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
19129e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
1913a2a385d6SEric Dumazet 		return false;
19141da177e4SLinus Torvalds 	}
19156ba8a3b1SNandita Dukkipati 	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
19166ba8a3b1SNandita Dukkipati }
19176ba8a3b1SNandita Dukkipati 
19186ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
19196ba8a3b1SNandita Dukkipati {
19206ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
19216ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
19226ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
19236ba8a3b1SNandita Dukkipati 	u32 rtt = tp->srtt >> 3;
19246ba8a3b1SNandita Dukkipati 
19256ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
19266ba8a3b1SNandita Dukkipati 		return false;
19276ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
19286ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
19296ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
19306ba8a3b1SNandita Dukkipati 		return false;
19316ba8a3b1SNandita Dukkipati 	}
19326ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
19336ba8a3b1SNandita Dukkipati 	 * finishes.
19346ba8a3b1SNandita Dukkipati 	 */
19356ba8a3b1SNandita Dukkipati 	if (sk->sk_state == TCP_SYN_RECV)
19366ba8a3b1SNandita Dukkipati 		return false;
19376ba8a3b1SNandita Dukkipati 
19386ba8a3b1SNandita Dukkipati 	/* TLP is only scheduled when next timer event is RTO. */
19396ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
19406ba8a3b1SNandita Dukkipati 		return false;
19416ba8a3b1SNandita Dukkipati 
19426ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
19436ba8a3b1SNandita Dukkipati 	 * in Open state, that are either limited by cwnd or application.
19446ba8a3b1SNandita Dukkipati 	 */
19456ba8a3b1SNandita Dukkipati 	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
19466ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
19476ba8a3b1SNandita Dukkipati 		return false;
19486ba8a3b1SNandita Dukkipati 
19496ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
19506ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
19516ba8a3b1SNandita Dukkipati 		return false;
19526ba8a3b1SNandita Dukkipati 
19536ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
19546ba8a3b1SNandita Dukkipati 	 * for delayed ack when there's one outstanding packet.
19556ba8a3b1SNandita Dukkipati 	 */
19566ba8a3b1SNandita Dukkipati 	timeout = rtt << 1;
19576ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
19586ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
19596ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
19606ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
19616ba8a3b1SNandita Dukkipati 
19626ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
19636ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
19646ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
19656ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
19666ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
19676ba8a3b1SNandita Dukkipati 		if (delta > 0)
19686ba8a3b1SNandita Dukkipati 			timeout = delta;
19696ba8a3b1SNandita Dukkipati 	}
19706ba8a3b1SNandita Dukkipati 
19716ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
19726ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
19736ba8a3b1SNandita Dukkipati 	return true;
19746ba8a3b1SNandita Dukkipati }
19756ba8a3b1SNandita Dukkipati 
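/* A worked example of the PTO above, assuming HZ = 1000 and
 * rtt = tp->srtt >> 3 = 50 ms: the base timeout is 2 * 50 = 100 ms;
 * with exactly one packet outstanding it becomes
 * max(100, 50 + 25 + TCP_DELACK_MAX) = 275 ms for the usual 200 ms
 * delayed-ACK maximum, floored at 10 ms and capped by an earlier RTO
 * if one would fire first.
 */
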
19766ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else
19776ba8a3b1SNandita Dukkipati  * retransmit the last segment.
19786ba8a3b1SNandita Dukkipati  */
19796ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
19806ba8a3b1SNandita Dukkipati {
19819b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
19826ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
19836ba8a3b1SNandita Dukkipati 	int pcount;
19846ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
19856ba8a3b1SNandita Dukkipati 	int err = -1;
19866ba8a3b1SNandita Dukkipati 
19876ba8a3b1SNandita Dukkipati 	if (tcp_send_head(sk) != NULL) {
19886ba8a3b1SNandita Dukkipati 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
19896ba8a3b1SNandita Dukkipati 		goto rearm_timer;
19906ba8a3b1SNandita Dukkipati 	}
19916ba8a3b1SNandita Dukkipati 
19929b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
19939b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
19949b717a8dSNandita Dukkipati 		goto rearm_timer;
19959b717a8dSNandita Dukkipati 
19966ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
19976ba8a3b1SNandita Dukkipati 	skb = tcp_write_queue_tail(sk);
19986ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
19996ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20006ba8a3b1SNandita Dukkipati 
20016ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
20026ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
20036ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20046ba8a3b1SNandita Dukkipati 
20056ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
20066ba8a3b1SNandita Dukkipati 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
20076ba8a3b1SNandita Dukkipati 			goto rearm_timer;
20086ba8a3b1SNandita Dukkipati 		skb = tcp_write_queue_tail(sk);
20096ba8a3b1SNandita Dukkipati 	}
20106ba8a3b1SNandita Dukkipati 
20116ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
20126ba8a3b1SNandita Dukkipati 		goto rearm_timer;
20136ba8a3b1SNandita Dukkipati 
20146ba8a3b1SNandita Dukkipati 	/* Probe with zero data doesn't trigger fast recovery. */
20156ba8a3b1SNandita Dukkipati 	if (skb->len > 0)
20166ba8a3b1SNandita Dukkipati 		err = __tcp_retransmit_skb(sk, skb);
20176ba8a3b1SNandita Dukkipati 
20189b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
20199b717a8dSNandita Dukkipati 	if (likely(!err))
20209b717a8dSNandita Dukkipati 		tp->tlp_high_seq = tp->snd_nxt;
20219b717a8dSNandita Dukkipati 
20226ba8a3b1SNandita Dukkipati rearm_timer:
20236ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
20246ba8a3b1SNandita Dukkipati 				  inet_csk(sk)->icsk_rto,
20256ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
20266ba8a3b1SNandita Dukkipati 
20276ba8a3b1SNandita Dukkipati 	if (likely(!err))
20286ba8a3b1SNandita Dukkipati 		NET_INC_STATS_BH(sock_net(sk),
20296ba8a3b1SNandita Dukkipati 				 LINUX_MIB_TCPLOSSPROBES);
20306ba8a3b1SNandita Dukkipati 	return;
20311da177e4SLinus Torvalds }
20321da177e4SLinus Torvalds 
2033a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2034a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2035a762a980SDavid S. Miller  * The socket must be locked by the caller.
2036a762a980SDavid S. Miller  */
20379e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
20389e412ba7SIlpo Järvinen 			       int nonagle)
2039a762a980SDavid S. Miller {
2040726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2041726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2042726e07a8SIlpo Järvinen 	 * all will be happy.
2043726e07a8SIlpo Järvinen 	 */
2044726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2045726e07a8SIlpo Järvinen 		return;
2046726e07a8SIlpo Järvinen 
204799a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
204899a1dec7SMel Gorman 			   sk_gfp_atomic(sk, GFP_ATOMIC)))
20499e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2050a762a980SDavid S. Miller }
2051a762a980SDavid S. Miller 
2052c1b4a7e6SDavid S. Miller /* Send a _single_ skb sitting at the send head. Callers that also need
2053c1b4a7e6SDavid S. Miller  * the probe timer set up must use __tcp_push_pending_frames() instead.
2054c1b4a7e6SDavid S. Miller  */
2055c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2056c1b4a7e6SDavid S. Miller {
2057fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2058c1b4a7e6SDavid S. Miller 
2059c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2060c1b4a7e6SDavid S. Miller 
2061d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2062c1b4a7e6SDavid S. Miller }
2063c1b4a7e6SDavid S. Miller 
20641da177e4SLinus Torvalds /* This function returns the amount that we can raise the
20651da177e4SLinus Torvalds  * usable window based on the following constraints
20661da177e4SLinus Torvalds  *
20671da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
20681da177e4SLinus Torvalds  * 2. We limit memory per socket
20691da177e4SLinus Torvalds  *
20701da177e4SLinus Torvalds  * RFC 1122:
20711da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
20721da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
20731da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
20741da177e4SLinus Torvalds  *
20751da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
20761da177e4SLinus Torvalds  * it at least MSS bytes.
20771da177e4SLinus Torvalds  *
20781da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
20791da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
20801da177e4SLinus Torvalds  *
20811da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
20821da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
20831da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
20841da177e4SLinus Torvalds  * window to always advance by a single byte.
20851da177e4SLinus Torvalds  *
20861da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
20871da177e4SLinus Torvalds  * then this will not be a problem.
20881da177e4SLinus Torvalds  *
20891da177e4SLinus Torvalds  * BSD seems to make the following compromise:
20901da177e4SLinus Torvalds  *
20911da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
20921da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
20931da177e4SLinus Torvalds  *	then set the window to 0.
20941da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
20951da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
20961da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
20971da177e4SLinus Torvalds  *
20981da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
20991da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
21001da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
21011da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
21021da177e4SLinus Torvalds  * because the pipeline is full.
21031da177e4SLinus Torvalds  *
21041da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
21051da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
21061da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
21071da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
21081da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
21091da177e4SLinus Torvalds  *
21101da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
21111da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
21121da177e4SLinus Torvalds  *
21131da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
21141da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
21151da177e4SLinus Torvalds  */
21161da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
21171da177e4SLinus Torvalds {
2118463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
21191da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2120caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
21211da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
21221da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
21231da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
21241da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
21251da177e4SLinus Torvalds 	 */
2126463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
21271da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
21281da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
21291da177e4SLinus Torvalds 	int window;
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds 	if (mss > full_space)
21321da177e4SLinus Torvalds 		mss = full_space;
21331da177e4SLinus Torvalds 
2134b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2135463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
21361da177e4SLinus Torvalds 
2137180d8cd9SGlauber Costa 		if (sk_under_memory_pressure(sk))
2138056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2139056834d9SIlpo Järvinen 					       4U * tp->advmss);
21401da177e4SLinus Torvalds 
21411da177e4SLinus Torvalds 		if (free_space < mss)
21421da177e4SLinus Torvalds 			return 0;
21431da177e4SLinus Torvalds 	}
21441da177e4SLinus Torvalds 
21451da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
21461da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
21491da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
21501da177e4SLinus Torvalds 	 */
21511da177e4SLinus Torvalds 	window = tp->rcv_wnd;
21521da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
21531da177e4SLinus Torvalds 		window = free_space;
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
21561da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
21571da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
21581da177e4SLinus Torvalds 		 */
21591da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
21601da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
21611da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
21621da177e4SLinus Torvalds 	} else {
21631da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
21641da177e4SLinus Torvalds 		 * Window clamp already applied above.
21651da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
21661da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
21671da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
21681da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
21691da177e4SLinus Torvalds 		 * is too small.
21701da177e4SLinus Torvalds 		 */
21711da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
21721da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
217384565070SJohn Heffner 		else if (mss == full_space &&
2174b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
217584565070SJohn Heffner 			window = free_space;
21761da177e4SLinus Torvalds 	}
21771da177e4SLinus Torvalds 
21781da177e4SLinus Torvalds 	return window;
21791da177e4SLinus Torvalds }
21801da177e4SLinus Torvalds 
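/* A worked example of the rounding in __tcp_select_window() above
 * (illustrative numbers): without window scaling, free_space == 10000 and
 * mss == 1460 yield window == (10000 / 1460) * 1460 == 8760, the largest
 * multiple of the MSS that fits.  With rcv_wscale == 7 the value is
 * instead rounded *up* to a multiple of 128, so a 100-byte window is
 * announced as 128 rather than being scaled away to zero.
 */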
21814a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
21824a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
21831da177e4SLinus Torvalds {
21841da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2185fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2186058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
21871da177e4SLinus Torvalds 
2188058dc334SIlpo Järvinen 	skb_size = skb->len;
2189058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
21901da177e4SLinus Torvalds 
2191058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
21921da177e4SLinus Torvalds 
21936859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2194a6963a6bSIlpo Järvinen 
2195fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
21961da177e4SLinus Torvalds 
2197058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
21981a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
21991da177e4SLinus Torvalds 
220052d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
220152d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
22021da177e4SLinus Torvalds 
220384fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
22041da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
22071da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
22081da177e4SLinus Torvalds 
2209e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
22104de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
22111da177e4SLinus Torvalds 
22121da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
22131da177e4SLinus Torvalds 	 * packet counting does not break.
22141da177e4SLinus Torvalds 	 */
22154828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2216b7689205SIlpo Järvinen 
2217b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2218ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2219ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2220ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2221b7689205SIlpo Järvinen 
2222797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2223797108d1SIlpo Järvinen 
22243ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
22251da177e4SLinus Torvalds }
22261da177e4SLinus Torvalds 
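/* For example (a sketch of the merge above): an skb covering [100, 200)
 * followed by one covering [200, 300) is collapsed into a single skb
 * covering [100, 300); the second skb's payload, flags (PSH/FIN) and
 * TCPCB_EVER_RETRANS state are folded into the first before it is freed.
 */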
222767edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2228a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
22294a17fc3aSIlpo Järvinen {
22304a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2231a2a385d6SEric Dumazet 		return false;
22324a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
22334a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2234a2a385d6SEric Dumazet 		return false;
22354a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2236a2a385d6SEric Dumazet 		return false;
22374a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2238a2a385d6SEric Dumazet 		return false;
22394a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd could be invented */
22404a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2241a2a385d6SEric Dumazet 		return false;
22424a17fc3aSIlpo Järvinen 
2243a2a385d6SEric Dumazet 	return true;
22444a17fc3aSIlpo Järvinen }
22454a17fc3aSIlpo Järvinen 
224667edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create fewer packets on
224767edfef7SAndi Kleen  * the wire. This is only done on retransmission.
224867edfef7SAndi Kleen  */
22494a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
22504a17fc3aSIlpo Järvinen 				     int space)
22514a17fc3aSIlpo Järvinen {
22524a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
22534a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2254a2a385d6SEric Dumazet 	bool first = true;
22554a17fc3aSIlpo Järvinen 
22564a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
22574a17fc3aSIlpo Järvinen 		return;
22584de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
22594a17fc3aSIlpo Järvinen 		return;
22604a17fc3aSIlpo Järvinen 
22614a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
22624a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
22634a17fc3aSIlpo Järvinen 			break;
22644a17fc3aSIlpo Järvinen 
22654a17fc3aSIlpo Järvinen 		space -= skb->len;
22664a17fc3aSIlpo Järvinen 
22674a17fc3aSIlpo Järvinen 		if (first) {
2268a2a385d6SEric Dumazet 			first = false;
22694a17fc3aSIlpo Järvinen 			continue;
22704a17fc3aSIlpo Järvinen 		}
22714a17fc3aSIlpo Järvinen 
22724a17fc3aSIlpo Järvinen 		if (space < 0)
22734a17fc3aSIlpo Järvinen 			break;
22744a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
22754a17fc3aSIlpo Järvinen 		 * the data in the second
22764a17fc3aSIlpo Järvinen 		 */
2277a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
22784a17fc3aSIlpo Järvinen 			break;
22794a17fc3aSIlpo Järvinen 
22804a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
22814a17fc3aSIlpo Järvinen 			break;
22824a17fc3aSIlpo Järvinen 
22834a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
22844a17fc3aSIlpo Järvinen 	}
22854a17fc3aSIlpo Järvinen }
22864a17fc3aSIlpo Järvinen 
22871da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
22881da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
22891da177e4SLinus Torvalds  * error occurred which prevented the send.
22901da177e4SLinus Torvalds  */
229193b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
22921da177e4SLinus Torvalds {
22931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22945d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
22957d227cd2SSridhar Samudrala 	unsigned int cur_mss;
22961da177e4SLinus Torvalds 
22975d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
22985d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
22995d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
23005d424d5aSJohn Heffner 	}
23015d424d5aSJohn Heffner 
23021da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2303caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
23041da177e4SLinus Torvalds 	 */
23051da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
23061da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
23071da177e4SLinus Torvalds 		return -EAGAIN;
23081da177e4SLinus Torvalds 
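	/* For instance (illustrative numbers): with sk_wmem_queued == 100KB
	 * and a larger sk_sndbuf, retransmits are allowed until
	 * sk_wmem_alloc exceeds 125KB; the extra quarter absorbs clones
	 * grown by tunnels, mangling and the like.
	 */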
23091da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
23101da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
23111da177e4SLinus Torvalds 			BUG();
23121da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
23131da177e4SLinus Torvalds 			return -ENOMEM;
23141da177e4SLinus Torvalds 	}
23151da177e4SLinus Torvalds 
23167d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
23177d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
23187d227cd2SSridhar Samudrala 
23190c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
23207d227cd2SSridhar Samudrala 
23211da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
23221da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
23231da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
23241da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
23251da177e4SLinus Torvalds 	 */
23269d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
23279d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
23281da177e4SLinus Torvalds 		return -EAGAIN;
23291da177e4SLinus Torvalds 
23301da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
2331846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
23321da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
233302276f3cSIlpo Järvinen 	} else {
23349eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
23359eb9362eSIlpo Järvinen 
23369eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
233702276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
23389eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
23399eb9362eSIlpo Järvinen 		}
23401da177e4SLinus Torvalds 	}
23411da177e4SLinus Torvalds 
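	/* e.g. (illustrative): if the path MTU dropped so that cur_mss fell
	 * from 1460 to 1200 while a 2920-byte skb sits in the queue, the
	 * skb is re-fragmented to the new MSS above; if cur_mss grew
	 * instead, the TSO segment count may shrink and tcp_adjust_pcount()
	 * repairs the in-flight accounting.
	 */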
23421da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
23431da177e4SLinus Torvalds 
23441da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
23451da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
23461da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
23471da177e4SLinus Torvalds 	 */
23481da177e4SLinus Torvalds 	if (skb->len > 0 &&
23494de075e0SEric Dumazet 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
23501da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
23511da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
2352e870a8efSIlpo Järvinen 			/* Reuse, even though it does some unnecessary work */
2353e870a8efSIlpo Järvinen 			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
23544de075e0SEric Dumazet 					     TCP_SKB_CB(skb)->tcp_flags);
23551da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
23561da177e4SLinus Torvalds 		}
23571da177e4SLinus Torvalds 	}
23581da177e4SLinus Torvalds 
23591da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
23601da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
23611da177e4SLinus Torvalds 	 */
23621da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
23631da177e4SLinus Torvalds 
236450bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
236550bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
236650bceae9SThomas Graf 	 * beyond what csum_start can cover.
236750bceae9SThomas Graf 	 */
236850bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
236950bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
2370117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2371117632e6SEric Dumazet 						   GFP_ATOMIC);
237293b174adSYuchung Cheng 		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2373117632e6SEric Dumazet 			      -ENOBUFS;
2374117632e6SEric Dumazet 	} else {
237593b174adSYuchung Cheng 		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2376117632e6SEric Dumazet 	}
237793b174adSYuchung Cheng }
237893b174adSYuchung Cheng 
237993b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
238093b174adSYuchung Cheng {
238193b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
238293b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
23831da177e4SLinus Torvalds 
23841da177e4SLinus Torvalds 	if (err == 0) {
23851da177e4SLinus Torvalds 		/* Update global TCP statistics. */
238681cc8a75SPavel Emelyanov 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds 		tp->total_retrans++;
23891da177e4SLinus Torvalds 
23901da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
23911da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2392e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
23931da177e4SLinus Torvalds 		}
23941da177e4SLinus Torvalds #endif
2395b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2396b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
23971da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
23981da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
23991da177e4SLinus Torvalds 
24001da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
24011da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
24021da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
24031da177e4SLinus Torvalds 
2404c24f691bSYuchung Cheng 		tp->undo_retrans += tcp_skb_pcount(skb);
24051da177e4SLinus Torvalds 
24061da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
24071da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
24081da177e4SLinus Torvalds 		 */
24091da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
241024ab6becSYuchung Cheng 	} else {
241124ab6becSYuchung Cheng 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
24121da177e4SLinus Torvalds 	}
24131da177e4SLinus Torvalds 	return err;
24141da177e4SLinus Torvalds }
24151da177e4SLinus Torvalds 
241667edfef7SAndi Kleen /* Check whether forward retransmits are possible in the current
241767edfef7SAndi Kleen  * window/congestion state.
241867edfef7SAndi Kleen  */
2419a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2420b5afe7bcSIlpo Järvinen {
2421b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2422cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2423b5afe7bcSIlpo Järvinen 
2424b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2425b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2426a2a385d6SEric Dumazet 		return false;
2427b5afe7bcSIlpo Järvinen 
2428b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2429b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2430a2a385d6SEric Dumazet 		return false;
2431b5afe7bcSIlpo Järvinen 
2432b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward transmission
2433b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2434b5afe7bcSIlpo Järvinen 	 *
2435b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2436b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2437b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2438b5afe7bcSIlpo Järvinen 	 */
2439b5afe7bcSIlpo Järvinen 
2440b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2441a2a385d6SEric Dumazet 		return false;
2442b5afe7bcSIlpo Järvinen 
2443a2a385d6SEric Dumazet 	return true;
2444b5afe7bcSIlpo Järvinen }
2445b5afe7bcSIlpo Järvinen 
24461da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
24471da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
24481da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
24491da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
24501da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
24511da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
24521da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
24531da177e4SLinus Torvalds  */
24541da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
24551da177e4SLinus Torvalds {
24566687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
24571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24581da177e4SLinus Torvalds 	struct sk_buff *skb;
24590e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2460618d9f25SIlpo Järvinen 	u32 last_lost;
246161eb55f4SIlpo Järvinen 	int mib_idx;
24620e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
24636a438bbeSStephen Hemminger 
246445e77d31SIlpo Järvinen 	if (!tp->packets_out)
246545e77d31SIlpo Järvinen 		return;
246645e77d31SIlpo Järvinen 
246708ebd172SIlpo Järvinen 	if (!tp->lost_out)
246808ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
246908ebd172SIlpo Järvinen 
2470618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
24716a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2472618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2473618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2474618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2475618d9f25SIlpo Järvinen 	} else {
2476fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2477618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2478618d9f25SIlpo Järvinen 	}
24791da177e4SLinus Torvalds 
2480fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
24811da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
24821da177e4SLinus Torvalds 
2483fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2484fe067e8aSDavid S. Miller 			break;
24856a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
24860e1c54c2SIlpo Järvinen 		if (hole == NULL)
24876a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
24886a438bbeSStephen Hemminger 
24891da177e4SLinus Torvalds 		/* Assume this retransmit will generate
24901da177e4SLinus Torvalds 		 * only one packet for congestion window
24911da177e4SLinus Torvalds 		 * calculation purposes.  This works because
24921da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
24931da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
24941da177e4SLinus Torvalds 		 * packet counting works out.
24951da177e4SLinus Torvalds 		 */
24961da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
24971da177e4SLinus Torvalds 			return;
24980e1c54c2SIlpo Järvinen 
24990e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
25000e1c54c2SIlpo Järvinen begin_fwd:
25010e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2502006f582cSIlpo Järvinen 				break;
25030e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
25040e1c54c2SIlpo Järvinen 
25050e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2506618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
25070e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
25080e1c54c2SIlpo Järvinen 				break;
25090e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
25100e1c54c2SIlpo Järvinen 			if (hole != NULL) {
25110e1c54c2SIlpo Järvinen 				skb = hole;
25120e1c54c2SIlpo Järvinen 				hole = NULL;
25130e1c54c2SIlpo Järvinen 			}
25140e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
25150e1c54c2SIlpo Järvinen 			goto begin_fwd;
25160e1c54c2SIlpo Järvinen 
25170e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2518ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
25190e1c54c2SIlpo Järvinen 				hole = skb;
252061eb55f4SIlpo Järvinen 			continue;
25211da177e4SLinus Torvalds 
25220e1c54c2SIlpo Järvinen 		} else {
2523618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
25240e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
25250e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
25260e1c54c2SIlpo Järvinen 			else
25270e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
25280e1c54c2SIlpo Järvinen 		}
25290e1c54c2SIlpo Järvinen 
25300e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
253161eb55f4SIlpo Järvinen 			continue;
253240b215e5SPavel Emelyanov 
253324ab6becSYuchung Cheng 		if (tcp_retransmit_skb(sk, skb))
25341da177e4SLinus Torvalds 			return;
253524ab6becSYuchung Cheng 
2536de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
25371da177e4SLinus Torvalds 
2538684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2539a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2540a262f0cdSNandita Dukkipati 
2541fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2542463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
25433f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
25443f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
25451da177e4SLinus Torvalds 	}
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
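/* A sketch of the walk above (L == TCPCB_LOST, S == SACKed, illustrative):
 * for a queue [L][L][S][ ][L], the loss pass retransmits the L-marked
 * skbs, skips the SACKed one, and remembers the unmarked skb as "hole";
 * once retransmit_high is passed, and if tcp_can_forward_retransmit()
 * allows it, the walk backtracks to the hole and continues with forward
 * retransmissions up to the highest SACKed sequence.
 */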
25481da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This cannot be
25491da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
25501da177e4SLinus Torvalds  */
25511da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
25521da177e4SLinus Torvalds {
25531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2554fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
25551da177e4SLinus Torvalds 	int mss_now;
25561da177e4SLinus Torvalds 
25571da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
25581da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
25591da177e4SLinus Torvalds 	 * and IP options.
25601da177e4SLinus Torvalds 	 */
25610c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
25621da177e4SLinus Torvalds 
2563fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
25644de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
25651da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
25661da177e4SLinus Torvalds 		tp->write_seq++;
25671da177e4SLinus Torvalds 	} else {
25681da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
25691da177e4SLinus Torvalds 		for (;;) {
2570aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2571aa133076SWu Fengguang 					       sk->sk_allocation);
25721da177e4SLinus Torvalds 			if (skb)
25731da177e4SLinus Torvalds 				break;
25741da177e4SLinus Torvalds 			yield();
25751da177e4SLinus Torvalds 		}
25761da177e4SLinus Torvalds 
25771da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
25781da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
25791da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2580e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2581a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
25821da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
25831da177e4SLinus Torvalds 	}
25849e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
25851da177e4SLinus Torvalds }
25861da177e4SLinus Torvalds 
25871da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
25881da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
25891da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
259065bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
25911da177e4SLinus Torvalds  */
2592dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
25931da177e4SLinus Torvalds {
25941da177e4SLinus Torvalds 	struct sk_buff *skb;
25951da177e4SLinus Torvalds 
25961da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
25971da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
25981da177e4SLinus Torvalds 	if (!skb) {
25994e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
26001da177e4SLinus Torvalds 		return;
26011da177e4SLinus Torvalds 	}
26021da177e4SLinus Torvalds 
26031da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
26041da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2605e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2606a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
26071da177e4SLinus Torvalds 	/* Send it off. */
26081da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2609dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
26104e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
261126af65cbSSridhar Samudrala 
261281cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
26131da177e4SLinus Torvalds }
26141da177e4SLinus Torvalds 
261567edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
261667edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
26171da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
26181da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
26191da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
26201da177e4SLinus Torvalds  */
26211da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
26221da177e4SLinus Torvalds {
26231da177e4SLinus Torvalds 	struct sk_buff *skb;
26241da177e4SLinus Torvalds 
2625fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
26264de075e0SEric Dumazet 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
262791df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
26281da177e4SLinus Torvalds 		return -EFAULT;
26291da177e4SLinus Torvalds 	}
26304de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
26311da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
26321da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
26331da177e4SLinus Torvalds 			if (nskb == NULL)
26341da177e4SLinus Torvalds 				return -ENOMEM;
2635fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
26361da177e4SLinus Torvalds 			skb_header_release(nskb);
2637fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
26383ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
26393ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
26403ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
26411da177e4SLinus Torvalds 			skb = nskb;
26421da177e4SLinus Torvalds 		}
26431da177e4SLinus Torvalds 
26444de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
26451da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
26461da177e4SLinus Torvalds 	}
26471da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2648dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
26491da177e4SLinus Torvalds }
26501da177e4SLinus Torvalds 
26514aea39c1SEric Dumazet /**
26524aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
26534aea39c1SEric Dumazet  * @sk: listener socket
26544aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
26554aea39c1SEric Dumazet  * @req: request_sock pointer
26564aea39c1SEric Dumazet  *
26574aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
26584aea39c1SEric Dumazet  * @dst is consumed: Caller should not use it again.
26594aea39c1SEric Dumazet  */
26601da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2661e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
26628336886fSJerry Chu 				struct tcp_fastopen_cookie *foc)
26631da177e4SLinus Torvalds {
2664bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
26652e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
26661da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
26671da177e4SLinus Torvalds 	struct tcphdr *th;
26681da177e4SLinus Torvalds 	struct sk_buff *skb;
2669cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2670bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2671f5fff5dcSTom Quetchenbach 	int mss;
26721da177e4SLinus Torvalds 
2673*eb8895deSPhil Oester 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
26744aea39c1SEric Dumazet 	if (unlikely(!skb)) {
26754aea39c1SEric Dumazet 		dst_release(dst);
26761da177e4SLinus Torvalds 		return NULL;
26774aea39c1SEric Dumazet 	}
26781da177e4SLinus Torvalds 	/* Reserve space for headers. */
26791da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
26801da177e4SLinus Torvalds 
26814aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
2682ca10b9e9SEric Dumazet 	security_skb_owned_by(skb, sk);
26831da177e4SLinus Torvalds 
26840dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
2685f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2686f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2687f5fff5dcSTom Quetchenbach 
268833ad798cSAdam Langley 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
268933ad798cSAdam Langley 		__u8 rcv_wscale;
269033ad798cSAdam Langley 		/* Set this up on the first call only */
269133ad798cSAdam Langley 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2692e88c64f0SHagen Paul Pfeifer 
2693e88c64f0SHagen Paul Pfeifer 		/* limit the window selection if the user enforce a smaller rx buffer */
2694e88c64f0SHagen Paul Pfeifer 		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2695e88c64f0SHagen Paul Pfeifer 		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2696e88c64f0SHagen Paul Pfeifer 			req->window_clamp = tcp_full_space(sk);
2697e88c64f0SHagen Paul Pfeifer 
269833ad798cSAdam Langley 		/* tcp_full_space because it is guaranteed to be the first packet */
269933ad798cSAdam Langley 		tcp_select_initial_window(tcp_full_space(sk),
2700f5fff5dcSTom Quetchenbach 			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
270133ad798cSAdam Langley 			&req->rcv_wnd,
270233ad798cSAdam Langley 			&req->window_clamp,
270333ad798cSAdam Langley 			ireq->wscale_ok,
270431d12926Slaurent chavey 			&rcv_wscale,
270531d12926Slaurent chavey 			dst_metric(dst, RTAX_INITRWND));
270633ad798cSAdam Langley 		ireq->rcv_wscale = rcv_wscale;
270733ad798cSAdam Langley 	}
2708cfb6eeb4SYOSHIFUJI Hideaki 
270933ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
27108b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
27118b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
27128b5f12d0SFlorian Westphal 		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
27138b5f12d0SFlorian Westphal 	else
27148b5f12d0SFlorian Westphal #endif
271533ad798cSAdam Langley 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
27161a2c6181SChristoph Paasch 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
27171a2c6181SChristoph Paasch 					     foc) + sizeof(*th);
271833ad798cSAdam Langley 
2719aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2720aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
27211da177e4SLinus Torvalds 
2722aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
27231da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
27241da177e4SLinus Torvalds 	th->syn = 1;
27251da177e4SLinus Torvalds 	th->ack = 1;
27261da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
2727a3116ac5SKOVACS Krisztian 	th->source = ireq->loc_port;
27282e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
2729e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
2730e870a8efSIlpo Järvinen 	 * not even correctly set)
2731e870a8efSIlpo Järvinen 	 */
2732e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2733a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
27344957faadSWilliam Allen Simpson 
27351da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
27368336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
27378336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
27381da177e4SLinus Torvalds 
27391da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2740600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2741bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
27421da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
2743aa2ea058STom Herbert 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2744cfb6eeb4SYOSHIFUJI Hideaki 
2745cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2746cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2747cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2748bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
274949a72dfbSAdam Langley 					       md5, NULL, req, skb);
2750cfb6eeb4SYOSHIFUJI Hideaki 	}
2751cfb6eeb4SYOSHIFUJI Hideaki #endif
2752cfb6eeb4SYOSHIFUJI Hideaki 
27531da177e4SLinus Torvalds 	return skb;
27541da177e4SLinus Torvalds }
27554bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
27561da177e4SLinus Torvalds 
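/* A minimal caller sketch (modeled on the IPv4 SYN-ACK path; the routing
 * call is an assumption about the caller, not lifted from this file):
 *
 *	dst = inet_csk_route_req(sk, &fl4, req);
 *	if (dst) {
 *		skb = tcp_make_synack(sk, dst, req, NULL);
 *		...	dst is consumed here: do not touch it again,
 *			even when skb comes back NULL.
 *	}
 */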
275767edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
2758370816aeSPavel Emelyanov void tcp_connect_init(struct sock *sk)
27591da177e4SLinus Torvalds {
2760cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
27611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27621da177e4SLinus Torvalds 	__u8 rcv_wscale;
27631da177e4SLinus Torvalds 
27641da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
27651da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
27661da177e4SLinus Torvalds 	 */
27671da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2768bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
27691da177e4SLinus Torvalds 
2770cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2771cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2772cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2773cfb6eeb4SYOSHIFUJI Hideaki #endif
2774cfb6eeb4SYOSHIFUJI Hideaki 
27751da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
27761da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
27771da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
27781da177e4SLinus Torvalds 	tp->max_window = 0;
27795d424d5aSJohn Heffner 	tcp_mtup_init(sk);
27801da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
27811da177e4SLinus Torvalds 
27821da177e4SLinus Torvalds 	if (!tp->window_clamp)
27831da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
27840dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
2785f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2786f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2787f5fff5dcSTom Quetchenbach 
27881da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
27891da177e4SLinus Torvalds 
2790e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforce a smaller rx buffer */
2791e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2792e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2793e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
2794e88c64f0SHagen Paul Pfeifer 
27951da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
27961da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
27971da177e4SLinus Torvalds 				  &tp->rcv_wnd,
27981da177e4SLinus Torvalds 				  &tp->window_clamp,
2799bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
280031d12926Slaurent chavey 				  &rcv_wscale,
280131d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
28021da177e4SLinus Torvalds 
28031da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
28041da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
28051da177e4SLinus Torvalds 
28061da177e4SLinus Torvalds 	sk->sk_err = 0;
28071da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
28081da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2809ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
28101da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
28111da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
281233f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
2813370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
2814ee995283SPavel Emelyanov 
2815ee995283SPavel Emelyanov 	if (likely(!tp->repair))
28161da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
2817c7781a6eSAndrew Vagin 	else
2818c7781a6eSAndrew Vagin 		tp->rcv_tstamp = tcp_time_stamp;
2819ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
2820ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
28211da177e4SLinus Torvalds 
2822463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2823463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
28241da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
28251da177e4SLinus Torvalds }
28261da177e4SLinus Torvalds 
2827783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2828783237e8SYuchung Cheng {
2829783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2830783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2831783237e8SYuchung Cheng 
2832783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
2833783237e8SYuchung Cheng 	skb_header_release(skb);
2834783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
2835783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
2836783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
2837783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
2838783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
2839783237e8SYuchung Cheng }
2840783237e8SYuchung Cheng 
2841783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2842783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
2843783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2844783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
2845783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to
2846783237e8SYuchung Cheng  * sending a regular SYN with a Fast Open cookie request option.
2847783237e8SYuchung Cheng  */
2848783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2849783237e8SYuchung Cheng {
2850783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2851783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
2852aab48743SYuchung Cheng 	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2853783237e8SYuchung Cheng 	struct sk_buff *syn_data = NULL, *data;
2854aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
2855783237e8SYuchung Cheng 
285667da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
2857aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2858aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
2859aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
2860aab48743SYuchung Cheng 	if (syn_loss > 1 &&
2861aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2862aab48743SYuchung Cheng 		fo->cookie.len = -1;
2863aab48743SYuchung Cheng 		goto fallback;
2864aab48743SYuchung Cheng 	}
2865aab48743SYuchung Cheng 
286667da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
286767da22d2SYuchung Cheng 		fo->cookie.len = -1;
286867da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
2869783237e8SYuchung Cheng 		goto fallback;
2870783237e8SYuchung Cheng 
2871783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2872783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
2873783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
2874783237e8SYuchung Cheng 	 */
2875783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2876783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
28771b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2878783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
2879783237e8SYuchung Cheng 
2880783237e8SYuchung Cheng 	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
2881783237e8SYuchung Cheng 				   sk->sk_allocation);
2882783237e8SYuchung Cheng 	if (syn_data == NULL)
2883783237e8SYuchung Cheng 		goto fallback;
2884783237e8SYuchung Cheng 
2885783237e8SYuchung Cheng 	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2886783237e8SYuchung Cheng 		struct iovec *iov = &fo->data->msg_iov[i];
2887783237e8SYuchung Cheng 		unsigned char __user *from = iov->iov_base;
2888783237e8SYuchung Cheng 		int len = iov->iov_len;
2889783237e8SYuchung Cheng 
2890783237e8SYuchung Cheng 		if (syn_data->len + len > space)
2891783237e8SYuchung Cheng 			len = space - syn_data->len;
2892783237e8SYuchung Cheng 		else if (i + 1 == iovlen)
2893783237e8SYuchung Cheng 			/* No more data pending in inet_wait_for_connect() */
2894783237e8SYuchung Cheng 			fo->data = NULL;
2895783237e8SYuchung Cheng 
2896783237e8SYuchung Cheng 		if (skb_add_data(syn_data, from, len))
2897783237e8SYuchung Cheng 			goto fallback;
2898783237e8SYuchung Cheng 	}
2899783237e8SYuchung Cheng 
2900783237e8SYuchung Cheng 	/* Queue a data-only packet after the regular SYN for retransmission */
2901783237e8SYuchung Cheng 	data = pskb_copy(syn_data, sk->sk_allocation);
2902783237e8SYuchung Cheng 	if (data == NULL)
2903783237e8SYuchung Cheng 		goto fallback;
2904783237e8SYuchung Cheng 	TCP_SKB_CB(data)->seq++;
2905783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2906783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2907783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, data);
2908783237e8SYuchung Cheng 	fo->copied = data->len;
2909783237e8SYuchung Cheng 
2910783237e8SYuchung Cheng 	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
291167da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
2912783237e8SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2913783237e8SYuchung Cheng 		goto done;
2914783237e8SYuchung Cheng 	}
2915783237e8SYuchung Cheng 	syn_data = NULL;
2916783237e8SYuchung Cheng 
2917783237e8SYuchung Cheng fallback:
2918783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
2919783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
2920783237e8SYuchung Cheng 		fo->cookie.len = 0;
2921783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2922783237e8SYuchung Cheng 	if (err)
2923783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
2924783237e8SYuchung Cheng 	kfree_skb(syn_data);
2925783237e8SYuchung Cheng done:
2926783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
2927783237e8SYuchung Cheng 	return err;
2928783237e8SYuchung Cheng }
2929783237e8SYuchung Cheng 
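/* Two worked examples for tcp_send_syn_data() (illustrative numbers):
 * with an icsk_pmtu_cookie giving an MSS of 1460, the SYN can carry
 * 1460 - MAX_TCP_OPTION_SPACE == 1420 bytes of data, since the full
 * 40 bytes of option space are reserved for middleboxes.  And after
 * syn_loss == 2 recurring SYN-data losses, Fast Open is skipped for
 * 60*HZ << 2 == 240 seconds from the last loss.
 */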
293067edfef7SAndi Kleen /* Build a SYN and send it off. */
29311da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
29321da177e4SLinus Torvalds {
29331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
29341da177e4SLinus Torvalds 	struct sk_buff *buff;
2935ee586811SEric Paris 	int err;
29361da177e4SLinus Torvalds 
29371da177e4SLinus Torvalds 	tcp_connect_init(sk);
29381da177e4SLinus Torvalds 
29392b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
29402b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
29412b916477SAndrey Vagin 		return 0;
29422b916477SAndrey Vagin 	}
29432b916477SAndrey Vagin 
2944d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
29451da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
29461da177e4SLinus Torvalds 		return -ENOBUFS;
29471da177e4SLinus Torvalds 
29481da177e4SLinus Torvalds 	/* Reserve space for headers. */
29491da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
29501da177e4SLinus Torvalds 
2951a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2952783237e8SYuchung Cheng 	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2953783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
2954e870a8efSIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
29551da177e4SLinus Torvalds 
2956783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
2957783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2958783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2959ee586811SEric Paris 	if (err == -ECONNREFUSED)
2960ee586811SEric Paris 		return err;
2961bd37a088SWei Yongjun 
2962bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2963bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
2964bd37a088SWei Yongjun 	 */
2965bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2966bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
296781cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
29681da177e4SLinus Torvalds 
29691da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
29703f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
29713f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
29721da177e4SLinus Torvalds 	return 0;
29731da177e4SLinus Torvalds }
29744bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
29751da177e4SLinus Torvalds 
29761da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
29771da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
29781da177e4SLinus Torvalds  * for details.
29791da177e4SLinus Torvalds  */
29801da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
29811da177e4SLinus Torvalds {
2982463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2983463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
29841da177e4SLinus Torvalds 	unsigned long timeout;
29851da177e4SLinus Torvalds 
29861da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2987463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
29881da177e4SLinus Torvalds 		int max_ato = HZ / 2;
29891da177e4SLinus Torvalds 
2990056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
2991056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
29921da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
29931da177e4SLinus Torvalds 
29941da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
29951da177e4SLinus Torvalds 
29961da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
2997463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
29981da177e4SLinus Torvalds 		 * directly.
29991da177e4SLinus Torvalds 		 */
30001da177e4SLinus Torvalds 		if (tp->srtt) {
30011da177e4SLinus Torvalds 			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
30021da177e4SLinus Torvalds 
30031da177e4SLinus Torvalds 			if (rtt < max_ato)
30041da177e4SLinus Torvalds 				max_ato = rtt;
30051da177e4SLinus Torvalds 		}
30061da177e4SLinus Torvalds 
30071da177e4SLinus Torvalds 		ato = min(ato, max_ato);
30081da177e4SLinus Torvalds 	}
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds 	/* Stay within the limit we were given */
30111da177e4SLinus Torvalds 	timeout = jiffies + ato;
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one set earlier. */
3014463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
30151da177e4SLinus Torvalds 		/* If the delack timer was blocked or is about to expire,
30161da177e4SLinus Torvalds 		 * send the ACK now.
30171da177e4SLinus Torvalds 		 */
3018463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3019463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
30201da177e4SLinus Torvalds 			tcp_send_ack(sk);
30211da177e4SLinus Torvalds 			return;
30221da177e4SLinus Torvalds 		}
30231da177e4SLinus Torvalds 
3024463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3025463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
30261da177e4SLinus Torvalds 	}
3027463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3028463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3029463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
30301da177e4SLinus Torvalds }
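
/*
 * Editor's note: a stand-alone sketch (hypothetical names, not kernel
 * code) of the ato clamping above, in milliseconds assuming HZ=1000.
 * The kernel keeps srtt left-shifted by 3, hence the ">> 3" in
 * tcp_send_delayed_ack(); here srtt_ms is the plain smoothed RTT, and
 * the ICSK_ACK_PUSHED case is folded into the pingpong flag.
 */
#define DELACK_MIN_MS	 40	/* stand-in for TCP_DELACK_MIN */
#define DELACK_MAX_MS	200	/* stand-in for TCP_DELACK_MAX */

static int clamp_delack_ato_ms(int ato_ms, int srtt_ms, int pingpong)
{
	int max_ato_ms;

	if (ato_ms <= DELACK_MIN_MS)	/* fast path: already short enough */
		return ato_ms;

	max_ato_ms = pingpong ? DELACK_MAX_MS : 500;	/* HZ / 2 */
	if (srtt_ms) {
		int rtt = srtt_ms > DELACK_MIN_MS ? srtt_ms : DELACK_MIN_MS;

		if (rtt < max_ato_ms)	/* never delay much past one RTT */
			max_ato_ms = rtt;
	}
	return ato_ms < max_ato_ms ? ato_ms : max_ato_ms;
}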
30311da177e4SLinus Torvalds 
30321da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
30331da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
30341da177e4SLinus Torvalds {
30351da177e4SLinus Torvalds 	struct sk_buff *buff;
30361da177e4SLinus Torvalds 
3037058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3038058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3039058dc334SIlpo Järvinen 		return;
3040058dc334SIlpo Järvinen 
30411da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
30421da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
30431da177e4SLinus Torvalds 	 * sock.
30441da177e4SLinus Torvalds 	 */
304599a1dec7SMel Gorman 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
30461da177e4SLinus Torvalds 	if (buff == NULL) {
3047463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3048463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
30493f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
30503f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
30511da177e4SLinus Torvalds 		return;
30521da177e4SLinus Torvalds 	}
30531da177e4SLinus Torvalds 
30541da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
30551da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3056a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
30571da177e4SLinus Torvalds 
30581da177e4SLinus Torvalds 	/* Send it off; this clears delayed acks for us. */
30591da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
306099a1dec7SMel Gorman 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
30611da177e4SLinus Torvalds }
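
/*
 * Editor's note: the allocation-failure branch above is a retry-via-timer
 * pattern: rather than dropping the ACK, it is downgraded to a delayed
 * ACK and the delack timer is re-armed. A tiny self-contained analogue
 * with made-up names:
 */
#include <stdbool.h>

struct fake_conn {
	bool ack_scheduled;
	long delack_deadline_ms;
};

static void ack_now_or_defer(struct fake_conn *c, void *buf, long now_ms)
{
	if (!buf) {
		/* no memory: retry later as a delayed ACK instead of failing */
		c->ack_scheduled = true;
		c->delack_deadline_ms = now_ms + 200;	/* ~TCP_DELACK_MAX */
		return;
	}
	/* ... build and transmit the ACK immediately ... */
}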
30621da177e4SLinus Torvalds 
30631da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
30641da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
30651da177e4SLinus Torvalds  *
30661da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
30671da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
30681da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
30691da177e4SLinus Torvalds  *
30701da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
30711da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
30721da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
30731da177e4SLinus Torvalds  */
30741da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
30751da177e4SLinus Torvalds {
30761da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30771da177e4SLinus Torvalds 	struct sk_buff *skb;
30781da177e4SLinus Torvalds 
30791da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
308099a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
30811da177e4SLinus Torvalds 	if (skb == NULL)
30821da177e4SLinus Torvalds 		return -1;
30831da177e4SLinus Torvalds 
30841da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
30851da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
30861da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
30871da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
30881da177e4SLinus Torvalds 	 * send it.
30891da177e4SLinus Torvalds 	 */
3090a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
30911da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
3092dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
30931da177e4SLinus Torvalds }
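
/*
 * Editor's note: why SND.UNA - 1 works. A segment whose sequence number
 * lies below the peer's rcv_nxt is discarded, but RFC 793/1122 oblige
 * the receiver to answer it with an ACK carrying the current window,
 * which is exactly what the probe wants. A hypothetical sketch of the
 * same wraparound-safe comparison the kernel's before() performs:
 */
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* modulo-2^32 "less than" */
}

/* the receiver drops this segment yet must still ACK it */
static bool probe_forces_ack(uint32_t snd_una, uint32_t peer_rcv_nxt)
{
	return seq_before(snd_una - 1, peer_rcv_nxt);
}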
30941da177e4SLinus Torvalds 
3095ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3096ee995283SPavel Emelyanov {
3097ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3098ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3099c0e88ff0SPavel Emelyanov 		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
3100ee995283SPavel Emelyanov 		tcp_xmit_probe_skb(sk, 0);
3101ee995283SPavel Emelyanov 	}
3102ee995283SPavel Emelyanov }
3103ee995283SPavel Emelyanov 
310467edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
31051da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
31061da177e4SLinus Torvalds {
31071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31081da177e4SLinus Torvalds 	struct sk_buff *skb;
31091da177e4SLinus Torvalds 
3110058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3111058dc334SIlpo Järvinen 		return -1;
3112058dc334SIlpo Järvinen 
3113fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
311490840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
31151da177e4SLinus Torvalds 		int err;
31160c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
311790840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
31181da177e4SLinus Torvalds 
31191da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
31201da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
31211da177e4SLinus Torvalds 
31221da177e4SLinus Torvalds 		/* We are probing the opening of a window
31231da177e4SLinus Torvalds 		 * but the window size is != 0; this must have been
31241da177e4SLinus Torvalds 		 * the result of SWS avoidance (sender side).
31251da177e4SLinus Torvalds 		 */
31261da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
31271da177e4SLinus Torvalds 		    skb->len > mss) {
31281da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
31294de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3130846998aeSDavid S. Miller 			if (tcp_fragment(sk, skb, seg_size, mss))
31311da177e4SLinus Torvalds 				return -1;
31321da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
3133846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
31341da177e4SLinus Torvalds 
31354de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
31361da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
3137dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
313866f5fe62SIlpo Järvinen 		if (!err)
313966f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
31401da177e4SLinus Torvalds 		return err;
31411da177e4SLinus Torvalds 	} else {
314233f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
31434828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
31441da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
31451da177e4SLinus Torvalds 	}
31461da177e4SLinus Torvalds }
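
/*
 * Editor's note: the fragmentation decision above, reduced to its
 * arithmetic as a hypothetical stand-alone helper. The probe sends
 * min(remaining window, MSS) bytes when the skb does not fit the
 * offered window (or exceeds the current MSS); otherwise the skb goes
 * out whole.
 */
static unsigned int probe_send_size(unsigned int skb_len,
				    unsigned int window_left,
				    unsigned int mss)
{
	if (window_left < skb_len || skb_len > mss)
		return window_left < mss ? window_left : mss;	/* min() */
	return skb_len;	/* fits the window in one piece */
}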
31471da177e4SLinus Torvalds 
31481da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
31491da177e4SLinus Torvalds  * send a partial packet, else send a zero-window probe.
31501da177e4SLinus Torvalds  */
31511da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
31521da177e4SLinus Torvalds {
3153463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
31541da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31551da177e4SLinus Torvalds 	int err;
31561da177e4SLinus Torvalds 
31571da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
31581da177e4SLinus Torvalds 
3159fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
31601da177e4SLinus Torvalds 		/* Cancel the probe timer if it is no longer required. */
31616687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3162463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
31631da177e4SLinus Torvalds 		return;
31641da177e4SLinus Torvalds 	}
31651da177e4SLinus Torvalds 
31661da177e4SLinus Torvalds 	if (err <= 0) {
3167463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3168463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
31696687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3170463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
31713f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
31723f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
31731da177e4SLinus Torvalds 	} else {
31741da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
31756687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
31761da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
31771da177e4SLinus Torvalds 		 *
31781da177e4SLinus Torvalds 		 * But still use the accumulated backoff.
31791da177e4SLinus Torvalds 		 */
31806687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
31816687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3182463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3183463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
31843f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
31853f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
31861da177e4SLinus Torvalds 	}
31871da177e4SLinus Torvalds }
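
/*
 * Editor's note: the probe interval above backs off exponentially. A
 * hypothetical helper with a worked example: for icsk_rto = 200 ms the
 * probes fire after roughly 200 ms, 400 ms, 800 ms, ..., capped at
 * TCP_RTO_MAX (120 s by default).
 */
static unsigned long probe0_timeout_ms(unsigned long rto_ms,
				       unsigned int backoff,
				       unsigned long rto_max_ms)
{
	unsigned long t = rto_ms << backoff;	/* doubles on each failure */

	return t < rto_max_ms ? t : rto_max_ms;	/* min(..., TCP_RTO_MAX) */
}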
3188