xref: /linux/net/ipv4/tcp_output.c (revision 6ba8a3b19e764b6a65e4030ab0999be50c291e6c)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
802c30a84SJesper Juhl  * Authors:	Ross Biro
91da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
101da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
111da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
121da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
131da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
141da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
151da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
161da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
171da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
181da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
191da177e4SLinus Torvalds  */
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds /*
221da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
231da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
241da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
251da177e4SLinus Torvalds  *				:	AF independence
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
281da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
291da177e4SLinus Torvalds  *					during syn/ack processing.
301da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
311da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
321da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
331da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
341da177e4SLinus Torvalds  *
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
3791df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3891df42beSJoe Perches 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
425a0e3ad6STejun Heo #include <linux/gfp.h>
431da177e4SLinus Torvalds #include <linux/module.h>
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds /* People can turn this off for buggy TCPs found in printers etc. */
46ab32ea5dSBrian Haley int sysctl_tcp_retrans_collapse __read_mostly = 1;
471da177e4SLinus Torvalds 
4815d99e02SRick Jones /* People can turn this on to work with those rare, broken TCPs that
4915d99e02SRick Jones  * interpret the window field as a signed quantity.
5015d99e02SRick Jones  */
51ab32ea5dSBrian Haley int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
5215d99e02SRick Jones 
5346d3ceabSEric Dumazet /* Default TSQ limit of two TSO segments */
5446d3ceabSEric Dumazet int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
5546d3ceabSEric Dumazet 
561da177e4SLinus Torvalds /* This limits the percentage of the congestion window which we
571da177e4SLinus Torvalds  * will allow a single TSO frame to consume.  Building TSO frames
581da177e4SLinus Torvalds  * which are too large can cause TCP streams to be bursty.
591da177e4SLinus Torvalds  */
60ab32ea5dSBrian Haley int sysctl_tcp_tso_win_divisor __read_mostly = 3;
611da177e4SLinus Torvalds 
62ab32ea5dSBrian Haley int sysctl_tcp_mtu_probing __read_mostly = 0;
6397b1ce25SShan Wei int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
645d424d5aSJohn Heffner 
6535089bb2SDavid S. Miller /* By default, RFC2861 behavior.  */
66ab32ea5dSBrian Haley int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
6735089bb2SDavid S. Miller 
68519855c5SWilliam Allen Simpson int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
69e6b09ccaSDavid S. Miller EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
70519855c5SWilliam Allen Simpson 
7146d3ceabSEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
7246d3ceabSEric Dumazet 			   int push_one, gfp_t gfp);
73519855c5SWilliam Allen Simpson 
7467edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
75cf533ea5SEric Dumazet static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
766ff03ac3SIlpo Järvinen {
77*6ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
786ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
7966f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
809e412ba7SIlpo Järvinen 
81fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
821da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
838512430eSIlpo Järvinen 
8425985edcSLucas De Marchi 	/* Don't override Nagle indefinitely with F-RTO */
858512430eSIlpo Järvinen 	if (tp->frto_counter == 2)
868512430eSIlpo Järvinen 		tp->frto_counter = 3;
8766f5fe62SIlpo Järvinen 
8866f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
89*6ba8a3b1SNandita Dukkipati 	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
90*6ba8a3b1SNandita Dukkipati 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
91750ea2baSYuchung Cheng 		tcp_rearm_rto(sk);
921da177e4SLinus Torvalds }
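/* Note on the rearm above: the RTO timer is started when the first
 * packet of an otherwise empty window goes out (!prior_packets), and it
 * is recomputed when an early-retransmit or loss-probe timer was
 * pending, so the timer accounts for the new data in flight.
 */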
931da177e4SLinus Torvalds 
941da177e4SLinus Torvalds /* SND.NXT, if the window was not shrunk.
951da177e4SLinus Torvalds  * If the window has been shrunk, what should we send? It is not clear at all.
961da177e4SLinus Torvalds  * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
971da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
981da177e4SLinus Torvalds  * invalid. OK, let's settle on this for now:
991da177e4SLinus Torvalds  */
100cf533ea5SEric Dumazet static inline __u32 tcp_acceptable_seq(const struct sock *sk)
1011da177e4SLinus Torvalds {
102cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1039e412ba7SIlpo Järvinen 
10490840defSIlpo Järvinen 	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
1051da177e4SLinus Torvalds 		return tp->snd_nxt;
1061da177e4SLinus Torvalds 	else
10790840defSIlpo Järvinen 		return tcp_wnd_end(tp);
1081da177e4SLinus Torvalds }
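/* Worked example (hypothetical numbers): with snd_una = 1000,
 * snd_wnd = 500 and snd_nxt = 1600, tcp_wnd_end() is 1500, which is
 * before snd_nxt, so the clamped value 1500 is returned rather than the
 * out-of-window snd_nxt.
 */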
1091da177e4SLinus Torvalds 
1101da177e4SLinus Torvalds /* Calculate the mss to advertise in the SYN segment.
1111da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1121da177e4SLinus Torvalds  *
1131da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1141da177e4SLinus Torvalds  * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
1151da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
1161da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1171da177e4SLinus Torvalds  *    large MSS.
1181da177e4SLinus Torvalds  * 4. We do not do 3; we advertise an MSS calculated from the first
1191da177e4SLinus Torvalds  *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
1201da177e4SLinus Torvalds  *    This may be overridden via information stored in the routing table.
1211da177e4SLinus Torvalds  * 5. The value 65535 for MSS is valid in IPv6 and means "as large as possible,
1221da177e4SLinus Torvalds  *    probably even Jumbo".
1231da177e4SLinus Torvalds  */
1241da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1251da177e4SLinus Torvalds {
1261da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
127cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1281da177e4SLinus Torvalds 	int mss = tp->advmss;
1291da177e4SLinus Torvalds 
1300dbaee3bSDavid S. Miller 	if (dst) {
1310dbaee3bSDavid S. Miller 		unsigned int metric = dst_metric_advmss(dst);
1320dbaee3bSDavid S. Miller 
1330dbaee3bSDavid S. Miller 		if (metric < mss) {
1340dbaee3bSDavid S. Miller 			mss = metric;
1351da177e4SLinus Torvalds 			tp->advmss = mss;
1361da177e4SLinus Torvalds 		}
1370dbaee3bSDavid S. Miller 	}
1381da177e4SLinus Torvalds 
1391da177e4SLinus Torvalds 	return (__u16)mss;
1401da177e4SLinus Torvalds }
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
1431da177e4SLinus Torvalds  * This is the first part of the cwnd validation mechanism. */
144cf533ea5SEric Dumazet static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
1451da177e4SLinus Torvalds {
146463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1471da177e4SLinus Torvalds 	s32 delta = tcp_time_stamp - tp->lsndtime;
1481da177e4SLinus Torvalds 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
1491da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1501da177e4SLinus Torvalds 
1516687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1521da177e4SLinus Torvalds 
1536687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1541da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1551da177e4SLinus Torvalds 
156463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1571da177e4SLinus Torvalds 		cwnd >>= 1;
1581da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
1591da177e4SLinus Torvalds 	tp->snd_cwnd_stamp = tcp_time_stamp;
1601da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1611da177e4SLinus Torvalds }
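/* Worked example (hypothetical numbers): with snd_cwnd = 40,
 * restart_cwnd = 10 and an idle time of 3.5 * RTO, the loop above halves
 * cwnd once per fully elapsed RTO: 40 -> 20 -> 10, then stops because
 * cwnd has reached restart_cwnd, leaving snd_cwnd = max(10, 10) = 10.
 */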
1621da177e4SLinus Torvalds 
16367edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
16440efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
165cf533ea5SEric Dumazet 				struct sock *sk)
1661da177e4SLinus Torvalds {
167463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
168463c84b9SArnaldo Carvalho de Melo 	const u32 now = tcp_time_stamp;
1691da177e4SLinus Torvalds 
17035089bb2SDavid S. Miller 	if (sysctl_tcp_slow_start_after_idle &&
17135089bb2SDavid S. Miller 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
172463c84b9SArnaldo Carvalho de Melo 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
1731da177e4SLinus Torvalds 
1741da177e4SLinus Torvalds 	tp->lsndtime = now;
1751da177e4SLinus Torvalds 
1761da177e4SLinus Torvalds 	/* If this is a reply sent within ato of the last received
1771da177e4SLinus Torvalds 	 * packet, enter pingpong (delayed ACK) mode.
1781da177e4SLinus Torvalds 	 */
179463c84b9SArnaldo Carvalho de Melo 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
180463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1811da177e4SLinus Torvalds }
1821da177e4SLinus Torvalds 
18367edfef7SAndi Kleen /* Account for an ACK we sent. */
18440efc6faSStephen Hemminger static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
1851da177e4SLinus Torvalds {
186463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
187463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1881da177e4SLinus Torvalds }
1891da177e4SLinus Torvalds 
1901da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1911da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1921da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
1931da177e4SLinus Torvalds  * NOTE: for smooth operation the initial space offering should
1941da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
1951da177e4SLinus Torvalds  * This MUST be enforced by all callers.
1961da177e4SLinus Torvalds  */
1971da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
1981da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
19931d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
20031d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2011da177e4SLinus Torvalds {
2021da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
2031da177e4SLinus Torvalds 
2041da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
2051da177e4SLinus Torvalds 	if (*window_clamp == 0)
2061da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
2071da177e4SLinus Torvalds 	space = min(*window_clamp, space);
2081da177e4SLinus Torvalds 
2091da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2101da177e4SLinus Torvalds 	if (space > mss)
2111da177e4SLinus Torvalds 		space = (space / mss) * mss;
2121da177e4SLinus Torvalds 
2131da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
21415d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
21515d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
21615d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
21715d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
21815d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
21915d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2201da177e4SLinus Torvalds 	 */
22115d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
2221da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
22315d99e02SRick Jones 	else
22415d99e02SRick Jones 		(*rcv_wnd) = space;
22515d99e02SRick Jones 
2261da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2271da177e4SLinus Torvalds 	if (wscale_ok) {
2281da177e4SLinus Torvalds 		/* Set window scaling on max possible window
2291da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2301da177e4SLinus Torvalds 		 */
2311da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
232316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2331da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2341da177e4SLinus Torvalds 			space >>= 1;
2351da177e4SLinus Torvalds 			(*rcv_wscale)++;
2361da177e4SLinus Torvalds 		}
2371da177e4SLinus Torvalds 	}
2381da177e4SLinus Torvalds 
239356f0398SNandita Dukkipati 	/* Set the initial window to a value large enough for senders starting
240356f0398SNandita Dukkipati 	 * with an initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
241356f0398SNandita Dukkipati 	 * a limit on the initial window when mss is larger than 1460.
242356f0398SNandita Dukkipati 	 */
2431da177e4SLinus Torvalds 	if (mss > (1 << *rcv_wscale)) {
244356f0398SNandita Dukkipati 		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
245356f0398SNandita Dukkipati 		if (mss > 1460)
246356f0398SNandita Dukkipati 			init_cwnd =
247356f0398SNandita Dukkipati 			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
24831d12926Slaurent chavey 		/* when initializing use the value from init_rcv_wnd
24931d12926Slaurent chavey 		 * rather than the default from above
25031d12926Slaurent chavey 		 */
251b1afde60SNandita Dukkipati 		if (init_rcv_wnd)
252b1afde60SNandita Dukkipati 			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
253b1afde60SNandita Dukkipati 		else
254b1afde60SNandita Dukkipati 			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
2551da177e4SLinus Torvalds 	}
2561da177e4SLinus Torvalds 
2571da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2581da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2591da177e4SLinus Torvalds }
2604bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
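/* Worked example (hypothetical numbers): with wscale_ok and
 * space = 1048576 bytes (1 MB), the scaling loop shifts space right
 * until it fits in 16 bits: 1048576 -> 524288 -> 262144 -> 131072 ->
 * 65536 -> 32768, giving rcv_wscale = 5, i.e. the window is advertised
 * in units of 32 bytes.
 */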
2611da177e4SLinus Torvalds 
2621da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2631da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2641da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2651da177e4SLinus Torvalds  * frame.
2661da177e4SLinus Torvalds  */
26740efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2681da177e4SLinus Torvalds {
2691da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2701da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2711da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2721da177e4SLinus Torvalds 
2731da177e4SLinus Torvalds 	/* Never shrink the offered window */
2741da177e4SLinus Torvalds 	if (new_win < cur_win) {
2751da177e4SLinus Torvalds 		/* Danger Will Robinson!
2761da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2771da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2781da177e4SLinus Torvalds 		 * window in time.  --DaveM
2791da177e4SLinus Torvalds 		 *
2801da177e4SLinus Torvalds 		 * Relax Will Robinson.
2811da177e4SLinus Torvalds 		 */
282607bfbf2SPatrick McHardy 		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2831da177e4SLinus Torvalds 	}
2841da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2851da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2861da177e4SLinus Torvalds 
2871da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2881da177e4SLinus Torvalds 	 * scaled window.
2891da177e4SLinus Torvalds 	 */
29015d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2911da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2921da177e4SLinus Torvalds 	else
2931da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2941da177e4SLinus Torvalds 
2951da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2961da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2971da177e4SLinus Torvalds 
2981da177e4SLinus Torvalds 	/* If we advertise a zero window, disable the fast path. */
2991da177e4SLinus Torvalds 	if (new_win == 0)
3001da177e4SLinus Torvalds 		tp->pred_flags = 0;
3011da177e4SLinus Torvalds 
3021da177e4SLinus Torvalds 	return new_win;
3031da177e4SLinus Torvalds }
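/* Worked example (hypothetical numbers): if __tcp_select_window()
 * returns 8192 while cur_win is 12001 and rcv_wscale is 2, new_win
 * becomes ALIGN(12001, 4) = 12004, the smallest multiple of
 * 1 << rcv_wscale not below cur_win, so the on-wire value
 * 12004 >> 2 = 3001 never shrinks the window we offered earlier.
 */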
3041da177e4SLinus Torvalds 
30567edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
306cf533ea5SEric Dumazet static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
307bdf1ee5dSIlpo Järvinen {
3084de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
309bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
3104de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
311bdf1ee5dSIlpo Järvinen }
312bdf1ee5dSIlpo Järvinen 
31367edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
314bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
315bdf1ee5dSIlpo Järvinen {
316bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
317bdf1ee5dSIlpo Järvinen 
318bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
3195d134f1cSHannes Frederic Sowa 	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
3204de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
321bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
322bdf1ee5dSIlpo Järvinen 	}
323bdf1ee5dSIlpo Järvinen }
324bdf1ee5dSIlpo Järvinen 
325bdf1ee5dSIlpo Järvinen static __inline__ void
326cf533ea5SEric Dumazet TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
327bdf1ee5dSIlpo Järvinen {
328bdf1ee5dSIlpo Järvinen 	if (inet_rsk(req)->ecn_ok)
329bdf1ee5dSIlpo Järvinen 		th->ece = 1;
330bdf1ee5dSIlpo Järvinen }
331bdf1ee5dSIlpo Järvinen 
33267edfef7SAndi Kleen /* Set up ECN state for a packet on an ESTABLISHED socket that is about to
33367edfef7SAndi Kleen  * be sent.
33467edfef7SAndi Kleen  */
335bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
336bdf1ee5dSIlpo Järvinen 				int tcp_header_len)
337bdf1ee5dSIlpo Järvinen {
338bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
339bdf1ee5dSIlpo Järvinen 
340bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
341bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
342bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
343bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
344bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
345bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
346bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
347bdf1ee5dSIlpo Järvinen 				tcp_hdr(skb)->cwr = 1;
348bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
349bdf1ee5dSIlpo Järvinen 			}
350bdf1ee5dSIlpo Järvinen 		} else {
351bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
352bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
353bdf1ee5dSIlpo Järvinen 		}
354bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
355bdf1ee5dSIlpo Järvinen 			tcp_hdr(skb)->ece = 1;
356bdf1ee5dSIlpo Järvinen 	}
357bdf1ee5dSIlpo Järvinen }
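/* Example: the first unretransmitted data segment sent after cwnd has
 * been reduced (TCP_ECN_QUEUE_CWR set) goes out ECT-marked with the CWR
 * bit, telling the receiver its ECE signal was acted upon; pure ACKs
 * and retransmissions are sent without ECT.
 */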
358bdf1ee5dSIlpo Järvinen 
359e870a8efSIlpo Järvinen /* Constructs the common control bits of a non-data skb. If SYN/FIN is present,
360e870a8efSIlpo Järvinen  * the end seqno is auto-incremented.
361e870a8efSIlpo Järvinen  */
362e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
363e870a8efSIlpo Järvinen {
3642e8e18efSDavid S. Miller 	skb->ip_summed = CHECKSUM_PARTIAL;
365e870a8efSIlpo Järvinen 	skb->csum = 0;
366e870a8efSIlpo Järvinen 
3674de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags;
368e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->sacked = 0;
369e870a8efSIlpo Järvinen 
370e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_segs = 1;
371e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_size = 0;
372e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_type = 0;
373e870a8efSIlpo Järvinen 
374e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
375a3433f35SChangli Gao 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
376e870a8efSIlpo Järvinen 		seq++;
377e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
378e870a8efSIlpo Järvinen }
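/* Example: a bare FIN built here with seq = 4000 gets end_seq = 4001,
 * since SYN and FIN each consume one unit of sequence space, while a
 * pure ACK keeps end_seq == seq.
 */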
379e870a8efSIlpo Järvinen 
380a2a385d6SEric Dumazet static inline bool tcp_urg_mode(const struct tcp_sock *tp)
38133f5f57eSIlpo Järvinen {
38233f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
38333f5f57eSIlpo Järvinen }
38433f5f57eSIlpo Järvinen 
38533ad798cSAdam Langley #define OPTION_SACK_ADVERTISE	(1 << 0)
38633ad798cSAdam Langley #define OPTION_TS		(1 << 1)
38733ad798cSAdam Langley #define OPTION_MD5		(1 << 2)
38889e95a61SOri Finkelman #define OPTION_WSCALE		(1 << 3)
389bd0388aeSWilliam Allen Simpson #define OPTION_COOKIE_EXTENSION	(1 << 4)
3902100c8d2SYuchung Cheng #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
39133ad798cSAdam Langley 
39233ad798cSAdam Langley struct tcp_out_options {
3932100c8d2SYuchung Cheng 	u16 options;		/* bit field of OPTION_* */
3942100c8d2SYuchung Cheng 	u16 mss;		/* 0 to disable */
39533ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
39633ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
397bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
398bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
3992100c8d2SYuchung Cheng 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
4002100c8d2SYuchung Cheng 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
40133ad798cSAdam Langley };
40233ad798cSAdam Langley 
403bd0388aeSWilliam Allen Simpson /* The sysctl int routines are generic, so check consistency here.
404bd0388aeSWilliam Allen Simpson  */
405bd0388aeSWilliam Allen Simpson static u8 tcp_cookie_size_check(u8 desired)
406bd0388aeSWilliam Allen Simpson {
407f1987257SEric Dumazet 	int cookie_size;
408f1987257SEric Dumazet 
409f1987257SEric Dumazet 	if (desired > 0)
410bd0388aeSWilliam Allen Simpson 		/* previously specified */
411bd0388aeSWilliam Allen Simpson 		return desired;
412f1987257SEric Dumazet 
413f1987257SEric Dumazet 	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
414f1987257SEric Dumazet 	if (cookie_size <= 0)
415bd0388aeSWilliam Allen Simpson 		/* no default specified */
416bd0388aeSWilliam Allen Simpson 		return 0;
417f1987257SEric Dumazet 
418f1987257SEric Dumazet 	if (cookie_size <= TCP_COOKIE_MIN)
419bd0388aeSWilliam Allen Simpson 		/* value too small, specify minimum */
420bd0388aeSWilliam Allen Simpson 		return TCP_COOKIE_MIN;
421f1987257SEric Dumazet 
422f1987257SEric Dumazet 	if (cookie_size >= TCP_COOKIE_MAX)
423bd0388aeSWilliam Allen Simpson 		/* value too large, specify maximum */
424bd0388aeSWilliam Allen Simpson 		return TCP_COOKIE_MAX;
425f1987257SEric Dumazet 
426f1987257SEric Dumazet 	if (cookie_size & 1)
427bd0388aeSWilliam Allen Simpson 		/* odd byte count (8-bit multiple), illegal, round up */
428f1987257SEric Dumazet 		cookie_size++;
429f1987257SEric Dumazet 
430f1987257SEric Dumazet 	return (u8)cookie_size;
431bd0388aeSWilliam Allen Simpson }
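/* Worked examples: with no per-socket value (desired == 0), a sysctl of
 * 9 is rounded up to the even value 10, a sysctl of 2 is raised to
 * TCP_COOKIE_MIN, and a sysctl of 200 is capped at TCP_COOKIE_MAX.
 */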
432bd0388aeSWilliam Allen Simpson 
43367edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
43467edfef7SAndi Kleen  *
43567edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
436fd6149d3SIlpo Järvinen  * TCP options; we learned this the hard way, so be careful here.
437fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance, but from
438fd6149d3SIlpo Järvinen  * an interoperability perspective it seems that we're somewhat stuck with
439fd6149d3SIlpo Järvinen  * the ordering which we have been using if we want to keep working with
440fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody as there isn't
441fd6149d3SIlpo Järvinen  * any particular reason why the ordering would need to be changed).
442fd6149d3SIlpo Järvinen  *
443fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
444fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
445fd6149d3SIlpo Järvinen  */
44633ad798cSAdam Langley static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
447bd0388aeSWilliam Allen Simpson 			      struct tcp_out_options *opts)
448bd0388aeSWilliam Allen Simpson {
4492100c8d2SYuchung Cheng 	u16 options = opts->options;	/* mungable copy */
450bd0388aeSWilliam Allen Simpson 
451bd0388aeSWilliam Allen Simpson 	/* Having both authentication and cookies for security is redundant,
452bd0388aeSWilliam Allen Simpson 	 * and there's certainly not enough room.  Instead, the cookie-less
453bd0388aeSWilliam Allen Simpson 	 * extension variant is proposed.
454bd0388aeSWilliam Allen Simpson 	 *
455bd0388aeSWilliam Allen Simpson 	 * Consider the pessimal case with authentication.  The options
456bd0388aeSWilliam Allen Simpson 	 * could look like:
457bd0388aeSWilliam Allen Simpson 	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
458bd0388aeSWilliam Allen Simpson 	 */
459bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_MD5 & options)) {
460bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
461bd0388aeSWilliam Allen Simpson 			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
462bd0388aeSWilliam Allen Simpson 				       (TCPOLEN_COOKIE_BASE << 16) |
463bd0388aeSWilliam Allen Simpson 				       (TCPOPT_MD5SIG << 8) |
464bd0388aeSWilliam Allen Simpson 				       TCPOLEN_MD5SIG);
465bd0388aeSWilliam Allen Simpson 		} else {
46633ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_NOP << 24) |
46733ad798cSAdam Langley 				       (TCPOPT_NOP << 16) |
46833ad798cSAdam Langley 				       (TCPOPT_MD5SIG << 8) |
46933ad798cSAdam Langley 				       TCPOLEN_MD5SIG);
470bd0388aeSWilliam Allen Simpson 		}
471bd0388aeSWilliam Allen Simpson 		options &= ~OPTION_COOKIE_EXTENSION;
472bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
473bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
47433ad798cSAdam Langley 		ptr += 4;
47533ad798cSAdam Langley 	}
47633ad798cSAdam Langley 
477fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
478fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
479fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
480fd6149d3SIlpo Järvinen 			       opts->mss);
481fd6149d3SIlpo Järvinen 	}
482fd6149d3SIlpo Järvinen 
483bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
484bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
48533ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
48633ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
48733ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
48833ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
489bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
49033ad798cSAdam Langley 		} else {
491496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
49240efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
49340efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
49440efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
49540efc6faSStephen Hemminger 		}
49633ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
49733ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
49833ad798cSAdam Langley 	}
49933ad798cSAdam Langley 
500bd0388aeSWilliam Allen Simpson 	/* The specification requires the cookie to follow the timestamp, so do it now.
501bd0388aeSWilliam Allen Simpson 	 *
502bd0388aeSWilliam Allen Simpson 	 * Consider the pessimal case without authentication.  The options
503bd0388aeSWilliam Allen Simpson 	 * could look like:
504bd0388aeSWilliam Allen Simpson 	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
505bd0388aeSWilliam Allen Simpson 	 */
506bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
507bd0388aeSWilliam Allen Simpson 		__u8 *cookie_copy = opts->hash_location;
508bd0388aeSWilliam Allen Simpson 		u8 cookie_size = opts->hash_size;
509bd0388aeSWilliam Allen Simpson 
510bd0388aeSWilliam Allen Simpson 		/* 8-bit multiple handled in tcp_cookie_size_check() above,
511bd0388aeSWilliam Allen Simpson 		 * and elsewhere.
512bd0388aeSWilliam Allen Simpson 		 */
513bd0388aeSWilliam Allen Simpson 		if (0x2 & cookie_size) {
514bd0388aeSWilliam Allen Simpson 			__u8 *p = (__u8 *)ptr;
515bd0388aeSWilliam Allen Simpson 
516bd0388aeSWilliam Allen Simpson 			/* 16-bit multiple */
517bd0388aeSWilliam Allen Simpson 			*p++ = TCPOPT_COOKIE;
518bd0388aeSWilliam Allen Simpson 			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
519bd0388aeSWilliam Allen Simpson 			*p++ = *cookie_copy++;
520bd0388aeSWilliam Allen Simpson 			*p++ = *cookie_copy++;
521bd0388aeSWilliam Allen Simpson 			ptr++;
522bd0388aeSWilliam Allen Simpson 			cookie_size -= 2;
523bd0388aeSWilliam Allen Simpson 		} else {
524bd0388aeSWilliam Allen Simpson 			/* 32-bit multiple */
525bd0388aeSWilliam Allen Simpson 			*ptr++ = htonl(((TCPOPT_NOP << 24) |
526bd0388aeSWilliam Allen Simpson 					(TCPOPT_NOP << 16) |
527bd0388aeSWilliam Allen Simpson 					(TCPOPT_COOKIE << 8) |
528bd0388aeSWilliam Allen Simpson 					TCPOLEN_COOKIE_BASE) +
529bd0388aeSWilliam Allen Simpson 				       cookie_size);
530bd0388aeSWilliam Allen Simpson 		}
531bd0388aeSWilliam Allen Simpson 
532bd0388aeSWilliam Allen Simpson 		if (cookie_size > 0) {
533bd0388aeSWilliam Allen Simpson 			memcpy(ptr, cookie_copy, cookie_size);
534bd0388aeSWilliam Allen Simpson 			ptr += (cookie_size / 4);
535bd0388aeSWilliam Allen Simpson 		}
536bd0388aeSWilliam Allen Simpson 	}
537bd0388aeSWilliam Allen Simpson 
538bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
53933ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
54033ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
54133ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
54233ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
54333ad798cSAdam Langley 	}
54433ad798cSAdam Langley 
545bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
54633ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
54733ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
54833ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
54933ad798cSAdam Langley 			       opts->ws);
55033ad798cSAdam Langley 	}
55133ad798cSAdam Langley 
55233ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
55333ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
55433ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
55540efc6faSStephen Hemminger 		int this_sack;
55640efc6faSStephen Hemminger 
55740efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
55840efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
55940efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
56033ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
56140efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
5622de979bdSStephen Hemminger 
56333ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
56433ad798cSAdam Langley 		     ++this_sack) {
56540efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
56640efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
56740efc6faSStephen Hemminger 		}
5682de979bdSStephen Hemminger 
56940efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
57040efc6faSStephen Hemminger 	}
5712100c8d2SYuchung Cheng 
5722100c8d2SYuchung Cheng 	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
5732100c8d2SYuchung Cheng 		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
5742100c8d2SYuchung Cheng 
5752100c8d2SYuchung Cheng 		*ptr++ = htonl((TCPOPT_EXP << 24) |
5762100c8d2SYuchung Cheng 			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
5772100c8d2SYuchung Cheng 			       TCPOPT_FASTOPEN_MAGIC);
5782100c8d2SYuchung Cheng 
5792100c8d2SYuchung Cheng 		memcpy(ptr, foc->val, foc->len);
5802100c8d2SYuchung Cheng 		if ((foc->len & 3) == 2) {
5812100c8d2SYuchung Cheng 			u8 *align = ((u8 *)ptr) + foc->len;
5822100c8d2SYuchung Cheng 			align[0] = align[1] = TCPOPT_NOP;
5832100c8d2SYuchung Cheng 		}
5842100c8d2SYuchung Cheng 		ptr += (foc->len + 3) >> 2;
5852100c8d2SYuchung Cheng 	}
58640efc6faSStephen Hemminger }
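/* Worked example: an established-socket segment carrying only
 * timestamps emits the single option word NOP, NOP, TIMESTAMP, 10
 * (0x0101080a) followed by tsval and tsecr, i.e. 12 bytes total
 * (TCPOLEN_TSTAMP_ALIGNED).
 */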
58740efc6faSStephen Hemminger 
58867edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
58967edfef7SAndi Kleen  * network wire format yet.
59067edfef7SAndi Kleen  */
59195c96174SEric Dumazet static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
59233ad798cSAdam Langley 				struct tcp_out_options *opts,
593cf533ea5SEric Dumazet 				struct tcp_md5sig_key **md5)
594cf533ea5SEric Dumazet {
59533ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
596bd0388aeSWilliam Allen Simpson 	struct tcp_cookie_values *cvp = tp->cookie_values;
59795c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
598bd0388aeSWilliam Allen Simpson 	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
599bd0388aeSWilliam Allen Simpson 			 tcp_cookie_size_check(cvp->cookie_desired) :
600bd0388aeSWilliam Allen Simpson 			 0;
601783237e8SYuchung Cheng 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
60233ad798cSAdam Langley 
603cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
60433ad798cSAdam Langley 	*md5 = tp->af_specific->md5_lookup(sk, sk);
60533ad798cSAdam Langley 	if (*md5) {
60633ad798cSAdam Langley 		opts->options |= OPTION_MD5;
607bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
608cfb6eeb4SYOSHIFUJI Hideaki 	}
60933ad798cSAdam Langley #else
61033ad798cSAdam Langley 	*md5 = NULL;
611cfb6eeb4SYOSHIFUJI Hideaki #endif
61233ad798cSAdam Langley 
61333ad798cSAdam Langley 	/* We always get an MSS option.  The option bytes which, should
61433ad798cSAdam Langley 	 * timestamps be used, will be seen in normal data packets must be
61533ad798cSAdam Langley 	 * included in the advertised MSS.  But we subtract them from tp->mss_cache so that
61633ad798cSAdam Langley 	 * calculations in tcp_sendmsg are simpler etc.  So account for this
61733ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
61833ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
61933ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
62033ad798cSAdam Langley 	 * SACKs don't matter, we never delay an ACK when we have any of those
62133ad798cSAdam Langley 	 * going out.  */
62233ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
623bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
62433ad798cSAdam Langley 
625bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
62633ad798cSAdam Langley 		opts->options |= OPTION_TS;
627ee684b6fSAndrey Vagin 		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
62833ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
629bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
63033ad798cSAdam Langley 	}
631bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_window_scaling)) {
63233ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
63389e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
634bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
63533ad798cSAdam Langley 	}
636bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_sack)) {
63733ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
638b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
639bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
64033ad798cSAdam Langley 	}
64133ad798cSAdam Langley 
642783237e8SYuchung Cheng 	if (fastopen && fastopen->cookie.len >= 0) {
643783237e8SYuchung Cheng 		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
644783237e8SYuchung Cheng 		need = (need + 3) & ~3U;  /* Align to 32 bits */
645783237e8SYuchung Cheng 		if (remaining >= need) {
646783237e8SYuchung Cheng 			opts->options |= OPTION_FAST_OPEN_COOKIE;
647783237e8SYuchung Cheng 			opts->fastopen_cookie = &fastopen->cookie;
648783237e8SYuchung Cheng 			remaining -= need;
649783237e8SYuchung Cheng 			tp->syn_fastopen = 1;
650783237e8SYuchung Cheng 		}
651783237e8SYuchung Cheng 	}
652bd0388aeSWilliam Allen Simpson 	/* Note that timestamps are required by the specification.
653bd0388aeSWilliam Allen Simpson 	 *
654bd0388aeSWilliam Allen Simpson 	 * Odd numbers of bytes are prohibited by the specification, ensuring
655bd0388aeSWilliam Allen Simpson 	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
656bd0388aeSWilliam Allen Simpson 	 * 32-bit aligned.
657bd0388aeSWilliam Allen Simpson 	 */
658bd0388aeSWilliam Allen Simpson 	if (*md5 == NULL &&
659bd0388aeSWilliam Allen Simpson 	    (OPTION_TS & opts->options) &&
660bd0388aeSWilliam Allen Simpson 	    cookie_size > 0) {
661bd0388aeSWilliam Allen Simpson 		int need = TCPOLEN_COOKIE_BASE + cookie_size;
662bd0388aeSWilliam Allen Simpson 
663bd0388aeSWilliam Allen Simpson 		if (0x2 & need) {
664bd0388aeSWilliam Allen Simpson 			/* 32-bit multiple */
665bd0388aeSWilliam Allen Simpson 			need += 2; /* NOPs */
666bd0388aeSWilliam Allen Simpson 
667bd0388aeSWilliam Allen Simpson 			if (need > remaining) {
668bd0388aeSWilliam Allen Simpson 				/* try shrinking cookie to fit */
669bd0388aeSWilliam Allen Simpson 				cookie_size -= 2;
670bd0388aeSWilliam Allen Simpson 				need -= 4;
671bd0388aeSWilliam Allen Simpson 			}
672bd0388aeSWilliam Allen Simpson 		}
673bd0388aeSWilliam Allen Simpson 		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
674bd0388aeSWilliam Allen Simpson 			cookie_size -= 4;
675bd0388aeSWilliam Allen Simpson 			need -= 4;
676bd0388aeSWilliam Allen Simpson 		}
677bd0388aeSWilliam Allen Simpson 		if (TCP_COOKIE_MIN <= cookie_size) {
678bd0388aeSWilliam Allen Simpson 			opts->options |= OPTION_COOKIE_EXTENSION;
679bd0388aeSWilliam Allen Simpson 			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
680bd0388aeSWilliam Allen Simpson 			opts->hash_size = cookie_size;
681bd0388aeSWilliam Allen Simpson 
682bd0388aeSWilliam Allen Simpson 			/* Remember for future incarnations. */
683bd0388aeSWilliam Allen Simpson 			cvp->cookie_desired = cookie_size;
684bd0388aeSWilliam Allen Simpson 
685bd0388aeSWilliam Allen Simpson 			if (cvp->cookie_desired != cvp->cookie_pair_size) {
686bd0388aeSWilliam Allen Simpson 				/* Currently use random bytes as a nonce,
687bd0388aeSWilliam Allen Simpson 				 * assuming these are completely unpredictable
688bd0388aeSWilliam Allen Simpson 				 * by hostile users of the same system.
689bd0388aeSWilliam Allen Simpson 				 */
690bd0388aeSWilliam Allen Simpson 				get_random_bytes(&cvp->cookie_pair[0],
691bd0388aeSWilliam Allen Simpson 						 cookie_size);
692bd0388aeSWilliam Allen Simpson 				cvp->cookie_pair_size = cookie_size;
693bd0388aeSWilliam Allen Simpson 			}
694bd0388aeSWilliam Allen Simpson 
695bd0388aeSWilliam Allen Simpson 			remaining -= need;
696bd0388aeSWilliam Allen Simpson 		}
697bd0388aeSWilliam Allen Simpson 	}
698bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
69933ad798cSAdam Langley }
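/* Worked example: a typical SYN advertising MSS, timestamps, window
 * scaling and SACK consumes MSS(4) + TS(12, which also carries
 * SACK_PERM) + WSCALE(4) = 20 of the 40 bytes of MAX_TCP_OPTION_SPACE,
 * leaving 20 bytes for a Fast Open cookie or the cookie extension.
 */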
70033ad798cSAdam Langley 
70167edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
70295c96174SEric Dumazet static unsigned int tcp_synack_options(struct sock *sk,
70333ad798cSAdam Langley 				   struct request_sock *req,
70495c96174SEric Dumazet 				   unsigned int mss, struct sk_buff *skb,
70533ad798cSAdam Langley 				   struct tcp_out_options *opts,
7064957faadSWilliam Allen Simpson 				   struct tcp_md5sig_key **md5,
7078336886fSJerry Chu 				   struct tcp_extend_values *xvp,
7088336886fSJerry Chu 				   struct tcp_fastopen_cookie *foc)
7094957faadSWilliam Allen Simpson {
71033ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
71195c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
7124957faadSWilliam Allen Simpson 	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
7134957faadSWilliam Allen Simpson 			 xvp->cookie_plus :
7144957faadSWilliam Allen Simpson 			 0;
71533ad798cSAdam Langley 
71633ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
71733ad798cSAdam Langley 	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
71833ad798cSAdam Langley 	if (*md5) {
71933ad798cSAdam Langley 		opts->options |= OPTION_MD5;
7204957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
7214957faadSWilliam Allen Simpson 
7224957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
7234957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
7244957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
7254957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
7264957faadSWilliam Allen Simpson 		 */
727de213e5eSEric Dumazet 		ireq->tstamp_ok &= !ireq->sack_ok;
72833ad798cSAdam Langley 	}
72933ad798cSAdam Langley #else
73033ad798cSAdam Langley 	*md5 = NULL;
73133ad798cSAdam Langley #endif
73233ad798cSAdam Langley 
7334957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
73433ad798cSAdam Langley 	opts->mss = mss;
7354957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
73633ad798cSAdam Langley 
73733ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
73833ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
73989e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
7404957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
74133ad798cSAdam Langley 	}
742de213e5eSEric Dumazet 	if (likely(ireq->tstamp_ok)) {
74333ad798cSAdam Langley 		opts->options |= OPTION_TS;
74433ad798cSAdam Langley 		opts->tsval = TCP_SKB_CB(skb)->when;
74533ad798cSAdam Langley 		opts->tsecr = req->ts_recent;
7464957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
74733ad798cSAdam Langley 	}
74833ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
74933ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
750de213e5eSEric Dumazet 		if (unlikely(!ireq->tstamp_ok))
7514957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
75233ad798cSAdam Langley 	}
7538336886fSJerry Chu 	if (foc != NULL) {
7548336886fSJerry Chu 		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
7558336886fSJerry Chu 		need = (need + 3) & ~3U;  /* Align to 32 bits */
7568336886fSJerry Chu 		if (remaining >= need) {
7578336886fSJerry Chu 			opts->options |= OPTION_FAST_OPEN_COOKIE;
7588336886fSJerry Chu 			opts->fastopen_cookie = foc;
7598336886fSJerry Chu 			remaining -= need;
7608336886fSJerry Chu 		}
7618336886fSJerry Chu 	}
7624957faadSWilliam Allen Simpson 	/* Similar rationale to tcp_syn_options() applies here, too.
7634957faadSWilliam Allen Simpson 	 * If the <SYN> options fit, the same options should fit now!
7644957faadSWilliam Allen Simpson 	 */
7654957faadSWilliam Allen Simpson 	if (*md5 == NULL &&
766de213e5eSEric Dumazet 	    ireq->tstamp_ok &&
7674957faadSWilliam Allen Simpson 	    cookie_plus > TCPOLEN_COOKIE_BASE) {
7684957faadSWilliam Allen Simpson 		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
7694957faadSWilliam Allen Simpson 
7704957faadSWilliam Allen Simpson 		if (0x2 & need) {
7714957faadSWilliam Allen Simpson 			/* 32-bit multiple */
7724957faadSWilliam Allen Simpson 			need += 2; /* NOPs */
7734957faadSWilliam Allen Simpson 		}
7744957faadSWilliam Allen Simpson 		if (need <= remaining) {
7754957faadSWilliam Allen Simpson 			opts->options |= OPTION_COOKIE_EXTENSION;
7764957faadSWilliam Allen Simpson 			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
7774957faadSWilliam Allen Simpson 			remaining -= need;
7784957faadSWilliam Allen Simpson 		} else {
7794957faadSWilliam Allen Simpson 			/* There's no error return, so flag it. */
7804957faadSWilliam Allen Simpson 			xvp->cookie_out_never = 1; /* true */
7814957faadSWilliam Allen Simpson 			opts->hash_size = 0;
7824957faadSWilliam Allen Simpson 		}
7834957faadSWilliam Allen Simpson 	}
7844957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
78533ad798cSAdam Langley }
78633ad798cSAdam Langley 
78767edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
78867edfef7SAndi Kleen  * final wire format yet.
78967edfef7SAndi Kleen  */
79095c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
79133ad798cSAdam Langley 					struct tcp_out_options *opts,
792cf533ea5SEric Dumazet 					struct tcp_md5sig_key **md5)
793cf533ea5SEric Dumazet {
79433ad798cSAdam Langley 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
79533ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
79695c96174SEric Dumazet 	unsigned int size = 0;
797cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
79833ad798cSAdam Langley 
79933ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
80033ad798cSAdam Langley 	*md5 = tp->af_specific->md5_lookup(sk, sk);
80133ad798cSAdam Langley 	if (unlikely(*md5)) {
80233ad798cSAdam Langley 		opts->options |= OPTION_MD5;
80333ad798cSAdam Langley 		size += TCPOLEN_MD5SIG_ALIGNED;
80433ad798cSAdam Langley 	}
80533ad798cSAdam Langley #else
80633ad798cSAdam Langley 	*md5 = NULL;
80733ad798cSAdam Langley #endif
80833ad798cSAdam Langley 
80933ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
81033ad798cSAdam Langley 		opts->options |= OPTION_TS;
811ee684b6fSAndrey Vagin 		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
81233ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
81333ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
81433ad798cSAdam Langley 	}
81533ad798cSAdam Langley 
816cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
817cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
81895c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
81933ad798cSAdam Langley 		opts->num_sack_blocks =
82095c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
82133ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
82233ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
82333ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
82433ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
82533ad798cSAdam Langley 	}
82633ad798cSAdam Langley 
82733ad798cSAdam Langley 	return size;
82840efc6faSStephen Hemminger }
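/* Worked example: with timestamps in use, size starts at 12, so
 * remaining = 40 - 12 = 28 bytes and at most (28 - 4) / 8 = 3 SACK
 * blocks fit, costing 4 + 3 * 8 = 28 bytes and filling the option space
 * exactly.
 */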
8291da177e4SLinus Torvalds 
83046d3ceabSEric Dumazet 
83146d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
83246d3ceabSEric Dumazet  *
83346d3ceabSEric Dumazet  * The TSQ goal is to keep a small amount of skbs per tcp flow in the tx
83446d3ceabSEric Dumazet  * queues (qdisc + device) to reduce RTT and bufferbloat.
83546d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
83646d3ceabSEric Dumazet  *
83746d3ceabSEric Dumazet  * It's important that tcp_wfree() can be replaced by sock_wfree() in the
83846d3ceabSEric Dumazet  * event the skb needs to be reallocated in a driver.
83946d3ceabSEric Dumazet  * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
84046d3ceabSEric Dumazet  *
84146d3ceabSEric Dumazet  * Since transmitting from an skb destructor is forbidden, we use a tasklet
84246d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
84346d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
84446d3ceabSEric Dumazet  */
84546d3ceabSEric Dumazet struct tsq_tasklet {
84646d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
84746d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
84846d3ceabSEric Dumazet };
84946d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
85046d3ceabSEric Dumazet 
8516f458dfbSEric Dumazet static void tcp_tsq_handler(struct sock *sk)
8526f458dfbSEric Dumazet {
8536f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
8546f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
8556f458dfbSEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
8566f458dfbSEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
8576f458dfbSEric Dumazet }
85846d3ceabSEric Dumazet /*
85946d3ceabSEric Dumazet  * One tasklet per cpu tries to send more skbs.
86046d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
86146d3ceabSEric Dumazet  * transferring tsq->head because tcp_wfree() might
86246d3ceabSEric Dumazet  * interrupt us (non-NAPI drivers).
86346d3ceabSEric Dumazet  */
86446d3ceabSEric Dumazet static void tcp_tasklet_func(unsigned long data)
86546d3ceabSEric Dumazet {
86646d3ceabSEric Dumazet 	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
86746d3ceabSEric Dumazet 	LIST_HEAD(list);
86846d3ceabSEric Dumazet 	unsigned long flags;
86946d3ceabSEric Dumazet 	struct list_head *q, *n;
87046d3ceabSEric Dumazet 	struct tcp_sock *tp;
87146d3ceabSEric Dumazet 	struct sock *sk;
87246d3ceabSEric Dumazet 
87346d3ceabSEric Dumazet 	local_irq_save(flags);
87446d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
87546d3ceabSEric Dumazet 	local_irq_restore(flags);
87646d3ceabSEric Dumazet 
87746d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
87846d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
87946d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
88046d3ceabSEric Dumazet 
88146d3ceabSEric Dumazet 		sk = (struct sock *)tp;
88246d3ceabSEric Dumazet 		bh_lock_sock(sk);
88346d3ceabSEric Dumazet 
88446d3ceabSEric Dumazet 		if (!sock_owned_by_user(sk)) {
8856f458dfbSEric Dumazet 			tcp_tsq_handler(sk);
88646d3ceabSEric Dumazet 		} else {
88746d3ceabSEric Dumazet 			/* defer the work to tcp_release_cb() */
8886f458dfbSEric Dumazet 			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
88946d3ceabSEric Dumazet 		}
89046d3ceabSEric Dumazet 		bh_unlock_sock(sk);
89146d3ceabSEric Dumazet 
89246d3ceabSEric Dumazet 		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
89346d3ceabSEric Dumazet 		sk_free(sk);
89446d3ceabSEric Dumazet 	}
89546d3ceabSEric Dumazet }
89646d3ceabSEric Dumazet 
8976f458dfbSEric Dumazet #define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
8986f458dfbSEric Dumazet 			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
899563d34d0SEric Dumazet 			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
900563d34d0SEric Dumazet 			  (1UL << TCP_MTU_REDUCED_DEFERRED))
90146d3ceabSEric Dumazet /**
90246d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
90346d3ceabSEric Dumazet  * @sk: socket
90446d3ceabSEric Dumazet  *
90546d3ceabSEric Dumazet  * Called from release_sock() to perform protocol-dependent
90646d3ceabSEric Dumazet  * actions before the socket is released.
90746d3ceabSEric Dumazet  */
90846d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
90946d3ceabSEric Dumazet {
91046d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
9116f458dfbSEric Dumazet 	unsigned long flags, nflags;
91246d3ceabSEric Dumazet 
9136f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
9146f458dfbSEric Dumazet 	do {
9156f458dfbSEric Dumazet 		flags = tp->tsq_flags;
9166f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
9176f458dfbSEric Dumazet 			return;
9186f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
9196f458dfbSEric Dumazet 	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
9206f458dfbSEric Dumazet 
9216f458dfbSEric Dumazet 	if (flags & (1UL << TCP_TSQ_DEFERRED))
9226f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
9236f458dfbSEric Dumazet 
924144d56e9SEric Dumazet 	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
9256f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
926144d56e9SEric Dumazet 		__sock_put(sk);
927144d56e9SEric Dumazet 	}
928144d56e9SEric Dumazet 	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
9296f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
930144d56e9SEric Dumazet 		__sock_put(sk);
931144d56e9SEric Dumazet 	}
932144d56e9SEric Dumazet 	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
933563d34d0SEric Dumazet 		sk->sk_prot->mtu_reduced(sk);
934144d56e9SEric Dumazet 		__sock_put(sk);
935144d56e9SEric Dumazet 	}
93646d3ceabSEric Dumazet }
93746d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
93846d3ceabSEric Dumazet 
93946d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
94046d3ceabSEric Dumazet {
94146d3ceabSEric Dumazet 	int i;
94246d3ceabSEric Dumazet 
94346d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
94446d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
94546d3ceabSEric Dumazet 
94646d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
94746d3ceabSEric Dumazet 		tasklet_init(&tsq->tasklet,
94846d3ceabSEric Dumazet 			     tcp_tasklet_func,
94946d3ceabSEric Dumazet 			     (unsigned long)tsq);
95046d3ceabSEric Dumazet 	}
95146d3ceabSEric Dumazet }
95246d3ceabSEric Dumazet 
95346d3ceabSEric Dumazet /*
95446d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
95546d3ceabSEric Dumazet  * We can't xmit new skbs from this context, as we might already
95646d3ceabSEric Dumazet  * hold the qdisc lock.
95746d3ceabSEric Dumazet  */
9588e7dfbc8SSilviu-Mihai Popescu static void tcp_wfree(struct sk_buff *skb)
95946d3ceabSEric Dumazet {
96046d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
96146d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
96246d3ceabSEric Dumazet 
96346d3ceabSEric Dumazet 	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
96446d3ceabSEric Dumazet 	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
96546d3ceabSEric Dumazet 		unsigned long flags;
96646d3ceabSEric Dumazet 		struct tsq_tasklet *tsq;
96746d3ceabSEric Dumazet 
96846d3ceabSEric Dumazet 		/* Keep a ref on the socket.
96946d3ceabSEric Dumazet 		 * This last ref will be released in tcp_tasklet_func()
97046d3ceabSEric Dumazet 		 */
97146d3ceabSEric Dumazet 		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
97246d3ceabSEric Dumazet 
97346d3ceabSEric Dumazet 		/* queue this socket to tasklet queue */
97446d3ceabSEric Dumazet 		local_irq_save(flags);
97546d3ceabSEric Dumazet 		tsq = &__get_cpu_var(tsq_tasklet);
97646d3ceabSEric Dumazet 		list_add(&tp->tsq_node, &tsq->head);
97746d3ceabSEric Dumazet 		tasklet_schedule(&tsq->tasklet);
97846d3ceabSEric Dumazet 		local_irq_restore(flags);
97946d3ceabSEric Dumazet 	} else {
98046d3ceabSEric Dumazet 		sock_wfree(skb);
98146d3ceabSEric Dumazet 	}
98246d3ceabSEric Dumazet }
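/* The TSQ round trip, in short: tcp_transmit_skb() charges
 * skb->truesize to sk->sk_wmem_alloc and installs tcp_wfree() as the
 * destructor; when the skb is freed after transmit, tcp_wfree() queues
 * the socket on the per-cpu list and schedules the tasklet, which sends
 * more skbs via tcp_tsq_handler() or defers that work to
 * tcp_release_cb() when the socket is owned by user context.
 */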
98346d3ceabSEric Dumazet 
9841da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued up by
9851da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
9861da177e4SLinus Torvalds  * transmission and possible later retransmissions.
9871da177e4SLinus Torvalds  * All SKBs seen here are completely headerless.  It is our
9881da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
9891da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
9901da177e4SLinus Torvalds  * device.
9911da177e4SLinus Torvalds  *
9921da177e4SLinus Torvalds  * We are working here with either a clone of the original
9931da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
9941da177e4SLinus Torvalds  */
995056834d9SIlpo Järvinen static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
996056834d9SIlpo Järvinen 			    gfp_t gfp_mask)
9971da177e4SLinus Torvalds {
9986687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
999dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1000dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1001dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
100233ad798cSAdam Langley 	struct tcp_out_options opts;
100395c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
1004cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10051da177e4SLinus Torvalds 	struct tcphdr *th;
10061da177e4SLinus Torvalds 	int err;
10071da177e4SLinus Torvalds 
1008dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
1009dfb4b9dcSDavid S. Miller 
1010dfb4b9dcSDavid S. Miller 	/* If congestion control is doing timestamping, we must
1011dfb4b9dcSDavid S. Miller 	 * take such a timestamp before we potentially clone/copy.
1012dfb4b9dcSDavid S. Miller 	 */
1013164891aaSStephen Hemminger 	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
1014dfb4b9dcSDavid S. Miller 		__net_timestamp(skb);
1015dfb4b9dcSDavid S. Miller 
1016dfb4b9dcSDavid S. Miller 	if (likely(clone_it)) {
1017dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
1018dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
1019dfb4b9dcSDavid S. Miller 		else
1020dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
1021dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1022dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1023dfb4b9dcSDavid S. Miller 	}
1024dfb4b9dcSDavid S. Miller 
1025dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1026dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
1027dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
102833ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10291da177e4SLinus Torvalds 
10304de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
103133ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
103233ad798cSAdam Langley 	else
103333ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
103433ad798cSAdam Langley 							   &md5);
103533ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10361da177e4SLinus Torvalds 
10373853b584STom Herbert 	if (tcp_packets_in_flight(tp) == 0) {
10386687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
10393853b584STom Herbert 		skb->ooo_okay = 1;
10403853b584STom Herbert 	} else
10413853b584STom Herbert 		skb->ooo_okay = 0;
10421da177e4SLinus Torvalds 
1043aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1044aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
104546d3ceabSEric Dumazet 
104646d3ceabSEric Dumazet 	skb_orphan(skb);
104746d3ceabSEric Dumazet 	skb->sk = sk;
104846d3ceabSEric Dumazet 	skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
104946d3ceabSEric Dumazet 			  tcp_wfree : sock_wfree;
105046d3ceabSEric Dumazet 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
10511da177e4SLinus Torvalds 
10521da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1053aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
1054c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1055c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10561da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10571da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
1058df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10594de075e0SEric Dumazet 					tcb->tcp_flags);
1060dfb4b9dcSDavid S. Miller 
10614de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
10621da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
10631da177e4SLinus Torvalds 		 * is never scaled.
10641da177e4SLinus Torvalds 		 */
1065600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
10661da177e4SLinus Torvalds 	} else {
10671da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
10681da177e4SLinus Torvalds 	}
10691da177e4SLinus Torvalds 	th->check		= 0;
10701da177e4SLinus Torvalds 	th->urg_ptr		= 0;
10711da177e4SLinus Torvalds 
107233f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
10737691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
10747691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
10751da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
10761da177e4SLinus Torvalds 			th->urg = 1;
10777691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
10780eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
10797691367dSHerbert Xu 			th->urg = 1;
10807691367dSHerbert Xu 		}
10811da177e4SLinus Torvalds 	}
10821da177e4SLinus Torvalds 
1083bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
10844de075e0SEric Dumazet 	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
10859e412ba7SIlpo Järvinen 		TCP_ECN_send(sk, skb, tcp_header_size);
1086dfb4b9dcSDavid S. Miller 
1087cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1088cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1089cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1090a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1091bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
109249a72dfbSAdam Langley 					       md5, sk, NULL, skb);
1093cfb6eeb4SYOSHIFUJI Hideaki 	}
1094cfb6eeb4SYOSHIFUJI Hideaki #endif
1095cfb6eeb4SYOSHIFUJI Hideaki 
1096bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
10971da177e4SLinus Torvalds 
10984de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1099fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
11001da177e4SLinus Torvalds 
11011da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
1102cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
11031da177e4SLinus Torvalds 
1104bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1105aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1106aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11071da177e4SLinus Torvalds 
1108d9d8da80SDavid S. Miller 	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
110983de47cdSHua Zhong 	if (likely(err <= 0))
11101da177e4SLinus Torvalds 		return err;
11111da177e4SLinus Torvalds 
11123cfe3baaSIlpo Järvinen 	tcp_enter_cwr(sk, 1);
11131da177e4SLinus Torvalds 
1114b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
11151da177e4SLinus Torvalds }
11161da177e4SLinus Torvalds 
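/* Worked example (illustration, not part of this file): the doff/flags
 * word tcp_transmit_skb() stores at byte offset 12 of the header packs
 * the 4-bit data offset (in 32-bit words) with the flag bits.  For a
 * 32-byte header (20 base + 12 option bytes), 32 >> 2 == 8, so the
 * word is (8 << 12) | flags.  A plain-C sketch of the same packing:
 */
static inline unsigned short example_doff_flags(unsigned int hdr_bytes,
						unsigned char flags)
{
	return (unsigned short)(((hdr_bytes >> 2) << 12) | flags);
}
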
111767edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11181da177e4SLinus Torvalds  *
11191da177e4SLinus Torvalds  * NOTE: the probe0 timer is not checked; do not forget to call
11201da177e4SLinus Torvalds  * tcp_push_pending_frames(), otherwise the socket can stall.
11211da177e4SLinus Torvalds  */
11221da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11231da177e4SLinus Torvalds {
11241da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11271da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
11281da177e4SLinus Torvalds 	skb_header_release(skb);
1129fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11303ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11313ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11321da177e4SLinus Torvalds }
11331da177e4SLinus Torvalds 
113467edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
1135cf533ea5SEric Dumazet static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
1136056834d9SIlpo Järvinen 				 unsigned int mss_now)
1137f6302d1dSDavid S. Miller {
11388e5b9ddaSHerbert Xu 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
11398e5b9ddaSHerbert Xu 	    skb->ip_summed == CHECKSUM_NONE) {
1140f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1141f6302d1dSDavid S. Miller 		 * non-TSO case.
1142f6302d1dSDavid S. Miller 		 */
11437967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
11447967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
1145c9af6db4SPravin B Shelar 		skb_shinfo(skb)->gso_type = 0;
1146f6302d1dSDavid S. Miller 	} else {
1147356f89e1SIlpo Järvinen 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
11487967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = mss_now;
1149c9af6db4SPravin B Shelar 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
11501da177e4SLinus Torvalds 	}
11511da177e4SLinus Torvalds }
11521da177e4SLinus Torvalds 
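/* Worked example (illustration, not part of this file): with
 * mss_now = 1448 and a 65160-byte GSO-capable skb, the else branch
 * above sets gso_size = 1448 and
 * gso_segs = DIV_ROUND_UP(65160, 1448) = 45, letting the lower layers
 * slice the single skb into 45 full-sized segments.
 */
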
115391fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
115468f8353bSIlpo Järvinen  * whether skb is counted in fackets_out or not.
115591fed7a1SIlpo Järvinen  */
1156cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
115791fed7a1SIlpo Järvinen 				   int decr)
115891fed7a1SIlpo Järvinen {
1159a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1160a47e5a98SIlpo Järvinen 
1161dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
116291fed7a1SIlpo Järvinen 		return;
116391fed7a1SIlpo Järvinen 
11646859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
116591fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
116691fed7a1SIlpo Järvinen }
116791fed7a1SIlpo Järvinen 
1168797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various
1169797108d1SIlpo Järvinen  * tweaks to fix counters
1170797108d1SIlpo Järvinen  */
1171cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1172797108d1SIlpo Järvinen {
1173797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1174797108d1SIlpo Järvinen 
1175797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1176797108d1SIlpo Järvinen 
1177797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1178797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1179797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1180797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1181797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1182797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1183797108d1SIlpo Järvinen 
1184797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1185797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1186797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1187797108d1SIlpo Järvinen 
1188797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1189797108d1SIlpo Järvinen 
1190797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1191797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
119252cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1193797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1194797108d1SIlpo Järvinen 
1195797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1196797108d1SIlpo Järvinen }
1197797108d1SIlpo Järvinen 
11981da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
11991da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12001da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12011da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12021da177e4SLinus Torvalds  */
1203056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1204056834d9SIlpo Järvinen 		 unsigned int mss_now)
12051da177e4SLinus Torvalds {
12061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12071da177e4SLinus Torvalds 	struct sk_buff *buff;
12086475be16SDavid S. Miller 	int nsize, old_factor;
1209b60b49eaSHerbert Xu 	int nlen;
12109ce01461SIlpo Järvinen 	u8 flags;
12111da177e4SLinus Torvalds 
12122fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12132fceec13SIlpo Järvinen 		return -EINVAL;
12146a438bbeSStephen Hemminger 
12151da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
12161da177e4SLinus Torvalds 	if (nsize < 0)
12171da177e4SLinus Torvalds 		nsize = 0;
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
12201da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
12211da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
12221da177e4SLinus Torvalds 		return -ENOMEM;
12231da177e4SLinus Torvalds 
12241da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
12251da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
12261da177e4SLinus Torvalds 	if (buff == NULL)
12271da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1228ef5cb973SHerbert Xu 
12293ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
12303ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1231b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1232b60b49eaSHerbert Xu 	buff->truesize += nlen;
1233b60b49eaSHerbert Xu 	skb->truesize -= nlen;
12341da177e4SLinus Torvalds 
12351da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
12361da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
12371da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
12381da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
12414de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
12424de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
12434de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1244e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
12451da177e4SLinus Torvalds 
124684fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
12471da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1248056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1249056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
12501da177e4SLinus Torvalds 						       nsize, 0);
12511da177e4SLinus Torvalds 
12521da177e4SLinus Torvalds 		skb_trim(skb, len);
12531da177e4SLinus Torvalds 
12541da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
12551da177e4SLinus Torvalds 	} else {
125684fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
12571da177e4SLinus Torvalds 		skb_split(skb, buff, len);
12581da177e4SLinus Torvalds 	}
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' field of
12631da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
12641da177e4SLinus Torvalds 	 */
12651da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1266a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
12671da177e4SLinus Torvalds 
12686475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
12696475be16SDavid S. Miller 
12701da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1271846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1272846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
12731da177e4SLinus Torvalds 
12746475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
12756475be16SDavid S. Miller 	 * adjust the various packet counters.
12766475be16SDavid S. Miller 	 */
1277cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
12786475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
12796475be16SDavid S. Miller 			tcp_skb_pcount(buff);
12801da177e4SLinus Torvalds 
1281797108d1SIlpo Järvinen 		if (diff)
1282797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
12831da177e4SLinus Torvalds 	}
12841da177e4SLinus Torvalds 
12851da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1286f44b5271SDavid S. Miller 	skb_header_release(buff);
1287fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
12881da177e4SLinus Torvalds 
12891da177e4SLinus Torvalds 	return 0;
12901da177e4SLinus Torvalds }
12911da177e4SLinus Torvalds 
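/* Worked example (illustration, not part of this file): fragmenting a
 * 2896-byte skb covering seq [1000, 3896) with len = 1448 leaves the
 * original at [1000, 2448) and the new buff at [2448, 3896); per the
 * flag handling above, PSH and FIN travel only in the second piece.
 */
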
12921da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
12931da177e4SLinus Torvalds  * eventually). The difference is that pulled data is not copied, but
12941da177e4SLinus Torvalds  * immediately discarded.
12951da177e4SLinus Torvalds  */
1296f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
12971da177e4SLinus Torvalds {
12981da177e4SLinus Torvalds 	int i, k, eat;
12991da177e4SLinus Torvalds 
13004fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13014fa48bf3SEric Dumazet 	if (eat) {
13024fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
130322b4a4f2SEric Dumazet 		skb->avail_size -= eat;
13044fa48bf3SEric Dumazet 		len -= eat;
13054fa48bf3SEric Dumazet 		if (!len)
13064fa48bf3SEric Dumazet 			return;
13074fa48bf3SEric Dumazet 	}
13081da177e4SLinus Torvalds 	eat = len;
13091da177e4SLinus Torvalds 	k = 0;
13101da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
13119e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
13129e903e08SEric Dumazet 
13139e903e08SEric Dumazet 		if (size <= eat) {
1314aff65da0SIan Campbell 			skb_frag_unref(skb, i);
13159e903e08SEric Dumazet 			eat -= size;
13161da177e4SLinus Torvalds 		} else {
13171da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
13181da177e4SLinus Torvalds 			if (eat) {
13191da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
13209e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
13211da177e4SLinus Torvalds 				eat = 0;
13221da177e4SLinus Torvalds 			}
13231da177e4SLinus Torvalds 			k++;
13241da177e4SLinus Torvalds 		}
13251da177e4SLinus Torvalds 	}
13261da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
13271da177e4SLinus Torvalds 
132827a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
13291da177e4SLinus Torvalds 	skb->data_len -= len;
13301da177e4SLinus Torvalds 	skb->len = skb->data_len;
13311da177e4SLinus Torvalds }
13321da177e4SLinus Torvalds 
133367edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
13341da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
13351da177e4SLinus Torvalds {
133614bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
13371da177e4SLinus Torvalds 		return -ENOMEM;
13381da177e4SLinus Torvalds 
13394fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
13401da177e4SLinus Torvalds 
13411da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
134284fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds 	skb->truesize	     -= len;
13451da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
13463ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
13471da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
13481da177e4SLinus Torvalds 
13495b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
13501da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
13515b35e1e6SNeal Cardwell 		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
13521da177e4SLinus Torvalds 
13531da177e4SLinus Torvalds 	return 0;
13541da177e4SLinus Torvalds }
13551da177e4SLinus Torvalds 
13561b63edd6SYuchung Cheng /* Calculate MSS, not accounting for any TCP options.  */
13571b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
13585d424d5aSJohn Heffner {
1359cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1360cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
13615d424d5aSJohn Heffner 	int mss_now;
13625d424d5aSJohn Heffner 
13635d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
13645d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) from RFC 1122
13655d424d5aSJohn Heffner 	 */
13665d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
13675d424d5aSJohn Heffner 
136867469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
136967469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
137067469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
137167469601SEric Dumazet 
137267469601SEric Dumazet 		if (dst && dst_allfrag(dst))
137367469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
137467469601SEric Dumazet 	}
137567469601SEric Dumazet 
13765d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
13775d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
13785d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
13795d424d5aSJohn Heffner 
13805d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
13815d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
13825d424d5aSJohn Heffner 
13835d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
13845d424d5aSJohn Heffner 	if (mss_now < 48)
13855d424d5aSJohn Heffner 		mss_now = 48;
13865d424d5aSJohn Heffner 	return mss_now;
13875d424d5aSJohn Heffner }
13885d424d5aSJohn Heffner 
13891b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
13901b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
13911b63edd6SYuchung Cheng {
13921b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
13931b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
13941b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
13951b63edd6SYuchung Cheng }
13961b63edd6SYuchung Cheng 
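/* Worked example (illustration, not part of this file): for IPv4 with
 * no IP options and no allfrag header, a path MTU of 1500 gives
 * __tcp_mtu_to_mss() 1500 - 20 - 20 = 1460; with timestamps on,
 * tcp_header_len is 32, so tcp_mtu_to_mss() subtracts another 12
 * option bytes and returns 1448.  A plain-C sketch under those
 * assumptions:
 */
static inline int example_ipv4_mtu_to_mss(int pmtu, int opt_bytes)
{
	return pmtu - 20 /* iphdr */ - 20 /* tcphdr */ - opt_bytes;
}
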
13975d424d5aSJohn Heffner /* Inverse of above */
139867469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
13995d424d5aSJohn Heffner {
1400cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1401cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14025d424d5aSJohn Heffner 	int mtu;
14035d424d5aSJohn Heffner 
14045d424d5aSJohn Heffner 	mtu = mss +
14055d424d5aSJohn Heffner 	      tp->tcp_header_len +
14065d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14075d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14085d424d5aSJohn Heffner 
140967469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
141067469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
141167469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
141267469601SEric Dumazet 
141367469601SEric Dumazet 		if (dst && dst_allfrag(dst))
141467469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
141567469601SEric Dumazet 	}
14165d424d5aSJohn Heffner 	return mtu;
14175d424d5aSJohn Heffner }
14185d424d5aSJohn Heffner 
141967edfef7SAndi Kleen /* MTU probing init per socket */
14205d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14215d424d5aSJohn Heffner {
14225d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
14235d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
14245d424d5aSJohn Heffner 
14255d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
14265d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
14275d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
14285d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
14295d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
14305d424d5aSJohn Heffner }
14314bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
14325d424d5aSJohn Heffner 
14331da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
14341da177e4SLinus Torvalds 
14351da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
14361da177e4SLinus Torvalds    account for TCP options, but covers only the bare TCP header.
14371da177e4SLinus Torvalds 
14381da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1439caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
14401da177e4SLinus Torvalds    It also does not include TCP options.
14411da177e4SLinus Torvalds 
1442d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
14431da177e4SLinus Torvalds 
14441da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
14451da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
14461da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
14471da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
14481da177e4SLinus Torvalds 
14491da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
14501da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
14511da177e4SLinus Torvalds 
1452d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1453d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
14541da177e4SLinus Torvalds  */
14551da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
14561da177e4SLinus Torvalds {
14571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1458d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
14595d424d5aSJohn Heffner 	int mss_now;
14601da177e4SLinus Torvalds 
14615d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
14625d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
14631da177e4SLinus Torvalds 
14645d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1465409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
14661da177e4SLinus Torvalds 
14671da177e4SLinus Torvalds 	/* And store cached results */
1468d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
14695d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
14705d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1471c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
14721da177e4SLinus Torvalds 
14731da177e4SLinus Torvalds 	return mss_now;
14741da177e4SLinus Torvalds }
14754bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
14761da177e4SLinus Torvalds 
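/* Usage sketch (illustration, not part of this file): an incoming ICMP
 * "fragmentation needed" eventually reaches the af-specific
 * mtu_reduced handler, which ends up calling something like
 *	tcp_sync_mss(sk, 1400);
 * so tp->mss_cache is re-derived from the smaller PMTU before the
 * next transmit.
 */
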
14771da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
14781da177e4SLinus Torvalds  * and even PMTU discovery events into account.
14791da177e4SLinus Torvalds  */
14800c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
14811da177e4SLinus Torvalds {
1482cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1483cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1484c1b4a7e6SDavid S. Miller 	u32 mss_now;
148595c96174SEric Dumazet 	unsigned int header_len;
148633ad798cSAdam Langley 	struct tcp_out_options opts;
148733ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
14881da177e4SLinus Torvalds 
1489c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1490c1b4a7e6SDavid S. Miller 
14911da177e4SLinus Torvalds 	if (dst) {
14921da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1493d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
14941da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
14951da177e4SLinus Torvalds 	}
14961da177e4SLinus Torvalds 
149733ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
149833ad798cSAdam Langley 		     sizeof(struct tcphdr);
149933ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
150033ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
150133ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
150233ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
150333ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
150433ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
150533ad798cSAdam Langley 		mss_now -= delta;
150633ad798cSAdam Langley 	}
1507cfb6eeb4SYOSHIFUJI Hideaki 
15081da177e4SLinus Torvalds 	return mss_now;
15091da177e4SLinus Torvalds }
15101da177e4SLinus Torvalds 
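/* Worked example (illustration, not part of this file): with timestamps,
 * tp->tcp_header_len is 20 + 12 = 32.  While one SACK block is pending,
 * tcp_established_options() adds 12 more option bytes, so
 * header_len = 44, delta = 12, and a cached mss of 1448 is trimmed to
 * 1436 for this packet only.
 */
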
1511a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
15129e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1513a762a980SDavid S. Miller {
15149e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1515a762a980SDavid S. Miller 
1516d436d686SIlpo Järvinen 	if (tp->packets_out >= tp->snd_cwnd) {
1517a762a980SDavid S. Miller 		/* Network is fed fully. */
1518a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1519a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1520a762a980SDavid S. Miller 	} else {
1521a762a980SDavid S. Miller 		/* Network starves. */
1522a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1523a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1524a762a980SDavid S. Miller 
152515d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
152615d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1527a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1528a762a980SDavid S. Miller 	}
1529a762a980SDavid S. Miller }
1530a762a980SDavid S. Miller 
15310e3a4803SIlpo Järvinen /* Returns the portion of skb which can be sent right away without
15320e3a4803SIlpo Järvinen  * introducing MSS oddities to segment boundaries. In rare cases where
15330e3a4803SIlpo Järvinen  * mss_now != mss_cache, we will request the caller to create a small skb
15340e3a4803SIlpo Järvinen  * per input skb which could be mostly avoided here (if desired).
15355ea3a748SIlpo Järvinen  *
15365ea3a748SIlpo Järvinen  * We explicitly want to create a request for splitting write queue tail
15375ea3a748SIlpo Järvinen  * to a small skb for Nagle purposes while avoiding unnecessary modulos,
15385ea3a748SIlpo Järvinen  * thus all the complexity (cwnd_len is always MSS multiple which we
15395ea3a748SIlpo Järvinen  * thus all the complexity (cwnd_len is always an MSS multiple, which we
15405ea3a748SIlpo Järvinen  * modulo only when the receiver window alone is the limiting factor or
15415ea3a748SIlpo Järvinen  * when we would be allowed to send the split-due-to-Nagle skb fully.
15420e3a4803SIlpo Järvinen  */
1543cf533ea5SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
15441485348dSBen Hutchings 					unsigned int mss_now, unsigned int max_segs)
1545c1b4a7e6SDavid S. Miller {
1546cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
15471485348dSBen Hutchings 	u32 needed, window, max_len;
1548c1b4a7e6SDavid S. Miller 
154990840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
15501485348dSBen Hutchings 	max_len = mss_now * max_segs;
15510e3a4803SIlpo Järvinen 
15521485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
15531485348dSBen Hutchings 		return max_len;
15540e3a4803SIlpo Järvinen 
15555ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
15565ea3a748SIlpo Järvinen 
15571485348dSBen Hutchings 	if (max_len <= needed)
15581485348dSBen Hutchings 		return max_len;
15590e3a4803SIlpo Järvinen 
15600e3a4803SIlpo Järvinen 	return needed - needed % mss_now;
1561c1b4a7e6SDavid S. Miller }
1562c1b4a7e6SDavid S. Miller 
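/* Worked example (illustration, not part of this file): mss_now = 1448,
 * a 10000-byte tail skb, and only 5000 bytes of send window left:
 * needed = 5000 and the function returns 5000 - 5000 % 1448 = 4344,
 * i.e. exactly three full segments, avoiding a sub-MSS tail mid-queue.
 */
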
1563c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1564c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1565c1b4a7e6SDavid S. Miller  */
1566cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1567cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1568c1b4a7e6SDavid S. Miller {
1569c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1570c1b4a7e6SDavid S. Miller 
1571c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
15724de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
15734de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1574c1b4a7e6SDavid S. Miller 		return 1;
1575c1b4a7e6SDavid S. Miller 
1576c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1577c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1578c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1579c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1580c1b4a7e6SDavid S. Miller 
1581c1b4a7e6SDavid S. Miller 	return 0;
1582c1b4a7e6SDavid S. Miller }
1583c1b4a7e6SDavid S. Miller 
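/* Worked example (illustration, not part of this file): with
 * tp->snd_cwnd = 10 and tcp_packets_in_flight(tp) = 6, tcp_cwnd_test()
 * returns 4, so at most four more segments may go out before new ACKs
 * open the congestion window further.
 */
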
1584b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
158567edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1586c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1587c1b4a7e6SDavid S. Miller  */
1588cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1589056834d9SIlpo Järvinen 			     unsigned int mss_now)
1590c1b4a7e6SDavid S. Miller {
1591c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1592c1b4a7e6SDavid S. Miller 
1593f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1594846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1595c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1596c1b4a7e6SDavid S. Miller 	}
1597c1b4a7e6SDavid S. Miller 	return tso_segs;
1598c1b4a7e6SDavid S. Miller }
1599c1b4a7e6SDavid S. Miller 
160067edfef7SAndi Kleen /* Minshall's variant of the Nagle send check. */
1601a2a385d6SEric Dumazet static inline bool tcp_minshall_check(const struct tcp_sock *tp)
1602c1b4a7e6SDavid S. Miller {
1603c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml, tp->snd_una) &&
1604c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
1605c1b4a7e6SDavid S. Miller }
1606c1b4a7e6SDavid S. Miller 
1607a2a385d6SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1608c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1609c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
16106d67e9beSFeng King  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1611c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1612c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1613c1b4a7e6SDavid S. Miller  */
1614a2a385d6SEric Dumazet static inline bool tcp_nagle_check(const struct tcp_sock *tp,
1615c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
161695c96174SEric Dumazet 				  unsigned int mss_now, int nonagle)
1617c1b4a7e6SDavid S. Miller {
1618a02cec21SEric Dumazet 	return skb->len < mss_now &&
1619c1b4a7e6SDavid S. Miller 		((nonagle & TCP_NAGLE_CORK) ||
1620a02cec21SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1621c1b4a7e6SDavid S. Miller }
1622c1b4a7e6SDavid S. Miller 
1623a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1624c1b4a7e6SDavid S. Miller  * sent now.
1625c1b4a7e6SDavid S. Miller  */
1626a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1627c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1628c1b4a7e6SDavid S. Miller {
1629c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1630c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1631c1b4a7e6SDavid S. Miller 	 *
1632c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1633c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1634c1b4a7e6SDavid S. Miller 	 */
1635c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1636a2a385d6SEric Dumazet 		return true;
1637c1b4a7e6SDavid S. Miller 
1638d551e454SIlpo Järvinen 	/* Don't use the nagle rule for urgent data (or for the final FIN).
1639d551e454SIlpo Järvinen 	 * Nagle can be ignored during F-RTO too (see RFC4138).
1640d551e454SIlpo Järvinen 	 */
164133f5f57eSIlpo Järvinen 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
16424de075e0SEric Dumazet 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1643a2a385d6SEric Dumazet 		return true;
1644c1b4a7e6SDavid S. Miller 
1645c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1646a2a385d6SEric Dumazet 		return true;
1647c1b4a7e6SDavid S. Miller 
1648a2a385d6SEric Dumazet 	return false;
1649c1b4a7e6SDavid S. Miller }
1650c1b4a7e6SDavid S. Miller 
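/* Worked example (illustration, not part of this file): a 100-byte tail
 * skb with TCP_NODELAY and TCP_CORK both off, packets_out > 0, and a
 * small unacked packet outstanding (tcp_minshall_check() true) makes
 * tcp_nagle_check() return true, so tcp_nagle_test() returns false and
 * the data waits for an ACK before being sent.
 */
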
1651c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1652a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1653a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1654056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1655c1b4a7e6SDavid S. Miller {
1656c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1657c1b4a7e6SDavid S. Miller 
1658c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1659c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1660c1b4a7e6SDavid S. Miller 
166190840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1662c1b4a7e6SDavid S. Miller }
1663c1b4a7e6SDavid S. Miller 
1664fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1665c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1666c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1667c1b4a7e6SDavid S. Miller  */
1668cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1669c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1670c1b4a7e6SDavid S. Miller {
1671cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1672c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1673c1b4a7e6SDavid S. Miller 
1674846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1675c1b4a7e6SDavid S. Miller 
1676c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1677c1b4a7e6SDavid S. Miller 		return 0;
1678c1b4a7e6SDavid S. Miller 
1679c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1680056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1681c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1682c1b4a7e6SDavid S. Miller 
1683c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1684c1b4a7e6SDavid S. Miller }
1685c1b4a7e6SDavid S. Miller 
168667edfef7SAndi Kleen /* Test if sending is allowed right now. */
1687a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1688c1b4a7e6SDavid S. Miller {
1689cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1690fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1691c1b4a7e6SDavid S. Miller 
1692a02cec21SEric Dumazet 	return skb &&
16930c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1694c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1695a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1696c1b4a7e6SDavid S. Miller }
1697c1b4a7e6SDavid S. Miller 
1698c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1699c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1700c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1701c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1702c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1703c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1704c1b4a7e6SDavid S. Miller  */
1705056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1706c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1707c1b4a7e6SDavid S. Miller {
1708c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1709c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
17109ce01461SIlpo Järvinen 	u8 flags;
1711c1b4a7e6SDavid S. Miller 
1712c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1713c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1714c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1715c1b4a7e6SDavid S. Miller 
1716c4ead4c5SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp);
1717c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1718c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1719c1b4a7e6SDavid S. Miller 
17203ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
17213ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1722b60b49eaSHerbert Xu 	buff->truesize += nlen;
1723c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1724c1b4a7e6SDavid S. Miller 
1725c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1726c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1727c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1728c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1729c1b4a7e6SDavid S. Miller 
1730c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
17314de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
17324de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
17334de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1734c1b4a7e6SDavid S. Miller 
1735c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1736c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1737c1b4a7e6SDavid S. Miller 
173884fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1739c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1740c1b4a7e6SDavid S. Miller 
1741c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1742846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1743846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1744c1b4a7e6SDavid S. Miller 
1745c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1746c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1747fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1748c1b4a7e6SDavid S. Miller 
1749c1b4a7e6SDavid S. Miller 	return 0;
1750c1b4a7e6SDavid S. Miller }
1751c1b4a7e6SDavid S. Miller 
1752c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1753c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1754c1b4a7e6SDavid S. Miller  *
1755c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1756c1b4a7e6SDavid S. Miller  */
1757a2a385d6SEric Dumazet static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1758c1b4a7e6SDavid S. Miller {
17599e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
17606687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1761c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1762ad9f4f50SEric Dumazet 	int win_divisor;
1763c1b4a7e6SDavid S. Miller 
17644de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1765ae8064acSJohn Heffner 		goto send_now;
1766c1b4a7e6SDavid S. Miller 
17676687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1768ae8064acSJohn Heffner 		goto send_now;
1769ae8064acSJohn Heffner 
1770ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1771bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1772a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1773ae8064acSJohn Heffner 		goto send_now;
1774908a75c1SDavid S. Miller 
1775c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1776c1b4a7e6SDavid S. Miller 
1777056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1778c1b4a7e6SDavid S. Miller 
177990840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1780c1b4a7e6SDavid S. Miller 
1781c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1782c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1783c1b4a7e6SDavid S. Miller 
1784c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1785c1b4a7e6SDavid S. Miller 
1786ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
17871485348dSBen Hutchings 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
17881485348dSBen Hutchings 			   sk->sk_gso_max_segs * tp->mss_cache))
1789ae8064acSJohn Heffner 		goto send_now;
1790ba244fe9SDavid S. Miller 
179162ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
179262ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
179362ad2761SIlpo Järvinen 		goto send_now;
179462ad2761SIlpo Järvinen 
1795ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1796ad9f4f50SEric Dumazet 	if (win_divisor) {
1797c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1798c1b4a7e6SDavid S. Miller 
1799c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1800c1b4a7e6SDavid S. Miller 		 * just use it.
1801c1b4a7e6SDavid S. Miller 		 */
1802ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1803c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1804ae8064acSJohn Heffner 			goto send_now;
1805c1b4a7e6SDavid S. Miller 	} else {
1806c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1807c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1808c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1809c1b4a7e6SDavid S. Miller 		 * then send now.
1810c1b4a7e6SDavid S. Miller 		 */
18116b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1812ae8064acSJohn Heffner 			goto send_now;
1813c1b4a7e6SDavid S. Miller 	}
1814c1b4a7e6SDavid S. Miller 
1815c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1816ae8064acSJohn Heffner 	tp->tso_deferred = 1 | (jiffies << 1);
1817ae8064acSJohn Heffner 
1818a2a385d6SEric Dumazet 	return true;
1819ae8064acSJohn Heffner 
1820ae8064acSJohn Heffner send_now:
1821ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1822a2a385d6SEric Dumazet 	return false;
1823c1b4a7e6SDavid S. Miller }
1824c1b4a7e6SDavid S. Miller 
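/* Worked example (illustration, not part of this file): with the
 * default tcp_tso_win_divisor of 3, snd_wnd = 64 KB and
 * snd_cwnd * mss_cache = 80 KB, chunk = 65536 / 3 = 21845 bytes; once
 * at least that much is sendable the skb goes out immediately, so at
 * most about a third of a window is ever held back by deferral.
 */
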
18255d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
182667edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
182767edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
182867edfef7SAndi Kleen  * changes resulting in larger path MTUs.
182967edfef7SAndi Kleen  *
18305d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
18315d424d5aSJohn Heffner  *         1 if a probe was sent,
1832056834d9SIlpo Järvinen  *         -1 otherwise
1833056834d9SIlpo Järvinen  */
18345d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
18355d424d5aSJohn Heffner {
18365d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
18375d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
18385d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
18395d424d5aSJohn Heffner 	int len;
18405d424d5aSJohn Heffner 	int probe_size;
184191cc17c0SIlpo Järvinen 	int size_needed;
18425d424d5aSJohn Heffner 	int copy;
18435d424d5aSJohn Heffner 	int mss_now;
18445d424d5aSJohn Heffner 
18455d424d5aSJohn Heffner 	/* Not currently probing/verifying,
18465d424d5aSJohn Heffner 	 * not in recovery,
18475d424d5aSJohn Heffner 	 * have enough cwnd, and
18485d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
18495d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
18505d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
18515d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
18525d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1853cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
18545d424d5aSJohn Heffner 		return -1;
18555d424d5aSJohn Heffner 
18565d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
18570c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
18585d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
185991cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
18605d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
18615d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
18625d424d5aSJohn Heffner 		return -1;
18635d424d5aSJohn Heffner 	}
18645d424d5aSJohn Heffner 
18655d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
18667f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
18675d424d5aSJohn Heffner 		return -1;
18685d424d5aSJohn Heffner 
186991cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
18705d424d5aSJohn Heffner 		return -1;
187190840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
18725d424d5aSJohn Heffner 		return 0;
18735d424d5aSJohn Heffner 
1874d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1875d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1876d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
18775d424d5aSJohn Heffner 			return -1;
18785d424d5aSJohn Heffner 		else
18795d424d5aSJohn Heffner 			return 0;
18805d424d5aSJohn Heffner 	}
18815d424d5aSJohn Heffner 
18825d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
18835d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
18845d424d5aSJohn Heffner 		return -1;
18853ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
18863ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
18875d424d5aSJohn Heffner 
1888fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
18895d424d5aSJohn Heffner 
18905d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
18915d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
18924de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
18935d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
18945d424d5aSJohn Heffner 	nskb->csum = 0;
189584fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
18965d424d5aSJohn Heffner 
189750c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
189850c4817eSIlpo Järvinen 
18995d424d5aSJohn Heffner 	len = 0;
1900234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
19015d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
19025d424d5aSJohn Heffner 		if (nskb->ip_summed)
19035d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
19045d424d5aSJohn Heffner 		else
19055d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1906056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1907056834d9SIlpo Järvinen 							    copy, nskb->csum);
19085d424d5aSJohn Heffner 
19095d424d5aSJohn Heffner 		if (skb->len <= copy) {
19105d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
19115d424d5aSJohn Heffner 			 * Throw it away. */
19124de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1913fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
19143ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
19155d424d5aSJohn Heffner 		} else {
19164de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1917a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
19185d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
19195d424d5aSJohn Heffner 				skb_pull(skb, copy);
192084fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1921056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1922056834d9SIlpo Järvinen 								 skb->len, 0);
19235d424d5aSJohn Heffner 			} else {
19245d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
19255d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
19265d424d5aSJohn Heffner 			}
19275d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
19285d424d5aSJohn Heffner 		}
19295d424d5aSJohn Heffner 
19305d424d5aSJohn Heffner 		len += copy;
1931234b6860SIlpo Järvinen 
1932234b6860SIlpo Järvinen 		if (len >= probe_size)
1933234b6860SIlpo Järvinen 			break;
19345d424d5aSJohn Heffner 	}
19355d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
19365d424d5aSJohn Heffner 
19375d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
19385d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
19395d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
19405d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
19415d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
19425d424d5aSJohn Heffner 		 * effectively two packets. */
19435d424d5aSJohn Heffner 		tp->snd_cwnd--;
194466f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
19455d424d5aSJohn Heffner 
19465d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
19470e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
19480e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
19495d424d5aSJohn Heffner 
19505d424d5aSJohn Heffner 		return 1;
19515d424d5aSJohn Heffner 	}
19525d424d5aSJohn Heffner 
19535d424d5aSJohn Heffner 	return -1;
19545d424d5aSJohn Heffner }
19555d424d5aSJohn Heffner 
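/* Worked example (illustration, not part of this file): with
 * mss_cache = 1448 and the default reordering of 3, the probe built
 * above is probe_size = 2 * 1448 = 2896 bytes and
 * size_needed = 2896 + 4 * 1448 = 8688 bytes, so probing waits until
 * that much unsent data and send window are available.
 */
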
19561da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
19571da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
19581da177e4SLinus Torvalds  * window for us.
19591da177e4SLinus Torvalds  *
1960f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1961f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1962f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1963f8269a49SIlpo Järvinen  *
1964*6ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
1965*6ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
1966*6ba8a3b1SNandita Dukkipati  *
1967a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
1968a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
19691da177e4SLinus Torvalds  */
1970a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1971d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
19721da177e4SLinus Torvalds {
19731da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
197492df7b51SDavid S. Miller 	struct sk_buff *skb;
1975c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1976c1b4a7e6SDavid S. Miller 	int cwnd_quota;
19775d424d5aSJohn Heffner 	int result;
19781da177e4SLinus Torvalds 
1979c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
19805d424d5aSJohn Heffner 
1981d5dd9175SIlpo Järvinen 	if (!push_one) {
19825d424d5aSJohn Heffner 		/* Do MTU probing. */
1983d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1984d5dd9175SIlpo Järvinen 		if (!result) {
1985a2a385d6SEric Dumazet 			return false;
19865d424d5aSJohn Heffner 		} else if (result > 0) {
19875d424d5aSJohn Heffner 			sent_pkts = 1;
19885d424d5aSJohn Heffner 		}
1989d5dd9175SIlpo Järvinen 	}
19905d424d5aSJohn Heffner 
1991fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1992c8ac3774SHerbert Xu 		unsigned int limit;
1993c8ac3774SHerbert Xu 
1995b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1996c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1997c1b4a7e6SDavid S. Miller 
1998ec342325SAndrew Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1999ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
2000ec342325SAndrew Vagin 
2001b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
2002*6ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
2003*6ba8a3b1SNandita Dukkipati 			if (push_one == 2)
2004*6ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
2005*6ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
2006*6ba8a3b1SNandita Dukkipati 			else
2007b68e9f85SHerbert Xu 				break;
2008*6ba8a3b1SNandita Dukkipati 		}
2009b68e9f85SHerbert Xu 
2010b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
2011b68e9f85SHerbert Xu 			break;
2012b68e9f85SHerbert Xu 
2013c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
2014aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2015aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2016aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2017aa93466bSDavid S. Miller 				break;
2018c1b4a7e6SDavid S. Miller 		} else {
2019d5dd9175SIlpo Järvinen 			if (!push_one && tcp_tso_should_defer(sk, skb))
2020aa93466bSDavid S. Miller 				break;
2021c1b4a7e6SDavid S. Miller 		}
2022aa93466bSDavid S. Miller 
202346d3ceabSEric Dumazet 		/* TSQ: sk_wmem_alloc accounts skb truesize,
202446d3ceabSEric Dumazet 		 * including skb overhead. But that's OK.
202546d3ceabSEric Dumazet 		 */
202646d3ceabSEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
202746d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
202846d3ceabSEric Dumazet 			break;
202946d3ceabSEric Dumazet 		}
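		/* Illustrative numbers only: with a 128 kB limit and
		 * roughly 2 kB of truesize per full-sized skb, a few
		 * dozen packets may sit in the qdisc/NIC queues before
		 * the flow is throttled; the TSQ tasklet resumes
		 * transmission as tx completions release sk_wmem_alloc.
		 */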
2030c8ac3774SHerbert Xu 		limit = mss_now;
2031f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
20320e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
20331485348dSBen Hutchings 						    min_t(unsigned int,
20341485348dSBen Hutchings 							  cwnd_quota,
20351485348dSBen Hutchings 							  sk->sk_gso_max_segs));
2036c8ac3774SHerbert Xu 
2037c8ac3774SHerbert Xu 		if (skb->len > limit &&
2038c4ead4c5SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
20391da177e4SLinus Torvalds 			break;
20401da177e4SLinus Torvalds 
20411da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
2042c1b4a7e6SDavid S. Miller 
2043d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
20441da177e4SLinus Torvalds 			break;
20451da177e4SLinus Torvalds 
2046ec342325SAndrew Vagin repair:
20471da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
20481da177e4SLinus Torvalds 		 * This call will increment packets_out.
20491da177e4SLinus Torvalds 		 */
205066f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2053a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2054d5dd9175SIlpo Järvinen 
2055d5dd9175SIlpo Järvinen 		if (push_one)
2056d5dd9175SIlpo Järvinen 			break;
20571da177e4SLinus Torvalds 	}
20581da177e4SLinus Torvalds 
2059aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2060684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2061684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
2062*6ba8a3b1SNandita Dukkipati 
2063*6ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
2064*6ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2065*6ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
20669e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
2067a2a385d6SEric Dumazet 		return false;
20681da177e4SLinus Torvalds 	}
2069*6ba8a3b1SNandita Dukkipati 	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
2070*6ba8a3b1SNandita Dukkipati }
2071*6ba8a3b1SNandita Dukkipati 
2072*6ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
2073*6ba8a3b1SNandita Dukkipati {
2074*6ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
2075*6ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2076*6ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
2077*6ba8a3b1SNandita Dukkipati 	u32 rtt = tp->srtt >> 3;
2078*6ba8a3b1SNandita Dukkipati 
2079*6ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
2080*6ba8a3b1SNandita Dukkipati 		return false;
2081*6ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
2082*6ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
2083*6ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
2084*6ba8a3b1SNandita Dukkipati 		return false;
2085*6ba8a3b1SNandita Dukkipati 	}
2086*6ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
2087*6ba8a3b1SNandita Dukkipati 	 * finishes.
2088*6ba8a3b1SNandita Dukkipati 	 */
2089*6ba8a3b1SNandita Dukkipati 	if (sk->sk_state == TCP_SYN_RECV)
2090*6ba8a3b1SNandita Dukkipati 		return false;
2091*6ba8a3b1SNandita Dukkipati 
2092*6ba8a3b1SNandita Dukkipati 	/* TLP is scheduled only when the next timer event is the RTO. */
2093*6ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
2094*6ba8a3b1SNandita Dukkipati 		return false;
2095*6ba8a3b1SNandita Dukkipati 
2096*6ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK-capable connections in
2097*6ba8a3b1SNandita Dukkipati 	 * Open state that are either cwnd-limited or application-limited.
2098*6ba8a3b1SNandita Dukkipati 	 */
2099*6ba8a3b1SNandita Dukkipati 	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
2100*6ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
2101*6ba8a3b1SNandita Dukkipati 		return false;
2102*6ba8a3b1SNandita Dukkipati 
2103*6ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
2104*6ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
2105*6ba8a3b1SNandita Dukkipati 		return false;
2106*6ba8a3b1SNandita Dukkipati 
2107*6ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
2108*6ba8a3b1SNandita Dukkipati 	 * for delayed ack when there's one outstanding packet.
2109*6ba8a3b1SNandita Dukkipati 	 */
2110*6ba8a3b1SNandita Dukkipati 	timeout = rtt << 1;
2111*6ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
2112*6ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
2113*6ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
2114*6ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
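	/* Worked example with illustrative numbers: srtt of 40 ms gives
	 * rtt = 40 ms and a base timeout of 80 ms.  With exactly one
	 * packet outstanding this becomes max(80 ms, 60 ms +
	 * TCP_DELACK_MAX), i.e. 260 ms with the usual 200 ms delack
	 * maximum; the 10 ms floor matters only on very low-RTT paths.
	 */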
2115*6ba8a3b1SNandita Dukkipati 
2116*6ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
2117*6ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
2118*6ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
2119*6ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
2120*6ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
2121*6ba8a3b1SNandita Dukkipati 		if (delta > 0)
2122*6ba8a3b1SNandita Dukkipati 			timeout = delta;
2123*6ba8a3b1SNandita Dukkipati 	}
2124*6ba8a3b1SNandita Dukkipati 
2125*6ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
2126*6ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
2127*6ba8a3b1SNandita Dukkipati 	return true;
2128*6ba8a3b1SNandita Dukkipati }
2129*6ba8a3b1SNandita Dukkipati 
2130*6ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else
2131*6ba8a3b1SNandita Dukkipati  * retransmit the last segment.
2132*6ba8a3b1SNandita Dukkipati  */
2133*6ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
2134*6ba8a3b1SNandita Dukkipati {
2135*6ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
2136*6ba8a3b1SNandita Dukkipati 	int pcount;
2137*6ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
2138*6ba8a3b1SNandita Dukkipati 	int err = -1;
2139*6ba8a3b1SNandita Dukkipati 
2140*6ba8a3b1SNandita Dukkipati 	if (tcp_send_head(sk) != NULL) {
2141*6ba8a3b1SNandita Dukkipati 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2142*6ba8a3b1SNandita Dukkipati 		goto rearm_timer;
2143*6ba8a3b1SNandita Dukkipati 	}
2144*6ba8a3b1SNandita Dukkipati 
2145*6ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
2146*6ba8a3b1SNandita Dukkipati 	skb = tcp_write_queue_tail(sk);
2147*6ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
2148*6ba8a3b1SNandita Dukkipati 		goto rearm_timer;
2149*6ba8a3b1SNandita Dukkipati 
2150*6ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
2151*6ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
2152*6ba8a3b1SNandita Dukkipati 		goto rearm_timer;
2153*6ba8a3b1SNandita Dukkipati 
2154*6ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2155*6ba8a3b1SNandita Dukkipati 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
2156*6ba8a3b1SNandita Dukkipati 			goto rearm_timer;
2157*6ba8a3b1SNandita Dukkipati 		skb = tcp_write_queue_tail(sk);
2158*6ba8a3b1SNandita Dukkipati 	}
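	/* Example with illustrative numbers: pcount = 3, mss = 1460 and
	 * skb->len = 4000 splits the skb at 2920 bytes, so the probe
	 * retransmits only the 1080-byte tail.
	 */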
2159*6ba8a3b1SNandita Dukkipati 
2160*6ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2161*6ba8a3b1SNandita Dukkipati 		goto rearm_timer;
2162*6ba8a3b1SNandita Dukkipati 
2163*6ba8a3b1SNandita Dukkipati 	/* Probe with zero data doesn't trigger fast recovery. */
2164*6ba8a3b1SNandita Dukkipati 	if (skb->len > 0)
2165*6ba8a3b1SNandita Dukkipati 		err = __tcp_retransmit_skb(sk, skb);
2166*6ba8a3b1SNandita Dukkipati 
2167*6ba8a3b1SNandita Dukkipati rearm_timer:
2168*6ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2169*6ba8a3b1SNandita Dukkipati 				  inet_csk(sk)->icsk_rto,
2170*6ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
2171*6ba8a3b1SNandita Dukkipati 
2172*6ba8a3b1SNandita Dukkipati 	if (likely(!err))
2173*6ba8a3b1SNandita Dukkipati 		NET_INC_STATS_BH(sock_net(sk),
2174*6ba8a3b1SNandita Dukkipati 				 LINUX_MIB_TCPLOSSPROBES);
2175*6ba8a3b1SNandita Dukkipati 	return;
21761da177e4SLinus Torvalds }
21771da177e4SLinus Torvalds 
2178a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2179a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
2180a762a980SDavid S. Miller  * The socket must be locked by the caller.
2181a762a980SDavid S. Miller  */
21829e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
21839e412ba7SIlpo Järvinen 			       int nonagle)
2184a762a980SDavid S. Miller {
2185726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2186726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2187726e07a8SIlpo Järvinen 	 * all will be happy.
2188726e07a8SIlpo Järvinen 	 */
2189726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2190726e07a8SIlpo Järvinen 		return;
2191726e07a8SIlpo Järvinen 
219299a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
219399a1dec7SMel Gorman 			   sk_gfp_atomic(sk, GFP_ATOMIC)))
21949e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2195a762a980SDavid S. Miller }
2196a762a980SDavid S. Miller 
2197c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head. Callers that need
2198c1b4a7e6SDavid S. Miller  * the probe timer etc. set up must use a true push of pending frames.
2199c1b4a7e6SDavid S. Miller  */
2200c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2201c1b4a7e6SDavid S. Miller {
2202fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2203c1b4a7e6SDavid S. Miller 
2204c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2205c1b4a7e6SDavid S. Miller 
2206d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2207c1b4a7e6SDavid S. Miller }
2208c1b4a7e6SDavid S. Miller 
22091da177e4SLinus Torvalds /* This function returns the amount that we can raise the
22101da177e4SLinus Torvalds  * usable window based on the following constraints
22111da177e4SLinus Torvalds  *
22121da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
22131da177e4SLinus Torvalds  * 2. We limit memory per socket
22141da177e4SLinus Torvalds  *
22151da177e4SLinus Torvalds  * RFC 1122:
22161da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
22171da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
22181da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
22191da177e4SLinus Torvalds  *
22201da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
22211da177e4SLinus Torvalds  * it at least MSS bytes.
22221da177e4SLinus Torvalds  *
22231da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
22241da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
22251da177e4SLinus Torvalds  *
22261da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
22271da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
22281da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
22291da177e4SLinus Torvalds  * window to always advance by a single byte.
22301da177e4SLinus Torvalds  *
22311da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
22321da177e4SLinus Torvalds  * then this will not be a problem.
22331da177e4SLinus Torvalds  *
22341da177e4SLinus Torvalds  * BSD seems to make the following compromise:
22351da177e4SLinus Torvalds  *
22361da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
22371da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
22381da177e4SLinus Torvalds  *	then set the window to 0.
22391da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
22401da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
22411da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
22421da177e4SLinus Torvalds  *
22431da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
22441da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
22451da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
22461da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
22471da177e4SLinus Torvalds  * because the pipeline is full.
22481da177e4SLinus Torvalds  *
22491da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
22501da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
22511da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
22521da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
22531da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
22541da177e4SLinus Torvalds  *
22551da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
22561da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
22571da177e4SLinus Torvalds  *
22581da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
22591da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
22601da177e4SLinus Torvalds  */
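/* Worked example with illustrative numbers: mss = 1460 and
 * free_space = 10000 make the unscaled branch below offer
 * (10000 / 1460) * 1460 = 8760 bytes.  With rcv_wscale = 7, the scaled
 * branch instead rounds free_space up to the next multiple of
 * 1 << 7 = 128 so the shifted value does not silently truncate to a
 * smaller (possibly zero) window.
 */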
22611da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
22621da177e4SLinus Torvalds {
2263463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
22641da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2265caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
22661da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
22671da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
22681da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
22691da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
22701da177e4SLinus Torvalds 	 */
2271463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
22721da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
22731da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
22741da177e4SLinus Torvalds 	int window;
22751da177e4SLinus Torvalds 
22761da177e4SLinus Torvalds 	if (mss > full_space)
22771da177e4SLinus Torvalds 		mss = full_space;
22781da177e4SLinus Torvalds 
2279b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2280463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
22811da177e4SLinus Torvalds 
2282180d8cd9SGlauber Costa 		if (sk_under_memory_pressure(sk))
2283056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2284056834d9SIlpo Järvinen 					       4U * tp->advmss);
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds 		if (free_space < mss)
22871da177e4SLinus Torvalds 			return 0;
22881da177e4SLinus Torvalds 	}
22891da177e4SLinus Torvalds 
22901da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
22911da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
22921da177e4SLinus Torvalds 
22931da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
22941da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
22951da177e4SLinus Torvalds 	 */
22961da177e4SLinus Torvalds 	window = tp->rcv_wnd;
22971da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
22981da177e4SLinus Torvalds 		window = free_space;
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
23011da177e4SLinus Torvalds 		 * Important case: prevent a zero window announcement if
23021da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
23031da177e4SLinus Torvalds 		 */
23041da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
23051da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
23061da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
23071da177e4SLinus Torvalds 	} else {
23081da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
23091da177e4SLinus Torvalds 		 * Window clamp already applied above.
23101da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
23111da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
23121da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
23131da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
23141da177e4SLinus Torvalds 		 * is too small.
23151da177e4SLinus Torvalds 		 */
23161da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
23171da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
231884565070SJohn Heffner 		else if (mss == full_space &&
2319b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
232084565070SJohn Heffner 			window = free_space;
23211da177e4SLinus Torvalds 	}
23221da177e4SLinus Torvalds 
23231da177e4SLinus Torvalds 	return window;
23241da177e4SLinus Torvalds }
23251da177e4SLinus Torvalds 
23264a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
23274a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
23281da177e4SLinus Torvalds {
23291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2330fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2331058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
23321da177e4SLinus Torvalds 
2333058dc334SIlpo Järvinen 	skb_size = skb->len;
2334058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
23351da177e4SLinus Torvalds 
2336058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
23371da177e4SLinus Torvalds 
23386859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2339a6963a6bSIlpo Järvinen 
2340fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
23411da177e4SLinus Torvalds 
2342058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
23431a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
23441da177e4SLinus Torvalds 
234552d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
234652d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
23471da177e4SLinus Torvalds 
234884fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
23491da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
23501da177e4SLinus Torvalds 
23511da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
23521da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
23531da177e4SLinus Torvalds 
2354e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
23554de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
23561da177e4SLinus Torvalds 
23571da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
23581da177e4SLinus Torvalds 	 * packet counting does not break.
23591da177e4SLinus Torvalds 	 */
23604828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2361b7689205SIlpo Järvinen 
2362b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2363ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2364ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2365ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2366b7689205SIlpo Järvinen 
2367797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2368797108d1SIlpo Järvinen 
23693ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
23701da177e4SLinus Torvalds }
23711da177e4SLinus Torvalds 
237267edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2373a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
23744a17fc3aSIlpo Järvinen {
23754a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2376a2a385d6SEric Dumazet 		return false;
23774a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
23784a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2379a2a385d6SEric Dumazet 		return false;
23804a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2381a2a385d6SEric Dumazet 		return false;
23824a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2383a2a385d6SEric Dumazet 		return false;
23844a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd data could be invented */
23854a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2386a2a385d6SEric Dumazet 		return false;
23874a17fc3aSIlpo Järvinen 
2388a2a385d6SEric Dumazet 	return true;
23894a17fc3aSIlpo Järvinen }
23904a17fc3aSIlpo Järvinen 
239167edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create fewer packets
239267edfef7SAndi Kleen  * on the wire. This is only done on retransmission.
239367edfef7SAndi Kleen  */
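/* Worked example with illustrative numbers: three queued 300-byte
 * retransmit candidates against a 1460-byte MSS can collapse into one
 * 900-byte skb (space permitting), so a single segment goes out
 * instead of three.
 */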
23944a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
23954a17fc3aSIlpo Järvinen 				     int space)
23964a17fc3aSIlpo Järvinen {
23974a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
23984a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2399a2a385d6SEric Dumazet 	bool first = true;
24004a17fc3aSIlpo Järvinen 
24014a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
24024a17fc3aSIlpo Järvinen 		return;
24034de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
24044a17fc3aSIlpo Järvinen 		return;
24054a17fc3aSIlpo Järvinen 
24064a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
24074a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
24084a17fc3aSIlpo Järvinen 			break;
24094a17fc3aSIlpo Järvinen 
24104a17fc3aSIlpo Järvinen 		space -= skb->len;
24114a17fc3aSIlpo Järvinen 
24124a17fc3aSIlpo Järvinen 		if (first) {
2413a2a385d6SEric Dumazet 			first = false;
24144a17fc3aSIlpo Järvinen 			continue;
24154a17fc3aSIlpo Järvinen 		}
24164a17fc3aSIlpo Järvinen 
24174a17fc3aSIlpo Järvinen 		if (space < 0)
24184a17fc3aSIlpo Järvinen 			break;
24194a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
24204a17fc3aSIlpo Järvinen 		 * the data in the second
24214a17fc3aSIlpo Järvinen 		 */
2422a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
24234a17fc3aSIlpo Järvinen 			break;
24244a17fc3aSIlpo Järvinen 
24254a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
24264a17fc3aSIlpo Järvinen 			break;
24274a17fc3aSIlpo Järvinen 
24284a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
24294a17fc3aSIlpo Järvinen 	}
24304a17fc3aSIlpo Järvinen }
24314a17fc3aSIlpo Järvinen 
24321da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
24331da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
24341da177e4SLinus Torvalds  * error occurred which prevented the send.
24351da177e4SLinus Torvalds  */
243693b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
24371da177e4SLinus Torvalds {
24381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24395d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
24407d227cd2SSridhar Samudrala 	unsigned int cur_mss;
24411da177e4SLinus Torvalds 
24425d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
24435d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
24445d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
24455d424d5aSJohn Heffner 	}
24465d424d5aSJohn Heffner 
24471da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2448caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
24491da177e4SLinus Torvalds 	 */
24501da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
24511da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
24521da177e4SLinus Torvalds 		return -EAGAIN;
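	/* Example with illustrative numbers: with 64 kB queued, the cap
	 * is min(80 kB, sk_sndbuf); the retransmit yields -EAGAIN only
	 * once clones and copies push sk_wmem_alloc past that bound.
	 */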
24531da177e4SLinus Torvalds 
24541da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
24551da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
24561da177e4SLinus Torvalds 			BUG();
24571da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
24581da177e4SLinus Torvalds 			return -ENOMEM;
24591da177e4SLinus Torvalds 	}
24601da177e4SLinus Torvalds 
24617d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
24627d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
24637d227cd2SSridhar Samudrala 
24640c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
24657d227cd2SSridhar Samudrala 
24661da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
24671da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
24681da177e4SLinus Torvalds 	 * case when the window is shrunk to zero, in which case our
24691da177e4SLinus Torvalds 	 * retransmit serves as a zero window probe.
24701da177e4SLinus Torvalds 	 */
24719d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
24729d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
24731da177e4SLinus Torvalds 		return -EAGAIN;
24741da177e4SLinus Torvalds 
24751da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
2476846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
24771da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
247802276f3cSIlpo Järvinen 	} else {
24799eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
24809eb9362eSIlpo Järvinen 
24819eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
248202276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
24839eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
24849eb9362eSIlpo Järvinen 		}
24851da177e4SLinus Torvalds 	}
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
24881da177e4SLinus Torvalds 
24891da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
24901da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
24911da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
24921da177e4SLinus Torvalds 	 */
24931da177e4SLinus Torvalds 	if (skb->len > 0 &&
24944de075e0SEric Dumazet 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
24951da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
24961da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
2497e870a8efSIlpo Järvinen 			/* Reuse, even though it does some unnecessary work */
2498e870a8efSIlpo Järvinen 			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
24994de075e0SEric Dumazet 					     TCP_SKB_CB(skb)->tcp_flags);
25001da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
25011da177e4SLinus Torvalds 		}
25021da177e4SLinus Torvalds 	}
25031da177e4SLinus Torvalds 
25041da177e4SLinus Torvalds 	/* Make a copy if the first transmission SKB clone we made
25051da177e4SLinus Torvalds 	 * is still in somebody's hands; else make a clone.
25061da177e4SLinus Torvalds 	 */
25071da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
25081da177e4SLinus Torvalds 
2509117632e6SEric Dumazet 	/* make sure skb->data is aligned on arches that require it */
2510117632e6SEric Dumazet 	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
2511117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2512117632e6SEric Dumazet 						   GFP_ATOMIC);
251393b174adSYuchung Cheng 		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2514117632e6SEric Dumazet 			      -ENOBUFS;
2515117632e6SEric Dumazet 	} else {
251693b174adSYuchung Cheng 		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2517117632e6SEric Dumazet 	}
251893b174adSYuchung Cheng }
251993b174adSYuchung Cheng 
252093b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
252193b174adSYuchung Cheng {
252293b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
252393b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds 	if (err == 0) {
25261da177e4SLinus Torvalds 		/* Update global TCP statistics. */
252781cc8a75SPavel Emelyanov 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
25281da177e4SLinus Torvalds 
25291da177e4SLinus Torvalds 		tp->total_retrans++;
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
25321da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2533e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
25341da177e4SLinus Torvalds 		}
25351da177e4SLinus Torvalds #endif
2536b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2537b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
25381da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
25391da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
25401da177e4SLinus Torvalds 
25411da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
25421da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
25431da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
25441da177e4SLinus Torvalds 
2545c24f691bSYuchung Cheng 		tp->undo_retrans += tcp_skb_pcount(skb);
25461da177e4SLinus Torvalds 
25471da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
25481da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
25491da177e4SLinus Torvalds 		 */
25501da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
25511da177e4SLinus Torvalds 	}
25521da177e4SLinus Torvalds 	return err;
25531da177e4SLinus Torvalds }
25541da177e4SLinus Torvalds 
255567edfef7SAndi Kleen /* Check whether forward retransmits are possible in the current
255667edfef7SAndi Kleen  * window/congestion state.
255767edfef7SAndi Kleen  */
2558a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2559b5afe7bcSIlpo Järvinen {
2560b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2561cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2562b5afe7bcSIlpo Järvinen 
2563b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2564b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2565a2a385d6SEric Dumazet 		return false;
2566b5afe7bcSIlpo Järvinen 
2567b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2568b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2569a2a385d6SEric Dumazet 		return false;
2570b5afe7bcSIlpo Järvinen 
2571b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward
2572b5afe7bcSIlpo Järvinen 	 * transmission and retransmission... Both ways have their merits...
2573b5afe7bcSIlpo Järvinen 	 *
2574b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2575b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2576b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2577b5afe7bcSIlpo Järvinen 	 */
2578b5afe7bcSIlpo Järvinen 
2579b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2580a2a385d6SEric Dumazet 		return false;
2581b5afe7bcSIlpo Järvinen 
2582a2a385d6SEric Dumazet 	return true;
2583b5afe7bcSIlpo Järvinen }
2584b5afe7bcSIlpo Järvinen 
25851da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
25861da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
25871da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
25881da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
25891da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
25901da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
25911da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
25921da177e4SLinus Torvalds  */
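/* Sketch of the walk below: LOST segments are retransmitted first;
 * un-SACKed, un-retransmitted segments are remembered in 'hole' so the
 * walk can back up to them; once past the lost range, forward
 * retransmission beyond retransmit_high is tried only when
 * tcp_can_forward_retransmit() allows it.
 */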
25931da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
25941da177e4SLinus Torvalds {
25956687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
25961da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25971da177e4SLinus Torvalds 	struct sk_buff *skb;
25980e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2599618d9f25SIlpo Järvinen 	u32 last_lost;
260061eb55f4SIlpo Järvinen 	int mib_idx;
26010e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
26026a438bbeSStephen Hemminger 
260345e77d31SIlpo Järvinen 	if (!tp->packets_out)
260445e77d31SIlpo Järvinen 		return;
260545e77d31SIlpo Järvinen 
260608ebd172SIlpo Järvinen 	if (!tp->lost_out)
260708ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
260808ebd172SIlpo Järvinen 
2609618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
26106a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2611618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2612618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2613618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2614618d9f25SIlpo Järvinen 	} else {
2615fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2616618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2617618d9f25SIlpo Järvinen 	}
26181da177e4SLinus Torvalds 
2619fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
26201da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
26211da177e4SLinus Torvalds 
2622fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2623fe067e8aSDavid S. Miller 			break;
26246a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
26250e1c54c2SIlpo Järvinen 		if (hole == NULL)
26266a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
26276a438bbeSStephen Hemminger 
26281da177e4SLinus Torvalds 		/* Assume this retransmit will generate
26291da177e4SLinus Torvalds 		 * only one packet for congestion window
26301da177e4SLinus Torvalds 		 * calculation purposes.  This works because
26311da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
26321da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
26331da177e4SLinus Torvalds 		 * packet counting works out.
26341da177e4SLinus Torvalds 		 */
26351da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
26361da177e4SLinus Torvalds 			return;
26370e1c54c2SIlpo Järvinen 
26380e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
26390e1c54c2SIlpo Järvinen begin_fwd:
26400e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2641006f582cSIlpo Järvinen 				break;
26420e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
26430e1c54c2SIlpo Järvinen 
26440e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2645618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
26460e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
26470e1c54c2SIlpo Järvinen 				break;
26480e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
26490e1c54c2SIlpo Järvinen 			if (hole != NULL) {
26500e1c54c2SIlpo Järvinen 				skb = hole;
26510e1c54c2SIlpo Järvinen 				hole = NULL;
26520e1c54c2SIlpo Järvinen 			}
26530e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
26540e1c54c2SIlpo Järvinen 			goto begin_fwd;
26550e1c54c2SIlpo Järvinen 
26560e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2657ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
26580e1c54c2SIlpo Järvinen 				hole = skb;
265961eb55f4SIlpo Järvinen 			continue;
26601da177e4SLinus Torvalds 
26610e1c54c2SIlpo Järvinen 		} else {
2662618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
26630e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
26640e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
26650e1c54c2SIlpo Järvinen 			else
26660e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
26670e1c54c2SIlpo Järvinen 		}
26680e1c54c2SIlpo Järvinen 
26690e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
267061eb55f4SIlpo Järvinen 			continue;
267140b215e5SPavel Emelyanov 
267209e9b813SEric Dumazet 		if (tcp_retransmit_skb(sk, skb)) {
267309e9b813SEric Dumazet 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
26741da177e4SLinus Torvalds 			return;
267509e9b813SEric Dumazet 		}
2676de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
26771da177e4SLinus Torvalds 
2678684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2679a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2680a262f0cdSNandita Dukkipati 
2681fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2682463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
26833f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
26843f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
26851da177e4SLinus Torvalds 	}
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds /* Send a FIN. The caller locks the socket for us. This cannot be
26891da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
26901da177e4SLinus Torvalds  */
26911da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
26921da177e4SLinus Torvalds {
26931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2694fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
26951da177e4SLinus Torvalds 	int mss_now;
26961da177e4SLinus Torvalds 
26971da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
26981da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
26991da177e4SLinus Torvalds 	 * and IP options.
27001da177e4SLinus Torvalds 	 */
27010c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
27021da177e4SLinus Torvalds 
2703fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
27044de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
27051da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
27061da177e4SLinus Torvalds 		tp->write_seq++;
27071da177e4SLinus Torvalds 	} else {
27081da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
27091da177e4SLinus Torvalds 		for (;;) {
2710aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2711aa133076SWu Fengguang 					       sk->sk_allocation);
27121da177e4SLinus Torvalds 			if (skb)
27131da177e4SLinus Torvalds 				break;
27141da177e4SLinus Torvalds 			yield();
27151da177e4SLinus Torvalds 		}
27161da177e4SLinus Torvalds 
27171da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
27181da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
27191da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2720e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2721a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
27221da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
27231da177e4SLinus Torvalds 	}
27249e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
27251da177e4SLinus Torvalds }
27261da177e4SLinus Torvalds 
27271da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
27281da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
27291da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
273065bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
27311da177e4SLinus Torvalds  */
2732dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
27331da177e4SLinus Torvalds {
27341da177e4SLinus Torvalds 	struct sk_buff *skb;
27351da177e4SLinus Torvalds 
27361da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
27371da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
27381da177e4SLinus Torvalds 	if (!skb) {
27394e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
27401da177e4SLinus Torvalds 		return;
27411da177e4SLinus Torvalds 	}
27421da177e4SLinus Torvalds 
27431da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
27441da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2745e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2746a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
27471da177e4SLinus Torvalds 	/* Send it off. */
27481da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2749dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
27504e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
275126af65cbSSridhar Samudrala 
275281cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
27531da177e4SLinus Torvalds }
27541da177e4SLinus Torvalds 
275567edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
275667edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
27571da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
27581da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
27591da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
27601da177e4SLinus Torvalds  */
27611da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
27621da177e4SLinus Torvalds {
27631da177e4SLinus Torvalds 	struct sk_buff *skb;
27641da177e4SLinus Torvalds 
2765fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
27664de075e0SEric Dumazet 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
276791df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
27681da177e4SLinus Torvalds 		return -EFAULT;
27691da177e4SLinus Torvalds 	}
27704de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
27711da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
27721da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
27731da177e4SLinus Torvalds 			if (nskb == NULL)
27741da177e4SLinus Torvalds 				return -ENOMEM;
2775fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
27761da177e4SLinus Torvalds 			skb_header_release(nskb);
2777fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
27783ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
27793ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
27803ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
27811da177e4SLinus Torvalds 			skb = nskb;
27821da177e4SLinus Torvalds 		}
27831da177e4SLinus Torvalds 
27844de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
27851da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
27861da177e4SLinus Torvalds 	}
27871da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2788dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
27891da177e4SLinus Torvalds }
27901da177e4SLinus Torvalds 
27914aea39c1SEric Dumazet /**
27924aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
27934aea39c1SEric Dumazet  * @sk: listener socket
27944aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
27954aea39c1SEric Dumazet  * @req: request_sock pointer
27964aea39c1SEric Dumazet  * @rvp: request_values pointer
27974aea39c1SEric Dumazet  *
27984aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
27994aea39c1SEric Dumazet  * @dst is consumed: caller should not use it again.
28004aea39c1SEric Dumazet  */
28011da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2802e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
28038336886fSJerry Chu 				struct request_values *rvp,
28048336886fSJerry Chu 				struct tcp_fastopen_cookie *foc)
28051da177e4SLinus Torvalds {
2806bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
28074957faadSWilliam Allen Simpson 	struct tcp_extend_values *xvp = tcp_xv(rvp);
28082e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
28091da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
281028b2774aSEric Dumazet 	const struct tcp_cookie_values *cvp = tp->cookie_values;
28111da177e4SLinus Torvalds 	struct tcphdr *th;
28121da177e4SLinus Torvalds 	struct sk_buff *skb;
2813cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2814bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2815f5fff5dcSTom Quetchenbach 	int mss;
281628b2774aSEric Dumazet 	int s_data_desired = 0;
28171da177e4SLinus Torvalds 
281828b2774aSEric Dumazet 	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
281928b2774aSEric Dumazet 		s_data_desired = cvp->s_data_desired;
282099a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
282199a1dec7SMel Gorman 			sk_gfp_atomic(sk, GFP_ATOMIC));
28224aea39c1SEric Dumazet 	if (unlikely(!skb)) {
28234aea39c1SEric Dumazet 		dst_release(dst);
28241da177e4SLinus Torvalds 		return NULL;
28254aea39c1SEric Dumazet 	}
28261da177e4SLinus Torvalds 	/* Reserve space for headers. */
28271da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
28281da177e4SLinus Torvalds 
28294aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
28301da177e4SLinus Torvalds 
28310dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
2832f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2833f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2834f5fff5dcSTom Quetchenbach 
283533ad798cSAdam Langley 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
283633ad798cSAdam Langley 		__u8 rcv_wscale;
283733ad798cSAdam Langley 		/* Set this up on the first call only */
283833ad798cSAdam Langley 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2839e88c64f0SHagen Paul Pfeifer 
2840e88c64f0SHagen Paul Pfeifer 		/* limit the window selection if the user enforces a smaller rx buffer */
2841e88c64f0SHagen Paul Pfeifer 		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2842e88c64f0SHagen Paul Pfeifer 		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2843e88c64f0SHagen Paul Pfeifer 			req->window_clamp = tcp_full_space(sk);
2844e88c64f0SHagen Paul Pfeifer 
284533ad798cSAdam Langley 		/* tcp_full_space because it is guaranteed to be the first packet */
284633ad798cSAdam Langley 		tcp_select_initial_window(tcp_full_space(sk),
2847f5fff5dcSTom Quetchenbach 			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
284833ad798cSAdam Langley 			&req->rcv_wnd,
284933ad798cSAdam Langley 			&req->window_clamp,
285033ad798cSAdam Langley 			ireq->wscale_ok,
285131d12926Slaurent chavey 			&rcv_wscale,
285231d12926Slaurent chavey 			dst_metric(dst, RTAX_INITRWND));
285333ad798cSAdam Langley 		ireq->rcv_wscale = rcv_wscale;
285433ad798cSAdam Langley 	}
2855cfb6eeb4SYOSHIFUJI Hideaki 
285633ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
28578b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
28588b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
28598b5f12d0SFlorian Westphal 		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
28608b5f12d0SFlorian Westphal 	else
28618b5f12d0SFlorian Westphal #endif
286233ad798cSAdam Langley 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2863f5fff5dcSTom Quetchenbach 	tcp_header_size = tcp_synack_options(sk, req, mss,
28648336886fSJerry Chu 					     skb, &opts, &md5, xvp, foc)
28654957faadSWilliam Allen Simpson 			+ sizeof(*th);
286633ad798cSAdam Langley 
2867aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2868aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
28691da177e4SLinus Torvalds 
2870aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
28711da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
28721da177e4SLinus Torvalds 	th->syn = 1;
28731da177e4SLinus Torvalds 	th->ack = 1;
28741da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
2875a3116ac5SKOVACS Krisztian 	th->source = ireq->loc_port;
28762e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
2877e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
2878e870a8efSIlpo Järvinen 	 * not even correctly set)
2879e870a8efSIlpo Järvinen 	 */
2880e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2881a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
28824957faadSWilliam Allen Simpson 
28834957faadSWilliam Allen Simpson 	if (OPTION_COOKIE_EXTENSION & opts.options) {
288428b2774aSEric Dumazet 		if (s_data_desired) {
288528b2774aSEric Dumazet 			u8 *buf = skb_put(skb, s_data_desired);
28864957faadSWilliam Allen Simpson 
28874957faadSWilliam Allen Simpson 			/* copy data directly from the listening socket. */
288828b2774aSEric Dumazet 			memcpy(buf, cvp->s_data_payload, s_data_desired);
288928b2774aSEric Dumazet 			TCP_SKB_CB(skb)->end_seq += s_data_desired;
28904957faadSWilliam Allen Simpson 		}
28914957faadSWilliam Allen Simpson 
28924957faadSWilliam Allen Simpson 		if (opts.hash_size > 0) {
28934957faadSWilliam Allen Simpson 			__u32 workspace[SHA_WORKSPACE_WORDS];
28944957faadSWilliam Allen Simpson 			u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
28954957faadSWilliam Allen Simpson 			u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
28964957faadSWilliam Allen Simpson 
28974957faadSWilliam Allen Simpson 			/* Secret recipe depends on the Timestamp, (future)
28984957faadSWilliam Allen Simpson 			 * Sequence and Acknowledgment Numbers, Initiator
28994957faadSWilliam Allen Simpson 			 * Cookie, and others handled by IP variant caller.
29004957faadSWilliam Allen Simpson 			 */
29014957faadSWilliam Allen Simpson 			*tail-- ^= opts.tsval;
29024957faadSWilliam Allen Simpson 			*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
29034957faadSWilliam Allen Simpson 			*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
29044957faadSWilliam Allen Simpson 
29054957faadSWilliam Allen Simpson 			/* recommended */
29060eae88f3SEric Dumazet 			*tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2907f9a2e69eSDavid S. Miller 			*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
29084957faadSWilliam Allen Simpson 
29094957faadSWilliam Allen Simpson 			sha_transform((__u32 *)&xvp->cookie_bakery[0],
29104957faadSWilliam Allen Simpson 				      (char *)mess,
29114957faadSWilliam Allen Simpson 				      &workspace[0]);
29124957faadSWilliam Allen Simpson 			opts.hash_location =
29134957faadSWilliam Allen Simpson 				(__u8 *)&xvp->cookie_bakery[0];
29144957faadSWilliam Allen Simpson 		}
29154957faadSWilliam Allen Simpson 	}
29164957faadSWilliam Allen Simpson 
29171da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
29188336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
29198336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
29201da177e4SLinus Torvalds 
29211da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2922600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2923bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
29241da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
2925aa2ea058STom Herbert 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2926cfb6eeb4SYOSHIFUJI Hideaki 
2927cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2928cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2929cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2930bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
293149a72dfbSAdam Langley 					       md5, NULL, req, skb);
2932cfb6eeb4SYOSHIFUJI Hideaki 	}
2933cfb6eeb4SYOSHIFUJI Hideaki #endif
2934cfb6eeb4SYOSHIFUJI Hideaki 
29351da177e4SLinus Torvalds 	return skb;
29361da177e4SLinus Torvalds }
29374bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
29381da177e4SLinus Torvalds 
293967edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
2940370816aeSPavel Emelyanov void tcp_connect_init(struct sock *sk)
29411da177e4SLinus Torvalds {
2942cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
29431da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
29441da177e4SLinus Torvalds 	__u8 rcv_wscale;
29451da177e4SLinus Torvalds 
29461da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
29471da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
29481da177e4SLinus Torvalds 	 */
29491da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2950bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
29511da177e4SLinus Torvalds 
2952cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2953cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2954cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2955cfb6eeb4SYOSHIFUJI Hideaki #endif
2956cfb6eeb4SYOSHIFUJI Hideaki 
29571da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it as the clamp */
29581da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
29591da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
29601da177e4SLinus Torvalds 	tp->max_window = 0;
29615d424d5aSJohn Heffner 	tcp_mtup_init(sk);
29621da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
29631da177e4SLinus Torvalds 
29641da177e4SLinus Torvalds 	if (!tp->window_clamp)
29651da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
29660dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
2967f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2968f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2969f5fff5dcSTom Quetchenbach 
29701da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
29711da177e4SLinus Torvalds 
2972e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
2973e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2974e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2975e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
2976e88c64f0SHagen Paul Pfeifer 
29771da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
29781da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
29791da177e4SLinus Torvalds 				  &tp->rcv_wnd,
29801da177e4SLinus Torvalds 				  &tp->window_clamp,
2981bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
298231d12926Slaurent chavey 				  &rcv_wscale,
298331d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
29841da177e4SLinus Torvalds 
29851da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
29861da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
29871da177e4SLinus Torvalds 
29881da177e4SLinus Torvalds 	sk->sk_err = 0;
29891da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
29901da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2991ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
29921da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
29931da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
299433f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
2995370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
2996ee995283SPavel Emelyanov 
2997ee995283SPavel Emelyanov 	if (likely(!tp->repair))
29981da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
2999ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3000ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
30011da177e4SLinus Torvalds 
3002463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
3003463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
30041da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
30051da177e4SLinus Torvalds }
30061da177e4SLinus Torvalds 
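/* Illustrative user-space sketch (not part of this file; assumes ordinary
 * POSIX sockets on Linux): the two connect-time inputs consumed above map
 * to plain socket options set before connect().
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int mss = 1200;
 *	int rcvbuf = 65536;
 *
 *	// TCP_MAXSEG feeds tp->rx_opt.user_mss, clamping mss_clamp/advmss.
 *	setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
 *	// SO_RCVBUF sets SOCK_RCVBUF_LOCK, so tp->window_clamp is bounded
 *	// by tcp_full_space(sk) in tcp_connect_init().
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
 *	// connect(fd, ...) then reaches tcp_connect_init() via tcp_connect().
 */
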
3007783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3008783237e8SYuchung Cheng {
3009783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3010783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3011783237e8SYuchung Cheng 
3012783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3013783237e8SYuchung Cheng 	skb_header_release(skb);
3014783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
3015783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3016783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3017783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3018783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3019783237e8SYuchung Cheng }
3020783237e8SYuchung Cheng 
3021783237e8SYuchung Cheng /* Build and send a SYN with data and a (cached) Fast Open cookie.  A
3022783237e8SYuchung Cheng  * data-only copy is also queued after the regular SYN, so that plain SYNs
3023783237e8SYuchung Cheng  * are retransmitted on timeouts, and so that if the remote SYN-ACK
3024783237e8SYuchung Cheng  * acknowledges only the SYN sequence, the data is retransmitted in the
3025783237e8SYuchung Cheng  * first ACK.  If no cookie is cached, or another error occurs, fall back
3026783237e8SYuchung Cheng  * to sending a regular SYN with a Fast Open cookie request option.
3027783237e8SYuchung Cheng  */
3028783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3029783237e8SYuchung Cheng {
3030783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3031783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3032aab48743SYuchung Cheng 	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
3033783237e8SYuchung Cheng 	struct sk_buff *syn_data = NULL, *data;
3034aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
3035783237e8SYuchung Cheng 
303667da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3037aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
3038aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
3039aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
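	/* (Worked example, assuming HZ == 1000: after syn_loss == 2
	 * consecutive SYN-data losses, Fast Open is skipped until
	 * last_syn_loss + (60s << 2) == last_syn_loss + 240s.)
	 */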
3040aab48743SYuchung Cheng 	if (syn_loss > 1 &&
3041aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
3042aab48743SYuchung Cheng 		fo->cookie.len = -1;
3043aab48743SYuchung Cheng 		goto fallback;
3044aab48743SYuchung Cheng 	}
3045aab48743SYuchung Cheng 
304667da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
304767da22d2SYuchung Cheng 		fo->cookie.len = -1;
304867da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
3049783237e8SYuchung Cheng 		goto fallback;
3050783237e8SYuchung Cheng 
3051783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3052783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3053783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3054783237e8SYuchung Cheng 	 */
3055783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
3056783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
30571b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3058783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3059783237e8SYuchung Cheng 
3060783237e8SYuchung Cheng 	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
3061783237e8SYuchung Cheng 				   sk->sk_allocation);
3062783237e8SYuchung Cheng 	if (syn_data == NULL)
3063783237e8SYuchung Cheng 		goto fallback;
3064783237e8SYuchung Cheng 
3065783237e8SYuchung Cheng 	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
3066783237e8SYuchung Cheng 		struct iovec *iov = &fo->data->msg_iov[i];
3067783237e8SYuchung Cheng 		unsigned char __user *from = iov->iov_base;
3068783237e8SYuchung Cheng 		int len = iov->iov_len;
3069783237e8SYuchung Cheng 
3070783237e8SYuchung Cheng 		if (syn_data->len + len > space)
3071783237e8SYuchung Cheng 			len = space - syn_data->len;
3072783237e8SYuchung Cheng 		else if (i + 1 == iovlen)
3073783237e8SYuchung Cheng 			/* No more data pending in inet_wait_for_connect() */
3074783237e8SYuchung Cheng 			fo->data = NULL;
3075783237e8SYuchung Cheng 
3076783237e8SYuchung Cheng 		if (skb_add_data(syn_data, from, len))
3077783237e8SYuchung Cheng 			goto fallback;
3078783237e8SYuchung Cheng 	}
3079783237e8SYuchung Cheng 
3080783237e8SYuchung Cheng 	/* Queue a data-only packet after the regular SYN for retransmission */
3081783237e8SYuchung Cheng 	data = pskb_copy(syn_data, sk->sk_allocation);
3082783237e8SYuchung Cheng 	if (data == NULL)
3083783237e8SYuchung Cheng 		goto fallback;
3084783237e8SYuchung Cheng 	TCP_SKB_CB(data)->seq++;
3086783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); /* drop SYN */
3087783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, data);
3088783237e8SYuchung Cheng 	fo->copied = data->len;
3089783237e8SYuchung Cheng 
3090783237e8SYuchung Cheng 	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
309167da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
3092783237e8SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
3093783237e8SYuchung Cheng 		goto done;
3094783237e8SYuchung Cheng 	}
3095783237e8SYuchung Cheng 	syn_data = NULL;
3096783237e8SYuchung Cheng 
3097783237e8SYuchung Cheng fallback:
3098783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3099783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3100783237e8SYuchung Cheng 		fo->cookie.len = 0;
3101783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3102783237e8SYuchung Cheng 	if (err)
3103783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3104783237e8SYuchung Cheng 	kfree_skb(syn_data);
3105783237e8SYuchung Cheng done:
3106783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3107783237e8SYuchung Cheng 	return err;
3108783237e8SYuchung Cheng }
3109783237e8SYuchung Cheng 
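/* Minimal user-space sketch of the client path above (not part of this
 * file; requires a kernel with client-side Fast Open enabled, i.e.
 * sysctl net.ipv4.tcp_fastopen has TFO_CLIENT_ENABLE set).  sendto() with
 * MSG_FASTOPEN replaces the usual connect()+send() pair, and
 * tcp_send_syn_data() places as much of the buffer as fits in the SYN:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst;
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.sin_family = AF_INET;
 *	dst.sin_port = htons(80);
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	// Combined connect + data; falls back to a plain SYN with a
 *	// cookie request if no cookie for this peer is cached yet.
 *	sendto(fd, "GET / HTTP/1.0\r\n\r\n", 18, MSG_FASTOPEN,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */
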
311067edfef7SAndi Kleen /* Build a SYN and send it off. */
31111da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
31121da177e4SLinus Torvalds {
31131da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31141da177e4SLinus Torvalds 	struct sk_buff *buff;
3115ee586811SEric Paris 	int err;
31161da177e4SLinus Torvalds 
31171da177e4SLinus Torvalds 	tcp_connect_init(sk);
31181da177e4SLinus Torvalds 
31192b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
31202b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
31212b916477SAndrey Vagin 		return 0;
31222b916477SAndrey Vagin 	}
31232b916477SAndrey Vagin 
3124d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
31251da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
31261da177e4SLinus Torvalds 		return -ENOBUFS;
31271da177e4SLinus Torvalds 
31281da177e4SLinus Torvalds 	/* Reserve space for headers. */
31291da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
31301da177e4SLinus Torvalds 
3131a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3132783237e8SYuchung Cheng 	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
3133783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3134e870a8efSIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
31351da177e4SLinus Torvalds 
3136783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3137783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3138783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3139ee586811SEric Paris 	if (err == -ECONNREFUSED)
3140ee586811SEric Paris 		return err;
3141bd37a088SWei Yongjun 
3142bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3143bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3144bd37a088SWei Yongjun 	 */
3145bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3146bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
314781cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
31481da177e4SLinus Torvalds 
31491da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
31503f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
31513f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
31521da177e4SLinus Torvalds 	return 0;
31531da177e4SLinus Torvalds }
31544bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
31551da177e4SLinus Torvalds 
31561da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
31571da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
31581da177e4SLinus Torvalds  * for details.
31591da177e4SLinus Torvalds  */
31601da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
31611da177e4SLinus Torvalds {
3162463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3163463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
31641da177e4SLinus Torvalds 	unsigned long timeout;
31651da177e4SLinus Torvalds 
31661da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3167463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
31681da177e4SLinus Torvalds 		int max_ato = HZ / 2;
31691da177e4SLinus Torvalds 
3170056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3171056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
31721da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
31731da177e4SLinus Torvalds 
31741da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
31751da177e4SLinus Torvalds 
31761da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3177463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
31781da177e4SLinus Torvalds 		 * directly.
31791da177e4SLinus Torvalds 		 */
31801da177e4SLinus Torvalds 		if (tp->srtt) {
31811da177e4SLinus Torvalds 			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
31821da177e4SLinus Torvalds 
31831da177e4SLinus Torvalds 			if (rtt < max_ato)
31841da177e4SLinus Torvalds 				max_ato = rtt;
31851da177e4SLinus Torvalds 		}
31861da177e4SLinus Torvalds 
31871da177e4SLinus Torvalds 		ato = min(ato, max_ato);
31881da177e4SLinus Torvalds 	}
31891da177e4SLinus Torvalds 
31901da177e4SLinus Torvalds 	/* Stay within the limit we were given */
31911da177e4SLinus Torvalds 	timeout = jiffies + ato;
31921da177e4SLinus Torvalds 
31931da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already set. */
3194463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
31951da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
31961da177e4SLinus Torvalds 		 * send ACK now.
31971da177e4SLinus Torvalds 		 */
3198463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3199463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
32001da177e4SLinus Torvalds 			tcp_send_ack(sk);
32011da177e4SLinus Torvalds 			return;
32021da177e4SLinus Torvalds 		}
32031da177e4SLinus Torvalds 
3204463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3205463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
32061da177e4SLinus Torvalds 	}
3207463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3208463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3209463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
32101da177e4SLinus Torvalds }
32111da177e4SLinus Torvalds 
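/* The ato clamp above, restated as a standalone sketch (illustrative
 * only; all values in jiffies, as in the function itself):
 *
 *	static int delack_timeout(int ato, int pingpong_or_pushed, u32 srtt)
 *	{
 *		int max_ato = pingpong_or_pushed ? TCP_DELACK_MAX : HZ / 2;
 *
 *		if (srtt) {
 *			int rtt = max_t(int, srtt >> 3, TCP_DELACK_MIN);
 *
 *			if (rtt < max_ato)
 *				max_ato = rtt;
 *		}
 *		return min(ato, max_ato);
 *	}
 *
 * Only reached when ato > TCP_DELACK_MIN; the result is further bounded
 * by an already-pending delack timeout, if one exists.
 */
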
32121da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
32131da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
32141da177e4SLinus Torvalds {
32151da177e4SLinus Torvalds 	struct sk_buff *buff;
32161da177e4SLinus Torvalds 
3217058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3218058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3219058dc334SIlpo Järvinen 		return;
3220058dc334SIlpo Järvinen 
32211da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
32221da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
32231da177e4SLinus Torvalds 	 * sock.
32241da177e4SLinus Torvalds 	 */
322599a1dec7SMel Gorman 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
32261da177e4SLinus Torvalds 	if (buff == NULL) {
3227463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3228463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
32293f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
32303f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
32311da177e4SLinus Torvalds 		return;
32321da177e4SLinus Torvalds 	}
32331da177e4SLinus Torvalds 
32341da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
32351da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3236a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
32371da177e4SLinus Torvalds 
32381da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
32391da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
324099a1dec7SMel Gorman 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
32411da177e4SLinus Torvalds }
32421da177e4SLinus Torvalds 
32431da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
32441da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
32451da177e4SLinus Torvalds  *
32461da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
32471da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
32481da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
32491da177e4SLinus Torvalds  *
32501da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
32511da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, the other
32521da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe the window.
32531da177e4SLinus Torvalds  */
32541da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
32551da177e4SLinus Torvalds {
32561da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32571da177e4SLinus Torvalds 	struct sk_buff *skb;
32581da177e4SLinus Torvalds 
32591da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
326099a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
32611da177e4SLinus Torvalds 	if (skb == NULL)
32621da177e4SLinus Torvalds 		return -1;
32631da177e4SLinus Torvalds 
32641da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
32651da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32661da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
32671da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
32681da177e4SLinus Torvalds 	 * send it.
32691da177e4SLinus Torvalds 	 */
3270a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
32711da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
3272dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
32731da177e4SLinus Torvalds }
32741da177e4SLinus Torvalds 
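/* Worked example of the two-probe scheme: with SND.UNA == 1000 and no
 * urgent data pending, tcp_xmit_probe_skb(sk, 0) emits an empty segment
 * with SEG.SEQ == 999.  That sequence is already acknowledged, so the
 * receiver must answer with a bare ACK carrying its current window.  In
 * urgent mode, a first probe with SEG.SEQ == 1000 (urgent != 0) delivers
 * the urgent pointer before the out-of-date probe is sent.
 */
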
3275ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3276ee995283SPavel Emelyanov {
3277ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3278ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3279c0e88ff0SPavel Emelyanov 		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
3280ee995283SPavel Emelyanov 		tcp_xmit_probe_skb(sk, 0);
3281ee995283SPavel Emelyanov 	}
3282ee995283SPavel Emelyanov }
3283ee995283SPavel Emelyanov 
328467edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
32851da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
32861da177e4SLinus Torvalds {
32871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32881da177e4SLinus Torvalds 	struct sk_buff *skb;
32891da177e4SLinus Torvalds 
3290058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3291058dc334SIlpo Järvinen 		return -1;
3292058dc334SIlpo Järvinen 
3293fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
329490840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
32951da177e4SLinus Torvalds 		int err;
32960c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
329790840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
32981da177e4SLinus Torvalds 
32991da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
33001da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
33011da177e4SLinus Torvalds 
33021da177e4SLinus Torvalds 		/* We are probing the opening of a window
33031da177e4SLinus Torvalds 		 * whose size is != 0; this must be the result
33041da177e4SLinus Torvalds 		 * of sender-side SWS avoidance.
33051da177e4SLinus Torvalds 		 */
33061da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
33071da177e4SLinus Torvalds 		    skb->len > mss) {
33081da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
33094de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3310846998aeSDavid S. Miller 			if (tcp_fragment(sk, skb, seg_size, mss))
33111da177e4SLinus Torvalds 				return -1;
33121da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
3313846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
33141da177e4SLinus Torvalds 
33154de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
33161da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
3317dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
331866f5fe62SIlpo Järvinen 		if (!err)
331966f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
33201da177e4SLinus Torvalds 		return err;
33211da177e4SLinus Torvalds 	} else {
332233f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
33234828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
33241da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
33251da177e4SLinus Torvalds 	}
33261da177e4SLinus Torvalds }
33271da177e4SLinus Torvalds 
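/* Example of the split above: with a 1000-byte skb at the send head,
 * mss == 1460 and only 300 bytes of receive window left, seg_size == 300
 * is smaller than the skb, so tcp_fragment() carves a 300-byte PSH
 * segment to probe the window and leaves the remaining 700 bytes queued
 * until the window opens.
 */
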
33281da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
33291da177e4SLinus Torvalds  * send a partial packet, else a zero-window probe.
33301da177e4SLinus Torvalds  */
33311da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
33321da177e4SLinus Torvalds {
3333463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
33341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33351da177e4SLinus Torvalds 	int err;
33361da177e4SLinus Torvalds 
33371da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
33381da177e4SLinus Torvalds 
3339fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
33401da177e4SLinus Torvalds 		/* Cancel the probe timer if it is not required. */
33416687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3342463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
33431da177e4SLinus Torvalds 		return;
33441da177e4SLinus Torvalds 	}
33451da177e4SLinus Torvalds 
33461da177e4SLinus Torvalds 	if (err <= 0) {
3347463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3348463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
33496687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3350463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
33513f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
33523f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
33531da177e4SLinus Torvalds 	} else {
33541da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
33556687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
33561da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
33571da177e4SLinus Torvalds 		 *
33581da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
33591da177e4SLinus Torvalds 		 */
33606687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
33616687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3362463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3363463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
33643f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
33653f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
33661da177e4SLinus Torvalds 	}
33671da177e4SLinus Torvalds }
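
/* Illustrative probe0 backoff schedule (a sketch, assuming icsk_rto
 * starts at 200ms): each unanswered probe rearms the timer at
 *
 *	min(icsk_rto << icsk_backoff, TCP_RTO_MAX)
 *
 * i.e. 400ms, 800ms, 1.6s, ... capped at TCP_RTO_MAX (120s), with
 * icsk_backoff never pushed past sysctl_tcp_retries2.
 */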
3368