/* xref: /linux/net/ipv4/tcp_output.c (revision 39bb5e62867de82b269b07df900165029b928359) */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if the window was not shrunk.
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND can already be
 * invalid as well. OK, let's settle for this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063, and draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
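
/* For example (illustrative values, not from the original source): if
 * tp->advmss starts at 1460 but the route caches an advmss metric of 536
 * (e.g. set via "ip route ... advmss 536"), the advertised MSS is lowered
 * to 536 and tp->advmss is updated to match.
 */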

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
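
/* Worked example (illustrative numbers): with snd_cwnd = 40,
 * restart_cwnd = 10 and an idle time of 3.5 * RTO, the loop halves cwnd
 * once per elapsed RTO: 40 -> 20 -> 10, then stops because cwnd has
 * reached restart_cwnd; snd_cwnd becomes max(10, 10) = 10.
 */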

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If this is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* The initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further, place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
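
/* Worked example (illustrative, assumes TCP_INIT_CWND == 10): the default
 * initial rwnd is 20 segments. For mss = 9000 (jumbo frames) it is scaled
 * down to max((1460 * 20) / 9000, 2U) = max(3, 2) = 3 segments, keeping
 * the initial window in bytes roughly constant.
 */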

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
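
/* Worked example (illustrative): with tcp_rmem[2] = 6 MB of receive space,
 * the scaling loop above shifts right until the value fits in 16 bits:
 * 6291456 >> 7 = 49152 <= 65535, so rcv_wscale = 7, and window
 * advertisements are later interpreted in units of 2^7 = 128 bytes.
 */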

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
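
/* Example of the no-shrink rule above (illustrative numbers): with
 * rcv_wscale = 7 and cur_win = 1000, a smaller new_win is replaced by
 * ALIGN(1000, 128) = 1024, i.e. the previously advertised edge rounded up
 * to a multiple of the scaling unit, so the receiver never appears to
 * retract window it already granted.
 */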

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
	    tcp_ca_needs_ecn(sk)) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk))
			INET_ECN_xmit(sk);
	}
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
		    struct sock *sk)
{
	if (inet_rsk(req)->ecn_ok) {
		th->ece = 1;
		if (tcp_ca_needs_ecn(sk))
			INET_ECN_xmit(sk);
	}
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Construct the common control bits for a non-data skb. If SYN or FIN is
 * present, the end seqno is auto-incremented.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);
	shinfo->gso_size = 0;
	shinfo->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
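
/* For example (illustrative): a bare ACK built with seq = 1000 gets
 * end_seq = 1000 (it occupies no sequence space), while a FIN built with
 * seq = 1000 gets end_seq = 1001, since SYN and FIN each consume one
 * sequence number.
 */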

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
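
/* Example of the resulting layout (illustrative): for a data packet
 * carrying timestamps plus one SACK block, the words written above are
 *   NOP NOP TIMESTAMP 10, tsval, tsecr,
 *   NOP NOP SACK 10, start_seq, end_seq
 * i.e. 12 bytes of timestamp option and 12 bytes of SACK option, both
 * padded to 32-bit boundaries as the ordering comment above requires.
 */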
52840efc6faSStephen Hemminger 
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes that will appear in
	 * normal data packets, should timestamps be used, must be included in
	 * the advertised MSS.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
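
/* Option space budget for a typical SYN (illustrative): starting from
 * MAX_TCP_OPTION_SPACE = 40 bytes, MSS takes 4, timestamps take 12 and
 * window scaling takes 4, while SACK_PERM rides in the timestamp word for
 * free. That leaves 20 bytes, enough for an experimental Fast Open option
 * (4 bytes of header plus a cookie of up to 16 bytes).
 */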

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       struct tcp_md5sig_key **md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb);
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;

		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
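
/* SACK budget example (illustrative): with timestamps in use, size starts
 * at 12, so remaining = 40 - 12 = 28 bytes. That allows at most
 * (28 - 4) / 8 = 3 SACK blocks, which is why a packet can never carry
 * more than three SACK ranges alongside timestamps.
 */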

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize has been subtracted from
 * sk->sk_wmem_alloc.
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}

/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non-NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part:
	 * We are called from release_sock() with:
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
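
/* The cmpxchg() loop above is a lock-free "claim all pending work" pattern
 * (illustrative walk-through): if tsq_flags held, say, TCP_TSQ_DEFERRED |
 * TCP_DELACK_TIMER_DEFERRED, one iteration atomically swaps those bits to
 * zero and this caller then runs both handlers; a setter racing in between
 * simply makes the cmpxchg fail, and the loop re-reads the flags.
 */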

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	int wmem;

	/* Keep one reference on sk_wmem_alloc.
	 * It will be released by sk_free() from here or from tcp_tasklet_func().
	 */
	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives:
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - a chance for an incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will eventually be set)
	 */
	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* queue this socket to the tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}
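
/* Note on the truesize - 1 trick above (sketch of the accounting): the
 * destructor gives back all of the skb's charge except a single unit, so
 * sk_wmem_alloc cannot reach zero while this function still runs; the
 * final sk_free() here (or in tcp_tasklet_func()) drops that last unit
 * and, if the socket was already closed, actually frees it.
 */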

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
887056834d9SIlpo Järvinen static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
888056834d9SIlpo Järvinen 			    gfp_t gfp_mask)
8891da177e4SLinus Torvalds {
8906687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
891dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
892dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
893dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
89433ad798cSAdam Langley 	struct tcp_out_options opts;
89595c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
896cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
8971da177e4SLinus Torvalds 	struct tcphdr *th;
8981da177e4SLinus Torvalds 	int err;
8991da177e4SLinus Torvalds 
900dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
901dfb4b9dcSDavid S. Miller 
902ccdbb6e9SEric Dumazet 	if (clone_it) {
903740b0f18SEric Dumazet 		skb_mstamp_get(&skb->skb_mstamp);
904dfb4b9dcSDavid S. Miller 
905dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
906dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
907dfb4b9dcSDavid S. Miller 		else
908dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
909dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
910dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
911dfb4b9dcSDavid S. Miller 	}
912dfb4b9dcSDavid S. Miller 
913dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
914dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
915dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
91633ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
9171da177e4SLinus Torvalds 
9184de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
91933ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
92033ad798cSAdam Langley 	else
92133ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
92233ad798cSAdam Langley 							   &md5);
92333ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
9241da177e4SLinus Torvalds 
925547669d4SEric Dumazet 	if (tcp_packets_in_flight(tp) == 0)
9266687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
927547669d4SEric Dumazet 
928547669d4SEric Dumazet 	/* if no packet is in qdisc/device queue, then allow XPS to select
929b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
930b2532eb9SEric Dumazet 	 * which holds one reference to sk_wmem_alloc.
931b2532eb9SEric Dumazet 	 *
932b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
933b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
934547669d4SEric Dumazet 	 */
935b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
9361da177e4SLinus Torvalds 
937aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
938aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
93946d3ceabSEric Dumazet 
94046d3ceabSEric Dumazet 	skb_orphan(skb);
94146d3ceabSEric Dumazet 	skb->sk = sk;
942c9eeec26SEric Dumazet 	skb->destructor = tcp_wfree;
943b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
94446d3ceabSEric Dumazet 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
947aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
948c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
949c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
9501da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
9511da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
952df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
9534de075e0SEric Dumazet 					tcb->tcp_flags);
954dfb4b9dcSDavid S. Miller 
9554de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
9561da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
9571da177e4SLinus Torvalds 		 * is never scaled.
9581da177e4SLinus Torvalds 		 */
959600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
9601da177e4SLinus Torvalds 	} else {
9611da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
9621da177e4SLinus Torvalds 	}
9631da177e4SLinus Torvalds 	th->check		= 0;
9641da177e4SLinus Torvalds 	th->urg_ptr		= 0;
9651da177e4SLinus Torvalds 
96633f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below snd_una win probe */
9677691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
9687691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
9691da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
9701da177e4SLinus Torvalds 			th->urg = 1;
9717691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
9720eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
9737691367dSHerbert Xu 			th->urg = 1;
9747691367dSHerbert Xu 		}
9751da177e4SLinus Torvalds 	}
9761da177e4SLinus Torvalds 
977bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
9784de075e0SEric Dumazet 	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
979735d3831SFlorian Westphal 		tcp_ecn_send(sk, skb, tcp_header_size);
980dfb4b9dcSDavid S. Miller 
981cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
982cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
983cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
984a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
985bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
98649a72dfbSAdam Langley 					       md5, sk, NULL, skb);
987cfb6eeb4SYOSHIFUJI Hideaki 	}
988cfb6eeb4SYOSHIFUJI Hideaki #endif
989cfb6eeb4SYOSHIFUJI Hideaki 
990bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
9911da177e4SLinus Torvalds 
9924de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
993fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
9941da177e4SLinus Torvalds 
9951da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
996cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
9971da177e4SLinus Torvalds 
998bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
999aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1000aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
10011da177e4SLinus Torvalds 
1002cd7d8498SEric Dumazet 	/* OK, its time to fill skb_shinfo(skb)->gso_segs */
1003cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1004cd7d8498SEric Dumazet 
10057faee5c0SEric Dumazet 	/* Our usage of tstamp should remain private */
10067faee5c0SEric Dumazet 	skb->tstamp.tv64 = 0;
1007971f10ecSEric Dumazet 
1008971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1009971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1010971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1011971f10ecSEric Dumazet 
1012b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
10137faee5c0SEric Dumazet 
101483de47cdSHua Zhong 	if (likely(err <= 0))
10151da177e4SLinus Torvalds 		return err;
10161da177e4SLinus Torvalds 
10175ee2c941SChristoph Paasch 	tcp_enter_cwr(sk);
10181da177e4SLinus Torvalds 
1019b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
10201da177e4SLinus Torvalds }
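/*
 * Editorial sketch, not part of the original file: the tail of
 * tcp_transmit_skb() sorts the queue_xmit() result into three cases:
 *
 *     err < 0   hard error from the IP layer, returned as-is
 *     err == 0  transmitted cleanly, returned as-is
 *     err > 0   local queueing trouble (NET_XMIT_DROP / NET_XMIT_CN):
 *               we enter CWR, then net_xmit_eval() folds NET_XMIT_CN
 *               to 0 so local congestion is not reported to callers
 *               as a send failure.
 */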
10211da177e4SLinus Torvalds 
102267edfef7SAndi Kleen /* This routine just queues the buffer for sending.
10231da177e4SLinus Torvalds  *
10241da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
10251da177e4SLinus Torvalds  * otherwise socket can stall.
10261da177e4SLinus Torvalds  */
10271da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
10281da177e4SLinus Torvalds {
10291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10301da177e4SLinus Torvalds 
10311da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
10321da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1033f4a775d1SEric Dumazet 	__skb_header_release(skb);
1034fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
10353ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
10363ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
10371da177e4SLinus Torvalds }
10381da177e4SLinus Torvalds 
103967edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
1040cf533ea5SEric Dumazet static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
1041056834d9SIlpo Järvinen 				 unsigned int mss_now)
1042f6302d1dSDavid S. Miller {
10437b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo = skb_shinfo(skb);
10447b7fc97aSEric Dumazet 
1045c52e2421SEric Dumazet 	/* Make sure we own this skb before messing gso_size/gso_segs */
1046c52e2421SEric Dumazet 	WARN_ON_ONCE(skb_cloned(skb));
1047c52e2421SEric Dumazet 
10488f26fb1cSEric Dumazet 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1049f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1050f6302d1dSDavid S. Miller 		 * non-TSO case.
1051f6302d1dSDavid S. Miller 		 */
1052cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
10537b7fc97aSEric Dumazet 		shinfo->gso_size = 0;
10547b7fc97aSEric Dumazet 		shinfo->gso_type = 0;
1055f6302d1dSDavid S. Miller 	} else {
1056cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
10577b7fc97aSEric Dumazet 		shinfo->gso_size = mss_now;
10587b7fc97aSEric Dumazet 		shinfo->gso_type = sk->sk_gso_type;
10591da177e4SLinus Torvalds 	}
10601da177e4SLinus Torvalds }
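/*
 * Editorial sketch, not part of the original file: the pcount set above is
 * a plain ceiling division; DIV_ROUND_UP(n, d) expands to ((n)+(d)-1)/(d).
 * The helper below is illustrative only (hypothetical name, never compiled).
 */
#if 0
static unsigned int example_tso_pcount(unsigned int len, unsigned int mss)
{
	if (len <= mss)
		return 1;			/* non-TSO: one segment */
	return (len + mss - 1) / mss;		/* ceiling division */
}
/* example_tso_pcount(4000, 1448) == 3: two full segments + a 1104-byte tail */
#endif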
10611da177e4SLinus Torvalds 
106391fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
106468f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
106591fed7a1SIlpo Järvinen  */
1065cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
106691fed7a1SIlpo Järvinen 				   int decr)
106791fed7a1SIlpo Järvinen {
1068a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1069a47e5a98SIlpo Järvinen 
1070dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
107191fed7a1SIlpo Järvinen 		return;
107291fed7a1SIlpo Järvinen 
10736859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
107491fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
107591fed7a1SIlpo Järvinen }
107691fed7a1SIlpo Järvinen 
1077797108d1SIlpo Järvinen /* The pcount of an skb in the middle of the write queue has changed; apply
1078797108d1SIlpo Järvinen  * the various tweaks needed to keep the counters consistent.
1079797108d1SIlpo Järvinen  */
1080cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1081797108d1SIlpo Järvinen {
1082797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1083797108d1SIlpo Järvinen 
1084797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1085797108d1SIlpo Järvinen 
1086797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1087797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1088797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1089797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1090797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1091797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1092797108d1SIlpo Järvinen 
1093797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1094797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1095797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1096797108d1SIlpo Järvinen 
1097797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1098797108d1SIlpo Järvinen 
1099797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1100797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
110152cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1102797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1103797108d1SIlpo Järvinen 
1104797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1105797108d1SIlpo Järvinen }
1106797108d1SIlpo Järvinen 
1107490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1108490cc7d0SWillem de Bruijn {
1109490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1110490cc7d0SWillem de Bruijn 
1111490cc7d0SWillem de Bruijn 	if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
1112490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1113490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1114490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1115490cc7d0SWillem de Bruijn 
1116490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1117490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1118490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1119490cc7d0SWillem de Bruijn 	}
1120490cc7d0SWillem de Bruijn }
1121490cc7d0SWillem de Bruijn 
11221da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
11231da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
11241da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
11251da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
11261da177e4SLinus Torvalds  */
1127056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
11286cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
11291da177e4SLinus Torvalds {
11301da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11311da177e4SLinus Torvalds 	struct sk_buff *buff;
11326475be16SDavid S. Miller 	int nsize, old_factor;
1133b60b49eaSHerbert Xu 	int nlen;
11349ce01461SIlpo Järvinen 	u8 flags;
11351da177e4SLinus Torvalds 
11362fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
11372fceec13SIlpo Järvinen 		return -EINVAL;
11386a438bbeSStephen Hemminger 
11391da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
11401da177e4SLinus Torvalds 	if (nsize < 0)
11411da177e4SLinus Torvalds 		nsize = 0;
11421da177e4SLinus Torvalds 
11436cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
11441da177e4SLinus Torvalds 		return -ENOMEM;
11451da177e4SLinus Torvalds 
11461da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
11476cc55e09SOctavian Purdila 	buff = sk_stream_alloc_skb(sk, nsize, gfp);
11481da177e4SLinus Torvalds 	if (buff == NULL)
11491da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1150ef5cb973SHerbert Xu 
11513ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
11523ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1153b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1154b60b49eaSHerbert Xu 	buff->truesize += nlen;
1155b60b49eaSHerbert Xu 	skb->truesize -= nlen;
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
11581da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
11591da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
11601da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
11611da177e4SLinus Torvalds 
11621da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
11634de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
11644de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
11654de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1166e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
11671da177e4SLinus Torvalds 
116884fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
11691da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1170056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1171056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
11721da177e4SLinus Torvalds 						       nsize, 0);
11731da177e4SLinus Torvalds 
11741da177e4SLinus Torvalds 		skb_trim(skb, len);
11751da177e4SLinus Torvalds 
11761da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
11771da177e4SLinus Torvalds 	} else {
117884fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
11791da177e4SLinus Torvalds 		skb_split(skb, buff, len);
11801da177e4SLinus Torvalds 	}
11811da177e4SLinus Torvalds 
11821da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
11831da177e4SLinus Torvalds 
1184a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1185490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
11861da177e4SLinus Torvalds 
11876475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
11886475be16SDavid S. Miller 
11891da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1190846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1191846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
11921da177e4SLinus Torvalds 
11936475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
11946475be16SDavid S. Miller 	 * adjust the various packet counters.
11956475be16SDavid S. Miller 	 */
1196cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
11976475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
11986475be16SDavid S. Miller 			tcp_skb_pcount(buff);
11991da177e4SLinus Torvalds 
1200797108d1SIlpo Järvinen 		if (diff)
1201797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
12021da177e4SLinus Torvalds 	}
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1205f4a775d1SEric Dumazet 	__skb_header_release(buff);
1206fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds 	return 0;
12091da177e4SLinus Torvalds }
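/*
 * Editorial sketch, not part of the original file: a worked example of the
 * sequence bookkeeping in tcp_fragment(). Suppose skb covers [1000, 4000)
 * and len = 1000. After the split:
 *
 *     TCP_SKB_CB(skb)->seq      = 1000          (unchanged)
 *     TCP_SKB_CB(buff)->seq     = 1000 + 1000   = 2000
 *     TCP_SKB_CB(buff)->end_seq = 4000          (inherited)
 *     TCP_SKB_CB(skb)->end_seq  = 2000          (= buff->seq)
 *
 * The two skbs tile the original range with no gap or overlap, and PSH/FIN
 * travel only with the second (later) half.
 */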
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
12121da177e4SLinus Torvalds  * eventually). The difference is that pulled data is not copied, but
12131da177e4SLinus Torvalds  * immediately discarded.
12141da177e4SLinus Torvalds  */
1215f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
12161da177e4SLinus Torvalds {
12177b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
12181da177e4SLinus Torvalds 	int i, k, eat;
12191da177e4SLinus Torvalds 
12204fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
12214fa48bf3SEric Dumazet 	if (eat) {
12224fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
12234fa48bf3SEric Dumazet 		len -= eat;
12244fa48bf3SEric Dumazet 		if (!len)
12254fa48bf3SEric Dumazet 			return;
12264fa48bf3SEric Dumazet 	}
12271da177e4SLinus Torvalds 	eat = len;
12281da177e4SLinus Torvalds 	k = 0;
12297b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
12307b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
12317b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
12329e903e08SEric Dumazet 
12339e903e08SEric Dumazet 		if (size <= eat) {
1234aff65da0SIan Campbell 			skb_frag_unref(skb, i);
12359e903e08SEric Dumazet 			eat -= size;
12361da177e4SLinus Torvalds 		} else {
12377b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
12381da177e4SLinus Torvalds 			if (eat) {
12397b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
12407b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
12411da177e4SLinus Torvalds 				eat = 0;
12421da177e4SLinus Torvalds 			}
12431da177e4SLinus Torvalds 			k++;
12441da177e4SLinus Torvalds 		}
12451da177e4SLinus Torvalds 	}
12467b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
12471da177e4SLinus Torvalds 
124827a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
12491da177e4SLinus Torvalds 	skb->data_len -= len;
12501da177e4SLinus Torvalds 	skb->len = skb->data_len;
12511da177e4SLinus Torvalds }
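/*
 * Editorial sketch, not part of the original file: __pskb_trim_head() eats
 * from the front. With 100 linear bytes, two 1000-byte frags and len = 600,
 * the 100 linear bytes are pulled first, then eat = 500 comes out of frag 0:
 * its page_offset advances by 500 and its size shrinks to 500, while frag 1
 * is kept whole. Note the function ends with skb->len = skb->data_len, i.e.
 * the skb is deliberately left with an empty linear area.
 */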
12521da177e4SLinus Torvalds 
125367edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
12541da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
12551da177e4SLinus Torvalds {
125614bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
12571da177e4SLinus Torvalds 		return -ENOMEM;
12581da177e4SLinus Torvalds 
12594fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
126284fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
12631da177e4SLinus Torvalds 
12641da177e4SLinus Torvalds 	skb->truesize	     -= len;
12651da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
12663ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
12671da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
12681da177e4SLinus Torvalds 
12695b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
12701da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
12715b35e1e6SNeal Cardwell 		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
12721da177e4SLinus Torvalds 
12731da177e4SLinus Torvalds 	return 0;
12741da177e4SLinus Torvalds }
12751da177e4SLinus Torvalds 
12761b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
12771b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
12785d424d5aSJohn Heffner {
1279cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1280cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12815d424d5aSJohn Heffner 	int mss_now;
12825d424d5aSJohn Heffner 
12835d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
12845d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
12855d424d5aSJohn Heffner 	 */
12865d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
12875d424d5aSJohn Heffner 
128867469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
128967469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
129067469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
129167469601SEric Dumazet 
129267469601SEric Dumazet 		if (dst && dst_allfrag(dst))
129367469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
129467469601SEric Dumazet 	}
129567469601SEric Dumazet 
12965d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
12975d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
12985d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
12995d424d5aSJohn Heffner 
13005d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
13015d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
13025d424d5aSJohn Heffner 
13035d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
13045d424d5aSJohn Heffner 	if (mss_now < 48)
13055d424d5aSJohn Heffner 		mss_now = 48;
13065d424d5aSJohn Heffner 	return mss_now;
13075d424d5aSJohn Heffner }
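/*
 * Editorial sketch, not part of the original file: typical IPv4/Ethernet
 * numbers for the computation above, assuming no extension headers and no
 * allfrag:
 *
 *     mss_now = 1500 - 20 (net_header_len) - 20 (sizeof(struct tcphdr)) = 1460
 *
 * which is then clamped to rx_opt.mss_clamp and floored at 48, so that even
 * a full 40-byte option block leaves at least 8 bytes of payload.
 */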
13085d424d5aSJohn Heffner 
13091b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
13101b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
13111b63edd6SYuchung Cheng {
13121b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
13131b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
13141b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
13151b63edd6SYuchung Cheng }
13161b63edd6SYuchung Cheng 
13175d424d5aSJohn Heffner /* Inverse of above */
131867469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
13195d424d5aSJohn Heffner {
1320cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1321cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
13225d424d5aSJohn Heffner 	int mtu;
13235d424d5aSJohn Heffner 
13245d424d5aSJohn Heffner 	mtu = mss +
13255d424d5aSJohn Heffner 	      tp->tcp_header_len +
13265d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
13275d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
13285d424d5aSJohn Heffner 
132967469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
133067469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
133167469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
133267469601SEric Dumazet 
133367469601SEric Dumazet 		if (dst && dst_allfrag(dst))
133467469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
133567469601SEric Dumazet 	}
13365d424d5aSJohn Heffner 	return mtu;
13375d424d5aSJohn Heffner }
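/*
 * Editorial sketch, not part of the original file: tcp_mss_to_mtu() inverts
 * the clamp-free part of the calculation above, but adds tp->tcp_header_len
 * (the standing header, options included) rather than bare sizeof(tcphdr).
 * With timestamps on (tcp_header_len = 20 + 12 = 32) and plain IPv4:
 *
 *     tcp_mss_to_mtu(sk, 1448) = 1448 + 32 + 0 + 20 = 1500
 */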
13385d424d5aSJohn Heffner 
133967edfef7SAndi Kleen /* MTU probing init per socket */
13405d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
13415d424d5aSJohn Heffner {
13425d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
13435d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
13445d424d5aSJohn Heffner 
13455d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
13465d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
13475d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
13485d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
13495d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
13505d424d5aSJohn Heffner }
13514bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
13525d424d5aSJohn Heffner 
13531da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
13561da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
13571da177e4SLinus Torvalds 
13581da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1359caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
13601da177e4SLinus Torvalds    It also does not include TCP options.
13611da177e4SLinus Torvalds 
1362d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
13631da177e4SLinus Torvalds 
13641da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
13651da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
13661da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
13671da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
13701da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
13711da177e4SLinus Torvalds 
1372d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1373d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
13741da177e4SLinus Torvalds  */
13751da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
13761da177e4SLinus Torvalds {
13771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1378d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
13795d424d5aSJohn Heffner 	int mss_now;
13801da177e4SLinus Torvalds 
13815d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
13825d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
13831da177e4SLinus Torvalds 
13845d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1385409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds 	/* And store cached results */
1388d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
13895d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
13905d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1391c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
13921da177e4SLinus Torvalds 
13931da177e4SLinus Torvalds 	return mss_now;
13941da177e4SLinus Torvalds }
13954bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
13981da177e4SLinus Torvalds  * and even PMTU discovery events into account.
13991da177e4SLinus Torvalds  */
14000c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
14011da177e4SLinus Torvalds {
1402cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1403cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1404c1b4a7e6SDavid S. Miller 	u32 mss_now;
140595c96174SEric Dumazet 	unsigned int header_len;
140633ad798cSAdam Langley 	struct tcp_out_options opts;
140733ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
14081da177e4SLinus Torvalds 
1409c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1410c1b4a7e6SDavid S. Miller 
14111da177e4SLinus Torvalds 	if (dst) {
14121da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1413d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
14141da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
14151da177e4SLinus Torvalds 	}
14161da177e4SLinus Torvalds 
141733ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
141833ad798cSAdam Langley 		     sizeof(struct tcphdr);
141933ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
142033ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
142133ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
142233ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
142333ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
142433ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
142533ad798cSAdam Langley 		mss_now -= delta;
142633ad798cSAdam Langley 	}
1427cfb6eeb4SYOSHIFUJI Hideaki 
14281da177e4SLinus Torvalds 	return mss_now;
14291da177e4SLinus Torvalds }
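/*
 * Editorial sketch, not part of the original file: the header_len adjustment
 * above in concrete numbers. With timestamps negotiated, tp->tcp_header_len
 * is 20 + 12 = 32 and mss_cache on plain IPv4/Ethernet is 1460 - 12 = 1448.
 * If this particular send must also carry one SACK block (4 bytes of aligned
 * base option + 8 bytes per block), header_len = 20 + 12 + 12 = 44, so
 * delta = 12 and the effective mss for this packet is 1448 - 12 = 1436.
 */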
14301da177e4SLinus Torvalds 
143286fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it has not been fully used for one RTO.
143386fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
143486fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
143486fd14adSWeiping Pan  */
143586fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1436a762a980SDavid S. Miller {
14379e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1438a762a980SDavid S. Miller 
143986fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
144086fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
144186fd14adSWeiping Pan 		/* Limited by application or receiver window. */
144286fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
144386fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
144486fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
144586fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
144686fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
144786fd14adSWeiping Pan 		}
144886fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
144986fd14adSWeiping Pan 	}
145086fd14adSWeiping Pan 	tp->snd_cwnd_stamp = tcp_time_stamp;
145186fd14adSWeiping Pan }
145286fd14adSWeiping Pan 
1453ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1454a762a980SDavid S. Miller {
1455a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1456a762a980SDavid S. Miller 
1457ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1458ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1459ca8a2263SNeal Cardwell 	 */
1460ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1461ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1462ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1463ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1464ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1465ca8a2263SNeal Cardwell 	}
1466e114a710SEric Dumazet 
146724901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1468a762a980SDavid S. Miller 		/* Network is fed fully. */
1469a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1470a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1471a762a980SDavid S. Miller 	} else {
1472a762a980SDavid S. Miller 		/* Network starves. */
1473a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1474a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1475a762a980SDavid S. Miller 
147615d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
147715d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1478a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1479a762a980SDavid S. Miller 	}
1480a762a980SDavid S. Miller }
1481a762a980SDavid S. Miller 
1482d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1483d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1484d4589926SEric Dumazet {
1485d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1486d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1487d4589926SEric Dumazet }
1488d4589926SEric Dumazet 
1489d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1490d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1491d4589926SEric Dumazet  * The test is really :
1492d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1493d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1494d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1495d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
14960e3a4803SIlpo Järvinen  */
1497d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1498d4589926SEric Dumazet 				const struct sk_buff *skb)
1499d4589926SEric Dumazet {
1500d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1501d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1502d4589926SEric Dumazet }
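/*
 * Editorial sketch, not part of the original file: why the comparison above
 * matches (skb->len % mss_now) != 0. Since pcount = DIV_ROUND_UP(len, mss),
 * pcount * mss >= len with equality exactly when len is a multiple of mss.
 * The helper below is illustrative only (hypothetical name, never compiled).
 */
#if 0
static int example_has_sub_mss_tail(unsigned int len, unsigned int mss)
{
	unsigned int pcount = (len + mss - 1) / mss;

	/* same truth value as (len % mss) != 0, without a second divide */
	return len < pcount * mss;
}
/* example_has_sub_mss_tail(2896, 1448) == 0, (2000, 1448) == 1 */
#endif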
1503d4589926SEric Dumazet 
1504d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1505d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1506d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1507d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1508d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1509d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1510d4589926SEric Dumazet  */
1511d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1512cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1513d4589926SEric Dumazet {
1514d4589926SEric Dumazet 	return partial &&
1515d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1516d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1517d4589926SEric Dumazet }
1518d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1519d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1520d4589926SEric Dumazet 					const struct sk_buff *skb,
1521d4589926SEric Dumazet 					unsigned int mss_now,
1522d4589926SEric Dumazet 					unsigned int max_segs,
1523d4589926SEric Dumazet 					int nonagle)
1524c1b4a7e6SDavid S. Miller {
1525cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1526d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1527c1b4a7e6SDavid S. Miller 
152890840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
15291485348dSBen Hutchings 	max_len = mss_now * max_segs;
15300e3a4803SIlpo Järvinen 
15311485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
15321485348dSBen Hutchings 		return max_len;
15330e3a4803SIlpo Järvinen 
15345ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
15355ea3a748SIlpo Järvinen 
15361485348dSBen Hutchings 	if (max_len <= needed)
15371485348dSBen Hutchings 		return max_len;
15380e3a4803SIlpo Järvinen 
1539d4589926SEric Dumazet 	partial = needed % mss_now;
1540d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1541d4589926SEric Dumazet 	 * to include this last segment in this skb.
1542d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1543d4589926SEric Dumazet 	 */
1544cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1545d4589926SEric Dumazet 		return needed - partial;
1546d4589926SEric Dumazet 
1547d4589926SEric Dumazet 	return needed;
1548c1b4a7e6SDavid S. Miller }
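/*
 * Editorial sketch, not part of the original file: tcp_mss_split_point() in
 * numbers. Take mss_now = 1000, max_segs = 4 (max_len = 4000), skb->len =
 * 2500 and a 2200-byte window: needed = min(2500, 2200) = 2200 and partial =
 * 2200 % 1000 = 200. If Nagle forbids the 200-byte tail (TCP_CORK, or small
 * packets still in flight under Minshall), we send 2000 now; otherwise the
 * full 2200.
 */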
1549c1b4a7e6SDavid S. Miller 
1550c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1551c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1552c1b4a7e6SDavid S. Miller  */
1553cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1554cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1555c1b4a7e6SDavid S. Miller {
1556c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1557c1b4a7e6SDavid S. Miller 
1558c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
15594de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
15604de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1561c1b4a7e6SDavid S. Miller 		return 1;
1562c1b4a7e6SDavid S. Miller 
1563c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1564c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1565c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1566c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1567c1b4a7e6SDavid S. Miller 
1568c1b4a7e6SDavid S. Miller 	return 0;
1569c1b4a7e6SDavid S. Miller }
1570c1b4a7e6SDavid S. Miller 
1571b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
157267edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1573c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1574c1b4a7e6SDavid S. Miller  */
1575cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1576056834d9SIlpo Järvinen 			     unsigned int mss_now)
1577c1b4a7e6SDavid S. Miller {
1578c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1579c1b4a7e6SDavid S. Miller 
1580f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1581846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1582c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1583c1b4a7e6SDavid S. Miller 	}
1584c1b4a7e6SDavid S. Miller 	return tso_segs;
1585c1b4a7e6SDavid S. Miller }
1586c1b4a7e6SDavid S. Miller 
1587c1b4a7e6SDavid S. Miller 
1588a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1589c1b4a7e6SDavid S. Miller  * sent now.
1590c1b4a7e6SDavid S. Miller  */
1591a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1592c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1593c1b4a7e6SDavid S. Miller {
1594c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1595c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1596c1b4a7e6SDavid S. Miller 	 *
1597c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1598c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1599c1b4a7e6SDavid S. Miller 	 */
1600c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1601a2a385d6SEric Dumazet 		return true;
1602c1b4a7e6SDavid S. Miller 
16039b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
16049b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1605a2a385d6SEric Dumazet 		return true;
1606c1b4a7e6SDavid S. Miller 
1607cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1608a2a385d6SEric Dumazet 		return true;
1609c1b4a7e6SDavid S. Miller 
1610a2a385d6SEric Dumazet 	return false;
1611c1b4a7e6SDavid S. Miller }
1612c1b4a7e6SDavid S. Miller 
1613c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1614a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1615a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1616056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1617c1b4a7e6SDavid S. Miller {
1618c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1619c1b4a7e6SDavid S. Miller 
1620c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1621c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1622c1b4a7e6SDavid S. Miller 
162390840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1624c1b4a7e6SDavid S. Miller }
1625c1b4a7e6SDavid S. Miller 
1626fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1627c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1628c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1629c1b4a7e6SDavid S. Miller  */
1630cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1631c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1632c1b4a7e6SDavid S. Miller {
1633cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1634c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1635c1b4a7e6SDavid S. Miller 
1636846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1637c1b4a7e6SDavid S. Miller 
1638c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1639c1b4a7e6SDavid S. Miller 		return 0;
1640c1b4a7e6SDavid S. Miller 
1641c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1642056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1643c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1644c1b4a7e6SDavid S. Miller 
1645c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1646c1b4a7e6SDavid S. Miller }
1647c1b4a7e6SDavid S. Miller 
164867edfef7SAndi Kleen /* Test if sending is allowed right now. */
1649a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1650c1b4a7e6SDavid S. Miller {
1651cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1652fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1653c1b4a7e6SDavid S. Miller 
1654a02cec21SEric Dumazet 	return skb &&
16550c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1656c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1657a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1658c1b4a7e6SDavid S. Miller }
1659c1b4a7e6SDavid S. Miller 
1660c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1661c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1662c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1663c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1664c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1665c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1666c1b4a7e6SDavid S. Miller  */
1667056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1668c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1669c1b4a7e6SDavid S. Miller {
1670c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1671c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
16729ce01461SIlpo Järvinen 	u8 flags;
1673c1b4a7e6SDavid S. Miller 
1674c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1675c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
16766cc55e09SOctavian Purdila 		return tcp_fragment(sk, skb, len, mss_now, gfp);
1677c1b4a7e6SDavid S. Miller 
1678c4ead4c5SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp);
1679c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1680c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1681c1b4a7e6SDavid S. Miller 
16823ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
16833ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1684b60b49eaSHerbert Xu 	buff->truesize += nlen;
1685c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1686c1b4a7e6SDavid S. Miller 
1687c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1688c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1689c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1690c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1691c1b4a7e6SDavid S. Miller 
1692c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
16934de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
16944de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
16954de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1696c1b4a7e6SDavid S. Miller 
1697c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1698c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1699c1b4a7e6SDavid S. Miller 
170084fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1701c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1702490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1703c1b4a7e6SDavid S. Miller 
1704c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1705846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1706846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1707c1b4a7e6SDavid S. Miller 
1708c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1709f4a775d1SEric Dumazet 	__skb_header_release(buff);
1710fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1711c1b4a7e6SDavid S. Miller 
1712c1b4a7e6SDavid S. Miller 	return 0;
1713c1b4a7e6SDavid S. Miller }
1714c1b4a7e6SDavid S. Miller 
1715c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1716c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1717c1b4a7e6SDavid S. Miller  *
1718c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1719c1b4a7e6SDavid S. Miller  */
1720ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1721ca8a2263SNeal Cardwell 				 bool *is_cwnd_limited)
1722c1b4a7e6SDavid S. Miller {
17239e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
17246687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1725c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1726ad9f4f50SEric Dumazet 	int win_divisor;
1727c1b4a7e6SDavid S. Miller 
17284de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1729ae8064acSJohn Heffner 		goto send_now;
1730c1b4a7e6SDavid S. Miller 
17316687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1732ae8064acSJohn Heffner 		goto send_now;
1733ae8064acSJohn Heffner 
1734ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1735bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1736a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1737ae8064acSJohn Heffner 		goto send_now;
1738908a75c1SDavid S. Miller 
1739c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1740c1b4a7e6SDavid S. Miller 
1741056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1742c1b4a7e6SDavid S. Miller 
174390840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1744c1b4a7e6SDavid S. Miller 
1745c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1746c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1747c1b4a7e6SDavid S. Miller 
1748c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1749c1b4a7e6SDavid S. Miller 
1750ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
17511485348dSBen Hutchings 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
175295bd09ebSEric Dumazet 			   tp->xmit_size_goal_segs * tp->mss_cache))
1753ae8064acSJohn Heffner 		goto send_now;
1754ba244fe9SDavid S. Miller 
175562ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
175662ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
175762ad2761SIlpo Järvinen 		goto send_now;
175862ad2761SIlpo Järvinen 
1759ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1760ad9f4f50SEric Dumazet 	if (win_divisor) {
1761c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1762c1b4a7e6SDavid S. Miller 
1763c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1764c1b4a7e6SDavid S. Miller 		 * just use it.
1765c1b4a7e6SDavid S. Miller 		 */
1766ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1767c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1768ae8064acSJohn Heffner 			goto send_now;
1769c1b4a7e6SDavid S. Miller 	} else {
1770c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1771c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1772c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1773c1b4a7e6SDavid S. Miller 		 * then send now.
1774c1b4a7e6SDavid S. Miller 		 */
17756b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1776ae8064acSJohn Heffner 			goto send_now;
1777c1b4a7e6SDavid S. Miller 	}
1778c1b4a7e6SDavid S. Miller 
1779f4541d60SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1780f4541d60SEric Dumazet 	 * Do not rearm the timer if already set to not break TCP ACK clocking.
1781f4541d60SEric Dumazet 	 */
1782f4541d60SEric Dumazet 	if (!tp->tso_deferred)
1783ae8064acSJohn Heffner 		tp->tso_deferred = 1 | (jiffies << 1);
1784ae8064acSJohn Heffner 
1785ca8a2263SNeal Cardwell 	if (cong_win < send_win && cong_win < skb->len)
1786ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1787ca8a2263SNeal Cardwell 
1788a2a385d6SEric Dumazet 	return true;
1789ae8064acSJohn Heffner 
1790ae8064acSJohn Heffner send_now:
1791ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1792a2a385d6SEric Dumazet 	return false;
1793c1b4a7e6SDavid S. Miller }
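/*
 * Editorial sketch, not part of the original file: the win_divisor branch
 * above with its default value of 3. With snd_wnd = 65536, snd_cwnd = 40 and
 * mss_cache = 1448: chunk = min(65536, 40 * 1448) / 3 = 57920 / 3 = 19306,
 * so deferral stops (we send immediately) once min(send_win, cong_win) can
 * cover about a third of the smaller window in one skb.
 */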
1794c1b4a7e6SDavid S. Miller 
17955d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
179667edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
179767edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
179867edfef7SAndi Kleen  * changes resulting in larger path MTUs.
179967edfef7SAndi Kleen  *
18005d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
18015d424d5aSJohn Heffner  *         1 if a probe was sent,
1802056834d9SIlpo Järvinen  *         -1 otherwise
1803056834d9SIlpo Järvinen  */
18045d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
18055d424d5aSJohn Heffner {
18065d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
18075d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
18085d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
18095d424d5aSJohn Heffner 	int len;
18105d424d5aSJohn Heffner 	int probe_size;
181191cc17c0SIlpo Järvinen 	int size_needed;
18125d424d5aSJohn Heffner 	int copy;
18135d424d5aSJohn Heffner 	int mss_now;
18145d424d5aSJohn Heffner 
18155d424d5aSJohn Heffner 	/* Not currently probing/verifying,
18165d424d5aSJohn Heffner 	 * not in recovery,
18175d424d5aSJohn Heffner 	 * have enough cwnd, and
18185d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
18195d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
18205d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
18215d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
18225d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1823cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
18245d424d5aSJohn Heffner 		return -1;
18255d424d5aSJohn Heffner 
18265d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
18270c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
18285d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
182991cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
18305d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
18315d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
18325d424d5aSJohn Heffner 		return -1;
18335d424d5aSJohn Heffner 	}
18345d424d5aSJohn Heffner 
18355d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
18367f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
18375d424d5aSJohn Heffner 		return -1;
18385d424d5aSJohn Heffner 
183991cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
18405d424d5aSJohn Heffner 		return -1;
184190840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
18425d424d5aSJohn Heffner 		return 0;
18435d424d5aSJohn Heffner 
1844d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1845d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1846d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
18475d424d5aSJohn Heffner 			return -1;
18485d424d5aSJohn Heffner 		else
18495d424d5aSJohn Heffner 			return 0;
18505d424d5aSJohn Heffner 	}
18515d424d5aSJohn Heffner 
18525d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
18535d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
18545d424d5aSJohn Heffner 		return -1;
18553ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
18563ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
18575d424d5aSJohn Heffner 
1858fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
18595d424d5aSJohn Heffner 
18605d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
18615d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
18624de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
18635d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
18645d424d5aSJohn Heffner 	nskb->csum = 0;
186584fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
18665d424d5aSJohn Heffner 
186750c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
186850c4817eSIlpo Järvinen 
18695d424d5aSJohn Heffner 	len = 0;
1870234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
18715d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
18725d424d5aSJohn Heffner 		if (nskb->ip_summed)
18735d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
18745d424d5aSJohn Heffner 		else
18755d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1876056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1877056834d9SIlpo Järvinen 							    copy, nskb->csum);
18785d424d5aSJohn Heffner 
18795d424d5aSJohn Heffner 		if (skb->len <= copy) {
18805d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
18815d424d5aSJohn Heffner 			 * Throw it away. */
18824de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1883fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
18843ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
18855d424d5aSJohn Heffner 		} else {
18864de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1887a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
18885d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
18895d424d5aSJohn Heffner 				skb_pull(skb, copy);
189084fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1891056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1892056834d9SIlpo Järvinen 								 skb->len, 0);
18935d424d5aSJohn Heffner 			} else {
18945d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
18955d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
18965d424d5aSJohn Heffner 			}
18975d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
18985d424d5aSJohn Heffner 		}
18995d424d5aSJohn Heffner 
19005d424d5aSJohn Heffner 		len += copy;
1901234b6860SIlpo Järvinen 
1902234b6860SIlpo Järvinen 		if (len >= probe_size)
1903234b6860SIlpo Järvinen 			break;
19045d424d5aSJohn Heffner 	}
19055d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
19065d424d5aSJohn Heffner 
19075d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
19087faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
19097faee5c0SEric Dumazet 	 */
19105d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
19115d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
19125d424d5aSJohn Heffner 		 * effectively two packets. */
19135d424d5aSJohn Heffner 		tp->snd_cwnd--;
191466f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
19155d424d5aSJohn Heffner 
19165d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
19170e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
19180e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
19195d424d5aSJohn Heffner 
19205d424d5aSJohn Heffner 		return 1;
19215d424d5aSJohn Heffner 	}
19225d424d5aSJohn Heffner 
19235d424d5aSJohn Heffner 	return -1;
19245d424d5aSJohn Heffner }
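/*
 * Editorial sketch, not part of the original file: the probe sizing above in
 * numbers. With mss_cache = 1448 and the default reordering of 3:
 *
 *     probe_size  = 2 * 1448              = 2896
 *     size_needed = 2896 + (3 + 1) * 1448 = 8688
 *
 * i.e. a probe goes out only if the send queue holds ~6 segments of data
 * beyond snd_nxt, the receiver window can absorb them, and the cwnd leaves
 * room for at least two more packets in flight.
 */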
19255d424d5aSJohn Heffner 
19261da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
19271da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
19281da177e4SLinus Torvalds  * window for us.
19291da177e4SLinus Torvalds  *
1930f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1931f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1932f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1933f8269a49SIlpo Järvinen  *
19346ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
19356ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
19366ba8a3b1SNandita Dukkipati  *
1937a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
1938a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
19391da177e4SLinus Torvalds  */
1940a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1941d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
19421da177e4SLinus Torvalds {
19431da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
194492df7b51SDavid S. Miller 	struct sk_buff *skb;
1945c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1946c1b4a7e6SDavid S. Miller 	int cwnd_quota;
19475d424d5aSJohn Heffner 	int result;
1948ca8a2263SNeal Cardwell 	bool is_cwnd_limited = false;
19491da177e4SLinus Torvalds 
1950c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
19515d424d5aSJohn Heffner 
1952d5dd9175SIlpo Järvinen 	if (!push_one) {
19535d424d5aSJohn Heffner 		/* Do MTU probing. */
1954d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1955d5dd9175SIlpo Järvinen 		if (!result) {
1956a2a385d6SEric Dumazet 			return false;
19575d424d5aSJohn Heffner 		} else if (result > 0) {
19585d424d5aSJohn Heffner 			sent_pkts = 1;
19595d424d5aSJohn Heffner 		}
1960d5dd9175SIlpo Järvinen 	}
19615d424d5aSJohn Heffner 
1962fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1963c8ac3774SHerbert Xu 		unsigned int limit;
1964c8ac3774SHerbert Xu 
1965b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1966c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1967c1b4a7e6SDavid S. Miller 
19689d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
19697faee5c0SEric Dumazet 			/* "skb_mstamp" is used as a start point for the retransmit timer */
19707faee5c0SEric Dumazet 			skb_mstamp_get(&skb->skb_mstamp);
1971ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
19729d186cacSAndrey Vagin 		}
1973ec342325SAndrew Vagin 
1974b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
19756ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
1976ca8a2263SNeal Cardwell 			is_cwnd_limited = true;
19776ba8a3b1SNandita Dukkipati 			if (push_one == 2)
19786ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
19796ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
19806ba8a3b1SNandita Dukkipati 			else
1981b68e9f85SHerbert Xu 				break;
19826ba8a3b1SNandita Dukkipati 		}
1983b68e9f85SHerbert Xu 
1984b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1985b68e9f85SHerbert Xu 			break;
1986b68e9f85SHerbert Xu 
1987c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1988aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1989aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1990aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1991aa93466bSDavid S. Miller 				break;
1992c1b4a7e6SDavid S. Miller 		} else {
1993ca8a2263SNeal Cardwell 			if (!push_one &&
1994ca8a2263SNeal Cardwell 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
1995aa93466bSDavid S. Miller 				break;
1996c1b4a7e6SDavid S. Miller 		}
1997aa93466bSDavid S. Miller 
1998c9eeec26SEric Dumazet 		/* TCP Small Queues :
1999c9eeec26SEric Dumazet 		 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
2000c9eeec26SEric Dumazet 		 * This allows for :
2001c9eeec26SEric Dumazet 		 *  - better RTT estimation and ACK scheduling
2002c9eeec26SEric Dumazet 		 *  - faster recovery
2003c9eeec26SEric Dumazet 		 *  - high rates
200498e09386SEric Dumazet 		 * Alas, some drivers / subsystems require a fair amount
200598e09386SEric Dumazet 		 * of queued bytes to ensure line rate.
200698e09386SEric Dumazet 		 * One example is wifi aggregation (802.11 AMPDU)
200746d3ceabSEric Dumazet 		 */
200898e09386SEric Dumazet 		limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
200998e09386SEric Dumazet 			      sk->sk_pacing_rate >> 10);
2010c9eeec26SEric Dumazet 
2011c9eeec26SEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
201246d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
2013bf06200eSJohn Ogness 			/* It is possible TX completion already happened
2014bf06200eSJohn Ogness 			 * before we set TSQ_THROTTLED, so we must
2015bf06200eSJohn Ogness 			 * test the condition again.
2016bf06200eSJohn Ogness 			 */
20174e857c58SPeter Zijlstra 			smp_mb__after_atomic();
2018bf06200eSJohn Ogness 			if (atomic_read(&sk->sk_wmem_alloc) > limit)
201946d3ceabSEric Dumazet 				break;
202046d3ceabSEric Dumazet 		}
2021c9eeec26SEric Dumazet 
2022c8ac3774SHerbert Xu 		limit = mss_now;
2023f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
20240e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
20251485348dSBen Hutchings 						    min_t(unsigned int,
20261485348dSBen Hutchings 							  cwnd_quota,
2027d4589926SEric Dumazet 							  sk->sk_gso_max_segs),
2028d4589926SEric Dumazet 						    nonagle);
2029c8ac3774SHerbert Xu 
2030c8ac3774SHerbert Xu 		if (skb->len > limit &&
2031c4ead4c5SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
20321da177e4SLinus Torvalds 			break;
20331da177e4SLinus Torvalds 
2034d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
20351da177e4SLinus Torvalds 			break;
20361da177e4SLinus Torvalds 
2037ec342325SAndrew Vagin repair:
20381da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
20391da177e4SLinus Torvalds 		 * This call will increment packets_out.
20401da177e4SLinus Torvalds 		 */
204166f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2044a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2045d5dd9175SIlpo Järvinen 
2046d5dd9175SIlpo Järvinen 		if (push_one)
2047d5dd9175SIlpo Järvinen 			break;
20481da177e4SLinus Torvalds 	}
20491da177e4SLinus Torvalds 
2050aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2051684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2052684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
20536ba8a3b1SNandita Dukkipati 
20546ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
20556ba8a3b1SNandita Dukkipati 		if (push_one != 2)
20566ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
2057ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2058a2a385d6SEric Dumazet 		return false;
20591da177e4SLinus Torvalds 	}
20606ba8a3b1SNandita Dukkipati 	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
20616ba8a3b1SNandita Dukkipati }
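/* A minimal, standalone sketch (not part of this file) of the TSQ limit
 * arithmetic used in tcp_write_xmit() above: the cap is the larger of the
 * tcp_limit_output_bytes sysctl (131072 by default, see the top of this
 * file) and sk_pacing_rate >> 10, i.e. roughly 1 ms worth of bytes at the
 * current pacing rate.  The rates below are illustrative assumptions.
 */
#include <stdio.h>

static unsigned long tsq_limit(unsigned long pacing_rate_Bps,
			       unsigned long limit_output_bytes)
{
	unsigned long one_ms_worth = pacing_rate_Bps >> 10;

	return one_ms_worth > limit_output_bytes ? one_ms_worth
						 : limit_output_bytes;
}

int main(void)
{
	/* ~1 Gbit/s: 125000000 >> 10 = 122070 < 131072, sysctl wins */
	printf("1 Gbit/s : %lu bytes\n", tsq_limit(125000000UL, 131072UL));
	/* ~10 Gbit/s: 1250000000 >> 10 = 1220703, pacing term wins */
	printf("10 Gbit/s: %lu bytes\n", tsq_limit(1250000000UL, 131072UL));
	return 0;
}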
20626ba8a3b1SNandita Dukkipati 
20636ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
20646ba8a3b1SNandita Dukkipati {
20656ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
20666ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
20676ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
2068740b0f18SEric Dumazet 	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
20696ba8a3b1SNandita Dukkipati 
20706ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
20716ba8a3b1SNandita Dukkipati 		return false;
20726ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
20736ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
20746ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
20756ba8a3b1SNandita Dukkipati 		return false;
20766ba8a3b1SNandita Dukkipati 	}
20776ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
20786ba8a3b1SNandita Dukkipati 	 * finishes.
20796ba8a3b1SNandita Dukkipati 	 */
20806ba8a3b1SNandita Dukkipati 	if (sk->sk_state == TCP_SYN_RECV)
20816ba8a3b1SNandita Dukkipati 		return false;
20826ba8a3b1SNandita Dukkipati 
20836ba8a3b1SNandita Dukkipati 	/* TLP is only scheduled when the next timer event is the RTO. */
20846ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
20856ba8a3b1SNandita Dukkipati 		return false;
20866ba8a3b1SNandita Dukkipati 
20876ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK-capable connections
20886ba8a3b1SNandita Dukkipati 	 * in Open state that are limited by either cwnd or the application.
20896ba8a3b1SNandita Dukkipati 	 */
2090740b0f18SEric Dumazet 	if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
20916ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
20926ba8a3b1SNandita Dukkipati 		return false;
20936ba8a3b1SNandita Dukkipati 
20946ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
20956ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
20966ba8a3b1SNandita Dukkipati 		return false;
20976ba8a3b1SNandita Dukkipati 
20986ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
20996ba8a3b1SNandita Dukkipati 	 * for delayed ack when there's one outstanding packet.
21006ba8a3b1SNandita Dukkipati 	 */
21016ba8a3b1SNandita Dukkipati 	timeout = rtt << 1;
21026ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
21036ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
21046ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
21056ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
21066ba8a3b1SNandita Dukkipati 
21076ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
21086ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
21096ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
21106ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
21116ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
21126ba8a3b1SNandita Dukkipati 		if (delta > 0)
21136ba8a3b1SNandita Dukkipati 			timeout = delta;
21146ba8a3b1SNandita Dukkipati 	}
21156ba8a3b1SNandita Dukkipati 
21166ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
21176ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
21186ba8a3b1SNandita Dukkipati 	return true;
21196ba8a3b1SNandita Dukkipati }
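/* A standalone sketch (not part of this file) of the probe timeout math in
 * tcp_schedule_loss_probe(), in milliseconds rather than jiffies, assuming
 * TCP_DELACK_MAX of 200 ms (HZ / 5).
 */
#include <stdio.h>

#define DELACK_MAX_MS 200	/* assumed value of TCP_DELACK_MAX */

static unsigned int tlp_timeout_ms(unsigned int rtt_ms,
				   unsigned int packets_out)
{
	unsigned int timeout = rtt_ms << 1;	/* base: 2 * RTT */

	/* with one packet out, also ride out a delayed ACK: 1.5*rtt + 200 */
	if (packets_out == 1 &&
	    timeout < rtt_ms + (rtt_ms >> 1) + DELACK_MAX_MS)
		timeout = rtt_ms + (rtt_ms >> 1) + DELACK_MAX_MS;

	return timeout < 10 ? 10 : timeout;	/* 10 ms floor */
}

int main(void)
{
	printf("%u ms\n", tlp_timeout_ms(50, 4));	/* 100 ms */
	printf("%u ms\n", tlp_timeout_ms(50, 1));	/* 275 ms */
	return 0;
}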
21206ba8a3b1SNandita Dukkipati 
21211f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
21221f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
21231f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
21241f3279aeSEric Dumazet  * Note: This is called from BH context only.
21251f3279aeSEric Dumazet  */
21261f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
21271f3279aeSEric Dumazet 				    const struct sk_buff *skb)
21281f3279aeSEric Dumazet {
2129*39bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
21301f3279aeSEric Dumazet 		NET_INC_STATS_BH(sock_net(sk),
21311f3279aeSEric Dumazet 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
21321f3279aeSEric Dumazet 		return true;
21331f3279aeSEric Dumazet 	}
21341f3279aeSEric Dumazet 	return false;
21351f3279aeSEric Dumazet }
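/* For reference, skb_fclone_busy() (a helper outside this file) boils down
 * to roughly the test below: the skb is the original half of a fast-clone
 * pair, the clone half is still referenced, and the clone still belongs to
 * this socket, i.e. TX completion has not orphaned it yet.  Paraphrased,
 * not verbatim; see the skbuff headers for the authoritative version.
 *
 *	fclones = container_of(skb, struct sk_buff_fclones, skb1);
 *	busy = skb->fclone == SKB_FCLONE_ORIG &&
 *	       atomic_read(&fclones->fclone_ref) > 1 &&
 *	       fclones->skb2.sk == sk;
 */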
21361f3279aeSEric Dumazet 
21376ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else
21386ba8a3b1SNandita Dukkipati  * retransmit the last segment.
21396ba8a3b1SNandita Dukkipati  */
21406ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
21416ba8a3b1SNandita Dukkipati {
21429b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
21436ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
21446ba8a3b1SNandita Dukkipati 	int pcount;
21456ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
21466ba8a3b1SNandita Dukkipati 	int err = -1;
21476ba8a3b1SNandita Dukkipati 
21486ba8a3b1SNandita Dukkipati 	if (tcp_send_head(sk) != NULL) {
21496ba8a3b1SNandita Dukkipati 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
21506ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21516ba8a3b1SNandita Dukkipati 	}
21526ba8a3b1SNandita Dukkipati 
21539b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
21549b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
21559b717a8dSNandita Dukkipati 		goto rearm_timer;
21569b717a8dSNandita Dukkipati 
21576ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
21586ba8a3b1SNandita Dukkipati 	skb = tcp_write_queue_tail(sk);
21596ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
21606ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21616ba8a3b1SNandita Dukkipati 
21621f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
21631f3279aeSEric Dumazet 		goto rearm_timer;
21641f3279aeSEric Dumazet 
21656ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
21666ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
21676ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21686ba8a3b1SNandita Dukkipati 
21696ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
21706cc55e09SOctavian Purdila 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
21716cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
21726ba8a3b1SNandita Dukkipati 			goto rearm_timer;
21736ba8a3b1SNandita Dukkipati 		skb = tcp_write_queue_tail(sk);
21746ba8a3b1SNandita Dukkipati 	}
21756ba8a3b1SNandita Dukkipati 
21766ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
21776ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21786ba8a3b1SNandita Dukkipati 
21796ba8a3b1SNandita Dukkipati 	err = __tcp_retransmit_skb(sk, skb);
21806ba8a3b1SNandita Dukkipati 
21819b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
21829b717a8dSNandita Dukkipati 	if (likely(!err))
21839b717a8dSNandita Dukkipati 		tp->tlp_high_seq = tp->snd_nxt;
21849b717a8dSNandita Dukkipati 
21856ba8a3b1SNandita Dukkipati rearm_timer:
21866ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
21876ba8a3b1SNandita Dukkipati 				  inet_csk(sk)->icsk_rto,
21886ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
21896ba8a3b1SNandita Dukkipati 
21906ba8a3b1SNandita Dukkipati 	if (likely(!err))
21916ba8a3b1SNandita Dukkipati 		NET_INC_STATS_BH(sock_net(sk),
21926ba8a3b1SNandita Dukkipati 				 LINUX_MIB_TCPLOSSPROBES);
21931da177e4SLinus Torvalds }
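/* A worked example (illustrative numbers, not part of this file) of the
 * fragmentation step in tcp_send_loss_probe(): the tail skb is split at
 * (pcount - 1) * mss so the probe retransmits at most one MSS.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1400, len = 3000;
	unsigned int pcount = (len + mss - 1) / mss;	/* 3 segments */
	unsigned int split = (pcount - 1) * mss;	/* 2800 bytes */

	if (pcount > 1 && len > split)
		printf("split at %u, probe carries %u bytes\n",
		       split, len - split);		/* 200 bytes */
	return 0;
}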
21941da177e4SLinus Torvalds 
2195a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2196a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
2197a762a980SDavid S. Miller  * The socket must be locked by the caller.
2198a762a980SDavid S. Miller  */
21999e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
22009e412ba7SIlpo Järvinen 			       int nonagle)
2201a762a980SDavid S. Miller {
2202726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2203726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2204726e07a8SIlpo Järvinen 	 * all will be happy.
2205726e07a8SIlpo Järvinen 	 */
2206726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2207726e07a8SIlpo Järvinen 		return;
2208726e07a8SIlpo Järvinen 
220999a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
221099a1dec7SMel Gorman 			   sk_gfp_atomic(sk, GFP_ATOMIC)))
22119e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2212a762a980SDavid S. Miller }
2213a762a980SDavid S. Miller 
2214c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head.  Callers still need a
2215c1b4a7e6SDavid S. Miller  * true push of pending frames to set up the probe timer etc.
2216c1b4a7e6SDavid S. Miller  */
2217c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2218c1b4a7e6SDavid S. Miller {
2219fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2220c1b4a7e6SDavid S. Miller 
2221c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2222c1b4a7e6SDavid S. Miller 
2223d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2224c1b4a7e6SDavid S. Miller }
2225c1b4a7e6SDavid S. Miller 
22261da177e4SLinus Torvalds /* This function returns the amount that we can raise the
22271da177e4SLinus Torvalds  * usable window based on the following constraints
22281da177e4SLinus Torvalds  *
22291da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
22301da177e4SLinus Torvalds  * 2. We limit memory per socket
22311da177e4SLinus Torvalds  *
22321da177e4SLinus Torvalds  * RFC 1122:
22331da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
22341da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
22351da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
22361da177e4SLinus Torvalds  *
22371da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
22381da177e4SLinus Torvalds  * it at least MSS bytes.
22391da177e4SLinus Torvalds  *
22401da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
22411da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
22421da177e4SLinus Torvalds  *
22431da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
22441da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
22451da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
22461da177e4SLinus Torvalds  * window to always advance by a single byte.
22471da177e4SLinus Torvalds  *
22481da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
22491da177e4SLinus Torvalds  * then this will not be a problem.
22501da177e4SLinus Torvalds  *
22511da177e4SLinus Torvalds  * BSD seems to make the following compromise:
22521da177e4SLinus Torvalds  *
22531da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
22541da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
22551da177e4SLinus Torvalds  *	then set the window to 0.
22561da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
22571da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
22581da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
22591da177e4SLinus Torvalds  *
22601da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
22611da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
22621da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
22631da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
22641da177e4SLinus Torvalds  * because the pipeline is full.
22651da177e4SLinus Torvalds  *
22661da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
22671da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
22681da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
22691da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
22701da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
22711da177e4SLinus Torvalds  *
22721da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
22731da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
22741da177e4SLinus Torvalds  *
22751da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
22761da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
22771da177e4SLinus Torvalds  */
22781da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
22791da177e4SLinus Torvalds {
2280463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
22811da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2282caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
22831da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
22841da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
22851da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
22861da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
22871da177e4SLinus Torvalds 	 */
2288463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
22891da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
229086c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
229186c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
22921da177e4SLinus Torvalds 	int window;
22931da177e4SLinus Torvalds 
22941da177e4SLinus Torvalds 	if (mss > full_space)
22951da177e4SLinus Torvalds 		mss = full_space;
22961da177e4SLinus Torvalds 
2297b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2298463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
22991da177e4SLinus Torvalds 
2300180d8cd9SGlauber Costa 		if (sk_under_memory_pressure(sk))
2301056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2302056834d9SIlpo Järvinen 					       4U * tp->advmss);
23031da177e4SLinus Torvalds 
230486c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
230586c1a045SFlorian Westphal 		 * increase it due to wscale.
230686c1a045SFlorian Westphal 		 */
230786c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
230886c1a045SFlorian Westphal 
230986c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
231086c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
231186c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
231286c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
231386c1a045SFlorian Westphal 		 * With a large window, the mss test triggers way too late
231486c1a045SFlorian Westphal 		 * to announce a zero window before the rmem limit kicks in.
231586c1a045SFlorian Westphal 		 */
231686c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
23171da177e4SLinus Torvalds 			return 0;
23181da177e4SLinus Torvalds 	}
23191da177e4SLinus Torvalds 
23201da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
23211da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
23221da177e4SLinus Torvalds 
23231da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
23241da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
23251da177e4SLinus Torvalds 	 */
23261da177e4SLinus Torvalds 	window = tp->rcv_wnd;
23271da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
23281da177e4SLinus Torvalds 		window = free_space;
23291da177e4SLinus Torvalds 
23301da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
23311da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
23321da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
23331da177e4SLinus Torvalds 		 */
23341da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
23351da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
23361da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
23371da177e4SLinus Torvalds 	} else {
23381da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
23391da177e4SLinus Torvalds 		 * Window clamp already applied above.
23401da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
23411da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
23421da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
23431da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
23441da177e4SLinus Torvalds 		 * is too small.
23451da177e4SLinus Torvalds 		 */
23461da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
23471da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
234884565070SJohn Heffner 		else if (mss == full_space &&
2349b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
235084565070SJohn Heffner 			window = free_space;
23511da177e4SLinus Torvalds 	}
23521da177e4SLinus Torvalds 
23531da177e4SLinus Torvalds 	return window;
23541da177e4SLinus Torvalds }
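/* A standalone illustration (not part of this file) of the
 * no-window-scaling branch of __tcp_select_window() above: the advertised
 * window moves only once free space has changed by at least one MSS, and
 * then lands on an MSS multiple, so most ACKs re-advertise the same value.
 */
#include <stdio.h>

static int select_window(int window, int free_space, int mss)
{
	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;
	return window;
}

int main(void)
{
	int mss = 1000;

	/* free space crept from 5000 up to 5400: window stays put */
	printf("%d\n", select_window(5000, 5400, mss));	/* 5000 */
	/* free space grew by more than one MSS: round down to a multiple */
	printf("%d\n", select_window(5000, 6300, mss));	/* 6000 */
	return 0;
}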
23551da177e4SLinus Torvalds 
23564a17fc3aSIlpo Järvinen /* Collapses two adjacent SKBs during retransmission. */
23574a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
23581da177e4SLinus Torvalds {
23591da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2360fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2361058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
23621da177e4SLinus Torvalds 
2363058dc334SIlpo Järvinen 	skb_size = skb->len;
2364058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
23651da177e4SLinus Torvalds 
2366058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
23671da177e4SLinus Torvalds 
23686859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2369a6963a6bSIlpo Järvinen 
2370fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
23711da177e4SLinus Torvalds 
2372058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
23731a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
23741da177e4SLinus Torvalds 
237552d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
237652d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
23771da177e4SLinus Torvalds 
237884fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
23791da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
23801da177e4SLinus Torvalds 
23811da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
23821da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
23831da177e4SLinus Torvalds 
2384e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
23854de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
23861da177e4SLinus Torvalds 
23871da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
23881da177e4SLinus Torvalds 	 * packet counting does not break.
23891da177e4SLinus Torvalds 	 */
23904828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2391b7689205SIlpo Järvinen 
2392b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2393ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2394ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2395ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2396b7689205SIlpo Järvinen 
2397797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2398797108d1SIlpo Järvinen 
23993ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
24001da177e4SLinus Torvalds }
24011da177e4SLinus Torvalds 
240267edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2403a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
24044a17fc3aSIlpo Järvinen {
24054a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2406a2a385d6SEric Dumazet 		return false;
24074a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
24084a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2409a2a385d6SEric Dumazet 		return false;
24104a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2411a2a385d6SEric Dumazet 		return false;
24124a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2413a2a385d6SEric Dumazet 		return false;
24144a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd data could be invented */
24154a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2416a2a385d6SEric Dumazet 		return false;
24174a17fc3aSIlpo Järvinen 
2418a2a385d6SEric Dumazet 	return true;
24194a17fc3aSIlpo Järvinen }
24204a17fc3aSIlpo Järvinen 
242167edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create fewer
242267edfef7SAndi Kleen  * packets on the wire.  This is only done on retransmission.
242367edfef7SAndi Kleen  */
24244a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
24254a17fc3aSIlpo Järvinen 				     int space)
24264a17fc3aSIlpo Järvinen {
24274a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
24284a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2429a2a385d6SEric Dumazet 	bool first = true;
24304a17fc3aSIlpo Järvinen 
24314a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
24324a17fc3aSIlpo Järvinen 		return;
24334de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
24344a17fc3aSIlpo Järvinen 		return;
24354a17fc3aSIlpo Järvinen 
24364a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
24374a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
24384a17fc3aSIlpo Järvinen 			break;
24394a17fc3aSIlpo Järvinen 
24404a17fc3aSIlpo Järvinen 		space -= skb->len;
24414a17fc3aSIlpo Järvinen 
24424a17fc3aSIlpo Järvinen 		if (first) {
2443a2a385d6SEric Dumazet 			first = false;
24444a17fc3aSIlpo Järvinen 			continue;
24454a17fc3aSIlpo Järvinen 		}
24464a17fc3aSIlpo Järvinen 
24474a17fc3aSIlpo Järvinen 		if (space < 0)
24484a17fc3aSIlpo Järvinen 			break;
24494a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
24504a17fc3aSIlpo Järvinen 		 * the data in the second
24514a17fc3aSIlpo Järvinen 		 */
2452a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
24534a17fc3aSIlpo Järvinen 			break;
24544a17fc3aSIlpo Järvinen 
24554a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
24564a17fc3aSIlpo Järvinen 			break;
24574a17fc3aSIlpo Järvinen 
24584a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
24594a17fc3aSIlpo Järvinen 	}
24604a17fc3aSIlpo Järvinen }
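/* A worked example (illustrative numbers) of the byte budget in
 * tcp_retrans_try_collapse(), which is entered with space = cur_mss from
 * __tcp_retransmit_skb().  The head skb is charged against the budget but
 * is never merged away itself.
 */
#include <stdio.h>

int main(void)
{
	int lens[] = { 600, 500, 500 };	/* head skb plus two followers */
	int space = 1460;		/* budget: cur_mss */
	int merged = lens[0];
	int i;

	for (i = 0; i < 3; i++) {
		space -= lens[i];
		if (i == 0)
			continue;	/* skip the head skb itself */
		if (space < 0)
			break;		/* second follower would overflow */
		merged += lens[i];	/* first follower is collapsed in */
	}
	printf("collapsed head is now %d bytes\n", merged);	/* 1100 */
	return 0;
}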
24614a17fc3aSIlpo Järvinen 
24621da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
24631da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
24641da177e4SLinus Torvalds  * error occurred which prevented the send.
24651da177e4SLinus Torvalds  */
246693b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
24671da177e4SLinus Torvalds {
24681da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24695d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
24707d227cd2SSridhar Samudrala 	unsigned int cur_mss;
2471c84a5711SYuchung Cheng 	int err;
24721da177e4SLinus Torvalds 
24735d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
24745d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
24755d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
24765d424d5aSJohn Heffner 	}
24775d424d5aSJohn Heffner 
24781da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2479caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
24801da177e4SLinus Torvalds 	 */
24811da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
24821da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
24831da177e4SLinus Torvalds 		return -EAGAIN;
24841da177e4SLinus Torvalds 
24851f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
24861f3279aeSEric Dumazet 		return -EBUSY;
24871f3279aeSEric Dumazet 
24881da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
24891da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
24901da177e4SLinus Torvalds 			BUG();
24911da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
24921da177e4SLinus Torvalds 			return -ENOMEM;
24931da177e4SLinus Torvalds 	}
24941da177e4SLinus Torvalds 
24957d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
24967d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
24977d227cd2SSridhar Samudrala 
24980c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
24997d227cd2SSridhar Samudrala 
25001da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
25011da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
25021da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
25031da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
25041da177e4SLinus Torvalds 	 */
25059d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
25069d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
25071da177e4SLinus Torvalds 		return -EAGAIN;
25081da177e4SLinus Torvalds 
25091da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
25106cc55e09SOctavian Purdila 		if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
25111da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
251202276f3cSIlpo Järvinen 	} else {
25139eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
25149eb9362eSIlpo Järvinen 
25159eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
2516c52e2421SEric Dumazet 			if (skb_unclone(skb, GFP_ATOMIC))
2517c52e2421SEric Dumazet 				return -ENOMEM;
251802276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
25199eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
25209eb9362eSIlpo Järvinen 		}
25211da177e4SLinus Torvalds 	}
25221da177e4SLinus Torvalds 
25231da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds 	/* Make a copy if the first transmission SKB clone we made
25261da177e4SLinus Torvalds 	 * is still in somebody's hands; else make a clone.
25271da177e4SLinus Torvalds 	 */
25281da177e4SLinus Torvalds 
252950bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
253050bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
253150bceae9SThomas Graf 	 * beyond what csum_start can cover.
253250bceae9SThomas Graf 	 */
253350bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
253450bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
2535117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2536117632e6SEric Dumazet 						   GFP_ATOMIC);
2537c84a5711SYuchung Cheng 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2538117632e6SEric Dumazet 			     -ENOBUFS;
2539117632e6SEric Dumazet 	} else {
2540c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2541117632e6SEric Dumazet 	}
2542c84a5711SYuchung Cheng 
2543fc9f3501SEric Dumazet 	if (likely(!err)) {
2544c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2545fc9f3501SEric Dumazet 		/* Update global TCP statistics. */
2546fc9f3501SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2547fc9f3501SEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2548fc9f3501SEric Dumazet 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2549fc9f3501SEric Dumazet 		tp->total_retrans++;
2550fc9f3501SEric Dumazet 	}
2551c84a5711SYuchung Cheng 	return err;
255293b174adSYuchung Cheng }
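/* An illustration (made-up numbers, not part of this file) of the -EAGAIN
 * guard at the top of __tcp_retransmit_skb(): retransmits proceed while
 * allocated write memory stays within queued bytes plus a 1/4 reserve for
 * copying overhead, capped by sk_sndbuf.
 */
#include <stdio.h>

static int retrans_allowed(unsigned int wmem_alloc,
			   unsigned int wmem_queued,
			   unsigned int sndbuf)
{
	unsigned int lim = wmem_queued + (wmem_queued >> 2);

	if (lim > sndbuf)
		lim = sndbuf;
	return wmem_alloc <= lim;
}

int main(void)
{
	/* 100 KB queued, 110 KB allocated: within the 125 KB reserve */
	printf("%d\n", retrans_allowed(112640, 102400, 262144));  /* 1 */
	/* 130 KB allocated: over the reserve, caller gets -EAGAIN */
	printf("%d\n", retrans_allowed(133120, 102400, 262144));  /* 0 */
	return 0;
}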
255393b174adSYuchung Cheng 
255493b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
255593b174adSYuchung Cheng {
255693b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
255793b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
25581da177e4SLinus Torvalds 
25591da177e4SLinus Torvalds 	if (err == 0) {
25601da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
25611da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2562e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
25631da177e4SLinus Torvalds 		}
25641da177e4SLinus Torvalds #endif
2565b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2566b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
25671da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
25681da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
25691da177e4SLinus Torvalds 
25701da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
25711da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
25727faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
25731da177e4SLinus Torvalds 
25741da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
25751da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
25761da177e4SLinus Torvalds 		 */
25771da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
25781f3279aeSEric Dumazet 	} else if (err != -EBUSY) {
257924ab6becSYuchung Cheng 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
25801da177e4SLinus Torvalds 	}
25816e08d5e3SYuchung Cheng 
25826e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
25836e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
25846e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
25851da177e4SLinus Torvalds 	return err;
25861da177e4SLinus Torvalds }
25871da177e4SLinus Torvalds 
258867edfef7SAndi Kleen /* Check whether forward retransmits are possible in the current
258967edfef7SAndi Kleen  * window/congestion state.
259067edfef7SAndi Kleen  */
2591a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2592b5afe7bcSIlpo Järvinen {
2593b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2594cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2595b5afe7bcSIlpo Järvinen 
2596b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2597b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2598a2a385d6SEric Dumazet 		return false;
2599b5afe7bcSIlpo Järvinen 
2600b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2601b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2602a2a385d6SEric Dumazet 		return false;
2603b5afe7bcSIlpo Järvinen 
2604b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward transmission
2605b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2606b5afe7bcSIlpo Järvinen 	 *
2607b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2608b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2609b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2610b5afe7bcSIlpo Järvinen 	 */
2611b5afe7bcSIlpo Järvinen 
2612b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2613a2a385d6SEric Dumazet 		return false;
2614b5afe7bcSIlpo Järvinen 
2615a2a385d6SEric Dumazet 	return true;
2616b5afe7bcSIlpo Järvinen }
2617b5afe7bcSIlpo Järvinen 
26181da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
26191da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
26201da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
26211da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
26221da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
26231da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
26241da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
26251da177e4SLinus Torvalds  */
26261da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
26271da177e4SLinus Torvalds {
26286687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
26291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
26301da177e4SLinus Torvalds 	struct sk_buff *skb;
26310e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2632618d9f25SIlpo Järvinen 	u32 last_lost;
263361eb55f4SIlpo Järvinen 	int mib_idx;
26340e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
26356a438bbeSStephen Hemminger 
263645e77d31SIlpo Järvinen 	if (!tp->packets_out)
263745e77d31SIlpo Järvinen 		return;
263845e77d31SIlpo Järvinen 
263908ebd172SIlpo Järvinen 	if (!tp->lost_out)
264008ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
264108ebd172SIlpo Järvinen 
2642618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
26436a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2644618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2645618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2646618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2647618d9f25SIlpo Järvinen 	} else {
2648fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2649618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2650618d9f25SIlpo Järvinen 	}
26511da177e4SLinus Torvalds 
2652fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
26531da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
26541da177e4SLinus Torvalds 
2655fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2656fe067e8aSDavid S. Miller 			break;
26576a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
26580e1c54c2SIlpo Järvinen 		if (hole == NULL)
26596a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
26606a438bbeSStephen Hemminger 
26611da177e4SLinus Torvalds 		/* Assume this retransmit will generate
26621da177e4SLinus Torvalds 		 * only one packet for congestion window
26631da177e4SLinus Torvalds 		 * calculation purposes.  This works because
26641da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
26651da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
26661da177e4SLinus Torvalds 		 * packet counting works out.
26671da177e4SLinus Torvalds 		 */
26681da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
26691da177e4SLinus Torvalds 			return;
26700e1c54c2SIlpo Järvinen 
26710e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
26720e1c54c2SIlpo Järvinen begin_fwd:
26730e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2674006f582cSIlpo Järvinen 				break;
26750e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
26760e1c54c2SIlpo Järvinen 
26770e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2678618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
26790e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
26800e1c54c2SIlpo Järvinen 				break;
26810e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
26820e1c54c2SIlpo Järvinen 			if (hole != NULL) {
26830e1c54c2SIlpo Järvinen 				skb = hole;
26840e1c54c2SIlpo Järvinen 				hole = NULL;
26850e1c54c2SIlpo Järvinen 			}
26860e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
26870e1c54c2SIlpo Järvinen 			goto begin_fwd;
26880e1c54c2SIlpo Järvinen 
26890e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2690ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
26910e1c54c2SIlpo Järvinen 				hole = skb;
269261eb55f4SIlpo Järvinen 			continue;
26931da177e4SLinus Torvalds 
26940e1c54c2SIlpo Järvinen 		} else {
2695618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
26960e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
26970e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
26980e1c54c2SIlpo Järvinen 			else
26990e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
27000e1c54c2SIlpo Järvinen 		}
27010e1c54c2SIlpo Järvinen 
27020e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
270361eb55f4SIlpo Järvinen 			continue;
270440b215e5SPavel Emelyanov 
270524ab6becSYuchung Cheng 		if (tcp_retransmit_skb(sk, skb))
27061da177e4SLinus Torvalds 			return;
270724ab6becSYuchung Cheng 
2708de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
27091da177e4SLinus Torvalds 
2710684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2711a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2712a262f0cdSNandita Dukkipati 
2713fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2714463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
27153f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
27163f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
27171da177e4SLinus Torvalds 	}
27181da177e4SLinus Torvalds }
27191da177e4SLinus Torvalds 
27201da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This cannot be
27211da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
27221da177e4SLinus Torvalds  */
27231da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
27241da177e4SLinus Torvalds {
27251da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2726fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
27271da177e4SLinus Torvalds 	int mss_now;
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds 	/* Optimization: tack on the FIN if we have a queue of
27301da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
27311da177e4SLinus Torvalds 	 * and IP options.
27321da177e4SLinus Torvalds 	 */
27330c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
27341da177e4SLinus Torvalds 
2735fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
27364de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
27371da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
27381da177e4SLinus Torvalds 		tp->write_seq++;
27391da177e4SLinus Torvalds 	} else {
27401da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
27411da177e4SLinus Torvalds 		for (;;) {
2742aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2743aa133076SWu Fengguang 					       sk->sk_allocation);
27441da177e4SLinus Torvalds 			if (skb)
27451da177e4SLinus Torvalds 				break;
27461da177e4SLinus Torvalds 			yield();
27471da177e4SLinus Torvalds 		}
27481da177e4SLinus Torvalds 
27491da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
27501da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
27511da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2752e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2753a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
27541da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
27551da177e4SLinus Torvalds 	}
27569e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
27571da177e4SLinus Torvalds }
27581da177e4SLinus Torvalds 
27591da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
27601da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
27611da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
276265bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
27631da177e4SLinus Torvalds  */
2764dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds 	struct sk_buff *skb;
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
27691da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
27701da177e4SLinus Torvalds 	if (!skb) {
27714e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
27721da177e4SLinus Torvalds 		return;
27731da177e4SLinus Torvalds 	}
27741da177e4SLinus Torvalds 
27751da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
27761da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2777e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2778a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
27791da177e4SLinus Torvalds 	/* Send it off. */
2780dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
27814e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
278226af65cbSSridhar Samudrala 
278381cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
27841da177e4SLinus Torvalds }
27851da177e4SLinus Torvalds 
278667edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
278767edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
27881da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
27891da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
27901da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
27911da177e4SLinus Torvalds  */
27921da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
27931da177e4SLinus Torvalds {
27941da177e4SLinus Torvalds 	struct sk_buff *skb;
27951da177e4SLinus Torvalds 
2796fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
27974de075e0SEric Dumazet 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
279891df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
27991da177e4SLinus Torvalds 		return -EFAULT;
28001da177e4SLinus Torvalds 	}
28014de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
28021da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
28031da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
28041da177e4SLinus Torvalds 			if (nskb == NULL)
28051da177e4SLinus Torvalds 				return -ENOMEM;
2806fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
2807f4a775d1SEric Dumazet 			__skb_header_release(nskb);
2808fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
28093ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
28103ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
28113ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
28121da177e4SLinus Torvalds 			skb = nskb;
28131da177e4SLinus Torvalds 		}
28141da177e4SLinus Torvalds 
28154de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2816735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
28171da177e4SLinus Torvalds 	}
2818dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
28191da177e4SLinus Torvalds }
28201da177e4SLinus Torvalds 
28214aea39c1SEric Dumazet /**
28224aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
28234aea39c1SEric Dumazet  * @sk: listener socket
28244aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
28254aea39c1SEric Dumazet  * @req: request_sock pointer
28264aea39c1SEric Dumazet  *
28274aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
28284aea39c1SEric Dumazet  * @dst is consumed : Caller should not use it again.
28294aea39c1SEric Dumazet  */
28301da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2831e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
28328336886fSJerry Chu 				struct tcp_fastopen_cookie *foc)
28331da177e4SLinus Torvalds {
2834bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
28352e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
28361da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
28371da177e4SLinus Torvalds 	struct tcphdr *th;
28381da177e4SLinus Torvalds 	struct sk_buff *skb;
2839cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2840bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2841f5fff5dcSTom Quetchenbach 	int mss;
28421da177e4SLinus Torvalds 
2843a0b8486cSEric Dumazet 	skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
28444aea39c1SEric Dumazet 	if (unlikely(!skb)) {
28454aea39c1SEric Dumazet 		dst_release(dst);
28461da177e4SLinus Torvalds 		return NULL;
28474aea39c1SEric Dumazet 	}
28481da177e4SLinus Torvalds 	/* Reserve space for headers. */
28491da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
28501da177e4SLinus Torvalds 
28514aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
2852ca10b9e9SEric Dumazet 	security_skb_owned_by(skb, sk);
28531da177e4SLinus Torvalds 
28540dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
2855f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2856f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2857f5fff5dcSTom Quetchenbach 
285833ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
28598b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
28608b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
28617faee5c0SEric Dumazet 		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
28628b5f12d0SFlorian Westphal 	else
28638b5f12d0SFlorian Westphal #endif
28647faee5c0SEric Dumazet 	skb_mstamp_get(&skb->skb_mstamp);
28651a2c6181SChristoph Paasch 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
28661a2c6181SChristoph Paasch 					     foc) + sizeof(*th);
286733ad798cSAdam Langley 
2868aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2869aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
28701da177e4SLinus Torvalds 
2871aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
28721da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
28731da177e4SLinus Torvalds 	th->syn = 1;
28741da177e4SLinus Torvalds 	th->ack = 1;
2875735d3831SFlorian Westphal 	tcp_ecn_make_synack(req, th, sk);
2876b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
2877634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
2878e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
2879e870a8efSIlpo Järvinen 	 * not even correctly set)
2880e870a8efSIlpo Järvinen 	 */
2881e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2882a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
28834957faadSWilliam Allen Simpson 
28841da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
28858336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
28868336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
28871da177e4SLinus Torvalds 
28881da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2889600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2890bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
28911da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
2892a0b8486cSEric Dumazet 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
2893cfb6eeb4SYOSHIFUJI Hideaki 
2894cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2895cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2896cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2897bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
289849a72dfbSAdam Langley 					       md5, NULL, req, skb);
2899cfb6eeb4SYOSHIFUJI Hideaki 	}
2900cfb6eeb4SYOSHIFUJI Hideaki #endif
2901cfb6eeb4SYOSHIFUJI Hideaki 
29021da177e4SLinus Torvalds 	return skb;
29031da177e4SLinus Torvalds }
29044bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
29051da177e4SLinus Torvalds 
290667edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
2907f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
29081da177e4SLinus Torvalds {
2909cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
29101da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
29111da177e4SLinus Torvalds 	__u8 rcv_wscale;
29121da177e4SLinus Torvalds 
29131da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
29141da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
29151da177e4SLinus Torvalds 	 */
29161da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2917bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
29181da177e4SLinus Torvalds 
2919cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2920cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2921cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2922cfb6eeb4SYOSHIFUJI Hideaki #endif
2923cfb6eeb4SYOSHIFUJI Hideaki 
29241da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
29251da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
29261da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
29271da177e4SLinus Torvalds 	tp->max_window = 0;
29285d424d5aSJohn Heffner 	tcp_mtup_init(sk);
29291da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
29301da177e4SLinus Torvalds 
29311da177e4SLinus Torvalds 	if (!tp->window_clamp)
29321da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
29330dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
2934f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2935f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2936f5fff5dcSTom Quetchenbach 
29371da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
29381da177e4SLinus Torvalds 
2939e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
2940e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2941e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2942e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
2943e88c64f0SHagen Paul Pfeifer 
29441da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
29451da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
29461da177e4SLinus Torvalds 				  &tp->rcv_wnd,
29471da177e4SLinus Torvalds 				  &tp->window_clamp,
2948bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
294931d12926Slaurent chavey 				  &rcv_wscale,
295031d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
29511da177e4SLinus Torvalds 
29521da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
29531da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
29541da177e4SLinus Torvalds 
29551da177e4SLinus Torvalds 	sk->sk_err = 0;
29561da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
29571da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2958ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
29591da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
29601da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
296133f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
2962370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
2963ee995283SPavel Emelyanov 
2964ee995283SPavel Emelyanov 	if (likely(!tp->repair))
29651da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
2966c7781a6eSAndrew Vagin 	else
2967c7781a6eSAndrew Vagin 		tp->rcv_tstamp = tcp_time_stamp;
2968ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
2969ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
29701da177e4SLinus Torvalds 
2971463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2972463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
29731da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
29741da177e4SLinus Torvalds }
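/* Worked numbers (not part of this file) for the header-length setup in
 * tcp_connect_init(), using the aligned option sizes from the TCP headers:
 * TCPOLEN_TSTAMP_ALIGNED is 12 and TCPOLEN_MD5SIG_ALIGNED is 20.
 */
#include <stdio.h>

#define TCPHDR_BYTES		20	/* sizeof(struct tcphdr) */
#define TSTAMP_ALIGNED_BYTES	12	/* TCPOLEN_TSTAMP_ALIGNED */
#define MD5SIG_ALIGNED_BYTES	20	/* TCPOLEN_MD5SIG_ALIGNED */

int main(void)
{
	printf("plain SYN header     : %d\n", TCPHDR_BYTES);
	printf("+ timestamps         : %d\n",
	       TCPHDR_BYTES + TSTAMP_ALIGNED_BYTES);
	printf("+ timestamps and MD5 : %d\n",
	       TCPHDR_BYTES + TSTAMP_ALIGNED_BYTES + MD5SIG_ALIGNED_BYTES);
	return 0;
}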
29751da177e4SLinus Torvalds 
2976783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2977783237e8SYuchung Cheng {
2978783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2979783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2980783237e8SYuchung Cheng 
2981783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
2982f4a775d1SEric Dumazet 	__skb_header_release(skb);
2983783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
2984783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
2985783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
2986783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
2987783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
2988783237e8SYuchung Cheng }
2989783237e8SYuchung Cheng 
2990783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2991783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
2992783237e8SYuchung Cheng  * are retransmitted on timeouts.  Also, if the remote SYN-ACK acknowledges
2993783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
2994783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
2995783237e8SYuchung Cheng  * a regular SYN with the Fast Open cookie request option.
2996783237e8SYuchung Cheng  */
2997783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2998783237e8SYuchung Cheng {
2999783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3000783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3001aab48743SYuchung Cheng 	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
3002783237e8SYuchung Cheng 	struct sk_buff *syn_data = NULL, *data;
3003aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
3004783237e8SYuchung Cheng 
300567da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3006aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
3007aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
3008aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
3009aab48743SYuchung Cheng 	if (syn_loss > 1 &&
3010aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
3011aab48743SYuchung Cheng 		fo->cookie.len = -1;
3012aab48743SYuchung Cheng 		goto fallback;
3013aab48743SYuchung Cheng 	}
3014aab48743SYuchung Cheng 
301567da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
301667da22d2SYuchung Cheng 		fo->cookie.len = -1;
301767da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
3018783237e8SYuchung Cheng 		goto fallback;
3019783237e8SYuchung Cheng 
3020783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3021783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3022783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3023783237e8SYuchung Cheng 	 */
3024783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
3025783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
30261b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3027783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3028783237e8SYuchung Cheng 
3029f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3030f5ddcbbbSEric Dumazet 
3031f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3032f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3033f5ddcbbbSEric Dumazet 
3034f5ddcbbbSEric Dumazet 	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
3035783237e8SYuchung Cheng 				   sk->sk_allocation);
3036783237e8SYuchung Cheng 	if (syn_data == NULL)
3037783237e8SYuchung Cheng 		goto fallback;
3038783237e8SYuchung Cheng 
3039783237e8SYuchung Cheng 	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
3040783237e8SYuchung Cheng 		struct iovec *iov = &fo->data->msg_iov[i];
3041783237e8SYuchung Cheng 		unsigned char __user *from = iov->iov_base;
3042783237e8SYuchung Cheng 		int len = iov->iov_len;
3043783237e8SYuchung Cheng 
3044783237e8SYuchung Cheng 		if (syn_data->len + len > space)
3045783237e8SYuchung Cheng 			len = space - syn_data->len;
3046783237e8SYuchung Cheng 		else if (i + 1 == iovlen)
3047783237e8SYuchung Cheng 			/* No more data pending in inet_wait_for_connect() */
3048783237e8SYuchung Cheng 			fo->data = NULL;
3049783237e8SYuchung Cheng 
3050783237e8SYuchung Cheng 		if (skb_add_data(syn_data, from, len))
3051783237e8SYuchung Cheng 			goto fallback;
3052783237e8SYuchung Cheng 	}
3053783237e8SYuchung Cheng 
3054783237e8SYuchung Cheng 	/* Queue a data-only packet after the regular SYN for retransmission */
3055783237e8SYuchung Cheng 	data = pskb_copy(syn_data, sk->sk_allocation);
3056783237e8SYuchung Cheng 	if (data == NULL)
3057783237e8SYuchung Cheng 		goto fallback;
3058783237e8SYuchung Cheng 	TCP_SKB_CB(data)->seq++;
3060783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
3061783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, data);
3062783237e8SYuchung Cheng 	fo->copied = data->len;
3063783237e8SYuchung Cheng 
3064431a9124SEric Dumazet 	/* syn_data is about to be sent; take current timestamps for the
3065431a9124SEric Dumazet 	 * packets in the write queue: the SYN packet and the DATA packet.
3066431a9124SEric Dumazet 	 */
3067431a9124SEric Dumazet 	skb_mstamp_get(&syn->skb_mstamp);
3068431a9124SEric Dumazet 	data->skb_mstamp = syn->skb_mstamp;
3069431a9124SEric Dumazet 
3070783237e8SYuchung Cheng 	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
307167da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
3072f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3073783237e8SYuchung Cheng 		goto done;
3074783237e8SYuchung Cheng 	}
3075783237e8SYuchung Cheng 	syn_data = NULL;
3076783237e8SYuchung Cheng 
3077783237e8SYuchung Cheng fallback:
3078783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3079783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3080783237e8SYuchung Cheng 		fo->cookie.len = 0;
3081783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3082783237e8SYuchung Cheng 	if (err)
3083783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3084783237e8SYuchung Cheng 	kfree_skb(syn_data);
3085783237e8SYuchung Cheng done:
3086783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3087783237e8SYuchung Cheng 	return err;
3088783237e8SYuchung Cheng }
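
/* Sketch of the client-side trigger for the path above: userspace hands the
 * SYN payload to sendmsg()/sendto() with MSG_FASTOPEN, which stores it in
 * tp->fastopen_req and starts the connect, so tcp_connect() takes the
 * tcp_send_syn_data() branch. fd/daddr setup is assumed, and the client bit
 * of the tcp_fastopen sysctl must be enabled:
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 */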
3089783237e8SYuchung Cheng 
309067edfef7SAndi Kleen /* Build a SYN and send it off. */
30911da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
30921da177e4SLinus Torvalds {
30931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30941da177e4SLinus Torvalds 	struct sk_buff *buff;
3095ee586811SEric Paris 	int err;
30961da177e4SLinus Torvalds 
30971da177e4SLinus Torvalds 	tcp_connect_init(sk);
30981da177e4SLinus Torvalds 
30992b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
31002b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
31012b916477SAndrey Vagin 		return 0;
31022b916477SAndrey Vagin 	}
31032b916477SAndrey Vagin 
3104d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
31051da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
31061da177e4SLinus Torvalds 		return -ENOBUFS;
31071da177e4SLinus Torvalds 
31081da177e4SLinus Torvalds 	/* Reserve space for headers. */
31091da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
31101da177e4SLinus Torvalds 
3111a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
31127faee5c0SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp;
3113783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3114735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
31151da177e4SLinus Torvalds 
3116783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3117783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3118783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3119ee586811SEric Paris 	if (err == -ECONNREFUSED)
3120ee586811SEric Paris 		return err;
3121bd37a088SWei Yongjun 
3122bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3123bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3124bd37a088SWei Yongjun 	 */
3125bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3126bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
312781cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
31281da177e4SLinus Torvalds 
31291da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer arrives. */
31303f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
31313f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
31321da177e4SLinus Torvalds 	return 0;
31331da177e4SLinus Torvalds }
31344bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
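
/* Sketch of the tp->repair short-circuit in tcp_connect(): checkpoint/restore
 * tooling puts the socket into repair mode first, so connect() returns
 * without emitting a SYN and the socket moves straight to the established
 * state (illustrative userspace fragment, fd/daddr setup assumed):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 */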
31351da177e4SLinus Torvalds 
31361da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
31371da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
31381da177e4SLinus Torvalds  * for details.
31391da177e4SLinus Torvalds  */
31401da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
31411da177e4SLinus Torvalds {
3142463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3143463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
31441da177e4SLinus Torvalds 	unsigned long timeout;
31451da177e4SLinus Torvalds 
31469890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
31479890092eSFlorian Westphal 
31481da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3149463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
31501da177e4SLinus Torvalds 		int max_ato = HZ / 2;
31511da177e4SLinus Torvalds 
3152056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3153056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
31541da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
31551da177e4SLinus Torvalds 
31561da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
31571da177e4SLinus Torvalds 
31581da177e4SLinus Torvalds 		/* If some RTT estimate is known, use it to bound the delayed ack.
3159463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of RTT
31601da177e4SLinus Torvalds 		 * measurements directly.
31611da177e4SLinus Torvalds 		 */
3162740b0f18SEric Dumazet 		if (tp->srtt_us) {
3163740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3164740b0f18SEric Dumazet 					TCP_DELACK_MIN);
31651da177e4SLinus Torvalds 
31661da177e4SLinus Torvalds 			if (rtt < max_ato)
31671da177e4SLinus Torvalds 				max_ato = rtt;
31681da177e4SLinus Torvalds 		}
31691da177e4SLinus Torvalds 
31701da177e4SLinus Torvalds 		ato = min(ato, max_ato);
31711da177e4SLinus Torvalds 	}
31721da177e4SLinus Torvalds 
31731da177e4SLinus Torvalds 	/* Stay within the limit we were given */
31741da177e4SLinus Torvalds 	timeout = jiffies + ato;
31751da177e4SLinus Torvalds 
31761da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already. */
3177463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
31781da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
31791da177e4SLinus Torvalds 		 * send ACK now.
31801da177e4SLinus Torvalds 		 */
3181463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3182463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
31831da177e4SLinus Torvalds 			tcp_send_ack(sk);
31841da177e4SLinus Torvalds 			return;
31851da177e4SLinus Torvalds 		}
31861da177e4SLinus Torvalds 
3187463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3188463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
31891da177e4SLinus Torvalds 	}
3190463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3191463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3192463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
31931da177e4SLinus Torvalds }
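
/* Worked example of the clamping above, assuming HZ == 1000: with a 50 ms
 * smoothed RTT, tp->srtt_us >> 3 is 50000 us, i.e. 50 jiffies. Even in
 * pingpong mode, where max_ato starts at TCP_DELACK_MAX (200 ms), the
 * delayed ACK is then bounded by the measured RTT:
 *
 *	rtt     = max(50, TCP_DELACK_MIN) = 50
 *	max_ato = 50
 *	ato     = min(ato, 50)
 *	timeout = jiffies + ato
 */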
31941da177e4SLinus Torvalds 
31951da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
31961da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
31971da177e4SLinus Torvalds {
31981da177e4SLinus Torvalds 	struct sk_buff *buff;
31991da177e4SLinus Torvalds 
3200058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3201058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3202058dc334SIlpo Järvinen 		return;
3203058dc334SIlpo Järvinen 
32049890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
32059890092eSFlorian Westphal 
32061da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
32071da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
32081da177e4SLinus Torvalds 	 * sock.
32091da177e4SLinus Torvalds 	 */
321099a1dec7SMel Gorman 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
32111da177e4SLinus Torvalds 	if (buff == NULL) {
3212463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3213463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
32143f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
32153f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
32161da177e4SLinus Torvalds 		return;
32171da177e4SLinus Torvalds 	}
32181da177e4SLinus Torvalds 
32191da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
32201da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3221a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
32221da177e4SLinus Torvalds 
32231da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
32247faee5c0SEric Dumazet 	skb_mstamp_get(&buff->skb_mstamp);
322599a1dec7SMel Gorman 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
32261da177e4SLinus Torvalds }
3227e3118e83SDaniel Borkmann EXPORT_SYMBOL_GPL(tcp_send_ack);
32281da177e4SLinus Torvalds 
32291da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
32301da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
32311da177e4SLinus Torvalds  *
32321da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
32331da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
32341da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
32351da177e4SLinus Torvalds  *
32361da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
32371da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
32381da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
32391da177e4SLinus Torvalds  */
32401da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
32411da177e4SLinus Torvalds {
32421da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32431da177e4SLinus Torvalds 	struct sk_buff *skb;
32441da177e4SLinus Torvalds 
32451da177e4SLinus Torvalds 	/* We don't queue it; tcp_transmit_skb() sets ownership. */
324699a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
32471da177e4SLinus Torvalds 	if (skb == NULL)
32481da177e4SLinus Torvalds 		return -1;
32491da177e4SLinus Torvalds 
32501da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
32511da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32521da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
32531da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone the SKB; just
32541da177e4SLinus Torvalds 	 * send it.
32551da177e4SLinus Torvalds 	 */
3256a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
32577faee5c0SEric Dumazet 	skb_mstamp_get(&skb->skb_mstamp);
3258dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
32591da177e4SLinus Torvalds }
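
/* Worked instance of the sequence selection above (tp->snd_una - !urgent):
 *
 *	urgent == 1:  SEG.SEQ = SND.UNA      delivers the urgent pointer
 *	urgent == 0:  SEG.SEQ = SND.UNA - 1  covers already-acked data, so
 *	                                     the peer replies with a bare ACK
 *	                                     advertising its current window
 */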
32601da177e4SLinus Torvalds 
3261ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3262ee995283SPavel Emelyanov {
3263ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3264ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3265ee995283SPavel Emelyanov 		tcp_xmit_probe_skb(sk, 0);
3266ee995283SPavel Emelyanov 	}
3267ee995283SPavel Emelyanov }
3268ee995283SPavel Emelyanov 
326967edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
32701da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
32711da177e4SLinus Torvalds {
32721da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32731da177e4SLinus Torvalds 	struct sk_buff *skb;
32741da177e4SLinus Torvalds 
3275058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3276058dc334SIlpo Järvinen 		return -1;
3277058dc334SIlpo Järvinen 
3278fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
327990840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
32801da177e4SLinus Torvalds 		int err;
32810c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
328290840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
32831da177e4SLinus Torvalds 
32841da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
32851da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
32861da177e4SLinus Torvalds 
32871da177e4SLinus Torvalds 		/* We are probing the opening of a window
32881da177e4SLinus Torvalds 		 * but the window size is != 0;
32891da177e4SLinus Torvalds 		 * this must have been the result of (sender-side) SWS avoidance.
32901da177e4SLinus Torvalds 		 */
32911da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
32921da177e4SLinus Torvalds 		    skb->len > mss) {
32931da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
32944de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
32956cc55e09SOctavian Purdila 			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
32961da177e4SLinus Torvalds 				return -1;
32971da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
3298846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
32991da177e4SLinus Torvalds 
33004de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3301dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
330266f5fe62SIlpo Järvinen 		if (!err)
330366f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
33041da177e4SLinus Torvalds 		return err;
33051da177e4SLinus Torvalds 	} else {
330633f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
33074828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
33081da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
33091da177e4SLinus Torvalds 	}
33101da177e4SLinus Torvalds }
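
/* Worked example for the probe split above, assuming mss == 1460 and a
 * window with only 536 bytes of room: seg_size becomes 536, the head skb is
 * larger, so tcp_fragment() carves off a 536-byte piece that is sent with
 * PSH while the remainder waits for the window to open further.
 */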
33111da177e4SLinus Torvalds 
33121da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
33131da177e4SLinus Torvalds  * a partial packet; otherwise send a zero-window probe.
33141da177e4SLinus Torvalds  */
33151da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
33161da177e4SLinus Torvalds {
3317463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
33181da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3319fcdd1cf4SEric Dumazet 	unsigned long probe_max;
33201da177e4SLinus Torvalds 	int err;
33211da177e4SLinus Torvalds 
33221da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
33231da177e4SLinus Torvalds 
3324fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
33251da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
33266687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3327463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
33281da177e4SLinus Torvalds 		return;
33291da177e4SLinus Torvalds 	}
33301da177e4SLinus Torvalds 
33311da177e4SLinus Torvalds 	if (err <= 0) {
3332463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3333463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
33346687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3335fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
33361da177e4SLinus Torvalds 	} else {
33371da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
33386687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
33391da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
33401da177e4SLinus Torvalds 		 *
33411da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
33421da177e4SLinus Torvalds 		 */
33436687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
33446687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3345fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
33461da177e4SLinus Torvalds 	}
3347fcdd1cf4SEric Dumazet 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3348fcdd1cf4SEric Dumazet 				  inet_csk_rto_backoff(icsk, probe_max),
3349fcdd1cf4SEric Dumazet 				  TCP_RTO_MAX);
33501da177e4SLinus Torvalds }
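
/* The rearm above backs off exponentially: inet_csk_rto_backoff() computes
 * roughly the following, capped at probe_max (sketch for illustration):
 *
 *	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
 *
 *	timeout = (unsigned long)min_t(u64, when, probe_max);
 */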
33515db92c99SOctavian Purdila 
33525db92c99SOctavian Purdila int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
33535db92c99SOctavian Purdila {
33545db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
33555db92c99SOctavian Purdila 	struct flowi fl;
33565db92c99SOctavian Purdila 	int res;
33575db92c99SOctavian Purdila 
33585db92c99SOctavian Purdila 	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
33595db92c99SOctavian Purdila 	if (!res) {
33605db92c99SOctavian Purdila 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
33615db92c99SOctavian Purdila 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
33625db92c99SOctavian Purdila 	}
33635db92c99SOctavian Purdila 	return res;
33645db92c99SOctavian Purdila }
33655db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3366