xref: /linux/net/ipv4/tcp_output.c (revision b3d051477cf94e9d71d6acadb8a90de15237b9c1)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of four TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 262144;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if the window was not shrunk.
 * If the window has been shrunk, what should we send? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND may already be
 * invalid. OK, let's do this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
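
/* Worked example (illustrative numbers, not from this file): with
 * tp->advmss = 1460 and a route whose dst_metric_advmss() returns 1220
 * (e.g. derived from a 1280-byte first-hop MTU minus 40 bytes for the
 * IPv6 + TCP headers), the advertised MSS is lowered to 1220 and the
 * new value is cached back into tp->advmss.
 */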

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
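
/* Worked example (hypothetical values): with icsk_rto equivalent to
 * 200 ms, delta = 650 ms of idle time, snd_cwnd = 40 and a restart
 * window of 10, the loop halves cwnd twice (40 -> 20 -> 10) and then
 * stops because cwnd is no longer above restart_cwnd; the final max()
 * guarantees snd_cwnd never drops below the restart window.
 */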

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If it is a reply within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* The initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
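
/* Worked example: TCP_INIT_CWND is 10, so init_rwnd starts at 20
 * segments. For mss <= 1460 the function returns 20; for a
 * hypothetical jumbo mss = 9000 it returns max((1460 * 20) / 9000, 2U)
 * = max(3, 2) = 3 segments, keeping the window roughly constant in
 * bytes rather than in segments.
 */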

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize the space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
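
/* Worked example of the scaling loop (assuming sysctl_tcp_rmem[2] is
 * 4 MB and no tighter clamp): starting from space = 4194304, the loop
 * halves space until it fits in 16 bits, ending with rcv_wscale = 7
 * (4194304 >> 7 = 32768 <= 65535). A minimal caller sketch, with
 * illustrative local variables:
 *
 *	__u32 rcv_wnd, window_clamp = 0;
 *	__u8 rcv_wscale;
 *
 *	tcp_select_initial_window(tcp_full_space(sk), mss, &rcv_wnd,
 *				  &window_clamp, 1, &rcv_wscale, 0);
 */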

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
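
/* Worked example of the no-shrink path (hypothetical values): with
 * rcv_wscale = 7 and cur_win = 12000, a smaller __tcp_select_window()
 * result is discarded and new_win = ALIGN(12000, 128) = 12032, i.e.
 * the current window rounded up to the scale granularity, so the
 * receiver-visible (shifted) window never shrinks.
 */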

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		       tcp_ca_needs_ecn(sk);

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk))
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}
}
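
/* Illustrative layout for a common SYN (MSS + SACK-permitted +
 * timestamps + window scale), as produced by the code above:
 *
 *	word 0: MSS       kind=2 len=4  value		(4 bytes)
 *	word 1: SACK_PERM kind=4 len=2 + TS kind=8 len=10 (TS starts)
 *	words 2-3: tsval, tsecr				(TS continued)
 *	word 4: NOP + WSCALE kind=3 len=3 shift		(4 bytes)
 *
 * 20 option bytes total, giving the familiar 40-byte TCP header; the
 * SACK_PERM kind/len pair is folded into the timestamp word to save
 * the two NOP padding bytes it would otherwise need.
 */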

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  If timestamps are used, the option
	 * bytes which will be seen in normal data packets must be counted
	 * in the MSS advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.  So account for
	 * this fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when
	 * we should, and thus we won't abide by the delayed ACK rules
	 * correctly.  SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
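
/* Space accounting example (typical SYN, no MD5): starting from
 * MAX_TCP_OPTION_SPACE = 40, MSS consumes 4, timestamps 12 and window
 * scale 4 aligned bytes; SACK-permitted rides for free inside the
 * timestamp word. That leaves 20 bytes, enough for e.g. an 8-byte
 * Fast Open cookie (2 + 8, rounded up to 12 aligned bytes).
 */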

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb);
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
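
/* SACK sizing example (no MD5): with timestamps on, size starts at 12,
 * so remaining = 40 - 12 = 28 and at most (28 - 4) / 8 = 3 SACK blocks
 * fit; each block costs 8 bytes on top of the 4-byte aligned base.
 */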

/* TCP SMALL QUEUES (TSQ)
 *
 * The TSQ goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It is important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is skb->truesize subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from the skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
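
/* Flow sketch (summarizing the code below): when a sent skb is freed,
 * tcp_wfree() flips TSQ_THROTTLED to TSQ_QUEUED, links the socket into
 * this cpu's tsq->head and schedules the tasklet; tcp_tasklet_func()
 * then either calls tcp_tsq_handler() -> tcp_write_xmit() directly or,
 * if the socket is owned by user context, defers via TCP_TSQ_DEFERRED
 * so that tcp_release_cb() finishes the job at release_sock() time.
 */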

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
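
/* Example of the deferral in action (hypothetical timing, timer code
 * lives outside this file): if the retransmit timer fires while user
 * context holds the socket lock, the timer sets
 * TCP_WRITE_TIMER_DEFERRED instead of running the handler; the
 * cmpxchg() loop above then atomically claims all pending flags at
 * release_sock() time and runs tcp_write_timer_handler() exactly once,
 * dropping the timer's extra socket reference via __sock_put().
 */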

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	int wmem;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	if (clone_it) {
		skb_mstamp_get(&skb->skb_mstamp);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk_wmem_alloc.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		tcp_ecn_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size) {
		tcp_event_data_sent(tp, sk);
		tp->data_segs_out += tcp_skb_pcount(skb);
	}

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	tp->segs_out += tcp_skb_pcount(skb);
	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

	/* Our usage of tstamp should remain private */
	skb->tstamp.tv64 = 0;

	/* Cleanup our debris for IP stacks */
	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
			       sizeof(struct inet6_skb_parm)));

	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	return net_xmit_eval(err);
}
10361da177e4SLinus Torvalds 
103767edfef7SAndi Kleen /* This routine just queues the buffer for sending.
10381da177e4SLinus Torvalds  *
10391da177e4SLinus Torvalds  * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
10401da177e4SLinus Torvalds  * otherwise the socket can stall.
10411da177e4SLinus Torvalds  */
10421da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
10431da177e4SLinus Torvalds {
10441da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10451da177e4SLinus Torvalds 
10461da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
10471da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1048f4a775d1SEric Dumazet 	__skb_header_release(skb);
1049fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
10503ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
10513ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
10521da177e4SLinus Torvalds }
10531da177e4SLinus Torvalds 
105467edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
10555bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1056f6302d1dSDavid S. Miller {
10578f26fb1cSEric Dumazet 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1058f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1059f6302d1dSDavid S. Miller 		 * non-TSO case.
1060f6302d1dSDavid S. Miller 		 */
1061cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1062f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1063f6302d1dSDavid S. Miller 	} else {
1064cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1065f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
10661da177e4SLinus Torvalds 	}
10671da177e4SLinus Torvalds }
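/* Illustrative userspace sketch (not part of tcp_output.c) of the pcount
 * arithmetic above: a payload is charged as one segment when it fits in a
 * single MSS, else as ceil(len / mss) segments.  The lengths and the MSS
 * of 1448 are invented example values.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mss = 1448;
	unsigned int lens[] = { 1000, 1448, 1449, 4344 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("len %5u -> pcount %u\n", lens[i],
		       lens[i] <= mss ? 1 : DIV_ROUND_UP(lens[i], mss));
	return 0;
}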
10681da177e4SLinus Torvalds 
106991fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
107068f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
107191fed7a1SIlpo Järvinen  */
1072cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
107391fed7a1SIlpo Järvinen 				   int decr)
107491fed7a1SIlpo Järvinen {
1075a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1076a47e5a98SIlpo Järvinen 
1077dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
107891fed7a1SIlpo Järvinen 		return;
107991fed7a1SIlpo Järvinen 
10806859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
108191fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
108291fed7a1SIlpo Järvinen }
108391fed7a1SIlpo Järvinen 
1084797108d1SIlpo Järvinen /* The pcount of an skb in the middle of the write queue got changed; we need
1085797108d1SIlpo Järvinen  * to do various tweaks to fix the counters.
1086797108d1SIlpo Järvinen  */
1087cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1088797108d1SIlpo Järvinen {
1089797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1090797108d1SIlpo Järvinen 
1091797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1092797108d1SIlpo Järvinen 
1093797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1094797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1095797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1096797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1097797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1098797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1099797108d1SIlpo Järvinen 
1100797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1101797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1102797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1103797108d1SIlpo Järvinen 
1104797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1105797108d1SIlpo Järvinen 
1106797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1107797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
110852cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1109797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1110797108d1SIlpo Järvinen 
1111797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1112797108d1SIlpo Järvinen }
1113797108d1SIlpo Järvinen 
1114490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1115490cc7d0SWillem de Bruijn {
1116490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1117490cc7d0SWillem de Bruijn 
1118490cc7d0SWillem de Bruijn 	if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
1119490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1120490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1121490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1122490cc7d0SWillem de Bruijn 
1123490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1124490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1125490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1126490cc7d0SWillem de Bruijn 	}
1127490cc7d0SWillem de Bruijn }
1128490cc7d0SWillem de Bruijn 
11291da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
11301da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
11311da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
11321da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
11331da177e4SLinus Torvalds  */
1134056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
11356cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
11361da177e4SLinus Torvalds {
11371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11381da177e4SLinus Torvalds 	struct sk_buff *buff;
11396475be16SDavid S. Miller 	int nsize, old_factor;
1140b60b49eaSHerbert Xu 	int nlen;
11419ce01461SIlpo Järvinen 	u8 flags;
11421da177e4SLinus Torvalds 
11432fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
11442fceec13SIlpo Järvinen 		return -EINVAL;
11456a438bbeSStephen Hemminger 
11461da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
11471da177e4SLinus Torvalds 	if (nsize < 0)
11481da177e4SLinus Torvalds 		nsize = 0;
11491da177e4SLinus Torvalds 
11506cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
11511da177e4SLinus Torvalds 		return -ENOMEM;
11521da177e4SLinus Torvalds 
11531da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1154eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
115551456b29SIan Morris 	if (!buff)
11561da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1157ef5cb973SHerbert Xu 
11583ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
11593ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1160b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1161b60b49eaSHerbert Xu 	buff->truesize += nlen;
1162b60b49eaSHerbert Xu 	skb->truesize -= nlen;
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
11651da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
11661da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
11671da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
11681da177e4SLinus Torvalds 
11691da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
11704de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
11714de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
11724de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1173e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
11741da177e4SLinus Torvalds 
117584fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
11761da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1177056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1178056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
11791da177e4SLinus Torvalds 						       nsize, 0);
11801da177e4SLinus Torvalds 
11811da177e4SLinus Torvalds 		skb_trim(skb, len);
11821da177e4SLinus Torvalds 
11831da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
11841da177e4SLinus Torvalds 	} else {
118584fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
11861da177e4SLinus Torvalds 		skb_split(skb, buff, len);
11871da177e4SLinus Torvalds 	}
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
11901da177e4SLinus Torvalds 
1191a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1192490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
11931da177e4SLinus Torvalds 
11946475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
11956475be16SDavid S. Miller 
11961da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
11975bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
11985bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
11991da177e4SLinus Torvalds 
12006475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
12016475be16SDavid S. Miller 	 * adjust the various packet counters.
12026475be16SDavid S. Miller 	 */
1203cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
12046475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
12056475be16SDavid S. Miller 			tcp_skb_pcount(buff);
12061da177e4SLinus Torvalds 
1207797108d1SIlpo Järvinen 		if (diff)
1208797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
12091da177e4SLinus Torvalds 	}
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1212f4a775d1SEric Dumazet 	__skb_header_release(buff);
1213fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
12141da177e4SLinus Torvalds 
12151da177e4SLinus Torvalds 	return 0;
12161da177e4SLinus Torvalds }
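/* Illustrative userspace sketch (not part of tcp_output.c) of the sequence
 * bookkeeping tcp_fragment() performs above: after the split, [seq, seq+len)
 * stays in the original skb and [seq+len, end_seq) moves to the new one, and
 * the two ranges abut exactly.  Sequence numbers here are invented.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint32_t seq, end_seq; };

static void split_seg(struct seg *skb, struct seg *buff, uint32_t len)
{
	buff->seq = skb->seq + len;	/* new skb starts at the cut point  */
	buff->end_seq = skb->end_seq;	/* and inherits the old end         */
	skb->end_seq = buff->seq;	/* original skb now ends at the cut */
}

int main(void)
{
	struct seg skb = { 1000, 4000 }, buff;

	split_seg(&skb, &buff, 1200);
	assert(skb.end_seq == buff.seq);
	printf("skb [%u,%u) buff [%u,%u)\n",
	       skb.seq, skb.end_seq, buff.seq, buff.end_seq);
	return 0;
}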
12171da177e4SLinus Torvalds 
12181da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
12191da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied,
12201da177e4SLinus Torvalds  * but immediately discarded.
12211da177e4SLinus Torvalds  */
1222f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
12231da177e4SLinus Torvalds {
12247b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
12251da177e4SLinus Torvalds 	int i, k, eat;
12261da177e4SLinus Torvalds 
12274fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
12284fa48bf3SEric Dumazet 	if (eat) {
12294fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
12304fa48bf3SEric Dumazet 		len -= eat;
12314fa48bf3SEric Dumazet 		if (!len)
12324fa48bf3SEric Dumazet 			return;
12334fa48bf3SEric Dumazet 	}
12341da177e4SLinus Torvalds 	eat = len;
12351da177e4SLinus Torvalds 	k = 0;
12367b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
12377b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
12387b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
12399e903e08SEric Dumazet 
12409e903e08SEric Dumazet 		if (size <= eat) {
1241aff65da0SIan Campbell 			skb_frag_unref(skb, i);
12429e903e08SEric Dumazet 			eat -= size;
12431da177e4SLinus Torvalds 		} else {
12447b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
12451da177e4SLinus Torvalds 			if (eat) {
12467b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
12477b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
12481da177e4SLinus Torvalds 				eat = 0;
12491da177e4SLinus Torvalds 			}
12501da177e4SLinus Torvalds 			k++;
12511da177e4SLinus Torvalds 		}
12521da177e4SLinus Torvalds 	}
12537b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
12541da177e4SLinus Torvalds 
125527a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
12561da177e4SLinus Torvalds 	skb->data_len -= len;
12571da177e4SLinus Torvalds 	skb->len = skb->data_len;
12581da177e4SLinus Torvalds }
12591da177e4SLinus Torvalds 
126067edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
12611da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
12621da177e4SLinus Torvalds {
126314bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
12641da177e4SLinus Torvalds 		return -ENOMEM;
12651da177e4SLinus Torvalds 
12664fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
12671da177e4SLinus Torvalds 
12681da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
126984fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds 	skb->truesize	     -= len;
12721da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
12733ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
12741da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
12751da177e4SLinus Torvalds 
12765b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
12771da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
12785bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
12791da177e4SLinus Torvalds 
12801da177e4SLinus Torvalds 	return 0;
12811da177e4SLinus Torvalds }
12821da177e4SLinus Torvalds 
12831b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
12841b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
12855d424d5aSJohn Heffner {
1286cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1287cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12885d424d5aSJohn Heffner 	int mss_now;
12895d424d5aSJohn Heffner 
12905d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
12915d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
12925d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) from RFC 1122
12935d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
12945d424d5aSJohn Heffner 
129567469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
129667469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
129767469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
129867469601SEric Dumazet 
129967469601SEric Dumazet 		if (dst && dst_allfrag(dst))
130067469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
130167469601SEric Dumazet 	}
130267469601SEric Dumazet 
13035d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
13045d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
13055d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
13065d424d5aSJohn Heffner 
13075d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
13085d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
13095d424d5aSJohn Heffner 
13105d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
13115d424d5aSJohn Heffner 	if (mss_now < 48)
13125d424d5aSJohn Heffner 		mss_now = 48;
13135d424d5aSJohn Heffner 	return mss_now;
13145d424d5aSJohn Heffner }
13155d424d5aSJohn Heffner 
13161b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
13171b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
13181b63edd6SYuchung Cheng {
13191b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
13201b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
13211b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
13221b63edd6SYuchung Cheng }
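/* Illustrative userspace sketch (not part of tcp_output.c): a worked
 * example of how the two helpers above compose for a common IPv4 path.
 * The values assume a 1500-byte PMTU, a 20-byte IPv4 header, no extension
 * headers, and timestamps as the only TCP option (tcp_header_len of 32);
 * mss_clamp and icsk_ext_hdr_len are ignored for simplicity.
 */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500, ip_hdr = 20, tcp_hdr = 20, tcp_header_len = 32;
	int base_mss = pmtu - ip_hdr - tcp_hdr;		 /* as __tcp_mtu_to_mss() */
	int mss = base_mss - (tcp_header_len - tcp_hdr); /* as tcp_mtu_to_mss()   */

	printf("base mss %d, mss with timestamps %d\n", base_mss, mss);
	return 0;	/* prints 1460 and 1448 */
}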
13231b63edd6SYuchung Cheng 
13245d424d5aSJohn Heffner /* Inverse of above */
132567469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
13265d424d5aSJohn Heffner {
1327cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1328cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
13295d424d5aSJohn Heffner 	int mtu;
13305d424d5aSJohn Heffner 
13315d424d5aSJohn Heffner 	mtu = mss +
13325d424d5aSJohn Heffner 	      tp->tcp_header_len +
13335d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
13345d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
13355d424d5aSJohn Heffner 
133667469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
133767469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
133867469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
133967469601SEric Dumazet 
134067469601SEric Dumazet 		if (dst && dst_allfrag(dst))
134167469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
134267469601SEric Dumazet 	}
13435d424d5aSJohn Heffner 	return mtu;
13445d424d5aSJohn Heffner }
13455d424d5aSJohn Heffner 
134667edfef7SAndi Kleen /* MTU probing init per socket */
13475d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
13485d424d5aSJohn Heffner {
13495d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
13505d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1351b0f9ca53SFan Du 	struct net *net = sock_net(sk);
13525d424d5aSJohn Heffner 
1353b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
13545d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
13555d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1356b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
13575d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
135805cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
135905cbc0dbSFan Du 		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
13605d424d5aSJohn Heffner }
13614bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
13625d424d5aSJohn Heffner 
13631da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
13641da177e4SLinus Torvalds 
13651da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
13661da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1369caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
13701da177e4SLinus Torvalds    It also does not include TCP options.
13711da177e4SLinus Torvalds 
1372d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
13731da177e4SLinus Torvalds 
13741da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
13751da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
13761da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
13771da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
13781da177e4SLinus Torvalds 
13791da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
13801da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
13811da177e4SLinus Torvalds 
1382d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1383d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
13841da177e4SLinus Torvalds  */
13851da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
13861da177e4SLinus Torvalds {
13871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1388d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
13895d424d5aSJohn Heffner 	int mss_now;
13901da177e4SLinus Torvalds 
13915d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
13925d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
13931da177e4SLinus Torvalds 
13945d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1395409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds 	/* And store cached results */
1398d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
13995d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
14005d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1401c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds 	return mss_now;
14041da177e4SLinus Torvalds }
14054bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
14061da177e4SLinus Torvalds 
14071da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
14081da177e4SLinus Torvalds  * and even PMTU discovery events into account.
14091da177e4SLinus Torvalds  */
14100c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
14111da177e4SLinus Torvalds {
1412cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1413cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1414c1b4a7e6SDavid S. Miller 	u32 mss_now;
141595c96174SEric Dumazet 	unsigned int header_len;
141633ad798cSAdam Langley 	struct tcp_out_options opts;
141733ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
14181da177e4SLinus Torvalds 
1419c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1420c1b4a7e6SDavid S. Miller 
14211da177e4SLinus Torvalds 	if (dst) {
14221da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1423d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
14241da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
14251da177e4SLinus Torvalds 	}
14261da177e4SLinus Torvalds 
142733ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
142833ad798cSAdam Langley 		     sizeof(struct tcphdr);
142933ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
143033ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
143133ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
143233ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
143333ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
143433ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
143533ad798cSAdam Langley 		mss_now -= delta;
143633ad798cSAdam Langley 	}
1437cfb6eeb4SYOSHIFUJI Hideaki 
14381da177e4SLinus Torvalds 	return mss_now;
14391da177e4SLinus Torvalds }
14401da177e4SLinus Torvalds 
144186fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it has not been full during one rto.
144286fd14adSWeiping Pan  * As additional protection, we do not touch cwnd during retransmission phases,
144386fd14adSWeiping Pan  * or if the application has hit its sndbuf limit recently.
144486fd14adSWeiping Pan  */
144586fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1446a762a980SDavid S. Miller {
14479e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1448a762a980SDavid S. Miller 
144986fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
145086fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
145186fd14adSWeiping Pan 		/* Limited by application or receiver window. */
145286fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
145386fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
145486fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
145586fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
145686fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
145786fd14adSWeiping Pan 		}
145886fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
145986fd14adSWeiping Pan 	}
146086fd14adSWeiping Pan 	tp->snd_cwnd_stamp = tcp_time_stamp;
146186fd14adSWeiping Pan }
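/* Illustrative userspace sketch (not part of tcp_output.c): the decay step
 * above moves cwnd to the midpoint of itself and the window actually used
 * while the connection was application-limited.  The numbers are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 100;	/* assumed current cwnd          */
	unsigned int win_used = 20;	/* assumed max packets in flight */

	snd_cwnd = (snd_cwnd + win_used) >> 1;
	printf("decayed cwnd: %u\n", snd_cwnd);	/* prints 60 */
	return 0;
}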
146286fd14adSWeiping Pan 
1463ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1464a762a980SDavid S. Miller {
1465a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1466a762a980SDavid S. Miller 
1467ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1468ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1469ca8a2263SNeal Cardwell 	 */
1470ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1471ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1472ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1473ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1474ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1475ca8a2263SNeal Cardwell 	}
1476e114a710SEric Dumazet 
147724901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1478a762a980SDavid S. Miller 		/* Network is fed fully. */
1479a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1480a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1481a762a980SDavid S. Miller 	} else {
1482a762a980SDavid S. Miller 		/* Network starves. */
1483a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1484a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1485a762a980SDavid S. Miller 
148615d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
148715d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1488a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1489a762a980SDavid S. Miller 	}
1490a762a980SDavid S. Miller }
1491a762a980SDavid S. Miller 
1492d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1493d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1494d4589926SEric Dumazet {
1495d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1496d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1497d4589926SEric Dumazet }
1498d4589926SEric Dumazet 
1499d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1500d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1501d4589926SEric Dumazet  * The test is really :
1502d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1503d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1504d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1505d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
15060e3a4803SIlpo Järvinen  */
1507d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1508d4589926SEric Dumazet 				const struct sk_buff *skb)
1509d4589926SEric Dumazet {
1510d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1511d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1512d4589926SEric Dumazet }
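/* Illustrative userspace check (not part of tcp_output.c) that the
 * divide-free comparison above really is equivalent to
 * (skb->len % mss_now != 0) when pcount = ceil(len / mss_now).  The MSS
 * value is an invented example.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1448, len;

	for (len = 1; len < 5 * mss; len++) {
		unsigned int pcount = (len + mss - 1) / mss;

		assert((len < pcount * mss) == (len % mss != 0));
	}
	printf("len < pcount * mss matches len %% mss != 0 for all lengths\n");
	return 0;
}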
1513d4589926SEric Dumazet 
1514d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1515d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1516d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1517d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1518d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1519d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1520d4589926SEric Dumazet  */
1521d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1522cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1523d4589926SEric Dumazet {
1524d4589926SEric Dumazet 	return partial &&
1525d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1526d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1527d4589926SEric Dumazet }
1528605ad7f1SEric Dumazet 
1529605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1530605ad7f1SEric Dumazet  * to send one TSO packet per ms
1531605ad7f1SEric Dumazet  */
1532605ad7f1SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
1533605ad7f1SEric Dumazet {
1534605ad7f1SEric Dumazet 	u32 bytes, segs;
1535605ad7f1SEric Dumazet 
1536605ad7f1SEric Dumazet 	bytes = min(sk->sk_pacing_rate >> 10,
1537605ad7f1SEric Dumazet 		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1538605ad7f1SEric Dumazet 
1539605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1540605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1541605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1542605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1543605ad7f1SEric Dumazet 	 */
1544605ad7f1SEric Dumazet 	segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);
1545605ad7f1SEric Dumazet 
1546605ad7f1SEric Dumazet 	return min_t(u32, segs, sk->sk_gso_max_segs);
1547605ad7f1SEric Dumazet }
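/* Illustrative userspace sketch (not part of tcp_output.c): sk_pacing_rate
 * is in bytes per second, so "rate >> 10" above approximates the bytes that
 * flow in one millisecond (dividing by 1024 instead of 1000).  The rates
 * and MSS below are invented examples; the real value is further clamped by
 * sk_gso_max_size, sysctl_tcp_min_tso_segs and sk_gso_max_segs.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long rates[] = {
		12500000ULL,	/* ~100 Mbit/s */
		1250000000ULL,	/* ~10 Gbit/s  */
	};
	unsigned int mss = 1448, i;

	for (i = 0; i < 2; i++) {
		unsigned long long bytes = rates[i] >> 10;

		printf("%llu B/s -> %llu bytes (~%llu segs) per ms\n",
		       rates[i], bytes, bytes / mss);
	}
	return 0;
}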
1548605ad7f1SEric Dumazet 
1549d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1550d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1551d4589926SEric Dumazet 					const struct sk_buff *skb,
1552d4589926SEric Dumazet 					unsigned int mss_now,
1553d4589926SEric Dumazet 					unsigned int max_segs,
1554d4589926SEric Dumazet 					int nonagle)
1555c1b4a7e6SDavid S. Miller {
1556cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1557d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1558c1b4a7e6SDavid S. Miller 
155990840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
15601485348dSBen Hutchings 	max_len = mss_now * max_segs;
15610e3a4803SIlpo Järvinen 
15621485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
15631485348dSBen Hutchings 		return max_len;
15640e3a4803SIlpo Järvinen 
15655ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
15665ea3a748SIlpo Järvinen 
15671485348dSBen Hutchings 	if (max_len <= needed)
15681485348dSBen Hutchings 		return max_len;
15690e3a4803SIlpo Järvinen 
1570d4589926SEric Dumazet 	partial = needed % mss_now;
1571d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1572d4589926SEric Dumazet 	 * to include this last segment in this skb.
1573d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary
1574d4589926SEric Dumazet 	 */
1575cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1576d4589926SEric Dumazet 		return needed - partial;
1577d4589926SEric Dumazet 
1578d4589926SEric Dumazet 	return needed;
1579c1b4a7e6SDavid S. Miller }
1580c1b4a7e6SDavid S. Miller 
1581c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1582c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1583c1b4a7e6SDavid S. Miller  */
1584cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1585cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1586c1b4a7e6SDavid S. Miller {
1587d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1588c1b4a7e6SDavid S. Miller 
1589c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
15904de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
15914de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1592c1b4a7e6SDavid S. Miller 		return 1;
1593c1b4a7e6SDavid S. Miller 
1594c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1595c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1596d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1597c1b4a7e6SDavid S. Miller 		return 0;
1598d649a7a8SEric Dumazet 
1599d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1600d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1601d649a7a8SEric Dumazet 	 */
1602d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1603d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1604c1b4a7e6SDavid S. Miller }
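/* Illustrative userspace sketch (not part of tcp_output.c): the quota
 * above works out to min(max(cwnd / 2, 1), cwnd - in_flight), so one call
 * never releases more than half the congestion window.  Example values
 * are invented.
 */
#include <stdio.h>

static unsigned int cwnd_quota(unsigned int cwnd, unsigned int in_flight)
{
	unsigned int halfcwnd = cwnd / 2 > 1 ? cwnd / 2 : 1;

	if (in_flight >= cwnd)
		return 0;
	return halfcwnd < cwnd - in_flight ? halfcwnd : cwnd - in_flight;
}

int main(void)
{
	printf("%u %u %u\n",
	       cwnd_quota(10, 0),	/* 5: capped at half the window */
	       cwnd_quota(10, 8),	/* 2: only two slots remain     */
	       cwnd_quota(10, 10));	/* 0: window is full            */
	return 0;
}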
1605c1b4a7e6SDavid S. Miller 
1606b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
160767edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1608c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1609c1b4a7e6SDavid S. Miller  */
16105bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1611c1b4a7e6SDavid S. Miller {
1612c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1613c1b4a7e6SDavid S. Miller 
1614f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
16155bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1616c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1617c1b4a7e6SDavid S. Miller 	}
1618c1b4a7e6SDavid S. Miller 	return tso_segs;
1619c1b4a7e6SDavid S. Miller }
1620c1b4a7e6SDavid S. Miller 
1621c1b4a7e6SDavid S. Miller 
1622a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1623c1b4a7e6SDavid S. Miller  * sent now.
1624c1b4a7e6SDavid S. Miller  */
1625a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1626c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1627c1b4a7e6SDavid S. Miller {
1628c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of
1629c1b4a7e6SDavid S. Miller 	 * the write_queue (they have no chance to get new data).
1630c1b4a7e6SDavid S. Miller 	 *
1631c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1632c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1633c1b4a7e6SDavid S. Miller 	 */
1634c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1635a2a385d6SEric Dumazet 		return true;
1636c1b4a7e6SDavid S. Miller 
16379b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
16389b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1639a2a385d6SEric Dumazet 		return true;
1640c1b4a7e6SDavid S. Miller 
1641cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1642a2a385d6SEric Dumazet 		return true;
1643c1b4a7e6SDavid S. Miller 
1644a2a385d6SEric Dumazet 	return false;
1645c1b4a7e6SDavid S. Miller }
1646c1b4a7e6SDavid S. Miller 
1647c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1648a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1649a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1650056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1651c1b4a7e6SDavid S. Miller {
1652c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1653c1b4a7e6SDavid S. Miller 
1654c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1655c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1656c1b4a7e6SDavid S. Miller 
165790840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1658c1b4a7e6SDavid S. Miller }
1659c1b4a7e6SDavid S. Miller 
1660fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1661c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1662c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1663c1b4a7e6SDavid S. Miller  */
1664cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1665c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1666c1b4a7e6SDavid S. Miller {
1667cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1668c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1669c1b4a7e6SDavid S. Miller 
16705bbb432cSEric Dumazet 	tcp_init_tso_segs(skb, cur_mss);
1671c1b4a7e6SDavid S. Miller 
1672c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1673c1b4a7e6SDavid S. Miller 		return 0;
1674c1b4a7e6SDavid S. Miller 
1675c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1676056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1677c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1678c1b4a7e6SDavid S. Miller 
1679c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1680c1b4a7e6SDavid S. Miller }
1681c1b4a7e6SDavid S. Miller 
168267edfef7SAndi Kleen /* Test if sending is allowed right now. */
1683a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1684c1b4a7e6SDavid S. Miller {
1685cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1686fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1687c1b4a7e6SDavid S. Miller 
1688a02cec21SEric Dumazet 	return skb &&
16890c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1690c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1691a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1692c1b4a7e6SDavid S. Miller }
1693c1b4a7e6SDavid S. Miller 
1694c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1695c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1696c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1697c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1698c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1699c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1700c1b4a7e6SDavid S. Miller  */
1701056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1702c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1703c1b4a7e6SDavid S. Miller {
1704c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1705c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
17069ce01461SIlpo Järvinen 	u8 flags;
1707c1b4a7e6SDavid S. Miller 
1708c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1709c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
17106cc55e09SOctavian Purdila 		return tcp_fragment(sk, skb, len, mss_now, gfp);
1711c1b4a7e6SDavid S. Miller 
1712eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
171351456b29SIan Morris 	if (unlikely(!buff))
1714c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1715c1b4a7e6SDavid S. Miller 
17163ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
17173ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1718b60b49eaSHerbert Xu 	buff->truesize += nlen;
1719c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1720c1b4a7e6SDavid S. Miller 
1721c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1722c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1723c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1724c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1725c1b4a7e6SDavid S. Miller 
1726c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
17274de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
17284de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
17294de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1730c1b4a7e6SDavid S. Miller 
1731c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1732c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1733c1b4a7e6SDavid S. Miller 
173484fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1735c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1736490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1737c1b4a7e6SDavid S. Miller 
1738c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
17395bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
17405bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1741c1b4a7e6SDavid S. Miller 
1742c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1743f4a775d1SEric Dumazet 	__skb_header_release(buff);
1744fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1745c1b4a7e6SDavid S. Miller 
1746c1b4a7e6SDavid S. Miller 	return 0;
1747c1b4a7e6SDavid S. Miller }
1748c1b4a7e6SDavid S. Miller 
1749c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1750c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1751c1b4a7e6SDavid S. Miller  *
1752c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1753c1b4a7e6SDavid S. Miller  */
1754ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1755605ad7f1SEric Dumazet 				 bool *is_cwnd_limited, u32 max_segs)
1756c1b4a7e6SDavid S. Miller {
17576687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
175850c8339eSEric Dumazet 	u32 age, send_win, cong_win, limit, in_flight;
175950c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
176050c8339eSEric Dumazet 	struct skb_mstamp now;
176150c8339eSEric Dumazet 	struct sk_buff *head;
1762ad9f4f50SEric Dumazet 	int win_divisor;
1763c1b4a7e6SDavid S. Miller 
17644de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1765ae8064acSJohn Heffner 		goto send_now;
1766c1b4a7e6SDavid S. Miller 
176799d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1768ae8064acSJohn Heffner 		goto send_now;
1769ae8064acSJohn Heffner 
17705f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
17715f852eb5SEric Dumazet 	 * only if the last write was recent.
17725f852eb5SEric Dumazet 	 */
17735f852eb5SEric Dumazet 	if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
1774ae8064acSJohn Heffner 		goto send_now;
1775908a75c1SDavid S. Miller 
1776c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1777c1b4a7e6SDavid S. Miller 
1778056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1779c1b4a7e6SDavid S. Miller 
178090840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1781c1b4a7e6SDavid S. Miller 
1782c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1783c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1784c1b4a7e6SDavid S. Miller 
1785c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1786c1b4a7e6SDavid S. Miller 
1787ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1788605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1789ae8064acSJohn Heffner 		goto send_now;
1790ba244fe9SDavid S. Miller 
179162ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
179262ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
179362ad2761SIlpo Järvinen 		goto send_now;
179462ad2761SIlpo Järvinen 
1795ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1796ad9f4f50SEric Dumazet 	if (win_divisor) {
1797c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1798c1b4a7e6SDavid S. Miller 
1799c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1800c1b4a7e6SDavid S. Miller 		 * just use it.
1801c1b4a7e6SDavid S. Miller 		 */
1802ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1803c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1804ae8064acSJohn Heffner 			goto send_now;
1805c1b4a7e6SDavid S. Miller 	} else {
1806c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1807c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1808c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1809c1b4a7e6SDavid S. Miller 		 * then send now.
1810c1b4a7e6SDavid S. Miller 		 */
18116b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1812ae8064acSJohn Heffner 			goto send_now;
1813c1b4a7e6SDavid S. Miller 	}
1814c1b4a7e6SDavid S. Miller 
181550c8339eSEric Dumazet 	head = tcp_write_queue_head(sk);
181650c8339eSEric Dumazet 	skb_mstamp_get(&now);
181750c8339eSEric Dumazet 	age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
181850c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
181950c8339eSEric Dumazet 	if (age < (tp->srtt_us >> 4))
182050c8339eSEric Dumazet 		goto send_now;
182150c8339eSEric Dumazet 
18225f852eb5SEric Dumazet 	/* Ok, it looks like it is advisable to defer. */
1823ae8064acSJohn Heffner 
1824d2e1339fSBendik Rønning Opstad 	if (cong_win < send_win && cong_win <= skb->len)
1825ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1826ca8a2263SNeal Cardwell 
1827a2a385d6SEric Dumazet 	return true;
1828ae8064acSJohn Heffner 
1829ae8064acSJohn Heffner send_now:
1830a2a385d6SEric Dumazet 	return false;
1831c1b4a7e6SDavid S. Miller }
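/* Illustrative userspace sketch (not part of tcp_output.c): tp->srtt_us
 * stores eight times the smoothed RTT in microseconds, so the
 * "srtt_us >> 4" test above compares the age of the oldest unacked skb
 * against half the RTT.  A 40 ms smoothed RTT is assumed here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int srtt_us = 8 * 40000;	/* 40 ms, scaled by 8 */

	printf("defer only if head skb is younger than %u us\n",
	       srtt_us >> 4);			/* prints 20000 */
	return 0;
}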
1832c1b4a7e6SDavid S. Miller 
183305cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
183405cbc0dbSFan Du {
183505cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
183605cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
183705cbc0dbSFan Du 	struct net *net = sock_net(sk);
183805cbc0dbSFan Du 	u32 interval;
183905cbc0dbSFan Du 	s32 delta;
184005cbc0dbSFan Du 
184105cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
184205cbc0dbSFan Du 	delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
184305cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
184405cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
184505cbc0dbSFan Du 
184605cbc0dbSFan Du 		/* Update current search range */
184705cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
184805cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
184905cbc0dbSFan Du 			sizeof(struct tcphdr) +
185005cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
185105cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
185205cbc0dbSFan Du 
185305cbc0dbSFan Du 		/* Update probe time stamp */
185405cbc0dbSFan Du 		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
185505cbc0dbSFan Du 	}
185605cbc0dbSFan Du }
185705cbc0dbSFan Du 
18585d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
185967edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
186067edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
186167edfef7SAndi Kleen  * changes resulting in larger path MTUs.
186267edfef7SAndi Kleen  *
18635d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
18645d424d5aSJohn Heffner  *         1 if a probe was sent,
1865056834d9SIlpo Järvinen  *         -1 otherwise
1866056834d9SIlpo Järvinen  */
18675d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
18685d424d5aSJohn Heffner {
18695d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
18705d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
18715d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
18726b58e0a5SFan Du 	struct net *net = sock_net(sk);
18735d424d5aSJohn Heffner 	int len;
18745d424d5aSJohn Heffner 	int probe_size;
187591cc17c0SIlpo Järvinen 	int size_needed;
18765d424d5aSJohn Heffner 	int copy;
18775d424d5aSJohn Heffner 	int mss_now;
18786b58e0a5SFan Du 	int interval;
18795d424d5aSJohn Heffner 
18805d424d5aSJohn Heffner 	/* Not currently probing/verifying,
18815d424d5aSJohn Heffner 	 * not in recovery,
18825d424d5aSJohn Heffner 	 * have enough cwnd, and
18835d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
18845d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
18855d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
18865d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
18875d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1888cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
18895d424d5aSJohn Heffner 		return -1;
18905d424d5aSJohn Heffner 
18916b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_base_mss
18926b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
18936b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
18946b58e0a5SFan Du 	 */
18950c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
18966b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
18976b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
189891cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
18996b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
190005cbc0dbSFan Du 	/* When misfortune happens, we are reprobing actively,
190105cbc0dbSFan Du 	 * and then the reprobe timer has expired. We stick with the current
190205cbc0dbSFan Du 	 * probing process by not resetting the search range to its original.
190305cbc0dbSFan Du 	 */
19046b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
190505cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
190605cbc0dbSFan Du 		/* Check whether enough time has elapsed for
190705cbc0dbSFan Du 		 * another round of probing.
190805cbc0dbSFan Du 		 */
190905cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
19105d424d5aSJohn Heffner 		return -1;
19115d424d5aSJohn Heffner 	}
19125d424d5aSJohn Heffner 
19135d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
19147f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
19155d424d5aSJohn Heffner 		return -1;
19165d424d5aSJohn Heffner 
191791cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
19185d424d5aSJohn Heffner 		return -1;
191990840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
19205d424d5aSJohn Heffner 		return 0;
19215d424d5aSJohn Heffner 
1922d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1923d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1924d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
19255d424d5aSJohn Heffner 			return -1;
19265d424d5aSJohn Heffner 		else
19275d424d5aSJohn Heffner 			return 0;
19285d424d5aSJohn Heffner 	}
19295d424d5aSJohn Heffner 
19305d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
1931eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
193251456b29SIan Morris 	if (!nskb)
19335d424d5aSJohn Heffner 		return -1;
19343ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
19353ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
19365d424d5aSJohn Heffner 
1937fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
19385d424d5aSJohn Heffner 
19395d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
19405d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
19414de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
19425d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
19435d424d5aSJohn Heffner 	nskb->csum = 0;
194484fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
19455d424d5aSJohn Heffner 
194650c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
194750c4817eSIlpo Järvinen 
19485d424d5aSJohn Heffner 	len = 0;
1949234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
19505d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
19515d424d5aSJohn Heffner 		if (nskb->ip_summed)
19525d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
19535d424d5aSJohn Heffner 		else
19545d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1955056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1956056834d9SIlpo Järvinen 							    copy, nskb->csum);
19575d424d5aSJohn Heffner 
19585d424d5aSJohn Heffner 		if (skb->len <= copy) {
19595d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
19605d424d5aSJohn Heffner 			 * Throw it away. */
19614de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1962fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
19633ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
19645d424d5aSJohn Heffner 		} else {
19654de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1966a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
19675d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
19685d424d5aSJohn Heffner 				skb_pull(skb, copy);
196984fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1970056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1971056834d9SIlpo Järvinen 								 skb->len, 0);
19725d424d5aSJohn Heffner 			} else {
19735d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
19745bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
19755d424d5aSJohn Heffner 			}
19765d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
19775d424d5aSJohn Heffner 		}
19785d424d5aSJohn Heffner 
19795d424d5aSJohn Heffner 		len += copy;
1980234b6860SIlpo Järvinen 
1981234b6860SIlpo Järvinen 		if (len >= probe_size)
1982234b6860SIlpo Järvinen 			break;
19835d424d5aSJohn Heffner 	}
19845bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
19855d424d5aSJohn Heffner 
19865d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
19877faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
19887faee5c0SEric Dumazet 	 */
19895d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
19905d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
19915d424d5aSJohn Heffner 		 * effectively two packets. */
19925d424d5aSJohn Heffner 		tp->snd_cwnd--;
199366f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
19945d424d5aSJohn Heffner 
19955d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
19960e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
19970e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
19985d424d5aSJohn Heffner 
19995d424d5aSJohn Heffner 		return 1;
20005d424d5aSJohn Heffner 	}
20015d424d5aSJohn Heffner 
20025d424d5aSJohn Heffner 	return -1;
20035d424d5aSJohn Heffner }
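/* Illustrative userspace sketch (not part of tcp_output.c): the probe
 * above is sized from the midpoint of the MTU search range, so each
 * decided probe roughly halves [search_low, search_high].  The bounds,
 * the stand-in 64-byte threshold, and the assumed 40 bytes of IPv4+TCP
 * overhead are invented, and every probe is pretended to succeed.
 */
#include <stdio.h>

int main(void)
{
	int low = 1024, high = 1560, hdrs = 40;

	while (high - low >= 64) {
		int probe_mtu = (low + high) / 2;

		printf("probe mss %d\n", probe_mtu - hdrs);
		low = probe_mtu;	/* a successful probe raises the floor */
	}
	return 0;
}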
20045d424d5aSJohn Heffner 
20051da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
20061da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
20071da177e4SLinus Torvalds  * window for us.
20081da177e4SLinus Torvalds  *
2009f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2010f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2011f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2012f8269a49SIlpo Järvinen  *
20136ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
20146ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
20156ba8a3b1SNandita Dukkipati  *
2016a2a385d6SEric Dumazet  * Returns true, if no segments are in flight and we have queued segments,
2017a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
20181da177e4SLinus Torvalds  */
2019a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2020d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
20211da177e4SLinus Torvalds {
20221da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
202392df7b51SDavid S. Miller 	struct sk_buff *skb;
2024c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2025c1b4a7e6SDavid S. Miller 	int cwnd_quota;
20265d424d5aSJohn Heffner 	int result;
2027ca8a2263SNeal Cardwell 	bool is_cwnd_limited = false;
2028605ad7f1SEric Dumazet 	u32 max_segs;
20291da177e4SLinus Torvalds 
2030c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
20315d424d5aSJohn Heffner 
2032d5dd9175SIlpo Järvinen 	if (!push_one) {
20335d424d5aSJohn Heffner 		/* Do MTU probing. */
2034d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2035d5dd9175SIlpo Järvinen 		if (!result) {
2036a2a385d6SEric Dumazet 			return false;
20375d424d5aSJohn Heffner 		} else if (result > 0) {
20385d424d5aSJohn Heffner 			sent_pkts = 1;
20395d424d5aSJohn Heffner 		}
2040d5dd9175SIlpo Järvinen 	}
20415d424d5aSJohn Heffner 
2042605ad7f1SEric Dumazet 	max_segs = tcp_tso_autosize(sk, mss_now);
2043fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2044c8ac3774SHerbert Xu 		unsigned int limit;
2045c8ac3774SHerbert Xu 
20465bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2047c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2048c1b4a7e6SDavid S. Miller 
20499d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
20507faee5c0SEric Dumazet 			/* "skb_mstamp" is used as the starting point for the retransmit timer */
20517faee5c0SEric Dumazet 			skb_mstamp_get(&skb->skb_mstamp);
2052ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
20539d186cacSAndrey Vagin 		}
2054ec342325SAndrew Vagin 
2055b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
20566ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
20576ba8a3b1SNandita Dukkipati 			if (push_one == 2)
20586ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
20596ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
20606ba8a3b1SNandita Dukkipati 			else
2061b68e9f85SHerbert Xu 				break;
20626ba8a3b1SNandita Dukkipati 		}
2063b68e9f85SHerbert Xu 
2064b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
2065b68e9f85SHerbert Xu 			break;
2066b68e9f85SHerbert Xu 
2067d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2068aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2069aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2070aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2071aa93466bSDavid S. Miller 				break;
2072c1b4a7e6SDavid S. Miller 		} else {
2073ca8a2263SNeal Cardwell 			if (!push_one &&
2074605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2075605ad7f1SEric Dumazet 						 max_segs))
2076aa93466bSDavid S. Miller 				break;
2077c1b4a7e6SDavid S. Miller 		}
2078aa93466bSDavid S. Miller 
2079605ad7f1SEric Dumazet 		limit = mss_now;
2080d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2081605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2082605ad7f1SEric Dumazet 						    min_t(unsigned int,
2083605ad7f1SEric Dumazet 							  cwnd_quota,
2084605ad7f1SEric Dumazet 							  max_segs),
2085605ad7f1SEric Dumazet 						    nonagle);
2086605ad7f1SEric Dumazet 
2087605ad7f1SEric Dumazet 		if (skb->len > limit &&
2088605ad7f1SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2089605ad7f1SEric Dumazet 			break;
2090605ad7f1SEric Dumazet 
2091c9eeec26SEric Dumazet 		/* TCP Small Queues:
2092c9eeec26SEric Dumazet 		 * Control the amount queued in qdisc/devices to two packets, or ~1 ms worth of data.
2093c9eeec26SEric Dumazet 		 * This allows for :
2094c9eeec26SEric Dumazet 		 *  - better RTT estimation and ACK scheduling
2095c9eeec26SEric Dumazet 		 *  - faster recovery
2096c9eeec26SEric Dumazet 		 *  - high rates
209798e09386SEric Dumazet 		 * Alas, some drivers / subsystems require a fair amount
209898e09386SEric Dumazet 		 * of queued bytes to ensure line rate.
209998e09386SEric Dumazet 		 * One example is wifi aggregation (802.11 AMPDU)
210046d3ceabSEric Dumazet 		 */
2101605ad7f1SEric Dumazet 		limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
2102605ad7f1SEric Dumazet 		limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
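		/* e.g. at ~1 Gbit/s, sk_pacing_rate is about 125000000 B/s,
		 * so sk_pacing_rate >> 10 is ~122 KB, roughly 1 ms worth of
		 * data, before the sysctl_tcp_limit_output_bytes clamp.
		 */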
2103c9eeec26SEric Dumazet 
2104c9eeec26SEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
210546d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
2106bf06200eSJohn Ogness 			/* It is possible TX completion already happened
2107bf06200eSJohn Ogness 			 * before we set TSQ_THROTTLED, so we must
2108bf06200eSJohn Ogness 			 * test again the condition.
2109bf06200eSJohn Ogness 			 */
21104e857c58SPeter Zijlstra 			smp_mb__after_atomic();
2111bf06200eSJohn Ogness 			if (atomic_read(&sk->sk_wmem_alloc) > limit)
211246d3ceabSEric Dumazet 				break;
211346d3ceabSEric Dumazet 		}
2114c9eeec26SEric Dumazet 
2115d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
21161da177e4SLinus Torvalds 			break;
21171da177e4SLinus Torvalds 
2118ec342325SAndrew Vagin repair:
21191da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
21201da177e4SLinus Torvalds 		 * This call will increment packets_out.
21211da177e4SLinus Torvalds 		 */
212266f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
21231da177e4SLinus Torvalds 
21241da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2125a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2126d5dd9175SIlpo Järvinen 
2127d5dd9175SIlpo Järvinen 		if (push_one)
2128d5dd9175SIlpo Järvinen 			break;
21291da177e4SLinus Torvalds 	}
21301da177e4SLinus Torvalds 
2131aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2132684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2133684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
21346ba8a3b1SNandita Dukkipati 
21356ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
21366ba8a3b1SNandita Dukkipati 		if (push_one != 2)
21376ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
2138d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2139ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2140a2a385d6SEric Dumazet 		return false;
21411da177e4SLinus Torvalds 	}
2142b340b264SYuchung Cheng 	return !tp->packets_out && tcp_send_head(sk);
21436ba8a3b1SNandita Dukkipati }
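
/* The three tcp_write_xmit() call sites below illustrate the push_one
 * contract:
 *
 *	tcp_write_xmit(sk, mss, nonagle, 0, gfp)	- drain the queue
 *	tcp_write_xmit(sk, mss, TCP_NAGLE_PUSH, 1, gfp)	- tcp_push_one()
 *	tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, gfp)	- TLP loss probe
 */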
21446ba8a3b1SNandita Dukkipati 
21456ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
21466ba8a3b1SNandita Dukkipati {
21476ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
21486ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
21496ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
2150740b0f18SEric Dumazet 	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
21516ba8a3b1SNandita Dukkipati 
21526ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
21536ba8a3b1SNandita Dukkipati 		return false;
21546ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
21556ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
21566ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
21576ba8a3b1SNandita Dukkipati 		return false;
21586ba8a3b1SNandita Dukkipati 	}
21596ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
21606ba8a3b1SNandita Dukkipati 	 * finishes.
21616ba8a3b1SNandita Dukkipati 	 */
2162f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
21636ba8a3b1SNandita Dukkipati 		return false;
21646ba8a3b1SNandita Dukkipati 
21656ba8a3b1SNandita Dukkipati 	/* TLP is only scheduled when next timer event is RTO. */
21666ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
21676ba8a3b1SNandita Dukkipati 		return false;
21686ba8a3b1SNandita Dukkipati 
21696ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK-capable connections
21706ba8a3b1SNandita Dukkipati 	 * in Open state that are limited either by cwnd or by the application.
21716ba8a3b1SNandita Dukkipati 	 */
2172f9b99582SYuchung Cheng 	if (sysctl_tcp_early_retrans < 3 || !tp->packets_out ||
21736ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
21746ba8a3b1SNandita Dukkipati 		return false;
21756ba8a3b1SNandita Dukkipati 
21766ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
21776ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
21786ba8a3b1SNandita Dukkipati 		return false;
21796ba8a3b1SNandita Dukkipati 
21806ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
2181f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2182f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
21836ba8a3b1SNandita Dukkipati 	 */
2184f9b99582SYuchung Cheng 	timeout = (rtt << 1) ? : TCP_TIMEOUT_INIT;
21856ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
21866ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
21876ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
21886ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
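	/* The GNU "a ? : b" above yields 2*rtt when an RTT sample exists,
	 * TCP_TIMEOUT_INIT otherwise. e.g. with srtt = 40 ms, timeout is
	 * 80 ms; with exactly one packet outstanding it grows to
	 * max(80 ms, 60 ms + TCP_DELACK_MAX), and the 10 ms floor only
	 * matters when 2*rtt is smaller than that.
	 */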
21896ba8a3b1SNandita Dukkipati 
21906ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
21916ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
21926ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
21936ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
21946ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
21956ba8a3b1SNandita Dukkipati 		if (delta > 0)
21966ba8a3b1SNandita Dukkipati 			timeout = delta;
21976ba8a3b1SNandita Dukkipati 	}
21986ba8a3b1SNandita Dukkipati 
21996ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
22006ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
22016ba8a3b1SNandita Dukkipati 	return true;
22026ba8a3b1SNandita Dukkipati }
22036ba8a3b1SNandita Dukkipati 
22041f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
22051f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
22061f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
22071f3279aeSEric Dumazet  * Note: This is called from BH context only.
22081f3279aeSEric Dumazet  */
22091f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
22101f3279aeSEric Dumazet 				    const struct sk_buff *skb)
22111f3279aeSEric Dumazet {
221239bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
22131f3279aeSEric Dumazet 		NET_INC_STATS_BH(sock_net(sk),
22141f3279aeSEric Dumazet 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
22151f3279aeSEric Dumazet 		return true;
22161f3279aeSEric Dumazet 	}
22171f3279aeSEric Dumazet 	return false;
22181f3279aeSEric Dumazet }
22191f3279aeSEric Dumazet 
2220b340b264SYuchung Cheng /* When the probe timeout (PTO) fires, try to send a new segment if possible, else
22216ba8a3b1SNandita Dukkipati  * retransmit the last segment.
22226ba8a3b1SNandita Dukkipati  */
22236ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
22246ba8a3b1SNandita Dukkipati {
22259b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
22266ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
22276ba8a3b1SNandita Dukkipati 	int pcount;
22286ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
22296ba8a3b1SNandita Dukkipati 
2230b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
2231b340b264SYuchung Cheng 	if (skb) {
2232b340b264SYuchung Cheng 		if (tcp_snd_wnd_test(tp, skb, mss)) {
2233b340b264SYuchung Cheng 			pcount = tp->packets_out;
2234b340b264SYuchung Cheng 			tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2235b340b264SYuchung Cheng 			if (tp->packets_out > pcount)
2236b340b264SYuchung Cheng 				goto probe_sent;
22376ba8a3b1SNandita Dukkipati 			goto rearm_timer;
22386ba8a3b1SNandita Dukkipati 		}
2239b340b264SYuchung Cheng 		skb = tcp_write_queue_prev(sk, skb);
2240b340b264SYuchung Cheng 	} else {
2241b340b264SYuchung Cheng 		skb = tcp_write_queue_tail(sk);
2242b340b264SYuchung Cheng 	}
22436ba8a3b1SNandita Dukkipati 
22449b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
22459b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
22469b717a8dSNandita Dukkipati 		goto rearm_timer;
22479b717a8dSNandita Dukkipati 
22486ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
22496ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
22506ba8a3b1SNandita Dukkipati 		goto rearm_timer;
22516ba8a3b1SNandita Dukkipati 
22521f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
22531f3279aeSEric Dumazet 		goto rearm_timer;
22541f3279aeSEric Dumazet 
22556ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
22566ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
22576ba8a3b1SNandita Dukkipati 		goto rearm_timer;
22586ba8a3b1SNandita Dukkipati 
22596ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
22606cc55e09SOctavian Purdila 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
22616cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
22626ba8a3b1SNandita Dukkipati 			goto rearm_timer;
2263b340b264SYuchung Cheng 		skb = tcp_write_queue_next(sk, skb);
22646ba8a3b1SNandita Dukkipati 	}
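	/* e.g. a 3-segment skb with mss = 1400 and len = 3600 is split at
	 * (pcount - 1) * mss = 2800 bytes; the 800-byte tail fragment is
	 * what actually gets probed.
	 */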
22656ba8a3b1SNandita Dukkipati 
22666ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
22676ba8a3b1SNandita Dukkipati 		goto rearm_timer;
22686ba8a3b1SNandita Dukkipati 
2269b340b264SYuchung Cheng 	if (__tcp_retransmit_skb(sk, skb))
2270b340b264SYuchung Cheng 		goto rearm_timer;
22716ba8a3b1SNandita Dukkipati 
22729b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
22739b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
22749b717a8dSNandita Dukkipati 
2275b340b264SYuchung Cheng probe_sent:
2276fcd16c0aSYuchung Cheng 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2277fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2278fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2279b340b264SYuchung Cheng rearm_timer:
2280fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
22811da177e4SLinus Torvalds }
22821da177e4SLinus Torvalds 
2283a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2284a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
2285a762a980SDavid S. Miller  * The socket must be locked by the caller.
2286a762a980SDavid S. Miller  */
22879e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
22889e412ba7SIlpo Järvinen 			       int nonagle)
2289a762a980SDavid S. Miller {
2290726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2291726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2292726e07a8SIlpo Järvinen 	 * all will be happy.
2293726e07a8SIlpo Järvinen 	 */
2294726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2295726e07a8SIlpo Järvinen 		return;
2296726e07a8SIlpo Järvinen 
229799a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
22987450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
22999e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2300a762a980SDavid S. Miller }
2301a762a980SDavid S. Miller 
2302c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2303c1b4a7e6SDavid S. Miller  * a true push of pending frames to set up the probe timer etc.
2304c1b4a7e6SDavid S. Miller  */
2305c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2306c1b4a7e6SDavid S. Miller {
2307fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2308c1b4a7e6SDavid S. Miller 
2309c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2310c1b4a7e6SDavid S. Miller 
2311d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2312c1b4a7e6SDavid S. Miller }
2313c1b4a7e6SDavid S. Miller 
23141da177e4SLinus Torvalds /* This function returns the amount that we can raise the
23151da177e4SLinus Torvalds  * usable window based on the following constraints
23161da177e4SLinus Torvalds  *
23171da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
23181da177e4SLinus Torvalds  * 2. We limit memory per socket
23191da177e4SLinus Torvalds  *
23201da177e4SLinus Torvalds  * RFC 1122:
23211da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
23221da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
23231da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
23241da177e4SLinus Torvalds  *
23251da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
23261da177e4SLinus Torvalds  * it at least MSS bytes.
23271da177e4SLinus Torvalds  *
23281da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
23291da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
23301da177e4SLinus Torvalds  *
23311da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
23321da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
23331da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
23341da177e4SLinus Torvalds  * window to always advance by a single byte.
23351da177e4SLinus Torvalds  *
23361da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
23371da177e4SLinus Torvalds  * then this will not be a problem.
23381da177e4SLinus Torvalds  *
23391da177e4SLinus Torvalds  * BSD seems to make the following compromise:
23401da177e4SLinus Torvalds  *
23411da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
23421da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
23431da177e4SLinus Torvalds  *	then set the window to 0.
23441da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
23451da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
23461da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
23471da177e4SLinus Torvalds  *
23481da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
23491da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
23501da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
23511da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
23521da177e4SLinus Torvalds  * because the pipeline is full.
23531da177e4SLinus Torvalds  *
23541da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
23551da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
23561da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
23571da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
23581da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
23591da177e4SLinus Torvalds  *
23601da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
23611da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
23621da177e4SLinus Torvalds  *
23631da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
23641da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
23651da177e4SLinus Torvalds  */
23661da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
23671da177e4SLinus Torvalds {
2368463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
23691da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2370caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
23711da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
23721da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
23731da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
23741da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
23751da177e4SLinus Torvalds 	 */
2376463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
23771da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
237886c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
237986c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
23801da177e4SLinus Torvalds 	int window;
23811da177e4SLinus Torvalds 
23821da177e4SLinus Torvalds 	if (mss > full_space)
23831da177e4SLinus Torvalds 		mss = full_space;
23841da177e4SLinus Torvalds 
2385b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2386463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
23871da177e4SLinus Torvalds 
2388b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2389056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2390056834d9SIlpo Järvinen 					       4U * tp->advmss);
23911da177e4SLinus Torvalds 
239286c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
239386c1a045SFlorian Westphal 		 * increase it due to wscale.
239486c1a045SFlorian Westphal 		 */
239586c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
239686c1a045SFlorian Westphal 
239786c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
239886c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
239986c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
240086c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
240186c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
240286c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
240386c1a045SFlorian Westphal 		 */
240486c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
24051da177e4SLinus Torvalds 			return 0;
24061da177e4SLinus Torvalds 	}
24071da177e4SLinus Torvalds 
24081da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
24091da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
24101da177e4SLinus Torvalds 
24111da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
24121da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
24131da177e4SLinus Torvalds 	 */
24141da177e4SLinus Torvalds 	window = tp->rcv_wnd;
24151da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
24161da177e4SLinus Torvalds 		window = free_space;
24171da177e4SLinus Torvalds 
24181da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
24191da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
24201da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
24211da177e4SLinus Torvalds 		 */
24221da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
24231da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
24241da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
24251da177e4SLinus Torvalds 	} else {
24261da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
24271da177e4SLinus Torvalds 		 * Window clamp already applied above.
24281da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
24291da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
24301da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
24311da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
24321da177e4SLinus Torvalds 		 * is too small.
24331da177e4SLinus Torvalds 		 */
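		/* e.g. mss = 1460, free_space = 10000, window = 4096:
		 * 4096 <= 10000 - 1460, so the window is rounded to
		 * (10000 / 1460) * 1460 = 8760.
		 */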
24341da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
24351da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
243684565070SJohn Heffner 		else if (mss == full_space &&
2437b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
243884565070SJohn Heffner 			window = free_space;
24391da177e4SLinus Torvalds 	}
24401da177e4SLinus Torvalds 
24411da177e4SLinus Torvalds 	return window;
24421da177e4SLinus Torvalds }
24431da177e4SLinus Torvalds 
24444a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
24454a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
24461da177e4SLinus Torvalds {
24471da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2448fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2449058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
24501da177e4SLinus Torvalds 
2451058dc334SIlpo Järvinen 	skb_size = skb->len;
2452058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
24531da177e4SLinus Torvalds 
2454058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
24551da177e4SLinus Torvalds 
24566859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2457a6963a6bSIlpo Järvinen 
2458fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
24591da177e4SLinus Torvalds 
2460058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
24611a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
24621da177e4SLinus Torvalds 
246352d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
246452d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
24651da177e4SLinus Torvalds 
246684fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
24671da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
24681da177e4SLinus Torvalds 
24691da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
24701da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
24711da177e4SLinus Torvalds 
2472e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
24734de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
24741da177e4SLinus Torvalds 
24751da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
24761da177e4SLinus Torvalds 	 * packet counting does not break.
24771da177e4SLinus Torvalds 	 */
24784828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2479b7689205SIlpo Järvinen 
2480b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2481ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2482ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2483ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2484b7689205SIlpo Järvinen 
2485797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2486797108d1SIlpo Järvinen 
24873ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
24881da177e4SLinus Torvalds }
24891da177e4SLinus Torvalds 
249067edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2491a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
24924a17fc3aSIlpo Järvinen {
24934a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2494a2a385d6SEric Dumazet 		return false;
24954a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
24964a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2497a2a385d6SEric Dumazet 		return false;
24984a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2499a2a385d6SEric Dumazet 		return false;
25004a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2501a2a385d6SEric Dumazet 		return false;
25024a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd data could be invented */
25034a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2504a2a385d6SEric Dumazet 		return false;
25054a17fc3aSIlpo Järvinen 
2506a2a385d6SEric Dumazet 	return true;
25074a17fc3aSIlpo Järvinen }
25084a17fc3aSIlpo Järvinen 
250967edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
251067edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
251167edfef7SAndi Kleen  */
25124a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
25134a17fc3aSIlpo Järvinen 				     int space)
25144a17fc3aSIlpo Järvinen {
25154a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
25164a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2517a2a385d6SEric Dumazet 	bool first = true;
25184a17fc3aSIlpo Järvinen 
25194a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
25204a17fc3aSIlpo Järvinen 		return;
25214de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
25224a17fc3aSIlpo Järvinen 		return;
25234a17fc3aSIlpo Järvinen 
25244a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
25254a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
25264a17fc3aSIlpo Järvinen 			break;
25274a17fc3aSIlpo Järvinen 
25284a17fc3aSIlpo Järvinen 		space -= skb->len;
25294a17fc3aSIlpo Järvinen 
25304a17fc3aSIlpo Järvinen 		if (first) {
2531a2a385d6SEric Dumazet 			first = false;
25324a17fc3aSIlpo Järvinen 			continue;
25334a17fc3aSIlpo Järvinen 		}
25344a17fc3aSIlpo Järvinen 
25354a17fc3aSIlpo Järvinen 		if (space < 0)
25364a17fc3aSIlpo Järvinen 			break;
25374a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
25384a17fc3aSIlpo Järvinen 		 * the data in the second
25394a17fc3aSIlpo Järvinen 		 */
2540a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
25414a17fc3aSIlpo Järvinen 			break;
25424a17fc3aSIlpo Järvinen 
25434a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
25444a17fc3aSIlpo Järvinen 			break;
25454a17fc3aSIlpo Järvinen 
25464a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
25474a17fc3aSIlpo Järvinen 	}
25484a17fc3aSIlpo Järvinen }
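
/* The only caller in this file, __tcp_retransmit_skb() below, passes
 * space = cur_mss, so collapsing stops before the merged skb would
 * carry more than one current MSS of payload.
 */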
25494a17fc3aSIlpo Järvinen 
25501da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
25511da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
25521da177e4SLinus Torvalds  * error occurred which prevented the send.
25531da177e4SLinus Torvalds  */
255493b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
25551da177e4SLinus Torvalds {
25561da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25575d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
25587d227cd2SSridhar Samudrala 	unsigned int cur_mss;
2559c84a5711SYuchung Cheng 	int err;
25601da177e4SLinus Torvalds 
25615d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
25625d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
25635d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
25645d424d5aSJohn Heffner 	}
25655d424d5aSJohn Heffner 
25661da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2567caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
25681da177e4SLinus Torvalds 	 */
25691da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
25701da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
25711da177e4SLinus Torvalds 		return -EAGAIN;
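	/* The check above leaves 25% slack for copying overhead: e.g.
	 * with sk_wmem_queued = 100 KB and a large sk_sndbuf, retransmits
	 * are deferred once sk_wmem_alloc exceeds 125 KB.
	 */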
25721da177e4SLinus Torvalds 
25731f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25741f3279aeSEric Dumazet 		return -EBUSY;
25751f3279aeSEric Dumazet 
25761da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
25771da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
25781da177e4SLinus Torvalds 			BUG();
25791da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
25801da177e4SLinus Torvalds 			return -ENOMEM;
25811da177e4SLinus Torvalds 	}
25821da177e4SLinus Torvalds 
25837d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
25847d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
25857d227cd2SSridhar Samudrala 
25860c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
25877d227cd2SSridhar Samudrala 
25881da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
25891da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
25901da177e4SLinus Torvalds 	 * case when the window is shrunk to zero, in which case
25911da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
25921da177e4SLinus Torvalds 	 */
25939d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
25949d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
25951da177e4SLinus Torvalds 		return -EAGAIN;
25961da177e4SLinus Torvalds 
25971da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
25986cc55e09SOctavian Purdila 		if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
25991da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
260002276f3cSIlpo Järvinen 	} else {
26019eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
26029eb9362eSIlpo Järvinen 
26039eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
2604c52e2421SEric Dumazet 			if (skb_unclone(skb, GFP_ATOMIC))
2605c52e2421SEric Dumazet 				return -ENOMEM;
26065bbb432cSEric Dumazet 			tcp_init_tso_segs(skb, cur_mss);
26079eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
26089eb9362eSIlpo Järvinen 		}
26091da177e4SLinus Torvalds 	}
26101da177e4SLinus Torvalds 
261149213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
261249213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
261349213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
261449213555SDaniel Borkmann 
26151da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
26161da177e4SLinus Torvalds 
26171da177e4SLinus Torvalds 	/* Make a copy if the first transmission SKB clone we made
26181da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
26191da177e4SLinus Torvalds 	 */
26201da177e4SLinus Torvalds 
262150bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
262250bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
262350bceae9SThomas Graf 	 * beyond what csum_start can cover.
262450bceae9SThomas Graf 	 */
262550bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
262650bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
2627117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2628117632e6SEric Dumazet 						   GFP_ATOMIC);
2629c84a5711SYuchung Cheng 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2630117632e6SEric Dumazet 			     -ENOBUFS;
2631117632e6SEric Dumazet 	} else {
2632c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2633117632e6SEric Dumazet 	}
2634c84a5711SYuchung Cheng 
2635fc9f3501SEric Dumazet 	if (likely(!err)) {
2636c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2637fc9f3501SEric Dumazet 		/* Update global TCP statistics. */
2638fc9f3501SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2639fc9f3501SEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2640fc9f3501SEric Dumazet 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2641fc9f3501SEric Dumazet 		tp->total_retrans++;
2642fc9f3501SEric Dumazet 	}
2643c84a5711SYuchung Cheng 	return err;
264493b174adSYuchung Cheng }
264593b174adSYuchung Cheng 
264693b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
264793b174adSYuchung Cheng {
264893b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
264993b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
26501da177e4SLinus Torvalds 
26511da177e4SLinus Torvalds 	if (err == 0) {
26521da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
26531da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2654e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
26551da177e4SLinus Torvalds 		}
26561da177e4SLinus Torvalds #endif
26571da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
26581da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
26591da177e4SLinus Torvalds 
26601da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
26611da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
26627faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
26631da177e4SLinus Torvalds 
26641f3279aeSEric Dumazet 	} else if (err != -EBUSY) {
266524ab6becSYuchung Cheng 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
26661da177e4SLinus Torvalds 	}
26676e08d5e3SYuchung Cheng 
26686e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
26696e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
26706e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
26711da177e4SLinus Torvalds 	return err;
26721da177e4SLinus Torvalds }
26731da177e4SLinus Torvalds 
267467edfef7SAndi Kleen /* Check if forward retransmits are possible in the current
267567edfef7SAndi Kleen  * window/congestion state.
267667edfef7SAndi Kleen  */
2677a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2678b5afe7bcSIlpo Järvinen {
2679b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2680cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2681b5afe7bcSIlpo Järvinen 
2682b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2683b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2684a2a385d6SEric Dumazet 		return false;
2685b5afe7bcSIlpo Järvinen 
2686b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2687b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2688a2a385d6SEric Dumazet 		return false;
2689b5afe7bcSIlpo Järvinen 
2690b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward transmission
2691b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2692b5afe7bcSIlpo Järvinen 	 *
2693b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2694b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2695b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2696b5afe7bcSIlpo Järvinen 	 */
2697b5afe7bcSIlpo Järvinen 
2698b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2699a2a385d6SEric Dumazet 		return false;
2700b5afe7bcSIlpo Järvinen 
2701a2a385d6SEric Dumazet 	return true;
2702b5afe7bcSIlpo Järvinen }
2703b5afe7bcSIlpo Järvinen 
27041da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
27051da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
27061da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
27071da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
27081da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
27091da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
27101da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
27111da177e4SLinus Torvalds  */
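/* Rough shape of the walk below: segments marked lost are sent first
 * (counted as fast or slow-start retransmits depending on CA state);
 * past retransmit_high the loop may switch to forward retransmissions
 * up to the highest SACKed sequence, per rule 3 of the RFC 3517
 * NextSeg() algorithm, using 'hole' to backtrack to the first skb that
 * is neither SACKed nor lost.
 */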
27121da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
27131da177e4SLinus Torvalds {
27146687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
27151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27161da177e4SLinus Torvalds 	struct sk_buff *skb;
27170e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2718618d9f25SIlpo Järvinen 	u32 last_lost;
271961eb55f4SIlpo Järvinen 	int mib_idx;
27200e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
27216a438bbeSStephen Hemminger 
272245e77d31SIlpo Järvinen 	if (!tp->packets_out)
272345e77d31SIlpo Järvinen 		return;
272445e77d31SIlpo Järvinen 
272508ebd172SIlpo Järvinen 	if (!tp->lost_out)
272608ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
272708ebd172SIlpo Järvinen 
2728618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
27296a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2730618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2731618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2732618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2733618d9f25SIlpo Järvinen 	} else {
2734fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2735618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2736618d9f25SIlpo Järvinen 	}
27371da177e4SLinus Torvalds 
2738fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
27391da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
27401da177e4SLinus Torvalds 
2741fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2742fe067e8aSDavid S. Miller 			break;
27436a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
274451456b29SIan Morris 		if (!hole)
27456a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
27466a438bbeSStephen Hemminger 
27471da177e4SLinus Torvalds 		/* Assume this retransmit will generate
27481da177e4SLinus Torvalds 		 * only one packet for congestion window
27491da177e4SLinus Torvalds 		 * calculation purposes.  This works because
27501da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
27511da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
27521da177e4SLinus Torvalds 		 * packet counting works out.
27531da177e4SLinus Torvalds 		 */
27541da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
27551da177e4SLinus Torvalds 			return;
27560e1c54c2SIlpo Järvinen 
27570e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
27580e1c54c2SIlpo Järvinen begin_fwd:
27590e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2760006f582cSIlpo Järvinen 				break;
27610e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
27620e1c54c2SIlpo Järvinen 
27630e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2764618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
27650e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
27660e1c54c2SIlpo Järvinen 				break;
27670e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
276800db4124SIan Morris 			if (hole) {
27690e1c54c2SIlpo Järvinen 				skb = hole;
27700e1c54c2SIlpo Järvinen 				hole = NULL;
27710e1c54c2SIlpo Järvinen 			}
27720e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
27730e1c54c2SIlpo Järvinen 			goto begin_fwd;
27740e1c54c2SIlpo Järvinen 
27750e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
277651456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
27770e1c54c2SIlpo Järvinen 				hole = skb;
277861eb55f4SIlpo Järvinen 			continue;
27791da177e4SLinus Torvalds 
27800e1c54c2SIlpo Järvinen 		} else {
2781618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
27820e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
27830e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
27840e1c54c2SIlpo Järvinen 			else
27850e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
27860e1c54c2SIlpo Järvinen 		}
27870e1c54c2SIlpo Järvinen 
27880e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
278961eb55f4SIlpo Järvinen 			continue;
279040b215e5SPavel Emelyanov 
279124ab6becSYuchung Cheng 		if (tcp_retransmit_skb(sk, skb))
27921da177e4SLinus Torvalds 			return;
279324ab6becSYuchung Cheng 
2794de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
27951da177e4SLinus Torvalds 
2796684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2797a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2798a262f0cdSNandita Dukkipati 
2799fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2800463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
28013f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
28023f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
28031da177e4SLinus Torvalds 	}
28041da177e4SLinus Torvalds }
28051da177e4SLinus Torvalds 
2806d83769a5SEric Dumazet /* We allow FIN packets to exceed memory limits to expedite
2807d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
2808845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
2809845704a5SEric Dumazet  * or even be forced to close flow without any FIN.
2810a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
2811a6c5ea4cSEric Dumazet  * with edge trigger epoll()
2812d83769a5SEric Dumazet  */
2813a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
2814d83769a5SEric Dumazet {
2815e805605cSJohannes Weiner 	int amt;
2816d83769a5SEric Dumazet 
2817d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
2818d83769a5SEric Dumazet 		return;
2819d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
2820d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2821e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
2822e805605cSJohannes Weiner 
2823baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2824baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
2825d83769a5SEric Dumazet }
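
/* Example: forcing through a FIN skb of truesize 2048 with
 * sk_forward_alloc == 0 makes sk_forced_mem_schedule() charge
 * sk_mem_pages(2048) == 1 quantum (one page) of forward allocation,
 * plus the memcg if one is attached.
 */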
2826d83769a5SEric Dumazet 
2827845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
2828845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
28291da177e4SLinus Torvalds  */
28301da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
28311da177e4SLinus Torvalds {
2832845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
28331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
28341da177e4SLinus Torvalds 
2835845704a5SEric Dumazet 	/* Optimization: tack on the FIN if we have one skb in the write queue and
2836845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
2837845704a5SEric Dumazet 	 * Note: in the latter case, FIN packet will be sent after a timeout,
2838845704a5SEric Dumazet 	 * as TCP stack thinks it has already been transmitted.
28391da177e4SLinus Torvalds 	 */
2840b8da51ebSEric Dumazet 	if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
2841845704a5SEric Dumazet coalesce:
2842845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
2843845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
28441da177e4SLinus Torvalds 		tp->write_seq++;
2845845704a5SEric Dumazet 		if (!tcp_send_head(sk)) {
2846845704a5SEric Dumazet 			/* This means tskb was already sent.
2847845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
2848845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
2849845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
2850845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
2851845704a5SEric Dumazet 			 */
2852845704a5SEric Dumazet 			tp->snd_nxt++;
2853845704a5SEric Dumazet 			return;
2854845704a5SEric Dumazet 		}
28551da177e4SLinus Torvalds 	} else {
2856845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
2857845704a5SEric Dumazet 		if (unlikely(!skb)) {
2858845704a5SEric Dumazet 			if (tskb)
2859845704a5SEric Dumazet 				goto coalesce;
2860845704a5SEric Dumazet 			return;
28611da177e4SLinus Torvalds 		}
2862d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
2863a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
28641da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2865e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2866a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
28671da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
28681da177e4SLinus Torvalds 	}
2869845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
28701da177e4SLinus Torvalds }
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
28731da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
28741da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
287565bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
28761da177e4SLinus Torvalds  */
2877dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
28781da177e4SLinus Torvalds {
28791da177e4SLinus Torvalds 	struct sk_buff *skb;
28801da177e4SLinus Torvalds 
28811da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
28821da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
28831da177e4SLinus Torvalds 	if (!skb) {
28844e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
28851da177e4SLinus Torvalds 		return;
28861da177e4SLinus Torvalds 	}
28871da177e4SLinus Torvalds 
28881da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
28891da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2890e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2891a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
2892675ee231SEric Dumazet 	skb_mstamp_get(&skb->skb_mstamp);
28931da177e4SLinus Torvalds 	/* Send it off. */
2894dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
28954e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
289626af65cbSSridhar Samudrala 
289781cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
28981da177e4SLinus Torvalds }
28991da177e4SLinus Torvalds 
290067edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
290167edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
29021da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
29031da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
29041da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
29051da177e4SLinus Torvalds  */
29061da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
29071da177e4SLinus Torvalds {
29081da177e4SLinus Torvalds 	struct sk_buff *skb;
29091da177e4SLinus Torvalds 
2910fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
291151456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
291291df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
29131da177e4SLinus Torvalds 		return -EFAULT;
29141da177e4SLinus Torvalds 	}
29154de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
29161da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
29171da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
291851456b29SIan Morris 			if (!nskb)
29191da177e4SLinus Torvalds 				return -ENOMEM;
2920fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
2921f4a775d1SEric Dumazet 			__skb_header_release(nskb);
2922fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
29233ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
29243ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
29253ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
29261da177e4SLinus Torvalds 			skb = nskb;
29271da177e4SLinus Torvalds 		}
29281da177e4SLinus Torvalds 
29294de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2930735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
29311da177e4SLinus Torvalds 	}
2932dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
29331da177e4SLinus Torvalds }
29341da177e4SLinus Torvalds 
29354aea39c1SEric Dumazet /**
29364aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
29374aea39c1SEric Dumazet  * @sk: listener socket
29384aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
29394aea39c1SEric Dumazet  * @req: request_sock pointer
 * @foc: Fast Open cookie to include, if any
 * @synack_type: controls skb ownership/accounting (see the switch below)
29404aea39c1SEric Dumazet  *
29414aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
29424aea39c1SEric Dumazet  * @dst is consumed : Caller should not use it again.
29434aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
29445d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
2945e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
2946ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
2947*b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
29481da177e4SLinus Torvalds {
29492e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
29505d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
295180f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
29525d062de7SEric Dumazet 	struct tcp_out_options opts;
29535d062de7SEric Dumazet 	struct sk_buff *skb;
2954bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
29555d062de7SEric Dumazet 	struct tcphdr *th;
29565d062de7SEric Dumazet 	u16 user_mss;
2957f5fff5dcSTom Quetchenbach 	int mss;
29581da177e4SLinus Torvalds 
2959ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
29604aea39c1SEric Dumazet 	if (unlikely(!skb)) {
29614aea39c1SEric Dumazet 		dst_release(dst);
29621da177e4SLinus Torvalds 		return NULL;
29634aea39c1SEric Dumazet 	}
29641da177e4SLinus Torvalds 	/* Reserve space for headers. */
29651da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
29661da177e4SLinus Torvalds 
2967*b3d05147SEric Dumazet 	switch (synack_type) {
2968*b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
29699e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
2970*b3d05147SEric Dumazet 		break;
2971*b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
2972*b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
2973*b3d05147SEric Dumazet 		 * to avoid false sharing.
2974*b3d05147SEric Dumazet 		 */
2975*b3d05147SEric Dumazet 		break;
2976*b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
2977ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that multiple
2978ca6fb065SEric Dumazet 		 * cpus might call us concurrently.
2979ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can safely promote it to rw.
2980ca6fb065SEric Dumazet 		 */
2981ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
2982*b3d05147SEric Dumazet 		break;
2983ca6fb065SEric Dumazet 	}
29844aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
29851da177e4SLinus Torvalds 
29860dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
29875d062de7SEric Dumazet 	user_mss = READ_ONCE(tp->rx_opt.user_mss);
29885d062de7SEric Dumazet 	if (user_mss && user_mss < mss)
29895d062de7SEric Dumazet 		mss = user_mss;
2990f5fff5dcSTom Quetchenbach 
299133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
29928b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
29938b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
29947faee5c0SEric Dumazet 		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
29958b5f12d0SFlorian Westphal 	else
29968b5f12d0SFlorian Westphal #endif
29977faee5c0SEric Dumazet 	skb_mstamp_get(&skb->skb_mstamp);
299880f03e27SEric Dumazet 
299980f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
300080f03e27SEric Dumazet 	rcu_read_lock();
3001fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
300280f03e27SEric Dumazet #endif
300358d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
300437bfbddaSEric Dumazet 	tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
300537bfbddaSEric Dumazet 			  sizeof(*th);
300633ad798cSAdam Langley 
3007aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3008aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
30091da177e4SLinus Torvalds 
3010aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
30111da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
30121da177e4SLinus Torvalds 	th->syn = 1;
30131da177e4SLinus Torvalds 	th->ack = 1;
30146ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3015b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3016634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3017e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
3018e870a8efSIlpo Järvinen 	 * not even correctly set)
3019e870a8efSIlpo Järvinen 	 */
3020e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
3021a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
30224957faadSWilliam Allen Simpson 
30231da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
30248336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
30258336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
30261da177e4SLinus Torvalds 
30271da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3028ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
30295d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
30301da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
3031a0b8486cSEric Dumazet 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
3032cfb6eeb4SYOSHIFUJI Hideaki 
3033cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3034cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
303580f03e27SEric Dumazet 	if (md5)
3036bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
303739f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
303880f03e27SEric Dumazet 	rcu_read_unlock();
3039cfb6eeb4SYOSHIFUJI Hideaki #endif
3040cfb6eeb4SYOSHIFUJI Hideaki 
3041b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any); clean up our debris */
3042b50edd78SEric Dumazet 	skb->tstamp.tv64 = 0;
30431da177e4SLinus Torvalds 	return skb;
30441da177e4SLinus Torvalds }
30454bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
30461da177e4SLinus Torvalds 
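/* Editor's sketch (not part of the original source): how a caller might
 * pick a synack_type for tcp_make_synack().  pick_synack_type() is a
 * hypothetical helper; the real decision lives in tcp_conn_request()
 * and the Fast Open code.
 */
static inline enum tcp_synack_type pick_synack_type(bool want_cookie,
						    bool fastopen)
{
	if (fastopen)
		return TCP_SYNACK_FASTOPEN;	/* skb charged to (const) child sk */
	if (want_cookie)
		return TCP_SYNACK_COOKIE;	/* synflood: leave skb unowned */
	return TCP_SYNACK_NORMAL;		/* skb charged to req_to_sk(req) */
}
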
304781164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
304881164413SDaniel Borkmann {
304981164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
305081164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
305181164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
305281164413SDaniel Borkmann 
305381164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
305481164413SDaniel Borkmann 		return;
305581164413SDaniel Borkmann 
305681164413SDaniel Borkmann 	rcu_read_lock();
305781164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
305881164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
305981164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
306081164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
306181164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
306281164413SDaniel Borkmann 	}
306381164413SDaniel Borkmann 	rcu_read_unlock();
306481164413SDaniel Borkmann }
306581164413SDaniel Borkmann 
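/* Usage note (editor's addition): the RTAX_CC_ALGO metric consulted in
 * tcp_ca_dst_init() is set per route from userspace with iproute2, e.g.
 *
 *	ip route replace 10.0.0.0/8 via 10.0.0.1 congctl dctcp
 *	ip route replace 10.0.0.0/8 via 10.0.0.1 congctl lock dctcp
 *
 * The "lock" variant is what tcp_ca_dst_locked() reports; a locked
 * choice should then refuse an override via the TCP_CONGESTION socket
 * option (tcp_set_congestion_control() returns -EPERM).
 */
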
306667edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3067f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
30681da177e4SLinus Torvalds {
3069cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
30701da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30711da177e4SLinus Torvalds 	__u8 rcv_wscale;
30721da177e4SLinus Torvalds 
30731da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
30741da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
30751da177e4SLinus Torvalds 	 */
30761da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
3077bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
30781da177e4SLinus Torvalds 
3079cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
308000db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3081cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3082cfb6eeb4SYOSHIFUJI Hideaki #endif
3083cfb6eeb4SYOSHIFUJI Hideaki 
30841da177e4SLinus Torvalds 	/* If the user gave us a TCP_MAXSEG, record it to clamp */
30851da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
30861da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
30871da177e4SLinus Torvalds 	tp->max_window = 0;
30885d424d5aSJohn Heffner 	tcp_mtup_init(sk);
30891da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
30901da177e4SLinus Torvalds 
309181164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
309281164413SDaniel Borkmann 
30931da177e4SLinus Torvalds 	if (!tp->window_clamp)
30941da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
30950dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
3096f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
3097f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
3098f5fff5dcSTom Quetchenbach 
30991da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
31001da177e4SLinus Torvalds 
3101e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3102e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3103e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3104e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3105e88c64f0SHagen Paul Pfeifer 
31061da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
31071da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
31081da177e4SLinus Torvalds 				  &tp->rcv_wnd,
31091da177e4SLinus Torvalds 				  &tp->window_clamp,
3110bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
311131d12926Slaurent chavey 				  &rcv_wscale,
311231d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
31131da177e4SLinus Torvalds 
31141da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
31151da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
31161da177e4SLinus Torvalds 
31171da177e4SLinus Torvalds 	sk->sk_err = 0;
31181da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
31191da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3120ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
31211da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
31221da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
312333f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3124370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3125ee995283SPavel Emelyanov 
3126ee995283SPavel Emelyanov 	if (likely(!tp->repair))
31271da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3128c7781a6eSAndrew Vagin 	else
3129c7781a6eSAndrew Vagin 		tp->rcv_tstamp = tcp_time_stamp;
3130ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3131ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
31321da177e4SLinus Torvalds 
3133463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
3134463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
31351da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
31361da177e4SLinus Torvalds }
31371da177e4SLinus Torvalds 
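/* Worked example (editor's addition): with a 1500-byte route MTU and the
 * application setting TCP_MAXSEG to 1000, the clamps above yield
 *
 *	tp->rx_opt.mss_clamp = 1000
 *	tp->advmss           = min(dst_metric_advmss(dst), 1000)
 *	                     = min(1460, 1000) = 1000
 *
 * so both the advertised MSS and the effective send MSS honour the user
 * limit even though the path could carry 1460-byte segments.
 */
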
3138783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3139783237e8SYuchung Cheng {
3140783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3141783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3142783237e8SYuchung Cheng 
3143783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3144f4a775d1SEric Dumazet 	__skb_header_release(skb);
3145783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
3146783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3147783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3148783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3149783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3150783237e8SYuchung Cheng }
3151783237e8SYuchung Cheng 
3152783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3153783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3154783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3155783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3156783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending a
3157783237e8SYuchung Cheng  * regular SYN with the Fast Open cookie request option.
3158783237e8SYuchung Cheng  */
3159783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3160783237e8SYuchung Cheng {
3161783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3162783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
316307e100f9SEric Dumazet 	int syn_loss = 0, space, err = 0;
3164aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
3165355a901eSEric Dumazet 	struct sk_buff *syn_data;
3166783237e8SYuchung Cheng 
316767da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3168aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
3169aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
3170aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
3171aab48743SYuchung Cheng 	if (syn_loss > 1 &&
3172aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
3173aab48743SYuchung Cheng 		fo->cookie.len = -1;
3174aab48743SYuchung Cheng 		goto fallback;
3175aab48743SYuchung Cheng 	}
3176aab48743SYuchung Cheng 
317767da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
317867da22d2SYuchung Cheng 		fo->cookie.len = -1;
317967da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
3180783237e8SYuchung Cheng 		goto fallback;
3181783237e8SYuchung Cheng 
3182783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3183783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3184783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in the SYN :(
3185783237e8SYuchung Cheng 	 */
3186783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
3187783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
31881b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3189783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3190783237e8SYuchung Cheng 
3191f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3192f5ddcbbbSEric Dumazet 
3193f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3194f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3195f5ddcbbbSEric Dumazet 
3196eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3197355a901eSEric Dumazet 	if (!syn_data)
3198783237e8SYuchung Cheng 		goto fallback;
3199355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3200355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
320107e100f9SEric Dumazet 	if (space) {
320207e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
320357be5bdaSAl Viro 					    &fo->data->msg_iter);
320457be5bdaSAl Viro 		if (unlikely(!copied)) {
3205355a901eSEric Dumazet 			kfree_skb(syn_data);
3206783237e8SYuchung Cheng 			goto fallback;
3207783237e8SYuchung Cheng 		}
320857be5bdaSAl Viro 		if (copied != space) {
320957be5bdaSAl Viro 			skb_trim(syn_data, copied);
321057be5bdaSAl Viro 			space = copied;
321157be5bdaSAl Viro 		}
321207e100f9SEric Dumazet 	}
3213355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3214355a901eSEric Dumazet 	if (space == fo->size)
3215355a901eSEric Dumazet 		fo->data = NULL;
3216355a901eSEric Dumazet 	fo->copied = space;
3217783237e8SYuchung Cheng 
3218355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
3219355a901eSEric Dumazet 
3220355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3221355a901eSEric Dumazet 
3222355a901eSEric Dumazet 	syn->skb_mstamp = syn_data->skb_mstamp;
3223355a901eSEric Dumazet 
3224355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3225355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3226355a901eSEric Dumazet 	 * that we keep in the write queue in case of a retransmit, as we
3227355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3228431a9124SEric Dumazet 	 */
3229355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3230355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3231355a901eSEric Dumazet 	if (!err) {
323267da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
3233f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3234783237e8SYuchung Cheng 		goto done;
3235783237e8SYuchung Cheng 	}
3236783237e8SYuchung Cheng 
3237783237e8SYuchung Cheng fallback:
3238783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3239783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3240783237e8SYuchung Cheng 		fo->cookie.len = 0;
3241783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3242783237e8SYuchung Cheng 	if (err)
3243783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3244783237e8SYuchung Cheng done:
3245783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3246783237e8SYuchung Cheng 	return err;
3247783237e8SYuchung Cheng }
3248783237e8SYuchung Cheng 
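/* Example (editor's addition): userspace reaches tcp_send_syn_data() by
 * sending on a not-yet-connected TCP socket with MSG_FASTOPEN, provided
 * the client bit (TFO_CLIENT_ENABLE) of net.ipv4.tcp_fastopen is set.
 * No explicit connect() call is made:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * tcp_sendmsg() then stashes the data in tp->fastopen_req, and the
 * implicit connect routes it through the function above.
 */
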
324967edfef7SAndi Kleen /* Build a SYN and send it off. */
32501da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
32511da177e4SLinus Torvalds {
32521da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32531da177e4SLinus Torvalds 	struct sk_buff *buff;
3254ee586811SEric Paris 	int err;
32551da177e4SLinus Torvalds 
32561da177e4SLinus Torvalds 	tcp_connect_init(sk);
32571da177e4SLinus Torvalds 
32582b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
32592b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
32602b916477SAndrey Vagin 		return 0;
32612b916477SAndrey Vagin 	}
32622b916477SAndrey Vagin 
3263eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3264355a901eSEric Dumazet 	if (unlikely(!buff))
32651da177e4SLinus Torvalds 		return -ENOBUFS;
32661da177e4SLinus Torvalds 
3267a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
32687faee5c0SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp;
3269783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3270735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
32711da177e4SLinus Torvalds 
3272783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3273783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3274783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3275ee586811SEric Paris 	if (err == -ECONNREFUSED)
3276ee586811SEric Paris 		return err;
3277bd37a088SWei Yongjun 
3278bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3279bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3280bd37a088SWei Yongjun 	 */
3281bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3282bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
328381cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
32841da177e4SLinus Torvalds 
32851da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
32863f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
32873f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
32881da177e4SLinus Torvalds 	return 0;
32891da177e4SLinus Torvalds }
32904bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
32911da177e4SLinus Torvalds 
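/* Note (editor's addition): the tp->repair shortcut in tcp_connect()
 * exists for checkpoint/restore tools such as CRIU, which switch the
 * socket into repair mode before re-creating the connection, e.g.
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * In repair mode the connect() above completes without emitting a SYN
 * or starting the retransmit timer.
 */
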
32921da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
32931da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
32941da177e4SLinus Torvalds  * for details.
32951da177e4SLinus Torvalds  */
32961da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
32971da177e4SLinus Torvalds {
3298463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3299463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
33001da177e4SLinus Torvalds 	unsigned long timeout;
33011da177e4SLinus Torvalds 
33029890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
33039890092eSFlorian Westphal 
33041da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3305463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
33061da177e4SLinus Torvalds 		int max_ato = HZ / 2;
33071da177e4SLinus Torvalds 
3308056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3309056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
33101da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
33111da177e4SLinus Torvalds 
33121da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
33131da177e4SLinus Torvalds 
33141da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3315463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
33161da177e4SLinus Torvalds 		 * directly.
33171da177e4SLinus Torvalds 		 */
3318740b0f18SEric Dumazet 		if (tp->srtt_us) {
3319740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3320740b0f18SEric Dumazet 					TCP_DELACK_MIN);
33211da177e4SLinus Torvalds 
33221da177e4SLinus Torvalds 			if (rtt < max_ato)
33231da177e4SLinus Torvalds 				max_ato = rtt;
33241da177e4SLinus Torvalds 		}
33251da177e4SLinus Torvalds 
33261da177e4SLinus Torvalds 		ato = min(ato, max_ato);
33271da177e4SLinus Torvalds 	}
33281da177e4SLinus Torvalds 
33291da177e4SLinus Torvalds 	/* Stay within the limit we were given */
33301da177e4SLinus Torvalds 	timeout = jiffies + ato;
33311da177e4SLinus Torvalds 
33321da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already. */
3333463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
33341da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
33351da177e4SLinus Torvalds 		 * send ACK now.
33361da177e4SLinus Torvalds 		 */
3337463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3338463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
33391da177e4SLinus Torvalds 			tcp_send_ack(sk);
33401da177e4SLinus Torvalds 			return;
33411da177e4SLinus Torvalds 		}
33421da177e4SLinus Torvalds 
3343463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3344463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
33451da177e4SLinus Torvalds 	}
3346463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3347463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3348463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
33491da177e4SLinus Torvalds }
33501da177e4SLinus Torvalds 
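/* Worked example (editor's addition, assuming HZ=1000): for a smoothed
 * RTT of 8 ms, tp->srtt_us = 8000 << 3, so in tcp_send_delayed_ack()
 *
 *	rtt     = max(usecs_to_jiffies(8000), TCP_DELACK_MIN)
 *	        = max(8, 40) = 40 jiffies
 *	max_ato = min(HZ / 2, rtt) = 40
 *
 * and ato is clamped to 40 ms rather than the 500 ms slow-path ceiling,
 * keeping delayed ACKs proportional to the measured RTT.
 */
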
33511da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
33521da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
33531da177e4SLinus Torvalds {
33541da177e4SLinus Torvalds 	struct sk_buff *buff;
33551da177e4SLinus Torvalds 
3356058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3357058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3358058dc334SIlpo Järvinen 		return;
3359058dc334SIlpo Järvinen 
33609890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
33619890092eSFlorian Westphal 
33621da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
33631da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
33641da177e4SLinus Torvalds 	 * sock.
33651da177e4SLinus Torvalds 	 */
33667450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
33677450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
33687450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3369463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3370463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
33713f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
33723f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
33731da177e4SLinus Torvalds 		return;
33741da177e4SLinus Torvalds 	}
33751da177e4SLinus Torvalds 
33761da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
33771da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3378a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
33791da177e4SLinus Torvalds 
338098781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
338198781965SEric Dumazet 	 * too much.
338298781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
338398781965SEric Dumazet 	 * We also avoid tcp_wfree() overhead (cache line miss accessing
338498781965SEric Dumazet 	 * tp->tsq_flags) by using regular sock_wfree()
338598781965SEric Dumazet 	 */
338698781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
338798781965SEric Dumazet 
33881da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
33897faee5c0SEric Dumazet 	skb_mstamp_get(&buff->skb_mstamp);
33907450aaf6SEric Dumazet 	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
33911da177e4SLinus Torvalds }
3392e3118e83SDaniel Borkmann EXPORT_SYMBOL_GPL(tcp_send_ack);
33931da177e4SLinus Torvalds 
33941da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
33951da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
33961da177e4SLinus Torvalds  *
33971da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
33981da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
33991da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
34001da177e4SLinus Torvalds  *
34011da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
34021da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
34031da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
34041da177e4SLinus Torvalds  */
3405e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
34061da177e4SLinus Torvalds {
34071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34081da177e4SLinus Torvalds 	struct sk_buff *skb;
34091da177e4SLinus Torvalds 
34101da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
34117450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
34127450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
341351456b29SIan Morris 	if (!skb)
34141da177e4SLinus Torvalds 		return -1;
34151da177e4SLinus Torvalds 
34161da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
34171da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
34181da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
34191da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
34201da177e4SLinus Torvalds 	 * send it.
34211da177e4SLinus Torvalds 	 */
3422a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
34237faee5c0SEric Dumazet 	skb_mstamp_get(&skb->skb_mstamp);
3424e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
34257450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
34261da177e4SLinus Torvalds }
34271da177e4SLinus Torvalds 
3428ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3429ee995283SPavel Emelyanov {
3430ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3431ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3432e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3433ee995283SPavel Emelyanov 	}
3434ee995283SPavel Emelyanov }
3435ee995283SPavel Emelyanov 
343667edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3437e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
34381da177e4SLinus Torvalds {
34391da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34401da177e4SLinus Torvalds 	struct sk_buff *skb;
34411da177e4SLinus Torvalds 
3442058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3443058dc334SIlpo Järvinen 		return -1;
3444058dc334SIlpo Järvinen 
344500db4124SIan Morris 	skb = tcp_send_head(sk);
344600db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
34471da177e4SLinus Torvalds 		int err;
34480c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
344990840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
34501da177e4SLinus Torvalds 
34511da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
34521da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
34531da177e4SLinus Torvalds 
34541da177e4SLinus Torvalds 		/* We are probing the opening of a window
34551da177e4SLinus Torvalds 		 * but the window size is != 0;
34561da177e4SLinus Torvalds 		 * this must have been a result of SWS avoidance (sender)
34571da177e4SLinus Torvalds 		 */
34581da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
34591da177e4SLinus Torvalds 		    skb->len > mss) {
34601da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
34614de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
34626cc55e09SOctavian Purdila 			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
34631da177e4SLinus Torvalds 				return -1;
34641da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
34655bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
34661da177e4SLinus Torvalds 
34674de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3468dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
346966f5fe62SIlpo Järvinen 		if (!err)
347066f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
34711da177e4SLinus Torvalds 		return err;
34721da177e4SLinus Torvalds 	} else {
347333f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3474e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3475e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
34761da177e4SLinus Torvalds 	}
34771da177e4SLinus Torvalds }
34781da177e4SLinus Torvalds 
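/* Editor's note: the mib argument distinguishes the two timer callers of
 * tcp_write_wakeup(): the zero-window probe timer passes
 * LINUX_MIB_TCPWINPROBE (see tcp_send_probe0() below), while the
 * keepalive timer passes LINUX_MIB_TCPKEEPALIVE, so the same wakeup
 * path is accounted separately in /proc/net/netstat.
 */
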
34791da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
34801da177e4SLinus Torvalds  * a partial packet; else send a zero-window probe.
34811da177e4SLinus Torvalds  */
34821da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
34831da177e4SLinus Torvalds {
3484463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
34851da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3486c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3487fcdd1cf4SEric Dumazet 	unsigned long probe_max;
34881da177e4SLinus Torvalds 	int err;
34891da177e4SLinus Torvalds 
3490e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
34911da177e4SLinus Torvalds 
3492fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
34931da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
34946687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3495463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
34961da177e4SLinus Torvalds 		return;
34971da177e4SLinus Torvalds 	}
34981da177e4SLinus Torvalds 
34991da177e4SLinus Torvalds 	if (err <= 0) {
3500c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3501463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
35026687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3503fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
35041da177e4SLinus Torvalds 	} else {
35051da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
35061da177e4SLinus Torvalds 		 * do not back off and do not remember icsk_probes_out.
35071da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
35081da177e4SLinus Torvalds 		 *
35091da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
35101da177e4SLinus Torvalds 		 */
35116687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
35126687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3513fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
35141da177e4SLinus Torvalds 	}
3515fcdd1cf4SEric Dumazet 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
351621c8fe99SEric Dumazet 				  tcp_probe0_when(sk, probe_max),
3517fcdd1cf4SEric Dumazet 				  TCP_RTO_MAX);
35181da177e4SLinus Torvalds }
35195db92c99SOctavian Purdila 
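/* For reference (editor's addition): tcp_probe0_when(), defined in
 * include/net/tcp.h, computes the next probe timeout roughly as
 *
 *	when = max(icsk_rto, TCP_RTO_MIN) << icsk_backoff;
 *	return min(when, max_when);
 *
 * so successfully sent probes back off exponentially up to TCP_RTO_MAX,
 * while local-congestion failures are retried no later than
 * TCP_RESOURCE_PROBE_INTERVAL, without inflating icsk_backoff.
 */
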
3520ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
35215db92c99SOctavian Purdila {
35225db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
35235db92c99SOctavian Purdila 	struct flowi fl;
35245db92c99SOctavian Purdila 	int res;
35255db92c99SOctavian Purdila 
352658d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3527*b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
35285db92c99SOctavian Purdila 	if (!res) {
35295db92c99SOctavian Purdila 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
35305db92c99SOctavian Purdila 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
35315db92c99SOctavian Purdila 	}
35325db92c99SOctavian Purdila 	return res;
35335db92c99SOctavian Purdila }
35345db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3535