xref: /linux/net/ipv4/tcp_output.c (revision 9799ccb0e984a5c1311b22a212e7ff96e8b736de)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
802c30a84SJesper Juhl  * Authors:	Ross Biro
91da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
101da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
111da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
121da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
131da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
141da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
151da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
161da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
171da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
181da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
191da177e4SLinus Torvalds  */
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds /*
221da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
231da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
241da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
251da177e4SLinus Torvalds  *				:	AF independence
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
281da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
291da177e4SLinus Torvalds  *					during syn/ack processing.
301da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
311da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
321da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
331da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
341da177e4SLinus Torvalds  *
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
3791df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3891df42beSJoe Perches 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
425a0e3ad6STejun Heo #include <linux/gfp.h>
431da177e4SLinus Torvalds #include <linux/module.h>
4460e2a778SUrsula Braun #include <linux/static_key.h>
451da177e4SLinus Torvalds 
46e086101bSCong Wang #include <trace/events/tcp.h>
4735089bb2SDavid S. Miller 
48*9799ccb0SEric Dumazet /* Refresh clocks of a TCP socket,
49*9799ccb0SEric Dumazet  * ensuring monotonically increasing values.
50*9799ccb0SEric Dumazet  */
51*9799ccb0SEric Dumazet void tcp_mstamp_refresh(struct tcp_sock *tp)
52*9799ccb0SEric Dumazet {
53*9799ccb0SEric Dumazet 	u64 val = tcp_clock_ns();
54*9799ccb0SEric Dumazet 
55*9799ccb0SEric Dumazet 	/* departure time for next data packet */
56*9799ccb0SEric Dumazet 	if (val > tp->tcp_wstamp_ns)
57*9799ccb0SEric Dumazet 		tp->tcp_wstamp_ns = val;
58*9799ccb0SEric Dumazet 
59*9799ccb0SEric Dumazet 	val = div_u64(val, NSEC_PER_USEC);
60*9799ccb0SEric Dumazet 	if (val > tp->tcp_mstamp)
61*9799ccb0SEric Dumazet 		tp->tcp_mstamp = val;
62*9799ccb0SEric Dumazet }
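
/* Worked example (illustrative note, not in the original source): the two
 * clocks above serve different consumers. tcp_wstamp_ns stays in nanoseconds
 * for pacing/departure-time decisions, while tcp_mstamp is the same instant
 * divided down to microseconds for RTT sampling. With tcp_clock_ns() returning
 * 5000123456, tcp_wstamp_ns becomes 5000123456 and tcp_mstamp becomes 5000123.
 * Both are only ever moved forward, so timestamps can never go backwards.
 */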
63*9799ccb0SEric Dumazet 
6446d3ceabSEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6546d3ceabSEric Dumazet 			   int push_one, gfp_t gfp);
66519855c5SWilliam Allen Simpson 
6767edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
6875c119afSEric Dumazet static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
696ff03ac3SIlpo Järvinen {
706ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
716ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
7266f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
739e412ba7SIlpo Järvinen 
741da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
758512430eSIlpo Järvinen 
7675c119afSEric Dumazet 	__skb_unlink(skb, &sk->sk_write_queue);
7775c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
7875c119afSEric Dumazet 
7966f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
80bec41a11SYuchung Cheng 	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
81750ea2baSYuchung Cheng 		tcp_rearm_rto(sk);
82f19c29e3SYuchung Cheng 
83f7324acdSDavid S. Miller 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
84f19c29e3SYuchung Cheng 		      tcp_skb_pcount(skb));
856a5dc9e5SEric Dumazet }
861da177e4SLinus Torvalds 
87a4ecb15aSCui, Cheng /* Return SND.NXT if the window was not shrunk, or if the amount shrunk was
88a4ecb15aSCui, Cheng  * less than one window scaling factor, due to loss of precision.
891da177e4SLinus Torvalds  * If the window has been shrunk, what should we do? It is not clear at all.
901da177e4SLinus Torvalds  * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
911da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
921da177e4SLinus Torvalds  * invalid. OK, let's make this for now:
931da177e4SLinus Torvalds  */
94cf533ea5SEric Dumazet static inline __u32 tcp_acceptable_seq(const struct sock *sk)
951da177e4SLinus Torvalds {
96cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
979e412ba7SIlpo Järvinen 
98a4ecb15aSCui, Cheng 	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
99a4ecb15aSCui, Cheng 	    (tp->rx_opt.wscale_ok &&
100a4ecb15aSCui, Cheng 	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
1011da177e4SLinus Torvalds 		return tp->snd_nxt;
1021da177e4SLinus Torvalds 	else
10390840defSIlpo Järvinen 		return tcp_wnd_end(tp);
1041da177e4SLinus Torvalds }
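
/* Worked example (illustrative note, not in the original source): with a
 * window scale of 7, advertised windows have 128-byte granularity. If snd_nxt
 * sits up to 127 bytes past tcp_wnd_end(tp), the apparent overshoot may be
 * pure scaling round-off, so snd_nxt is still returned; a larger overshoot
 * falls back to tcp_wnd_end(tp).
 */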
1051da177e4SLinus Torvalds 
1061da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1071da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1081da177e4SLinus Torvalds  *
1091da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1101da177e4SLinus Torvalds  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
1111da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1121da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1131da177e4SLinus Torvalds  *    large MSS.
1141da177e4SLinus Torvalds  * 4. We do not implement 3; instead we advertise an MSS calculated from the
1151da177e4SLinus Torvalds  *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
1161da177e4SLinus Torvalds  *    This may be overridden via information stored in routing table.
1171da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1181da177e4SLinus Torvalds  *    probably even Jumbo".
1191da177e4SLinus Torvalds  */
1201da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1211da177e4SLinus Torvalds {
1221da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
123cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1241da177e4SLinus Torvalds 	int mss = tp->advmss;
1251da177e4SLinus Torvalds 
1260dbaee3bSDavid S. Miller 	if (dst) {
1270dbaee3bSDavid S. Miller 		unsigned int metric = dst_metric_advmss(dst);
1280dbaee3bSDavid S. Miller 
1290dbaee3bSDavid S. Miller 		if (metric < mss) {
1300dbaee3bSDavid S. Miller 			mss = metric;
1311da177e4SLinus Torvalds 			tp->advmss = mss;
1321da177e4SLinus Torvalds 		}
1330dbaee3bSDavid S. Miller 	}
1341da177e4SLinus Torvalds 
1351da177e4SLinus Torvalds 	return (__u16)mss;
1361da177e4SLinus Torvalds }
1371da177e4SLinus Torvalds 
1381da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
1396f021c62SEric Dumazet  * This is the first part of cwnd validation mechanism.
1406f021c62SEric Dumazet  */
1416f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta)
1421da177e4SLinus Torvalds {
143463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1446f021c62SEric Dumazet 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
1451da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1461da177e4SLinus Torvalds 
1476687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1481da177e4SLinus Torvalds 
1496687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1501da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1511da177e4SLinus Torvalds 
152463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1531da177e4SLinus Torvalds 		cwnd >>= 1;
1541da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
155c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
1561da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1571da177e4SLinus Torvalds }
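
/* Worked example (illustrative note, not in the original source): suppose
 * snd_cwnd = 32, restart_cwnd = 10 and the idle time was about 2.5 * icsk_rto.
 * The loop halves cwnd twice (32 -> 16 -> 8) before delta goes non-positive,
 * and the final snd_cwnd is max(8, 10) = 10: the restart window acts as a
 * floor under the exponential decay.
 */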
1581da177e4SLinus Torvalds 
15967edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
16040efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
161cf533ea5SEric Dumazet 				struct sock *sk)
1621da177e4SLinus Torvalds {
163463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
164d635fbe2SEric Dumazet 	const u32 now = tcp_jiffies32;
1651da177e4SLinus Torvalds 
16605c5a46dSNeal Cardwell 	if (tcp_packets_in_flight(tp) == 0)
16705c5a46dSNeal Cardwell 		tcp_ca_event(sk, CA_EVENT_TX_START);
16805c5a46dSNeal Cardwell 
1691da177e4SLinus Torvalds 	tp->lsndtime = now;
1701da177e4SLinus Torvalds 
1711da177e4SLinus Torvalds 	/* If this is a reply sent within ato of the last received
1721da177e4SLinus Torvalds 	 * packet, enter pingpong mode.
1731da177e4SLinus Torvalds 	 */
1742251ae46SJon Maxwell 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
175463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1761da177e4SLinus Torvalds }
1771da177e4SLinus Torvalds 
17867edfef7SAndi Kleen /* Account for an ACK we sent. */
17927cde44aSYuchung Cheng static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
18027cde44aSYuchung Cheng 				      u32 rcv_nxt)
1811da177e4SLinus Torvalds {
1825d9f4262SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1835d9f4262SEric Dumazet 
1845d9f4262SEric Dumazet 	if (unlikely(tp->compressed_ack)) {
185200d95f4SEric Dumazet 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
186200d95f4SEric Dumazet 			      tp->compressed_ack);
1875d9f4262SEric Dumazet 		tp->compressed_ack = 0;
1885d9f4262SEric Dumazet 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
1895d9f4262SEric Dumazet 			__sock_put(sk);
1905d9f4262SEric Dumazet 	}
19127cde44aSYuchung Cheng 
19227cde44aSYuchung Cheng 	if (unlikely(rcv_nxt != tp->rcv_nxt))
19327cde44aSYuchung Cheng 		return;  /* Special ACK sent by DCTCP to reflect ECN */
194463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
195463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1961da177e4SLinus Torvalds }
1971da177e4SLinus Torvalds 
19885f16525SYuchung Cheng 
19985f16525SYuchung Cheng u32 tcp_default_init_rwnd(u32 mss)
20085f16525SYuchung Cheng {
20185f16525SYuchung Cheng 	/* The initial receive window should be twice TCP_INIT_CWND to
2029ef71e0cSWeiping Pan 	 * enable proper sending of new unsent data during fast recovery
20385f16525SYuchung Cheng 	 * (RFC 3517, Section 4, NextSeg() rule (2)). We further place a
20485f16525SYuchung Cheng 	 * limit when mss is larger than 1460.
20585f16525SYuchung Cheng 	 */
20685f16525SYuchung Cheng 	u32 init_rwnd = TCP_INIT_CWND * 2;
20785f16525SYuchung Cheng 
20885f16525SYuchung Cheng 	if (mss > 1460)
20985f16525SYuchung Cheng 		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
21085f16525SYuchung Cheng 	return init_rwnd;
21185f16525SYuchung Cheng }
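
/* Worked example (illustrative note, not in the original source): for
 * mss <= 1460 this simply returns TCP_INIT_CWND * 2 = 20 segments. For a
 * jumbo mss of 4380 it returns max((1460 * 20) / 4380, 2U) = 6 segments,
 * keeping the initial window comparable in bytes (26280 vs. 29200) instead
 * of letting it grow with mss.
 */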
21285f16525SYuchung Cheng 
2131da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
2141da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
2151da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
2161da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
2171da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
2181da177e4SLinus Torvalds  * This MUST be enforced by all callers.
2191da177e4SLinus Torvalds  */
220ceef9ab6SEric Dumazet void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
2211da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
22231d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
22331d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2241da177e4SLinus Torvalds {
2251da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
2261da177e4SLinus Torvalds 
2271da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
2281da177e4SLinus Torvalds 	if (*window_clamp == 0)
229589c49cbSGao Feng 		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
2301da177e4SLinus Torvalds 	space = min(*window_clamp, space);
2311da177e4SLinus Torvalds 
2321da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2331da177e4SLinus Torvalds 	if (space > mss)
234589c49cbSGao Feng 		space = rounddown(space, mss);
2351da177e4SLinus Torvalds 
2361da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
23715d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
23815d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
23915d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
24015d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
24115d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
24215d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2431da177e4SLinus Torvalds 	 */
244ceef9ab6SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
2451da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
24615d99e02SRick Jones 	else
24715d99e02SRick Jones 		(*rcv_wnd) = space;
24815d99e02SRick Jones 
2491da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2501da177e4SLinus Torvalds 	if (wscale_ok) {
251589c49cbSGao Feng 		/* Set window scaling on max possible window */
252356d1833SEric Dumazet 		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
253f626300aSSoheil Hassas Yeganeh 		space = max_t(u32, space, sysctl_rmem_max);
254316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
255589c49cbSGao Feng 		while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
2561da177e4SLinus Torvalds 			space >>= 1;
2571da177e4SLinus Torvalds 			(*rcv_wscale)++;
2581da177e4SLinus Torvalds 		}
2591da177e4SLinus Torvalds 	}
2601da177e4SLinus Torvalds 
26185f16525SYuchung Cheng 	if (!init_rcv_wnd) /* Use default unless specified otherwise */
26285f16525SYuchung Cheng 		init_rcv_wnd = tcp_default_init_rwnd(mss);
263b1afde60SNandita Dukkipati 	*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
2641da177e4SLinus Torvalds 
2651da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
266589c49cbSGao Feng 	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
2671da177e4SLinus Torvalds }
2684bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
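
/* Worked example (illustrative note, not in the original source): if the
 * buffer-derived space comes to 1 MiB with wscale_ok set, the loop above
 * needs five right shifts before the value fits in 16 bits
 * (1048576 -> 32768), so rcv_wscale = 5 is chosen and each unit of the
 * on-wire window field then represents 32 bytes.
 */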
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2711da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2721da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2731da177e4SLinus Torvalds  * frame.
2741da177e4SLinus Torvalds  */
27540efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2761da177e4SLinus Torvalds {
2771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2788e165e20SFlorian Westphal 	u32 old_win = tp->rcv_wnd;
2791da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2801da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2811da177e4SLinus Torvalds 
2821da177e4SLinus Torvalds 	/* Never shrink the offered window */
2831da177e4SLinus Torvalds 	if (new_win < cur_win) {
2841da177e4SLinus Torvalds 		/* Danger Will Robinson!
2851da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2861da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2871da177e4SLinus Torvalds 		 * window in time.  --DaveM
2881da177e4SLinus Torvalds 		 *
2891da177e4SLinus Torvalds 		 * Relax Will Robinson.
2901da177e4SLinus Torvalds 		 */
2918e165e20SFlorian Westphal 		if (new_win == 0)
2928e165e20SFlorian Westphal 			NET_INC_STATS(sock_net(sk),
2938e165e20SFlorian Westphal 				      LINUX_MIB_TCPWANTZEROWINDOWADV);
294607bfbf2SPatrick McHardy 		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2951da177e4SLinus Torvalds 	}
2961da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2971da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2981da177e4SLinus Torvalds 
2991da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
3001da177e4SLinus Torvalds 	 * scaled window.
3011da177e4SLinus Torvalds 	 */
302ceef9ab6SEric Dumazet 	if (!tp->rx_opt.rcv_wscale &&
303ceef9ab6SEric Dumazet 	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
3041da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
3051da177e4SLinus Torvalds 	else
3061da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
3071da177e4SLinus Torvalds 
3081da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
3091da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
3101da177e4SLinus Torvalds 
31131770e34SFlorian Westphal 	/* If we advertise zero window, disable fast path. */
3128e165e20SFlorian Westphal 	if (new_win == 0) {
31331770e34SFlorian Westphal 		tp->pred_flags = 0;
3148e165e20SFlorian Westphal 		if (old_win)
3158e165e20SFlorian Westphal 			NET_INC_STATS(sock_net(sk),
3168e165e20SFlorian Westphal 				      LINUX_MIB_TCPTOZEROWINDOWADV);
3178e165e20SFlorian Westphal 	} else if (old_win == 0) {
3188e165e20SFlorian Westphal 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
3198e165e20SFlorian Westphal 	}
3201da177e4SLinus Torvalds 
3211da177e4SLinus Torvalds 	return new_win;
3221da177e4SLinus Torvalds }
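
/* Worked example (illustrative note, not in the original source): with
 * rcv_wscale = 2, if __tcp_select_window() proposes new_win = 9998 while
 * cur_win = 10000, shrinking is refused and new_win becomes
 * ALIGN(10000, 4) = 10000; the value actually stored in th->window is
 * 10000 >> 2 = 2500.
 */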
3231da177e4SLinus Torvalds 
32467edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
325735d3831SFlorian Westphal static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
326bdf1ee5dSIlpo Järvinen {
32730e502a3SDaniel Borkmann 	const struct tcp_sock *tp = tcp_sk(sk);
32830e502a3SDaniel Borkmann 
3294de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
330bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
3314de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
33291b5b21cSLawrence Brakmo 	else if (tcp_ca_needs_ecn(sk) ||
33391b5b21cSLawrence Brakmo 		 tcp_bpf_ca_needs_ecn(sk))
33430e502a3SDaniel Borkmann 		INET_ECN_xmit(sk);
335bdf1ee5dSIlpo Järvinen }
336bdf1ee5dSIlpo Järvinen 
33767edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
338735d3831SFlorian Westphal static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
339bdf1ee5dSIlpo Järvinen {
340bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
34191b5b21cSLawrence Brakmo 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
342f7b3bec6SFlorian Westphal 	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
34391b5b21cSLawrence Brakmo 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
344f7b3bec6SFlorian Westphal 
345f7b3bec6SFlorian Westphal 	if (!use_ecn) {
346f7b3bec6SFlorian Westphal 		const struct dst_entry *dst = __sk_dst_get(sk);
347f7b3bec6SFlorian Westphal 
348f7b3bec6SFlorian Westphal 		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
349f7b3bec6SFlorian Westphal 			use_ecn = true;
350f7b3bec6SFlorian Westphal 	}
351bdf1ee5dSIlpo Järvinen 
352bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
353f7b3bec6SFlorian Westphal 
354f7b3bec6SFlorian Westphal 	if (use_ecn) {
3554de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
356bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
35791b5b21cSLawrence Brakmo 		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
35830e502a3SDaniel Borkmann 			INET_ECN_xmit(sk);
359bdf1ee5dSIlpo Järvinen 	}
360bdf1ee5dSIlpo Järvinen }
361bdf1ee5dSIlpo Järvinen 
36249213555SDaniel Borkmann static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
36349213555SDaniel Borkmann {
36449213555SDaniel Borkmann 	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
36549213555SDaniel Borkmann 		/* tp->ecn_flags are cleared at a later point in time when
36649213555SDaniel Borkmann 		 * the SYN ACK is ultimately received.
36749213555SDaniel Borkmann 		 */
36849213555SDaniel Borkmann 		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
36949213555SDaniel Borkmann }
37049213555SDaniel Borkmann 
371735d3831SFlorian Westphal static void
3726ac705b1SEric Dumazet tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
373bdf1ee5dSIlpo Järvinen {
3746ac705b1SEric Dumazet 	if (inet_rsk(req)->ecn_ok)
375bdf1ee5dSIlpo Järvinen 		th->ece = 1;
376bdf1ee5dSIlpo Järvinen }
377bdf1ee5dSIlpo Järvinen 
37867edfef7SAndi Kleen /* Set up ECN state for a packet on an ESTABLISHED socket that is about to
37967edfef7SAndi Kleen  * be sent.
38067edfef7SAndi Kleen  */
381735d3831SFlorian Westphal static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
382ea1627c2SEric Dumazet 			 struct tcphdr *th, int tcp_header_len)
383bdf1ee5dSIlpo Järvinen {
384bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
385bdf1ee5dSIlpo Järvinen 
386bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
387bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
388bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
389bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
390bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
391bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
392bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
393ea1627c2SEric Dumazet 				th->cwr = 1;
394bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
395bdf1ee5dSIlpo Järvinen 			}
39630e502a3SDaniel Borkmann 		} else if (!tcp_ca_needs_ecn(sk)) {
397bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
398bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
399bdf1ee5dSIlpo Järvinen 		}
400bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
401ea1627c2SEric Dumazet 			th->ece = 1;
402bdf1ee5dSIlpo Järvinen 	}
403bdf1ee5dSIlpo Järvinen }
404bdf1ee5dSIlpo Järvinen 
405e870a8efSIlpo Järvinen /* Construct the common control bits of a non-data skb. If SYN/FIN is present,
406e870a8efSIlpo Järvinen  * auto-increment the end seqno.
407e870a8efSIlpo Järvinen  */
408e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
409e870a8efSIlpo Järvinen {
4102e8e18efSDavid S. Miller 	skb->ip_summed = CHECKSUM_PARTIAL;
411e870a8efSIlpo Järvinen 
4124de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags;
413e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->sacked = 0;
414e870a8efSIlpo Järvinen 
415cd7d8498SEric Dumazet 	tcp_skb_pcount_set(skb, 1);
416e870a8efSIlpo Järvinen 
417e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
418a3433f35SChangli Gao 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
419e870a8efSIlpo Järvinen 		seq++;
420e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
421e870a8efSIlpo Järvinen }
422e870a8efSIlpo Järvinen 
423a2a385d6SEric Dumazet static inline bool tcp_urg_mode(const struct tcp_sock *tp)
42433f5f57eSIlpo Järvinen {
42533f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
42633f5f57eSIlpo Järvinen }
42733f5f57eSIlpo Järvinen 
42833ad798cSAdam Langley #define OPTION_SACK_ADVERTISE	(1 << 0)
42933ad798cSAdam Langley #define OPTION_TS		(1 << 1)
43033ad798cSAdam Langley #define OPTION_MD5		(1 << 2)
43189e95a61SOri Finkelman #define OPTION_WSCALE		(1 << 3)
4322100c8d2SYuchung Cheng #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
43360e2a778SUrsula Braun #define OPTION_SMC		(1 << 9)
43460e2a778SUrsula Braun 
43560e2a778SUrsula Braun static void smc_options_write(__be32 *ptr, u16 *options)
43660e2a778SUrsula Braun {
43760e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
43860e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
43960e2a778SUrsula Braun 		if (unlikely(OPTION_SMC & *options)) {
44060e2a778SUrsula Braun 			*ptr++ = htonl((TCPOPT_NOP  << 24) |
44160e2a778SUrsula Braun 				       (TCPOPT_NOP  << 16) |
44260e2a778SUrsula Braun 				       (TCPOPT_EXP <<  8) |
44360e2a778SUrsula Braun 				       (TCPOLEN_EXP_SMC_BASE));
44460e2a778SUrsula Braun 			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
44560e2a778SUrsula Braun 		}
44660e2a778SUrsula Braun 	}
44760e2a778SUrsula Braun #endif
44860e2a778SUrsula Braun }
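
/* Illustrative note (not in the original source): on the wire this emits
 * NOP, NOP, then an RFC 6994 experimental option of kind TCPOPT_EXP (254)
 * and length TCPOLEN_EXP_SMC_BASE (6), whose 4-byte ExID TCPOPT_SMC_MAGIC
 * tells the peer this host is SMC-capable.
 */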
44933ad798cSAdam Langley 
45033ad798cSAdam Langley struct tcp_out_options {
4512100c8d2SYuchung Cheng 	u16 options;		/* bit field of OPTION_* */
4522100c8d2SYuchung Cheng 	u16 mss;		/* 0 to disable */
45333ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
45433ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
455bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
456bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
4572100c8d2SYuchung Cheng 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
4582100c8d2SYuchung Cheng 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
45933ad798cSAdam Langley };
46033ad798cSAdam Langley 
46167edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
46267edfef7SAndi Kleen  *
46367edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
464fd6149d3SIlpo Järvinen  * TCP options; we learned this the hard way, so be careful here.
465fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance, but from an
4668e3bff96Sstephen hemminger  * inter-operability perspective it seems that we're somewhat stuck with
467fd6149d3SIlpo Järvinen  * the ordering which we have been using if we want to keep working with
468fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody as there isn't
469fd6149d3SIlpo Järvinen  * particular reason why the ordering would need to be changed).
470fd6149d3SIlpo Järvinen  *
471fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
472fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
473fd6149d3SIlpo Järvinen  */
47433ad798cSAdam Langley static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
475bd0388aeSWilliam Allen Simpson 			      struct tcp_out_options *opts)
476bd0388aeSWilliam Allen Simpson {
4772100c8d2SYuchung Cheng 	u16 options = opts->options;	/* mungable copy */
478bd0388aeSWilliam Allen Simpson 
479bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_MD5 & options)) {
4801a2c6181SChristoph Paasch 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
4811a2c6181SChristoph Paasch 			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
482bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
483bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
48433ad798cSAdam Langley 		ptr += 4;
48533ad798cSAdam Langley 	}
48633ad798cSAdam Langley 
487fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
488fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
489fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
490fd6149d3SIlpo Järvinen 			       opts->mss);
491fd6149d3SIlpo Järvinen 	}
492fd6149d3SIlpo Järvinen 
493bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
494bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
49533ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
49633ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
49733ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
49833ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
499bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
50033ad798cSAdam Langley 		} else {
501496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
50240efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
50340efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
50440efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
50540efc6faSStephen Hemminger 		}
50633ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
50733ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
50833ad798cSAdam Langley 	}
50933ad798cSAdam Langley 
510bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
51133ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
51233ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
51333ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
51433ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
51533ad798cSAdam Langley 	}
51633ad798cSAdam Langley 
517bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
51833ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
51933ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
52033ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
52133ad798cSAdam Langley 			       opts->ws);
52233ad798cSAdam Langley 	}
52333ad798cSAdam Langley 
52433ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
52533ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
52633ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
52740efc6faSStephen Hemminger 		int this_sack;
52840efc6faSStephen Hemminger 
52940efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
53040efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
53140efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
53233ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
53340efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
5342de979bdSStephen Hemminger 
53533ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
53633ad798cSAdam Langley 		     ++this_sack) {
53740efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
53840efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
53940efc6faSStephen Hemminger 		}
5402de979bdSStephen Hemminger 
54140efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
54240efc6faSStephen Hemminger 	}
5432100c8d2SYuchung Cheng 
5442100c8d2SYuchung Cheng 	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
5452100c8d2SYuchung Cheng 		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
5467f9b838bSDaniel Lee 		u8 *p = (u8 *)ptr;
5477f9b838bSDaniel Lee 		u32 len; /* Fast Open option length */
5482100c8d2SYuchung Cheng 
5497f9b838bSDaniel Lee 		if (foc->exp) {
5507f9b838bSDaniel Lee 			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
5517f9b838bSDaniel Lee 			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
5522100c8d2SYuchung Cheng 				     TCPOPT_FASTOPEN_MAGIC);
5537f9b838bSDaniel Lee 			p += TCPOLEN_EXP_FASTOPEN_BASE;
5547f9b838bSDaniel Lee 		} else {
5557f9b838bSDaniel Lee 			len = TCPOLEN_FASTOPEN_BASE + foc->len;
5567f9b838bSDaniel Lee 			*p++ = TCPOPT_FASTOPEN;
5577f9b838bSDaniel Lee 			*p++ = len;
5582100c8d2SYuchung Cheng 		}
5597f9b838bSDaniel Lee 
5607f9b838bSDaniel Lee 		memcpy(p, foc->val, foc->len);
5617f9b838bSDaniel Lee 		if ((len & 3) == 2) {
5627f9b838bSDaniel Lee 			p[foc->len] = TCPOPT_NOP;
5637f9b838bSDaniel Lee 			p[foc->len + 1] = TCPOPT_NOP;
5647f9b838bSDaniel Lee 		}
5657f9b838bSDaniel Lee 		ptr += (len + 3) >> 2;
5662100c8d2SYuchung Cheng 	}
56760e2a778SUrsula Braun 
56860e2a778SUrsula Braun 	smc_options_write(ptr, &options);
56960e2a778SUrsula Braun }
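
/* Illustrative note (not in the original source): the timestamp option
 * without SACK-permitted is the classic 12-byte pattern NOP, NOP,
 * TIMESTAMP (8), length 10, i.e. the first word written above is
 * htonl(0x0101080a), followed by tsval and tsecr.
 */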
57060e2a778SUrsula Braun 
57160e2a778SUrsula Braun static void smc_set_option(const struct tcp_sock *tp,
57260e2a778SUrsula Braun 			   struct tcp_out_options *opts,
57360e2a778SUrsula Braun 			   unsigned int *remaining)
57460e2a778SUrsula Braun {
57560e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
57660e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
57760e2a778SUrsula Braun 		if (tp->syn_smc) {
57860e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
57960e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
58060e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
58160e2a778SUrsula Braun 			}
58260e2a778SUrsula Braun 		}
58360e2a778SUrsula Braun 	}
58460e2a778SUrsula Braun #endif
58560e2a778SUrsula Braun }
58660e2a778SUrsula Braun 
58760e2a778SUrsula Braun static void smc_set_option_cond(const struct tcp_sock *tp,
58860e2a778SUrsula Braun 				const struct inet_request_sock *ireq,
58960e2a778SUrsula Braun 				struct tcp_out_options *opts,
59060e2a778SUrsula Braun 				unsigned int *remaining)
59160e2a778SUrsula Braun {
59260e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
59360e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
59460e2a778SUrsula Braun 		if (tp->syn_smc && ireq->smc_ok) {
59560e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
59660e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
59760e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
59860e2a778SUrsula Braun 			}
59960e2a778SUrsula Braun 		}
60060e2a778SUrsula Braun 	}
60160e2a778SUrsula Braun #endif
60240efc6faSStephen Hemminger }
60340efc6faSStephen Hemminger 
60467edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
60567edfef7SAndi Kleen  * network wire format yet.
60667edfef7SAndi Kleen  */
60795c96174SEric Dumazet static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
60833ad798cSAdam Langley 				struct tcp_out_options *opts,
609cf533ea5SEric Dumazet 				struct tcp_md5sig_key **md5)
610cf533ea5SEric Dumazet {
61133ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
61295c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
613783237e8SYuchung Cheng 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
61433ad798cSAdam Langley 
6158c2320e8SEric Dumazet 	*md5 = NULL;
616cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
6178c2320e8SEric Dumazet 	if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
61833ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
61933ad798cSAdam Langley 		if (*md5) {
62033ad798cSAdam Langley 			opts->options |= OPTION_MD5;
621bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_MD5SIG_ALIGNED;
622cfb6eeb4SYOSHIFUJI Hideaki 		}
6238c2320e8SEric Dumazet 	}
624cfb6eeb4SYOSHIFUJI Hideaki #endif
62533ad798cSAdam Langley 
62633ad798cSAdam Langley 	/* We always send an MSS option.  If timestamps are used, the option
62733ad798cSAdam Langley 	 * bytes that will appear in normal data packets must be covered by the
62833ad798cSAdam Langley 	 * advertised MSS.  But we subtract them from tp->mss_cache so that
62933ad798cSAdam Langley 	 * calculations in tcp_sendmsg are simpler etc.  So account for this
63033ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
63133ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
63233ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
63333ad798cSAdam Langley 	 * SACKs don't matter; we never delay an ACK when we have any of those
63433ad798cSAdam Langley 	 * going out.  */
63533ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
636bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
63733ad798cSAdam Langley 
6385d2ed052SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
63933ad798cSAdam Langley 		opts->options |= OPTION_TS;
6407faee5c0SEric Dumazet 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
64133ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
642bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
64333ad798cSAdam Langley 	}
6449bb37ef0SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
64533ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
64689e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
647bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
64833ad798cSAdam Langley 	}
649f9301034SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
65033ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
651b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
652bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
65333ad798cSAdam Langley 	}
65433ad798cSAdam Langley 
655783237e8SYuchung Cheng 	if (fastopen && fastopen->cookie.len >= 0) {
6562646c831SDaniel Lee 		u32 need = fastopen->cookie.len;
6572646c831SDaniel Lee 
6582646c831SDaniel Lee 		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
6592646c831SDaniel Lee 					       TCPOLEN_FASTOPEN_BASE;
660783237e8SYuchung Cheng 		need = (need + 3) & ~3U;  /* Align to 32 bits */
661783237e8SYuchung Cheng 		if (remaining >= need) {
662783237e8SYuchung Cheng 			opts->options |= OPTION_FAST_OPEN_COOKIE;
663783237e8SYuchung Cheng 			opts->fastopen_cookie = &fastopen->cookie;
664783237e8SYuchung Cheng 			remaining -= need;
665783237e8SYuchung Cheng 			tp->syn_fastopen = 1;
6662646c831SDaniel Lee 			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
667783237e8SYuchung Cheng 		}
668783237e8SYuchung Cheng 	}
669bd0388aeSWilliam Allen Simpson 
67060e2a778SUrsula Braun 	smc_set_option(tp, opts, &remaining);
67160e2a778SUrsula Braun 
672bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
67333ad798cSAdam Langley }
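
/* Worked example (illustrative note, not in the original source): a typical
 * SYN without MD5 spends 4 bytes on MSS, 12 on timestamps and 4 on window
 * scaling, while SACK-permitted rides inside the timestamp block, so
 * remaining = 40 - 4 - 12 - 4 = 20 bytes. That leaves room for a Fast Open
 * cookie option of up to 16 cookie bytes once padded to 32-bit alignment.
 */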
67433ad798cSAdam Langley 
67567edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
67660e2a778SUrsula Braun static unsigned int tcp_synack_options(const struct sock *sk,
67760e2a778SUrsula Braun 				       struct request_sock *req,
67895c96174SEric Dumazet 				       unsigned int mss, struct sk_buff *skb,
67933ad798cSAdam Langley 				       struct tcp_out_options *opts,
68080f03e27SEric Dumazet 				       const struct tcp_md5sig_key *md5,
6818336886fSJerry Chu 				       struct tcp_fastopen_cookie *foc)
6824957faadSWilliam Allen Simpson {
68333ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
68495c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
68533ad798cSAdam Langley 
68633ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
68780f03e27SEric Dumazet 	if (md5) {
68833ad798cSAdam Langley 		opts->options |= OPTION_MD5;
6894957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
6904957faadSWilliam Allen Simpson 
6914957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
6924957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
6934957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
6944957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
6954957faadSWilliam Allen Simpson 		 */
696de213e5eSEric Dumazet 		ireq->tstamp_ok &= !ireq->sack_ok;
69733ad798cSAdam Langley 	}
69833ad798cSAdam Langley #endif
69933ad798cSAdam Langley 
7004957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
70133ad798cSAdam Langley 	opts->mss = mss;
7024957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
70333ad798cSAdam Langley 
70433ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
70533ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
70689e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
7074957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
70833ad798cSAdam Langley 	}
709de213e5eSEric Dumazet 	if (likely(ireq->tstamp_ok)) {
71033ad798cSAdam Langley 		opts->options |= OPTION_TS;
71195a22caeSFlorian Westphal 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
71233ad798cSAdam Langley 		opts->tsecr = req->ts_recent;
7134957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
71433ad798cSAdam Langley 	}
71533ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
71633ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
717de213e5eSEric Dumazet 		if (unlikely(!ireq->tstamp_ok))
7184957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
71933ad798cSAdam Langley 	}
7207f9b838bSDaniel Lee 	if (foc != NULL && foc->len >= 0) {
7217f9b838bSDaniel Lee 		u32 need = foc->len;
7227f9b838bSDaniel Lee 
7237f9b838bSDaniel Lee 		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
7247f9b838bSDaniel Lee 				   TCPOLEN_FASTOPEN_BASE;
7258336886fSJerry Chu 		need = (need + 3) & ~3U;  /* Align to 32 bits */
7268336886fSJerry Chu 		if (remaining >= need) {
7278336886fSJerry Chu 			opts->options |= OPTION_FAST_OPEN_COOKIE;
7288336886fSJerry Chu 			opts->fastopen_cookie = foc;
7298336886fSJerry Chu 			remaining -= need;
7308336886fSJerry Chu 		}
7318336886fSJerry Chu 	}
7324957faadSWilliam Allen Simpson 
73360e2a778SUrsula Braun 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
73460e2a778SUrsula Braun 
7354957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
73633ad798cSAdam Langley }
73733ad798cSAdam Langley 
73867edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
73967edfef7SAndi Kleen  * final wire format yet.
74067edfef7SAndi Kleen  */
74195c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
74233ad798cSAdam Langley 					struct tcp_out_options *opts,
743cf533ea5SEric Dumazet 					struct tcp_md5sig_key **md5)
744cf533ea5SEric Dumazet {
74533ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
74695c96174SEric Dumazet 	unsigned int size = 0;
747cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
74833ad798cSAdam Langley 
7495843ef42SAndi Kleen 	opts->options = 0;
7505843ef42SAndi Kleen 
7518c2320e8SEric Dumazet 	*md5 = NULL;
75233ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
7538c2320e8SEric Dumazet 	if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
75433ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
7558c2320e8SEric Dumazet 		if (*md5) {
75633ad798cSAdam Langley 			opts->options |= OPTION_MD5;
75733ad798cSAdam Langley 			size += TCPOLEN_MD5SIG_ALIGNED;
75833ad798cSAdam Langley 		}
7598c2320e8SEric Dumazet 	}
76033ad798cSAdam Langley #endif
76133ad798cSAdam Langley 
76233ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
76333ad798cSAdam Langley 		opts->options |= OPTION_TS;
7647faee5c0SEric Dumazet 		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
76533ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
76633ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
76733ad798cSAdam Langley 	}
76833ad798cSAdam Langley 
769cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
770cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
77195c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
77233ad798cSAdam Langley 		opts->num_sack_blocks =
77395c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
77433ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
77533ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
77633ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
77733ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
77833ad798cSAdam Langley 	}
77933ad798cSAdam Langley 
78033ad798cSAdam Langley 	return size;
78140efc6faSStephen Hemminger }
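
/* Worked example (illustrative note, not in the original source): with
 * timestamps negotiated and no MD5, size starts at 12, so
 * remaining = 40 - 12 = 28 and the SACK budget is (28 - 4) / 8 = 3: at
 * most three SACK blocks (8 bytes each, after the 4-byte aligned SACK
 * header) can accompany a timestamped segment.
 */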
7821da177e4SLinus Torvalds 
78346d3ceabSEric Dumazet 
78446d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
78546d3ceabSEric Dumazet  *
78646d3ceabSEric Dumazet  * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
78746d3ceabSEric Dumazet  * to reduce RTT and bufferbloat.
78846d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
78946d3ceabSEric Dumazet  *
79046d3ceabSEric Dumazet  * It's important that tcp_wfree() can be replaced by sock_wfree() in the event an
79146d3ceabSEric Dumazet  * skb needs to be reallocated in a driver.
7928e3bff96Sstephen hemminger  * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
79346d3ceabSEric Dumazet  *
79446d3ceabSEric Dumazet  * Since transmit from skb destructor is forbidden, we use a tasklet
79546d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
79646d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
79746d3ceabSEric Dumazet  */
79846d3ceabSEric Dumazet struct tsq_tasklet {
79946d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
80046d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
80146d3ceabSEric Dumazet };
80246d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
80346d3ceabSEric Dumazet 
80473a6bab5SEric Dumazet static void tcp_tsq_write(struct sock *sk)
8056f458dfbSEric Dumazet {
8066f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
8076f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
808f9616c35SEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
809f9616c35SEric Dumazet 		struct tcp_sock *tp = tcp_sk(sk);
810f9616c35SEric Dumazet 
811f9616c35SEric Dumazet 		if (tp->lost_out > tp->retrans_out &&
8123a91d29fSKoichiro Den 		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
8133a91d29fSKoichiro Den 			tcp_mstamp_refresh(tp);
814f9616c35SEric Dumazet 			tcp_xmit_retransmit_queue(sk);
8153a91d29fSKoichiro Den 		}
816f9616c35SEric Dumazet 
817f9616c35SEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
818bf06200eSJohn Ogness 			       0, GFP_ATOMIC);
8196f458dfbSEric Dumazet 	}
820f9616c35SEric Dumazet }
82173a6bab5SEric Dumazet 
82273a6bab5SEric Dumazet static void tcp_tsq_handler(struct sock *sk)
82373a6bab5SEric Dumazet {
82473a6bab5SEric Dumazet 	bh_lock_sock(sk);
82573a6bab5SEric Dumazet 	if (!sock_owned_by_user(sk))
82673a6bab5SEric Dumazet 		tcp_tsq_write(sk);
82773a6bab5SEric Dumazet 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
82873a6bab5SEric Dumazet 		sock_hold(sk);
82973a6bab5SEric Dumazet 	bh_unlock_sock(sk);
83073a6bab5SEric Dumazet }
83146d3ceabSEric Dumazet /*
8328e3bff96Sstephen hemminger  * One tasklet per cpu tries to send more skbs.
83346d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
8348e3bff96Sstephen hemminger  * transferring tsq->head because tcp_wfree() might
83546d3ceabSEric Dumazet  * interrupt us (non NAPI drivers)
83646d3ceabSEric Dumazet  * interrupt us (non-NAPI drivers).
83746d3ceabSEric Dumazet static void tcp_tasklet_func(unsigned long data)
83846d3ceabSEric Dumazet {
83946d3ceabSEric Dumazet 	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
84046d3ceabSEric Dumazet 	LIST_HEAD(list);
84146d3ceabSEric Dumazet 	unsigned long flags;
84246d3ceabSEric Dumazet 	struct list_head *q, *n;
84346d3ceabSEric Dumazet 	struct tcp_sock *tp;
84446d3ceabSEric Dumazet 	struct sock *sk;
84546d3ceabSEric Dumazet 
84646d3ceabSEric Dumazet 	local_irq_save(flags);
84746d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
84846d3ceabSEric Dumazet 	local_irq_restore(flags);
84946d3ceabSEric Dumazet 
85046d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
85146d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
85246d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
85346d3ceabSEric Dumazet 
85446d3ceabSEric Dumazet 		sk = (struct sock *)tp;
8550a9648f1SEric Dumazet 		smp_mb__before_atomic();
8567aa5470cSEric Dumazet 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
8577aa5470cSEric Dumazet 
8586f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
85946d3ceabSEric Dumazet 		sk_free(sk);
86046d3ceabSEric Dumazet 	}
86146d3ceabSEric Dumazet }
86246d3ceabSEric Dumazet 
86340fc3423SEric Dumazet #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
86440fc3423SEric Dumazet 			  TCPF_WRITE_TIMER_DEFERRED |	\
86540fc3423SEric Dumazet 			  TCPF_DELACK_TIMER_DEFERRED |	\
86640fc3423SEric Dumazet 			  TCPF_MTU_REDUCED_DEFERRED)
86746d3ceabSEric Dumazet /**
86846d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
86946d3ceabSEric Dumazet  * @sk: socket
87046d3ceabSEric Dumazet  *
87146d3ceabSEric Dumazet  * called from release_sock() to perform protocol dependent
87246d3ceabSEric Dumazet  * actions before socket release.
87346d3ceabSEric Dumazet  */
87446d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
87546d3ceabSEric Dumazet {
8766f458dfbSEric Dumazet 	unsigned long flags, nflags;
87746d3ceabSEric Dumazet 
8786f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
8796f458dfbSEric Dumazet 	do {
8807aa5470cSEric Dumazet 		flags = sk->sk_tsq_flags;
8816f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
8826f458dfbSEric Dumazet 			return;
8836f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
8847aa5470cSEric Dumazet 	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
8856f458dfbSEric Dumazet 
88673a6bab5SEric Dumazet 	if (flags & TCPF_TSQ_DEFERRED) {
88773a6bab5SEric Dumazet 		tcp_tsq_write(sk);
88873a6bab5SEric Dumazet 		__sock_put(sk);
88973a6bab5SEric Dumazet 	}
890c3f9b018SEric Dumazet 	/* Here begins the tricky part:
891c3f9b018SEric Dumazet 	 * We are called from release_sock() with :
892c3f9b018SEric Dumazet 	 * 1) BH disabled
893c3f9b018SEric Dumazet 	 * 2) sk_lock.slock spinlock held
894c3f9b018SEric Dumazet 	 * 3) socket owned by us (sk->sk_lock.owned == 1)
895c3f9b018SEric Dumazet 	 *
896c3f9b018SEric Dumazet 	 * But following code is meant to be called from BH handlers,
897c3f9b018SEric Dumazet 	 * so we should keep BH disabled, but early release socket ownership
898c3f9b018SEric Dumazet 	 * so we should keep BH disabled, but release socket ownership early
899c3f9b018SEric Dumazet 	sock_release_ownership(sk);
900c3f9b018SEric Dumazet 
90140fc3423SEric Dumazet 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
9026f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
903144d56e9SEric Dumazet 		__sock_put(sk);
904144d56e9SEric Dumazet 	}
90540fc3423SEric Dumazet 	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
9066f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
907144d56e9SEric Dumazet 		__sock_put(sk);
908144d56e9SEric Dumazet 	}
90940fc3423SEric Dumazet 	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
9104fab9071SNeal Cardwell 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
911144d56e9SEric Dumazet 		__sock_put(sk);
912144d56e9SEric Dumazet 	}
91346d3ceabSEric Dumazet }
91446d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
91546d3ceabSEric Dumazet 
91646d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
91746d3ceabSEric Dumazet {
91846d3ceabSEric Dumazet 	int i;
91946d3ceabSEric Dumazet 
92046d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
92146d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
92246d3ceabSEric Dumazet 
92346d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
92446d3ceabSEric Dumazet 		tasklet_init(&tsq->tasklet,
92546d3ceabSEric Dumazet 			     tcp_tasklet_func,
92646d3ceabSEric Dumazet 			     (unsigned long)tsq);
92746d3ceabSEric Dumazet 	}
92846d3ceabSEric Dumazet }
92946d3ceabSEric Dumazet 
93046d3ceabSEric Dumazet /*
93146d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
9328e3bff96Sstephen hemminger  * We can't xmit new skbs from this context, as we might already
93346d3ceabSEric Dumazet  * hold qdisc lock.
93446d3ceabSEric Dumazet  */
935d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb)
93646d3ceabSEric Dumazet {
93746d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
93846d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
939408f0a6cSEric Dumazet 	unsigned long flags, nval, oval;
9409b462d02SEric Dumazet 
9419b462d02SEric Dumazet 	/* Keep one reference on sk_wmem_alloc.
9429b462d02SEric Dumazet 	 * Will be released by sk_free() from here or tcp_tasklet_func()
9439b462d02SEric Dumazet 	 */
94414afee4bSReshetova, Elena 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
9459b462d02SEric Dumazet 
9469b462d02SEric Dumazet 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
9479b462d02SEric Dumazet 	 * Wait until our queues (qdisc + devices) are drained.
9489b462d02SEric Dumazet 	 * This gives :
9499b462d02SEric Dumazet 	 * This gives:
9509b462d02SEric Dumazet 	 * - fewer callbacks to tcp_write_xmit(), reducing stress (batches)
9519b462d02SEric Dumazet 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
9529b462d02SEric Dumazet 	 */
95314afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
9549b462d02SEric Dumazet 		goto out;
95546d3ceabSEric Dumazet 
9567aa5470cSEric Dumazet 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
95746d3ceabSEric Dumazet 		struct tsq_tasklet *tsq;
958a9b204d1SEric Dumazet 		bool empty;
95946d3ceabSEric Dumazet 
960408f0a6cSEric Dumazet 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
961408f0a6cSEric Dumazet 			goto out;
962408f0a6cSEric Dumazet 
96373a6bab5SEric Dumazet 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
9647aa5470cSEric Dumazet 		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
965408f0a6cSEric Dumazet 		if (nval != oval)
966408f0a6cSEric Dumazet 			continue;
967408f0a6cSEric Dumazet 
96846d3ceabSEric Dumazet 		/* queue this socket to tasklet queue */
96946d3ceabSEric Dumazet 		local_irq_save(flags);
970903ceff7SChristoph Lameter 		tsq = this_cpu_ptr(&tsq_tasklet);
971a9b204d1SEric Dumazet 		empty = list_empty(&tsq->head);
97246d3ceabSEric Dumazet 		list_add(&tp->tsq_node, &tsq->head);
973a9b204d1SEric Dumazet 		if (empty)
97446d3ceabSEric Dumazet 			tasklet_schedule(&tsq->tasklet);
97546d3ceabSEric Dumazet 		local_irq_restore(flags);
9769b462d02SEric Dumazet 		return;
97746d3ceabSEric Dumazet 	}
9789b462d02SEric Dumazet out:
9799b462d02SEric Dumazet 	sk_free(sk);
98046d3ceabSEric Dumazet }
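
/* Illustrative note (not in the original source): the cmpxchg() loop above
 * clears TSQF_THROTTLED and sets TSQF_QUEUED in one atomic step, so racing
 * destructor calls enqueue the socket on the per-cpu list at most once.
 * tcp_tasklet_func() later clears TSQ_QUEUED and lets tcp_tsq_handler()
 * either transmit directly or defer to tcp_release_cb() when user context
 * owns the socket.
 */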
98146d3ceabSEric Dumazet 
98273a6bab5SEric Dumazet /* Note: Called under soft irq.
98373a6bab5SEric Dumazet  * We can call the TCP stack right away, unless the socket is owned by the user.
984218af599SEric Dumazet  */
985218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
986218af599SEric Dumazet {
987218af599SEric Dumazet 	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
988218af599SEric Dumazet 	struct sock *sk = (struct sock *)tp;
989218af599SEric Dumazet 
99073a6bab5SEric Dumazet 	tcp_tsq_handler(sk);
99173a6bab5SEric Dumazet 	sock_put(sk);
992218af599SEric Dumazet 
993218af599SEric Dumazet 	return HRTIMER_NORESTART;
994218af599SEric Dumazet }
995218af599SEric Dumazet 
996218af599SEric Dumazet static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
997218af599SEric Dumazet {
998218af599SEric Dumazet 	u64 len_ns;
999218af599SEric Dumazet 	u32 rate;
1000218af599SEric Dumazet 
1001218af599SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
1002218af599SEric Dumazet 		return;
1003218af599SEric Dumazet 	rate = sk->sk_pacing_rate;
1004218af599SEric Dumazet 	if (!rate || rate == ~0U)
1005218af599SEric Dumazet 		return;
1006218af599SEric Dumazet 
1007218af599SEric Dumazet 	len_ns = (u64)skb->len * NSEC_PER_SEC;
1008218af599SEric Dumazet 	do_div(len_ns, rate);
1009218af599SEric Dumazet 	hrtimer_start(&tcp_sk(sk)->pacing_timer,
1010218af599SEric Dumazet 		      ktime_add_ns(ktime_get(), len_ns),
101173a6bab5SEric Dumazet 		      HRTIMER_MODE_ABS_PINNED_SOFT);
101273a6bab5SEric Dumazet 	sock_hold(sk);
1013218af599SEric Dumazet }
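
/* Worked example (illustrative note, not in the original source): at
 * sk_pacing_rate = 1250000 bytes/sec (10 Mbit/s), a 1448-byte skb gives
 * len_ns = 1448 * NSEC_PER_SEC / 1250000 = 1158400 ns, so the pacing timer
 * fires roughly 1.16 ms after this transmit before the next packet may go.
 */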
1014218af599SEric Dumazet 
1015e2080072SEric Dumazet static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
1016e2080072SEric Dumazet {
1017e2080072SEric Dumazet 	skb->skb_mstamp = tp->tcp_mstamp;
1018e2080072SEric Dumazet 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1019e2080072SEric Dumazet }
1020e2080072SEric Dumazet 
10211da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued up by
10221da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
10231da177e4SLinus Torvalds  * transmission and possible later retransmissions.
10241da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
10251da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
10261da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
10271da177e4SLinus Torvalds  * device.
10281da177e4SLinus Torvalds  *
10291da177e4SLinus Torvalds  * We are working here with either a clone of the original
10301da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
10311da177e4SLinus Torvalds  */
10322987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
10332987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
10341da177e4SLinus Torvalds {
10356687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1036dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1037dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1038dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
103933ad798cSAdam Langley 	struct tcp_out_options opts;
104095c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
10418c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1042cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10431da177e4SLinus Torvalds 	struct tcphdr *th;
10441da177e4SLinus Torvalds 	int err;
10451da177e4SLinus Torvalds 
1046dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
10476f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
1048dfb4b9dcSDavid S. Miller 
1049ccdbb6e9SEric Dumazet 	if (clone_it) {
10506f094b9eSLawrence Brakmo 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
10516f094b9eSLawrence Brakmo 			- tp->snd_una;
10528c72c65bSEric Dumazet 		oskb = skb;
1053e2080072SEric Dumazet 
1054e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1055e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1056e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1057dfb4b9dcSDavid S. Miller 			else
1058e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1059e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1060e2080072SEric Dumazet 
1061dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1062dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1063dfb4b9dcSDavid S. Miller 	}
10648c72c65bSEric Dumazet 	skb->skb_mstamp = tp->tcp_mstamp;
1065dfb4b9dcSDavid S. Miller 
1066dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1067dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
106833ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10691da177e4SLinus Torvalds 
10704de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
107133ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
107233ad798cSAdam Langley 	else
107333ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
107433ad798cSAdam Langley 							   &md5);
107533ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10761da177e4SLinus Torvalds 
1077547669d4SEric Dumazet 	/* if no packet is in qdisc/device queue, then allow XPS to select
1078b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
107973a6bab5SEric Dumazet 	 * which holds one reference to sk.
1080b2532eb9SEric Dumazet 	 *
1081b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1082b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1083547669d4SEric Dumazet 	 */
1084b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10851da177e4SLinus Torvalds 
108638ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
108738ab52e8SEric Dumazet 	 * this might cause drops if packet is looped back :
108838ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
108938ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
109038ab52e8SEric Dumazet 	 */
109138ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
109238ab52e8SEric Dumazet 
1093aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1094aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
109546d3ceabSEric Dumazet 
109646d3ceabSEric Dumazet 	skb_orphan(skb);
109746d3ceabSEric Dumazet 	skb->sk = sk;
10981d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1099b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
110014afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
11011da177e4SLinus Torvalds 
1102c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1103c3a2e837SJulian Anastasov 
11041da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1105ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1106c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1107c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
11081da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
11092987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1110df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
11114de075e0SEric Dumazet 					tcb->tcp_flags);
1112dfb4b9dcSDavid S. Miller 
11131da177e4SLinus Torvalds 	th->check		= 0;
11141da177e4SLinus Torvalds 	th->urg_ptr		= 0;
11151da177e4SLinus Torvalds 
111633f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
11177691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11187691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11191da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11201da177e4SLinus Torvalds 			th->urg = 1;
11217691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11220eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11237691367dSHerbert Xu 			th->urg = 1;
11247691367dSHerbert Xu 		}
11251da177e4SLinus Torvalds 	}
11261da177e4SLinus Torvalds 
1127bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
112851466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1129ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1130ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1131ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1132ea1627c2SEric Dumazet 	} else {
1133ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1134ea1627c2SEric Dumazet 		 * is never scaled.
1135ea1627c2SEric Dumazet 		 */
1136ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1137ea1627c2SEric Dumazet 	}
1138cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1139cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1140cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1141a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1142bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
114339f8e58eSEric Dumazet 					       md5, sk, skb);
1144cfb6eeb4SYOSHIFUJI Hideaki 	}
1145cfb6eeb4SYOSHIFUJI Hideaki #endif
1146cfb6eeb4SYOSHIFUJI Hideaki 
1147bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11481da177e4SLinus Torvalds 
11494de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
115027cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11511da177e4SLinus Torvalds 
1152a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1153cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1154a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1155ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1156218af599SEric Dumazet 		tcp_internal_pacing(sk, skb);
1157a44d6eacSMartin KaFai Lau 	}
11581da177e4SLinus Torvalds 
1159bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1160aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1161aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11621da177e4SLinus Torvalds 
11632efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1164f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1165cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1166f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1167cd7d8498SEric Dumazet 
11687faee5c0SEric Dumazet 	/* Our usage of tstamp should remain private */
11692456e855SThomas Gleixner 	skb->tstamp = 0;
1170971f10ecSEric Dumazet 
1171971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1172971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1173971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1174971f10ecSEric Dumazet 
1175b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11767faee5c0SEric Dumazet 
11778c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11785ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11798c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11808c72c65bSEric Dumazet 	}
1181fc225799SEric Dumazet 	if (!err && oskb) {
1182e2080072SEric Dumazet 		tcp_update_skb_after_send(tp, oskb);
1183fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1184fc225799SEric Dumazet 	}
11858c72c65bSEric Dumazet 	return err;
11861da177e4SLinus Torvalds }
11871da177e4SLinus Torvalds 
11882987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11892987babbSYuchung Cheng 			    gfp_t gfp_mask)
11902987babbSYuchung Cheng {
11912987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11922987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11932987babbSYuchung Cheng }
11942987babbSYuchung Cheng 
119567edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11961da177e4SLinus Torvalds  *
11971da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11981da177e4SLinus Torvalds  * otherwise socket can stall.
11991da177e4SLinus Torvalds  */
12001da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
12011da177e4SLinus Torvalds {
12021da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
12051da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1206f4a775d1SEric Dumazet 	__skb_header_release(skb);
1207fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
12083ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
12093ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
12101da177e4SLinus Torvalds }
12111da177e4SLinus Torvalds 
121267edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
12135bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1214f6302d1dSDavid S. Miller {
12154a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1216f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1217f6302d1dSDavid S. Miller 		 * non-TSO case.
1218f6302d1dSDavid S. Miller 		 */
1219cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1220f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1221f6302d1dSDavid S. Miller 	} else {
1222cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1223f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12241da177e4SLinus Torvalds 	}
12251da177e4SLinus Torvalds }
12261da177e4SLinus Torvalds 
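/* A tiny sketch (editorial, not in the tree) of the pcount rule applied
 * above: a tail shorter than mss_now still costs a full segment, hence
 * DIV_ROUND_UP() rather than a plain divide.
 */
static inline u32 example_tso_pcount(u32 len, u32 mss_now)
{
	/* e.g. len = 4000, mss_now = 1448 -> 3 (two full + one partial) */
	return len <= mss_now ? 1 : DIV_ROUND_UP(len, mss_now);
}
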
1227797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various
1228797108d1SIlpo Järvinen  * tweaks to fix counters
1229797108d1SIlpo Järvinen  */
1230cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1231797108d1SIlpo Järvinen {
1232797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1233797108d1SIlpo Järvinen 
1234797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1235797108d1SIlpo Järvinen 
1236797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1237797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1238797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1239797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1240797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1241797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1242797108d1SIlpo Järvinen 
1243797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1244797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1245797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1246797108d1SIlpo Järvinen 
1247797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1248797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1249713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1250797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1251797108d1SIlpo Järvinen 
1252797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1253797108d1SIlpo Järvinen }
1254797108d1SIlpo Järvinen 
12550a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12560a2cf20cSSoheil Hassas Yeganeh {
12570a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12580a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12590a2cf20cSSoheil Hassas Yeganeh }
12600a2cf20cSSoheil Hassas Yeganeh 
1261490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1262490cc7d0SWillem de Bruijn {
1263490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1264490cc7d0SWillem de Bruijn 
12650a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1266490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1267490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1268490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1269490cc7d0SWillem de Bruijn 
1270490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1271490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1272490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1273b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1274b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1275490cc7d0SWillem de Bruijn 	}
1276490cc7d0SWillem de Bruijn }
1277490cc7d0SWillem de Bruijn 
1278a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1279a166140eSMartin KaFai Lau {
1280a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1281a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1282a166140eSMartin KaFai Lau }
1283a166140eSMartin KaFai Lau 
128475c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
128575c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
128675c119afSEric Dumazet 					 struct sk_buff *buff,
128775c119afSEric Dumazet 					 struct sock *sk,
128875c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
128975c119afSEric Dumazet {
129075c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
129175c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
129275c119afSEric Dumazet 	else
129375c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
129475c119afSEric Dumazet }
129575c119afSEric Dumazet 
12961da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12971da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12981da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12991da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
13001da177e4SLinus Torvalds  */
130175c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
130275c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
13036cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
13041da177e4SLinus Torvalds {
13051da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13061da177e4SLinus Torvalds 	struct sk_buff *buff;
13076475be16SDavid S. Miller 	int nsize, old_factor;
1308b60b49eaSHerbert Xu 	int nlen;
13099ce01461SIlpo Järvinen 	u8 flags;
13101da177e4SLinus Torvalds 
13112fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
13122fceec13SIlpo Järvinen 		return -EINVAL;
13136a438bbeSStephen Hemminger 
13141da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
13151da177e4SLinus Torvalds 	if (nsize < 0)
13161da177e4SLinus Torvalds 		nsize = 0;
13171da177e4SLinus Torvalds 
13186cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13191da177e4SLinus Torvalds 		return -ENOMEM;
13201da177e4SLinus Torvalds 
13211da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1322eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
132351456b29SIan Morris 	if (!buff)
13241da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1325ef5cb973SHerbert Xu 
13263ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
13273ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1328b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1329b60b49eaSHerbert Xu 	buff->truesize += nlen;
1330b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13311da177e4SLinus Torvalds 
13321da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13331da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13341da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13351da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13384de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13394de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13404de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1341e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1342a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13451da177e4SLinus Torvalds 
134698be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13471da177e4SLinus Torvalds 
1348a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1349490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13501da177e4SLinus Torvalds 
13516475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13526475be16SDavid S. Miller 
13531da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13545bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13555bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13561da177e4SLinus Torvalds 
1357b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1358b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1359b9f64820SYuchung Cheng 
13606475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13616475be16SDavid S. Miller 	 * adjust the various packet counters.
13626475be16SDavid S. Miller 	 */
1363cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13646475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13656475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13661da177e4SLinus Torvalds 
1367797108d1SIlpo Järvinen 		if (diff)
1368797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13691da177e4SLinus Torvalds 	}
13701da177e4SLinus Torvalds 
13711da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1372f4a775d1SEric Dumazet 	__skb_header_release(buff);
137375c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1374f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1375e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13761da177e4SLinus Torvalds 
13771da177e4SLinus Torvalds 	return 0;
13781da177e4SLinus Torvalds }
13791da177e4SLinus Torvalds 
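/* A small sketch (editorial, hypothetical helper) of the sequence split
 * tcp_fragment() performs above: [seq, end_seq) becomes [seq, seq + len)
 * on the trimmed skb and [seq + len, end_seq) on the new buff.
 */
static inline void example_fragment_seq(u32 seq, u32 end_seq, u32 len,
					u32 *buff_seq, u32 *buff_end_seq)
{
	/* e.g. seq = 1000, end_seq = 4000, len = 1448:
	 * skb keeps [1000, 2448), buff carries [2448, 4000).
	 */
	*buff_seq = seq + len;
	*buff_end_seq = end_seq;
}
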
1380f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1381f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13821da177e4SLinus Torvalds  */
13837162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13841da177e4SLinus Torvalds {
13857b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13861da177e4SLinus Torvalds 	int i, k, eat;
13871da177e4SLinus Torvalds 
13884fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13894fa48bf3SEric Dumazet 	if (eat) {
13904fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13914fa48bf3SEric Dumazet 		len -= eat;
13924fa48bf3SEric Dumazet 		if (!len)
13937162fb24SEric Dumazet 			return 0;
13944fa48bf3SEric Dumazet 	}
13951da177e4SLinus Torvalds 	eat = len;
13961da177e4SLinus Torvalds 	k = 0;
13977b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13987b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13997b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
14009e903e08SEric Dumazet 
14019e903e08SEric Dumazet 		if (size <= eat) {
1402aff65da0SIan Campbell 			skb_frag_unref(skb, i);
14039e903e08SEric Dumazet 			eat -= size;
14041da177e4SLinus Torvalds 		} else {
14057b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
14061da177e4SLinus Torvalds 			if (eat) {
14077b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
14087b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
14091da177e4SLinus Torvalds 				eat = 0;
14101da177e4SLinus Torvalds 			}
14111da177e4SLinus Torvalds 			k++;
14121da177e4SLinus Torvalds 		}
14131da177e4SLinus Torvalds 	}
14147b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds 	skb->data_len -= len;
14171da177e4SLinus Torvalds 	skb->len = skb->data_len;
14187162fb24SEric Dumazet 	return len;
14191da177e4SLinus Torvalds }
14201da177e4SLinus Torvalds 
142167edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14221da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14231da177e4SLinus Torvalds {
14247162fb24SEric Dumazet 	u32 delta_truesize;
14257162fb24SEric Dumazet 
142614bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14271da177e4SLinus Torvalds 		return -ENOMEM;
14281da177e4SLinus Torvalds 
14297162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14301da177e4SLinus Torvalds 
14311da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
143284fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14331da177e4SLinus Torvalds 
14347162fb24SEric Dumazet 	if (delta_truesize) {
14357162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14367162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14377162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14381da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14397162fb24SEric Dumazet 	}
14401da177e4SLinus Torvalds 
14415b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14421da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14435bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds 	return 0;
14461da177e4SLinus Torvalds }
14471da177e4SLinus Torvalds 
14481b63edd6SYuchung Cheng /* Calculate MSS not accounting for any TCP options.  */
14491b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14505d424d5aSJohn Heffner {
1451cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1452cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14535d424d5aSJohn Heffner 	int mss_now;
14545d424d5aSJohn Heffner 
14555d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14565d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
14575d424d5aSJohn Heffner 	 */
14585d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14595d424d5aSJohn Heffner 
146067469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
146167469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
146267469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
146367469601SEric Dumazet 
146467469601SEric Dumazet 		if (dst && dst_allfrag(dst))
146567469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
146667469601SEric Dumazet 	}
146767469601SEric Dumazet 
14685d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14695d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14705d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14715d424d5aSJohn Heffner 
14725d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14735d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14745d424d5aSJohn Heffner 
14755d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14765d424d5aSJohn Heffner 	if (mss_now < 48)
14775d424d5aSJohn Heffner 		mss_now = 48;
14785d424d5aSJohn Heffner 	return mss_now;
14795d424d5aSJohn Heffner }
14805d424d5aSJohn Heffner 
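/* Worked example (editorial, assuming plain IPv4 with no IP options and
 * no extension headers): the computation above reduces to
 * pmtu - net_header_len - sizeof(struct tcphdr), i.e. 1500 - 20 - 20 = 1460,
 * before mss_clamp and icsk_ext_hdr_len shrink it further.
 */
static inline int example_base_mss_v4(int pmtu)
{
	int mss_now = pmtu - 20 /* iphdr */ - (int)sizeof(struct tcphdr);

	return mss_now < 48 ? 48 : mss_now;
}
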
14811b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14821b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14831b63edd6SYuchung Cheng {
14841b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14851b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14861b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14871b63edd6SYuchung Cheng }
14881b63edd6SYuchung Cheng 
14895d424d5aSJohn Heffner /* Inverse of above */
149067469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14915d424d5aSJohn Heffner {
1492cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1493cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14945d424d5aSJohn Heffner 	int mtu;
14955d424d5aSJohn Heffner 
14965d424d5aSJohn Heffner 	mtu = mss +
14975d424d5aSJohn Heffner 	      tp->tcp_header_len +
14985d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14995d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
15005d424d5aSJohn Heffner 
150167469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
150267469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
150367469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
150467469601SEric Dumazet 
150567469601SEric Dumazet 		if (dst && dst_allfrag(dst))
150667469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
150767469601SEric Dumazet 	}
15085d424d5aSJohn Heffner 	return mtu;
15095d424d5aSJohn Heffner }
1510556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
15115d424d5aSJohn Heffner 
151267edfef7SAndi Kleen /* MTU probing init per socket */
15135d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
15145d424d5aSJohn Heffner {
15155d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15165d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1517b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15185d424d5aSJohn Heffner 
1519b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15205d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15215d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1522b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15235d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
152405cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1525c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15265d424d5aSJohn Heffner }
15274bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15285d424d5aSJohn Heffner 
15291da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
15321da177e4SLinus Torvalds    for TCP options, but includes only the bare TCP header.
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1535caa20d9aSStephen Hemminger    It is minimum of user_mss and mss received with SYN.
15361da177e4SLinus Torvalds    It also does not include TCP options.
15371da177e4SLinus Torvalds 
1538d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15411da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15421da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15431da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15441da177e4SLinus Torvalds 
15451da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15461da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15471da177e4SLinus Torvalds 
1548d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1549d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15501da177e4SLinus Torvalds  */
15511da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1554d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15555d424d5aSJohn Heffner 	int mss_now;
15561da177e4SLinus Torvalds 
15575d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15585d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15591da177e4SLinus Torvalds 
15605d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1561409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	/* And store cached results */
1564d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15655d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15665d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1567c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 	return mss_now;
15701da177e4SLinus Torvalds }
15714bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15741da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15751da177e4SLinus Torvalds  */
15760c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15771da177e4SLinus Torvalds {
1578cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1579cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1580c1b4a7e6SDavid S. Miller 	u32 mss_now;
158195c96174SEric Dumazet 	unsigned int header_len;
158233ad798cSAdam Langley 	struct tcp_out_options opts;
158333ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15841da177e4SLinus Torvalds 
1585c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1586c1b4a7e6SDavid S. Miller 
15871da177e4SLinus Torvalds 	if (dst) {
15881da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1589d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15901da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15911da177e4SLinus Torvalds 	}
15921da177e4SLinus Torvalds 
159333ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
159433ad798cSAdam Langley 		     sizeof(struct tcphdr);
159533ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
159633ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
159733ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
159833ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
159933ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
160033ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
160133ad798cSAdam Langley 		mss_now -= delta;
160233ad798cSAdam Langley 	}
1603cfb6eeb4SYOSHIFUJI Hideaki 
16041da177e4SLinus Torvalds 	return mss_now;
16051da177e4SLinus Torvalds }
16061da177e4SLinus Torvalds 
160786fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
160886fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
160986fd14adSWeiping Pan  * or if the application has hit its sndbuf limit recently.
161086fd14adSWeiping Pan  */
161186fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1612a762a980SDavid S. Miller {
16139e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1614a762a980SDavid S. Miller 
161586fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
161686fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
161786fd14adSWeiping Pan 		/* Limited by application or receiver window. */
161886fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
161986fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
162086fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
162186fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
162286fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
162386fd14adSWeiping Pan 		}
162486fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
162586fd14adSWeiping Pan 	}
1626c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
162786fd14adSWeiping Pan }
162886fd14adSWeiping Pan 
1629ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1630a762a980SDavid S. Miller {
16311b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1632a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1633a762a980SDavid S. Miller 
1634ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1635ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1636ca8a2263SNeal Cardwell 	 */
1637ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1638ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1639ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1640ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1641ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1642ca8a2263SNeal Cardwell 	}
1643e114a710SEric Dumazet 
164424901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1645a762a980SDavid S. Miller 		/* Network is fed fully. */
1646a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1647c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1648a762a980SDavid S. Miller 	} else {
1649a762a980SDavid S. Miller 		/* Network starves. */
1650a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1651a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1652a762a980SDavid S. Miller 
1653b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1654c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16551b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1656a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1657b0f71bd3SFrancis Yan 
1658b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1659b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1660b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1661b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
166275c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1663b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1664b0f71bd3SFrancis Yan 		 */
166575c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1666b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1667b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1668b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1669a762a980SDavid S. Miller 	}
1670a762a980SDavid S. Miller }
1671a762a980SDavid S. Miller 
1672d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1673d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1674d4589926SEric Dumazet {
1675d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1676d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1677d4589926SEric Dumazet }
1678d4589926SEric Dumazet 
1679d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1680d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1681d4589926SEric Dumazet  * The test is really :
1682d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1683d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1684d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1685d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16860e3a4803SIlpo Järvinen  */
1687d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1688d4589926SEric Dumazet 				const struct sk_buff *skb)
1689d4589926SEric Dumazet {
1690d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1691d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1692d4589926SEric Dumazet }
1693d4589926SEric Dumazet 
1694d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1695d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1696d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1697d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1698d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1699d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1700d4589926SEric Dumazet  */
1701d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1702cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1703d4589926SEric Dumazet {
1704d4589926SEric Dumazet 	return partial &&
1705d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1706d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1707d4589926SEric Dumazet }
1708605ad7f1SEric Dumazet 
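/* A compact sketch (editorial, hypothetical helper) of the Minshall-Nagle
 * decision above: a sub-mss tail is held back only when TCP_CORK is set,
 * or when Nagle is enabled and a previously sent small packet is still
 * unacknowledged.
 */
static inline bool example_nagle_defers(bool partial, bool corked,
					bool nagle_on, bool small_unacked)
{
	return partial && (corked || (nagle_on && small_unacked));
}
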
1709605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1710605ad7f1SEric Dumazet  * to send one TSO packet per ms
1711605ad7f1SEric Dumazet  */
1712dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
17131b3878caSNeal Cardwell 			    int min_tso_segs)
1714605ad7f1SEric Dumazet {
1715605ad7f1SEric Dumazet 	u32 bytes, segs;
1716605ad7f1SEric Dumazet 
17173a9b76fdSEric Dumazet 	bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
1718605ad7f1SEric Dumazet 		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1719605ad7f1SEric Dumazet 
1720605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1721605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1722605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1723605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1724605ad7f1SEric Dumazet 	 */
17251b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1726605ad7f1SEric Dumazet 
1727350c9f48SEric Dumazet 	return segs;
1728605ad7f1SEric Dumazet }
1729605ad7f1SEric Dumazet 
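/* A worked sketch (editorial; the shift of 10 mirrors the default
 * sk_pacing_shift) of the autosizing above: budget roughly 1 ms worth of
 * the pacing rate per TSO packet, never fewer than @min_segs segments.
 */
static inline u32 example_autosize_segs(u32 rate, u32 mss_now, u32 min_segs)
{
	u32 bytes = rate >> 10;	/* 125000000 B/s >> 10 -> 122070 B */

	/* 122070 / 1448 -> 84 segments for a 1 Gbit/s flow */
	return max(bytes / mss_now, min_segs);
}
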
1730ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1731ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1732ed6e7268SNeal Cardwell  */
1733ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1734ed6e7268SNeal Cardwell {
1735ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1736dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1737ed6e7268SNeal Cardwell 
1738dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1739dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1740dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1741dcb8c9b4SEric Dumazet 
1742dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1743350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1744ed6e7268SNeal Cardwell }
1745ed6e7268SNeal Cardwell 
1746d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1747d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1748d4589926SEric Dumazet 					const struct sk_buff *skb,
1749d4589926SEric Dumazet 					unsigned int mss_now,
1750d4589926SEric Dumazet 					unsigned int max_segs,
1751d4589926SEric Dumazet 					int nonagle)
1752c1b4a7e6SDavid S. Miller {
1753cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1754d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1755c1b4a7e6SDavid S. Miller 
175690840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17571485348dSBen Hutchings 	max_len = mss_now * max_segs;
17580e3a4803SIlpo Järvinen 
17591485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17601485348dSBen Hutchings 		return max_len;
17610e3a4803SIlpo Järvinen 
17625ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17635ea3a748SIlpo Järvinen 
17641485348dSBen Hutchings 	if (max_len <= needed)
17651485348dSBen Hutchings 		return max_len;
17660e3a4803SIlpo Järvinen 
1767d4589926SEric Dumazet 	partial = needed % mss_now;
1768d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1769d4589926SEric Dumazet 	 * to include this last segment in this skb.
1770d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1771d4589926SEric Dumazet 	 */
1772cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1773d4589926SEric Dumazet 		return needed - partial;
1774d4589926SEric Dumazet 
1775d4589926SEric Dumazet 	return needed;
1776c1b4a7e6SDavid S. Miller }
1777c1b4a7e6SDavid S. Miller 
1778c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1779c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1780c1b4a7e6SDavid S. Miller  */
1781cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1782cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1783c1b4a7e6SDavid S. Miller {
1784d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1785c1b4a7e6SDavid S. Miller 
1786c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17874de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17884de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1789c1b4a7e6SDavid S. Miller 		return 1;
1790c1b4a7e6SDavid S. Miller 
1791c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1792c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1793d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1794c1b4a7e6SDavid S. Miller 		return 0;
1795d649a7a8SEric Dumazet 
1796d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1797d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1798d649a7a8SEric Dumazet 	 */
1799d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1800d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1801c1b4a7e6SDavid S. Miller }
1802c1b4a7e6SDavid S. Miller 
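/* Worked example (editorial, hypothetical helper) of the quota above:
 * with cwnd = 10 and in_flight = 3 the result is min(5, 7) = 5, so at
 * most half the window is burst at once even though 7 segments would fit.
 */
static inline u32 example_cwnd_quota(u32 cwnd, u32 in_flight)
{
	u32 halfcwnd = max(cwnd >> 1, 1U);

	return in_flight >= cwnd ? 0 : min(halfcwnd, cwnd - in_flight);
}
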
1803b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
180467edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1805c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1806c1b4a7e6SDavid S. Miller  */
18075bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1808c1b4a7e6SDavid S. Miller {
1809c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1810c1b4a7e6SDavid S. Miller 
1811f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
18125bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1813c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1814c1b4a7e6SDavid S. Miller 	}
1815c1b4a7e6SDavid S. Miller 	return tso_segs;
1816c1b4a7e6SDavid S. Miller }
1817c1b4a7e6SDavid S. Miller 
1818c1b4a7e6SDavid S. Miller 
1819a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1820c1b4a7e6SDavid S. Miller  * sent now.
1821c1b4a7e6SDavid S. Miller  */
1822a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1823c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1824c1b4a7e6SDavid S. Miller {
1825c1b4a7e6SDavid S. Miller 	/* Nagle rule does not apply to frames which sit in the middle of the
1826c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1827c1b4a7e6SDavid S. Miller 	 *
1828c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1829c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1830c1b4a7e6SDavid S. Miller 	 */
1831c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1832a2a385d6SEric Dumazet 		return true;
1833c1b4a7e6SDavid S. Miller 
18349b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18359b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1836a2a385d6SEric Dumazet 		return true;
1837c1b4a7e6SDavid S. Miller 
1838cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1839a2a385d6SEric Dumazet 		return true;
1840c1b4a7e6SDavid S. Miller 
1841a2a385d6SEric Dumazet 	return false;
1842c1b4a7e6SDavid S. Miller }
1843c1b4a7e6SDavid S. Miller 
1844c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1845a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1846a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1847056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1848c1b4a7e6SDavid S. Miller {
1849c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1850c1b4a7e6SDavid S. Miller 
1851c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1852c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1853c1b4a7e6SDavid S. Miller 
185490840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1855c1b4a7e6SDavid S. Miller }
1856c1b4a7e6SDavid S. Miller 
1857c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1858c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1859c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1860c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1861c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1862c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1863c1b4a7e6SDavid S. Miller  */
186475c119afSEric Dumazet static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
186575c119afSEric Dumazet 			struct sk_buff *skb, unsigned int len,
1866c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1867c1b4a7e6SDavid S. Miller {
1868c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1869c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
18709ce01461SIlpo Järvinen 	u8 flags;
1871c1b4a7e6SDavid S. Miller 
1872c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1873c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
187475c119afSEric Dumazet 		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
1875c1b4a7e6SDavid S. Miller 
1876eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
187751456b29SIan Morris 	if (unlikely(!buff))
1878c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1879c1b4a7e6SDavid S. Miller 
18803ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18813ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1882b60b49eaSHerbert Xu 	buff->truesize += nlen;
1883c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1884c1b4a7e6SDavid S. Miller 
1885c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1886c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1887c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1888c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1889c1b4a7e6SDavid S. Miller 
1890c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18914de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18924de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18934de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1894c1b4a7e6SDavid S. Miller 
1895c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1896c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1897c1b4a7e6SDavid S. Miller 
1898a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1899a166140eSMartin KaFai Lau 
190098be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1901c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1902490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1903c1b4a7e6SDavid S. Miller 
1904c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
19055bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
19065bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1907c1b4a7e6SDavid S. Miller 
1908c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1909f4a775d1SEric Dumazet 	__skb_header_release(buff);
191075c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1911c1b4a7e6SDavid S. Miller 
1912c1b4a7e6SDavid S. Miller 	return 0;
1913c1b4a7e6SDavid S. Miller }
1914c1b4a7e6SDavid S. Miller 
1915c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1916c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1917c1b4a7e6SDavid S. Miller  *
1918c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1919c1b4a7e6SDavid S. Miller  */
1920ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1921605ad7f1SEric Dumazet 				 bool *is_cwnd_limited, u32 max_segs)
1922c1b4a7e6SDavid S. Miller {
19236687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
192450c8339eSEric Dumazet 	u32 age, send_win, cong_win, limit, in_flight;
192550c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
192650c8339eSEric Dumazet 	struct sk_buff *head;
1927ad9f4f50SEric Dumazet 	int win_divisor;
1928c1b4a7e6SDavid S. Miller 
19294de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1930ae8064acSJohn Heffner 		goto send_now;
1931c1b4a7e6SDavid S. Miller 
193299d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1933ae8064acSJohn Heffner 		goto send_now;
1934ae8064acSJohn Heffner 
19355f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
19365f852eb5SEric Dumazet 	 * only if the last write was recent.
19375f852eb5SEric Dumazet 	 */
1938d635fbe2SEric Dumazet 	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
1939ae8064acSJohn Heffner 		goto send_now;
1940908a75c1SDavid S. Miller 
1941c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1942c1b4a7e6SDavid S. Miller 
1943c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1944c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1945c1b4a7e6SDavid S. Miller 
194690840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1947c1b4a7e6SDavid S. Miller 
1948c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1949c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1950c1b4a7e6SDavid S. Miller 
1951c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1952c1b4a7e6SDavid S. Miller 
1953ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1954605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1955ae8064acSJohn Heffner 		goto send_now;
1956ba244fe9SDavid S. Miller 
195762ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
195862ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
195962ad2761SIlpo Järvinen 		goto send_now;
196062ad2761SIlpo Järvinen 
19615bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1962ad9f4f50SEric Dumazet 	if (win_divisor) {
1963c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1964c1b4a7e6SDavid S. Miller 
1965c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1966c1b4a7e6SDavid S. Miller 		 * just use it.
1967c1b4a7e6SDavid S. Miller 		 */
1968ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1969c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1970ae8064acSJohn Heffner 			goto send_now;
1971c1b4a7e6SDavid S. Miller 	} else {
1972c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1973c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1974c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1975c1b4a7e6SDavid S. Miller 		 * then send now.
1976c1b4a7e6SDavid S. Miller 		 */
19776b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1978ae8064acSJohn Heffner 			goto send_now;
1979c1b4a7e6SDavid S. Miller 	}
1980c1b4a7e6SDavid S. Miller 
198175c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
198275c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
198375c119afSEric Dumazet 	if (!head)
198475c119afSEric Dumazet 		goto send_now;
19852fd66ffbSEric Dumazet 	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
198650c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
198750c8339eSEric Dumazet 	if (age < (tp->srtt_us >> 4))
198850c8339eSEric Dumazet 		goto send_now;
198950c8339eSEric Dumazet 
19905f852eb5SEric Dumazet 	/* Ok, it looks like it is advisable to defer. */
1991ae8064acSJohn Heffner 
1992d2e1339fSBendik Rønning Opstad 	if (cong_win < send_win && cong_win <= skb->len)
1993ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1994ca8a2263SNeal Cardwell 
1995a2a385d6SEric Dumazet 	return true;
1996ae8064acSJohn Heffner 
1997ae8064acSJohn Heffner send_now:
1998a2a385d6SEric Dumazet 	return false;
1999c1b4a7e6SDavid S. Miller }
2000c1b4a7e6SDavid S. Miller 
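/* A worked sketch (editorial, hypothetical helper) of the win_divisor
 * branch above: with snd_wnd = 65535, snd_cwnd = 40, mss_cache = 1448 and
 * the default tcp_tso_win_divisor of 3, the chunk is
 * min(65535, 57920) / 3 = 19306 bytes; a sendable limit at or above that
 * triggers an immediate send instead of deferring.
 */
static inline u32 example_defer_chunk(u32 snd_wnd, u32 snd_cwnd,
				      u32 mss_cache, int win_divisor)
{
	u32 chunk = min(snd_wnd, snd_cwnd * mss_cache);

	return chunk / win_divisor;
}
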
200105cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
200205cbc0dbSFan Du {
200305cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
200405cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
200505cbc0dbSFan Du 	struct net *net = sock_net(sk);
200605cbc0dbSFan Du 	u32 interval;
200705cbc0dbSFan Du 	s32 delta;
200805cbc0dbSFan Du 
200905cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
2010c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
201105cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
201205cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
201305cbc0dbSFan Du 
201405cbc0dbSFan Du 		/* Update current search range */
201505cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
201605cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
201705cbc0dbSFan Du 			sizeof(struct tcphdr) +
201805cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
201905cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
202005cbc0dbSFan Du 
202105cbc0dbSFan Du 		/* Update probe time stamp */
2022c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
202305cbc0dbSFan Du 	}
202405cbc0dbSFan Du }
202505cbc0dbSFan Du 
2026808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2027808cf9e3SIlya Lesokhin {
2028808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2029808cf9e3SIlya Lesokhin 
2030808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2031808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2032808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2033808cf9e3SIlya Lesokhin 			break;
2034808cf9e3SIlya Lesokhin 
2035808cf9e3SIlya Lesokhin 		if (unlikely(TCP_SKB_CB(skb)->eor))
2036808cf9e3SIlya Lesokhin 			return false;
2037808cf9e3SIlya Lesokhin 
2038808cf9e3SIlya Lesokhin 		len -= skb->len;
2039808cf9e3SIlya Lesokhin 	}
2040808cf9e3SIlya Lesokhin 
2041808cf9e3SIlya Lesokhin 	return true;
2042808cf9e3SIlya Lesokhin }
2043808cf9e3SIlya Lesokhin 
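/* Illustrative stand-alone sketch, not kernel code: the same walk as
 * tcp_can_coalesce_send_queue_head(), replayed over a plain array.  A
 * probe of "len" bytes may only swallow whole skbs that do not carry
 * the EOR (end-of-record) mark.  struct sketch_seg is an assumption.
 */
#include <stdbool.h>

struct sketch_seg { int len; bool eor; };

static bool sketch_can_coalesce(const struct sketch_seg *q, int n, int len)
{
	for (int i = 0; i < n; i++) {
		if (len <= q[i].len)	/* last skb only partly consumed */
			return true;
		if (q[i].eor)		/* must not merge across a record */
			return false;
		len -= q[i].len;
	}
	return true;
}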
20445d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
204567edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
204667edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
204767edfef7SAndi Kleen  * changes that result in larger path MTUs.
204867edfef7SAndi Kleen  *
20495d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20505d424d5aSJohn Heffner  *         1 if a probe was sent,
2051056834d9SIlpo Järvinen  *         -1 otherwise
2052056834d9SIlpo Järvinen  */
20535d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20545d424d5aSJohn Heffner {
20555d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
205612a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20575d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20586b58e0a5SFan Du 	struct net *net = sock_net(sk);
20595d424d5aSJohn Heffner 	int probe_size;
206091cc17c0SIlpo Järvinen 	int size_needed;
206112a59abcSEric Dumazet 	int copy, len;
20625d424d5aSJohn Heffner 	int mss_now;
20636b58e0a5SFan Du 	int interval;
20645d424d5aSJohn Heffner 
20655d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20665d424d5aSJohn Heffner 	 * not in recovery,
20675d424d5aSJohn Heffner 	 * have enough cwnd, and
206812a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
206912a59abcSEric Dumazet 	 */
207012a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20715d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20725d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20735d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
207412a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20755d424d5aSJohn Heffner 		return -1;
20765d424d5aSJohn Heffner 
20776b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
20786b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
20796b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20806b58e0a5SFan Du 	 */
20810c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
20826b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20836b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
208491cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
20856b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
208605cbc0dbSFan Du 	/* When misfortune strikes while we are actively reprobing
208705cbc0dbSFan Du 	 * and the reprobe timer has expired, we stick with the current
208805cbc0dbSFan Du 	 * probing process by not resetting the search range to its original.
208905cbc0dbSFan Du 	 */
20906b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
209105cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
209205cbc0dbSFan Du 		/* Check whether enough time has elapsed for
209305cbc0dbSFan Du 		 * another round of probing.
209405cbc0dbSFan Du 		 */
209505cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
20965d424d5aSJohn Heffner 		return -1;
20975d424d5aSJohn Heffner 	}
20985d424d5aSJohn Heffner 
20995d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
21007f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
21015d424d5aSJohn Heffner 		return -1;
21025d424d5aSJohn Heffner 
210391cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
21045d424d5aSJohn Heffner 		return -1;
210590840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21065d424d5aSJohn Heffner 		return 0;
21075d424d5aSJohn Heffner 
2108d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2109d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2110d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
21115d424d5aSJohn Heffner 			return -1;
21125d424d5aSJohn Heffner 		else
21135d424d5aSJohn Heffner 			return 0;
21145d424d5aSJohn Heffner 	}
21155d424d5aSJohn Heffner 
2116808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2117808cf9e3SIlya Lesokhin 		return -1;
2118808cf9e3SIlya Lesokhin 
21195d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2120eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
212151456b29SIan Morris 	if (!nskb)
21225d424d5aSJohn Heffner 		return -1;
21233ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
21243ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21255d424d5aSJohn Heffner 
2126fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
21275d424d5aSJohn Heffner 
21285d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21295d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21304de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21315d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21325d424d5aSJohn Heffner 	nskb->csum = 0;
213398be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21345d424d5aSJohn Heffner 
213550c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21362b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
213750c4817eSIlpo Järvinen 
21385d424d5aSJohn Heffner 	len = 0;
2139234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21405d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21415d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21425d424d5aSJohn Heffner 
21435d424d5aSJohn Heffner 		if (skb->len <= copy) {
21445d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21455d424d5aSJohn Heffner 			 * Throw it away. */
21464de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2147808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2148808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2149808cf9e3SIlya Lesokhin 			 */
2150808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2151fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21523ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21535d424d5aSJohn Heffner 		} else {
21544de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2155a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21565d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21575d424d5aSJohn Heffner 				skb_pull(skb, copy);
21585d424d5aSJohn Heffner 			} else {
21595d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21605bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21615d424d5aSJohn Heffner 			}
21625d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21635d424d5aSJohn Heffner 		}
21645d424d5aSJohn Heffner 
21655d424d5aSJohn Heffner 		len += copy;
2166234b6860SIlpo Järvinen 
2167234b6860SIlpo Järvinen 		if (len >= probe_size)
2168234b6860SIlpo Järvinen 			break;
21695d424d5aSJohn Heffner 	}
21705bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21715d424d5aSJohn Heffner 
21725d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21737faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21747faee5c0SEric Dumazet 	 */
21755d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21765d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21775d424d5aSJohn Heffner 		 * effectively two packets. */
21785d424d5aSJohn Heffner 		tp->snd_cwnd--;
217966f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21805d424d5aSJohn Heffner 
21815d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
21820e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
21830e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
21845d424d5aSJohn Heffner 
21855d424d5aSJohn Heffner 		return 1;
21865d424d5aSJohn Heffner 	}
21875d424d5aSJohn Heffner 
21885d424d5aSJohn Heffner 	return -1;
21895d424d5aSJohn Heffner }
21905d424d5aSJohn Heffner 
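/* Illustrative stand-alone sketch, not kernel code: the probe sizing
 * done near the top of tcp_mtu_probe().  sketch_mtu_to_mss() and the
 * 40-byte overhead are assumptions (IPv4 + TCP, no options); the real
 * helper accounts for the actual header sizes.
 */
#include <stdint.h>

#define SKETCH_HDR_OVERHEAD 40

static int sketch_mtu_to_mss(int mtu)
{
	return mtu - SKETCH_HDR_OVERHEAD;
}

static int sketch_probe_size(int search_low, int search_high,
			     int reordering, int mss_cache,
			     uint32_t queued_unsent, uint32_t snd_wnd)
{
	/* Binary search: probe the midpoint of the current MTU range. */
	int probe_size = sketch_mtu_to_mss((search_high + search_low) / 2);
	/* Enough follow-up data must exist so that a lost probe is
	 * detected by fast retransmit rather than by an RTO.
	 */
	int size_needed = probe_size + (reordering + 1) * mss_cache;

	if (queued_unsent < (uint32_t)size_needed ||
	    snd_wnd < (uint32_t)size_needed)
		return -1;		/* not ready to probe yet */
	return probe_size;
}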
2191218af599SEric Dumazet static bool tcp_pacing_check(const struct sock *sk)
2192218af599SEric Dumazet {
2193218af599SEric Dumazet 	return tcp_needs_internal_pacing(sk) &&
219473a6bab5SEric Dumazet 	       hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
2195218af599SEric Dumazet }
2196218af599SEric Dumazet 
2197f9616c35SEric Dumazet /* TCP Small Queues :
2198f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets or ~1 ms of data.
2199f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2200f9616c35SEric Dumazet  * This allows for :
2201f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2202f9616c35SEric Dumazet  *  - faster recovery
2203f9616c35SEric Dumazet  *  - high rates
2204f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2205f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2206f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2207f9616c35SEric Dumazet  */
2208f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2209f9616c35SEric Dumazet 				  unsigned int factor)
2210f9616c35SEric Dumazet {
2211f9616c35SEric Dumazet 	unsigned int limit;
2212f9616c35SEric Dumazet 
22133a9b76fdSEric Dumazet 	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
22149184d8bbSEric Dumazet 	limit = min_t(u32, limit,
22159184d8bbSEric Dumazet 		      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2216f9616c35SEric Dumazet 	limit <<= factor;
2217f9616c35SEric Dumazet 
221814afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
221975c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
222075eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
222175eefc6cSEric Dumazet 		 * after the softirq/tasklet schedule.
222275eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
222375eefc6cSEric Dumazet 		 */
222475c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
222575eefc6cSEric Dumazet 			return false;
222675eefc6cSEric Dumazet 
22277aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2228f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2229f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2230f9616c35SEric Dumazet 		 * test the condition again.
2231f9616c35SEric Dumazet 		 */
2232f9616c35SEric Dumazet 		smp_mb__after_atomic();
223314afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2234f9616c35SEric Dumazet 			return true;
2235f9616c35SEric Dumazet 	}
2236f9616c35SEric Dumazet 	return false;
2237f9616c35SEric Dumazet }
2238f9616c35SEric Dumazet 
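/* Illustrative stand-alone sketch, not kernel code: how the TSQ limit
 * above is derived.  pacing_rate is bytes/sec; a pacing_shift of 10
 * makes the budget roughly 1 ms of data.  All values are assumed
 * examples, not kernel defaults.
 */
#include <stdint.h>

static uint32_t sketch_tsq_limit(uint32_t skb_truesize,
				 uint64_t pacing_rate, int pacing_shift,
				 uint32_t sysctl_cap, unsigned int factor)
{
	uint64_t limit = 2ULL * skb_truesize;	/* at least two packets */

	if ((pacing_rate >> pacing_shift) > limit)
		limit = pacing_rate >> pacing_shift;
	if (limit > sysctl_cap)
		limit = sysctl_cap;
	return (uint32_t)(limit << factor);	/* factor=1 for rtx doubles it */
}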
223905b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
224005b055e8SFrancis Yan {
2241628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2242efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
224305b055e8SFrancis Yan 
2244efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2245efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
224605b055e8SFrancis Yan 	tp->chrono_start = now;
224705b055e8SFrancis Yan 	tp->chrono_type = new;
224805b055e8SFrancis Yan }
224905b055e8SFrancis Yan 
225005b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
225105b055e8SFrancis Yan {
225205b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
225305b055e8SFrancis Yan 
225405b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
22550f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
22560f87230dSFrancis Yan 	 * over the other conditions, so that if something "more interesting"
225705b055e8SFrancis Yan 	 * starts happening, we stop the previous chrono and start a new one.
225805b055e8SFrancis Yan 	 */
225905b055e8SFrancis Yan 	if (type > tp->chrono_type)
226005b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
226105b055e8SFrancis Yan }
226205b055e8SFrancis Yan 
226305b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
226405b055e8SFrancis Yan {
226505b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
226605b055e8SFrancis Yan 
22680f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
22690f87230dSFrancis Yan 	 * chronograph, and the highest-priority enum takes
22700f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
22710f87230dSFrancis Yan 	 * When a condition stops, we stop chrono tracking only if
22720f87230dSFrancis Yan 	 * it is the "most interesting" (current) chrono, and we start
22730f87230dSFrancis Yan 	 * the busy chrono if there is still pending data.
22740f87230dSFrancis Yan 	 */
227575c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
227605b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
22770f87230dSFrancis Yan 	else if (type == tp->chrono_type)
22780f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
227905b055e8SFrancis Yan }
228005b055e8SFrancis Yan 
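/* Illustrative stand-alone sketch, not kernel code: the chronograph's
 * priority rule in miniature.  The enum mirrors the kernel's ordering;
 * struct sketch_tp and the jiffies parameter are assumptions.
 */
enum sketch_chrono { S_UNSPEC, S_BUSY, S_RWND_LIMITED, S_SNDBUF_LIMITED };

struct sketch_tp {
	enum sketch_chrono type;
	unsigned long start;
	unsigned long stat[3];	/* one bucket per non-UNSPEC state */
};

static void sketch_chrono_set(struct sketch_tp *tp, enum sketch_chrono t,
			      unsigned long now)
{
	if (tp->type > S_UNSPEC)	/* close out the running chrono */
		tp->stat[tp->type - 1] += now - tp->start;
	tp->start = now;
	tp->type = t;
}

/* Start: only a strictly higher-priority condition may preempt. */
static void sketch_chrono_start(struct sketch_tp *tp, enum sketch_chrono t,
				unsigned long now)
{
	if (t > tp->type)
		sketch_chrono_set(tp, t, now);
}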
22811da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
22821da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
22831da177e4SLinus Torvalds  * window for us.
22841da177e4SLinus Torvalds  *
2285f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2286f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2287f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2288f8269a49SIlpo Järvinen  *
22896ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
22906ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
22916ba8a3b1SNandita Dukkipati  *
2292a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2293a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
22941da177e4SLinus Torvalds  */
2295a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2296d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
22971da177e4SLinus Torvalds {
22981da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
229992df7b51SDavid S. Miller 	struct sk_buff *skb;
2300c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2301c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23025d424d5aSJohn Heffner 	int result;
23035615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2304605ad7f1SEric Dumazet 	u32 max_segs;
23051da177e4SLinus Torvalds 
2306c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23075d424d5aSJohn Heffner 
2308ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2309d5dd9175SIlpo Järvinen 	if (!push_one) {
23105d424d5aSJohn Heffner 		/* Do MTU probing. */
2311d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2312d5dd9175SIlpo Järvinen 		if (!result) {
2313a2a385d6SEric Dumazet 			return false;
23145d424d5aSJohn Heffner 		} else if (result > 0) {
23155d424d5aSJohn Heffner 			sent_pkts = 1;
23165d424d5aSJohn Heffner 		}
2317d5dd9175SIlpo Järvinen 	}
23185d424d5aSJohn Heffner 
2319ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2320fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2321c8ac3774SHerbert Xu 		unsigned int limit;
2322c8ac3774SHerbert Xu 
2323218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2324218af599SEric Dumazet 			break;
2325218af599SEric Dumazet 
23265bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2327c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2328c1b4a7e6SDavid S. Miller 
23299d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
23307faee5c0SEric Dumazet 			/* "skb_mstamp" is used as the starting point for the retransmit timer */
2331e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
2332ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
23339d186cacSAndrey Vagin 		}
2334ec342325SAndrew Vagin 
2335b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
23366ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
23376ba8a3b1SNandita Dukkipati 			if (push_one == 2)
23386ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
23396ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23406ba8a3b1SNandita Dukkipati 			else
2341b68e9f85SHerbert Xu 				break;
23426ba8a3b1SNandita Dukkipati 		}
2343b68e9f85SHerbert Xu 
23445615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23455615f886SFrancis Yan 			is_rwnd_limited = true;
2346b68e9f85SHerbert Xu 			break;
23475615f886SFrancis Yan 		}
2348b68e9f85SHerbert Xu 
2349d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2350aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2351aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2352aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2353aa93466bSDavid S. Miller 				break;
2354c1b4a7e6SDavid S. Miller 		} else {
2355ca8a2263SNeal Cardwell 			if (!push_one &&
2356605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2357605ad7f1SEric Dumazet 						 max_segs))
2358aa93466bSDavid S. Miller 				break;
2359c1b4a7e6SDavid S. Miller 		}
2360aa93466bSDavid S. Miller 
2361605ad7f1SEric Dumazet 		limit = mss_now;
2362d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2363605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2364605ad7f1SEric Dumazet 						    min_t(unsigned int,
2365605ad7f1SEric Dumazet 							  cwnd_quota,
2366605ad7f1SEric Dumazet 							  max_segs),
2367605ad7f1SEric Dumazet 						    nonagle);
2368605ad7f1SEric Dumazet 
2369605ad7f1SEric Dumazet 		if (skb->len > limit &&
237075c119afSEric Dumazet 		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
237175c119afSEric Dumazet 					  skb, limit, mss_now, gfp)))
2372605ad7f1SEric Dumazet 			break;
2373605ad7f1SEric Dumazet 
2374f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
237546d3ceabSEric Dumazet 			break;
2376c9eeec26SEric Dumazet 
2377d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
23781da177e4SLinus Torvalds 			break;
23791da177e4SLinus Torvalds 
2380ec342325SAndrew Vagin repair:
23811da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
23821da177e4SLinus Torvalds 		 * This call will increment packets_out.
23831da177e4SLinus Torvalds 		 */
238466f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
23851da177e4SLinus Torvalds 
23861da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2387a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2388d5dd9175SIlpo Järvinen 
2389d5dd9175SIlpo Järvinen 		if (push_one)
2390d5dd9175SIlpo Järvinen 			break;
23911da177e4SLinus Torvalds 	}
23921da177e4SLinus Torvalds 
23935615f886SFrancis Yan 	if (is_rwnd_limited)
23945615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
23955615f886SFrancis Yan 	else
23965615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
23975615f886SFrancis Yan 
2398aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2399684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2400684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24016ba8a3b1SNandita Dukkipati 
24026ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24036ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2404ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2405d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2406ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2407a2a385d6SEric Dumazet 		return false;
24081da177e4SLinus Torvalds 	}
240975c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24106ba8a3b1SNandita Dukkipati }
24116ba8a3b1SNandita Dukkipati 
2412ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24136ba8a3b1SNandita Dukkipati {
24146ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24156ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2416a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24172ae21cf5SEric Dumazet 	int early_retrans;
24186ba8a3b1SNandita Dukkipati 
24196ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24206ba8a3b1SNandita Dukkipati 	 * finishes.
24216ba8a3b1SNandita Dukkipati 	 */
2422f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
24236ba8a3b1SNandita Dukkipati 		return false;
24246ba8a3b1SNandita Dukkipati 
24252ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
24266ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK-capable connections
2427b4f70c3dSNeal Cardwell 	 * not in loss recovery that are limited by either cwnd or the application.
24286ba8a3b1SNandita Dukkipati 	 */
24292ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2430bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2431b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2432b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
24336ba8a3b1SNandita Dukkipati 		return false;
24346ba8a3b1SNandita Dukkipati 
2435bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2436f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2437f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24386ba8a3b1SNandita Dukkipati 	 */
2439bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2440bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
24416ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2442bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2443bb4d991aSYuchung Cheng 		else
2444bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2445bb4d991aSYuchung Cheng 	} else {
2446bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2447bb4d991aSYuchung Cheng 	}
24486ba8a3b1SNandita Dukkipati 
2449a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2450ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2451ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2452ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2453a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2454a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
24556ba8a3b1SNandita Dukkipati 
24566ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
24576ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
24586ba8a3b1SNandita Dukkipati 	return true;
24596ba8a3b1SNandita Dukkipati }
24606ba8a3b1SNandita Dukkipati 
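/* Illustrative stand-alone sketch, not kernel code: the probe-timeout
 * formula above.  srtt_us is 8*srtt as the kernel stores it; the
 * constants assume HZ=1000, so one jiffy is one millisecond.
 */
#include <stdint.h>

#define SKETCH_RTO_MIN		200	/* assumed: HZ/5 at HZ=1000 */
#define SKETCH_TIMEOUT_MIN	2	/* assumed kernel floor, jiffies */
#define SKETCH_TIMEOUT_INIT	1000	/* assumed: 1 s in jiffies */

static uint32_t sketch_tlp_timeout(uint32_t srtt_us, uint32_t packets_out)
{
	uint32_t timeout;

	if (srtt_us) {
		/* srtt_us >> 2 is 2*srtt in usec; /1000 converts to
		 * jiffies under the HZ=1000 assumption.
		 */
		timeout = (srtt_us >> 2) / 1000;
		timeout += (packets_out == 1) ? SKETCH_RTO_MIN
					      : SKETCH_TIMEOUT_MIN;
	} else {
		timeout = SKETCH_TIMEOUT_INIT;	/* no RTT sample yet */
	}
	return timeout;
}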
24611f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
24621f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
24631f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
24641f3279aeSEric Dumazet  */
24651f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24661f3279aeSEric Dumazet 				    const struct sk_buff *skb)
24671f3279aeSEric Dumazet {
246839bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2469c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
24701f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24711f3279aeSEric Dumazet 		return true;
24721f3279aeSEric Dumazet 	}
24731f3279aeSEric Dumazet 	return false;
24741f3279aeSEric Dumazet }
24751f3279aeSEric Dumazet 
2476b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
24776ba8a3b1SNandita Dukkipati  * retransmit the last segment.
24786ba8a3b1SNandita Dukkipati  */
24796ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
24806ba8a3b1SNandita Dukkipati {
24819b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
24826ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
24836ba8a3b1SNandita Dukkipati 	int pcount;
24846ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
24856ba8a3b1SNandita Dukkipati 
2486b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
248775c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2488b340b264SYuchung Cheng 		pcount = tp->packets_out;
2489b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2490b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2491b340b264SYuchung Cheng 			goto probe_sent;
24926ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24936ba8a3b1SNandita Dukkipati 	}
249475c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
24956ba8a3b1SNandita Dukkipati 
24969b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
24979b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
24989b717a8dSNandita Dukkipati 		goto rearm_timer;
24999b717a8dSNandita Dukkipati 
25006ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
25016ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
25026ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25036ba8a3b1SNandita Dukkipati 
25041f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25051f3279aeSEric Dumazet 		goto rearm_timer;
25061f3279aeSEric Dumazet 
25076ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25086ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25096ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25106ba8a3b1SNandita Dukkipati 
25116ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
251275c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
251375c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25146cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25156ba8a3b1SNandita Dukkipati 			goto rearm_timer;
251675c119afSEric Dumazet 		skb = skb_rb_next(skb);
25176ba8a3b1SNandita Dukkipati 	}
25186ba8a3b1SNandita Dukkipati 
25196ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25206ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25216ba8a3b1SNandita Dukkipati 
252210d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2523b340b264SYuchung Cheng 		goto rearm_timer;
25246ba8a3b1SNandita Dukkipati 
25259b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
25269b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
25279b717a8dSNandita Dukkipati 
2528b340b264SYuchung Cheng probe_sent:
2529c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2530fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2531fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2532b340b264SYuchung Cheng rearm_timer:
2533fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
25341da177e4SLinus Torvalds }
25351da177e4SLinus Torvalds 
2536a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2537a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2538a762a980SDavid S. Miller  * The socket must be locked by the caller.
2539a762a980SDavid S. Miller  */
25409e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25419e412ba7SIlpo Järvinen 			       int nonagle)
2542a762a980SDavid S. Miller {
2543726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2544726e07a8SIlpo Järvinen 	 * In time closedown will finish; we empty the write queue and
2545726e07a8SIlpo Järvinen 	 * all will be happy.
2546726e07a8SIlpo Järvinen 	 */
2547726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2548726e07a8SIlpo Järvinen 		return;
2549726e07a8SIlpo Järvinen 
255099a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
25517450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
25529e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2553a762a980SDavid S. Miller }
2554a762a980SDavid S. Miller 
2555c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head. This function requires
2556c1b4a7e6SDavid S. Miller  * a true push of pending frames to set up the probe timer etc.
2557c1b4a7e6SDavid S. Miller  */
2558c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2559c1b4a7e6SDavid S. Miller {
2560fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2561c1b4a7e6SDavid S. Miller 
2562c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2563c1b4a7e6SDavid S. Miller 
2564d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2565c1b4a7e6SDavid S. Miller }
2566c1b4a7e6SDavid S. Miller 
25671da177e4SLinus Torvalds /* This function returns the amount that we can raise the
25681da177e4SLinus Torvalds  * usable window based on the following constraints
25691da177e4SLinus Torvalds  *
25701da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
25711da177e4SLinus Torvalds  * 2. We limit memory per socket
25721da177e4SLinus Torvalds  *
25731da177e4SLinus Torvalds  * RFC 1122:
25741da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
25751da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
25761da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
25771da177e4SLinus Torvalds  *
25781da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
25791da177e4SLinus Torvalds  * it at least MSS bytes.
25801da177e4SLinus Torvalds  *
25811da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
25821da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
25831da177e4SLinus Torvalds  *
25841da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
25851da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
25861da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
25871da177e4SLinus Torvalds  * window to always advance by a single byte.
25881da177e4SLinus Torvalds  *
25891da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
25901da177e4SLinus Torvalds  * then this will not be a problem.
25911da177e4SLinus Torvalds  *
25921da177e4SLinus Torvalds  * BSD seems to make the following compromise:
25931da177e4SLinus Torvalds  *
25941da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
25951da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
25961da177e4SLinus Torvalds  *	then set the window to 0.
25971da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
25981da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
25991da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26001da177e4SLinus Torvalds  *
26011da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26021da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26031da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26041da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26051da177e4SLinus Torvalds  * because the pipeline is full.
26061da177e4SLinus Torvalds  *
26071da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26081da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26091da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26101da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26111da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26121da177e4SLinus Torvalds  *
26131da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26141da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26151da177e4SLinus Torvalds  *
26161da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26171da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26181da177e4SLinus Torvalds  */
26191da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26201da177e4SLinus Torvalds {
2621463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
26221da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2623caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
26241da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
26251da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
26261da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
26271da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
26281da177e4SLinus Torvalds 	 */
2629463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
26301da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
263186c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
263286c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
26331da177e4SLinus Torvalds 	int window;
26341da177e4SLinus Torvalds 
263506425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
26361da177e4SLinus Torvalds 		mss = full_space;
263706425c30SEric Dumazet 		if (mss <= 0)
263806425c30SEric Dumazet 			return 0;
263906425c30SEric Dumazet 	}
2640b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2641463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26421da177e4SLinus Torvalds 
2643b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2644056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2645056834d9SIlpo Järvinen 					       4U * tp->advmss);
26461da177e4SLinus Torvalds 
264786c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
264886c1a045SFlorian Westphal 		 * increase it due to wscale.
264986c1a045SFlorian Westphal 		 */
265086c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
265186c1a045SFlorian Westphal 
265286c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
265386c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
265486c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
265586c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
265686c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
265786c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
265886c1a045SFlorian Westphal 		 */
265986c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
26601da177e4SLinus Torvalds 			return 0;
26611da177e4SLinus Torvalds 	}
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
26641da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
26651da177e4SLinus Torvalds 
26661da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
26671da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
26681da177e4SLinus Torvalds 	 */
26691da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
26701da177e4SLinus Torvalds 		window = free_space;
26711da177e4SLinus Torvalds 
26721da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
26741da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
26741da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
26751da177e4SLinus Torvalds 		 */
26761935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
26771da177e4SLinus Torvalds 	} else {
26781935299dSGao Feng 		window = tp->rcv_wnd;
26791da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
26801da177e4SLinus Torvalds 		 * Window clamp already applied above.
26811da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
26821da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
26831da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
26841da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
26851da177e4SLinus Torvalds 		 * is too small.
26861da177e4SLinus Torvalds 		 */
26871da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
26881935299dSGao Feng 			window = rounddown(free_space, mss);
268984565070SJohn Heffner 		else if (mss == full_space &&
2690b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
269184565070SJohn Heffner 			window = free_space;
26921da177e4SLinus Torvalds 	}
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds 	return window;
26951da177e4SLinus Torvalds }
26961da177e4SLinus Torvalds 
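/* Illustrative stand-alone sketch, not kernel code: the two rounding
 * modes at the end of __tcp_select_window(), over plain ints.  The
 * kernel additionally keeps the full free space when mss equals the
 * full window; that special case is omitted here.
 */
static int sketch_round_window(int free_space, int rcv_wnd,
			       int rcv_wscale, int mss)
{
	int window;

	if (rcv_wscale) {
		/* ALIGN(): round up so the value survives ">> wscale". */
		int unit = 1 << rcv_wscale;

		window = (free_space + unit - 1) & ~(unit - 1);
	} else {
		window = rcv_wnd;
		/* Keep the old offer when it lies within one MSS of the
		 * free space; otherwise round down to a multiple of MSS.
		 */
		if (window <= free_space - mss || window > free_space)
			window = free_space - (free_space % mss);
	}
	return window;
}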
2697cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2698082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2699082ac2d5SMartin KaFai Lau {
27000a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27010a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27020a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2703082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2704082ac2d5SMartin KaFai Lau 
27050a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2706082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27072de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27082de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2709082ac2d5SMartin KaFai Lau 	}
2710082ac2d5SMartin KaFai Lau }
2711082ac2d5SMartin KaFai Lau 
27124a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2713f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27141da177e4SLinus Torvalds {
27151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
271675c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
271713dde04fSWei Yongjun 	int next_skb_size;
27181da177e4SLinus Torvalds 
2719058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27201da177e4SLinus Torvalds 
2721058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
27221da177e4SLinus Torvalds 
2723f8071cdeSEric Dumazet 	if (next_skb_size) {
2724f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2725f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2726f8071cdeSEric Dumazet 				      next_skb_size);
2727f8071cdeSEric Dumazet 		else if (!skb_shift(skb, next_skb, next_skb_size))
2728f8071cdeSEric Dumazet 			return false;
2729f8071cdeSEric Dumazet 	}
27302b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2731a6963a6bSIlpo Järvinen 
27321da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27331da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27341da177e4SLinus Torvalds 
2735e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27364de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27371da177e4SLinus Torvalds 
27381da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27391da177e4SLinus Torvalds 	 * packet counting does not break.
27401da177e4SLinus Torvalds 	 */
27414828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2742a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2743b7689205SIlpo Järvinen 
2744b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2745ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2746ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2747ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2748b7689205SIlpo Järvinen 
2749797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2750797108d1SIlpo Järvinen 
2751082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2752082ac2d5SMartin KaFai Lau 
275375c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2754f8071cdeSEric Dumazet 	return true;
27551da177e4SLinus Torvalds }
27561da177e4SLinus Torvalds 
275767edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2758a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27594a17fc3aSIlpo Järvinen {
27604a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2761a2a385d6SEric Dumazet 		return false;
27624a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2763a2a385d6SEric Dumazet 		return false;
27642331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
27654a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2766a2a385d6SEric Dumazet 		return false;
27674a17fc3aSIlpo Järvinen 
2768a2a385d6SEric Dumazet 	return true;
27694a17fc3aSIlpo Järvinen }
27704a17fc3aSIlpo Järvinen 
277167edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
277267edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
277367edfef7SAndi Kleen  */
27744a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
27754a17fc3aSIlpo Järvinen 				     int space)
27764a17fc3aSIlpo Järvinen {
27774a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
27784a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2779a2a385d6SEric Dumazet 	bool first = true;
27804a17fc3aSIlpo Järvinen 
2781e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
27824a17fc3aSIlpo Järvinen 		return;
27834de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
27844a17fc3aSIlpo Järvinen 		return;
27854a17fc3aSIlpo Järvinen 
278675c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
27874a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
27884a17fc3aSIlpo Järvinen 			break;
27894a17fc3aSIlpo Järvinen 
2790a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2791a643b5d4SMartin KaFai Lau 			break;
2792a643b5d4SMartin KaFai Lau 
27934a17fc3aSIlpo Järvinen 		space -= skb->len;
27944a17fc3aSIlpo Järvinen 
27954a17fc3aSIlpo Järvinen 		if (first) {
2796a2a385d6SEric Dumazet 			first = false;
27974a17fc3aSIlpo Järvinen 			continue;
27984a17fc3aSIlpo Järvinen 		}
27994a17fc3aSIlpo Järvinen 
28004a17fc3aSIlpo Järvinen 		if (space < 0)
28014a17fc3aSIlpo Järvinen 			break;
28024a17fc3aSIlpo Järvinen 
28034a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28044a17fc3aSIlpo Järvinen 			break;
28054a17fc3aSIlpo Järvinen 
2806f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2807f8071cdeSEric Dumazet 			break;
28084a17fc3aSIlpo Järvinen 	}
28094a17fc3aSIlpo Järvinen }
28104a17fc3aSIlpo Järvinen 
28111da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28121da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28131da177e4SLinus Torvalds  * error occurred which prevented the send.
28141da177e4SLinus Torvalds  */
281510d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28161da177e4SLinus Torvalds {
28175d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
281810d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28197d227cd2SSridhar Samudrala 	unsigned int cur_mss;
282010d3be56SEric Dumazet 	int diff, len, err;
28211da177e4SLinus Torvalds 
282310d3be56SEric Dumazet 	/* Inconclusive MTU probe */
282410d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
28255d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
28265d424d5aSJohn Heffner 
28271da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2828caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
28291da177e4SLinus Torvalds 	 */
283014afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2831ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2832ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28331da177e4SLinus Torvalds 		return -EAGAIN;
28341da177e4SLinus Torvalds 
28351f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28361f3279aeSEric Dumazet 		return -EBUSY;
28371f3279aeSEric Dumazet 
28381da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28397f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
28407f582b24SEric Dumazet 			WARN_ON_ONCE(1);
28417f582b24SEric Dumazet 			return -EINVAL;
28427f582b24SEric Dumazet 		}
28431da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28441da177e4SLinus Torvalds 			return -ENOMEM;
28451da177e4SLinus Torvalds 	}
28461da177e4SLinus Torvalds 
28477d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28487d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
28497d227cd2SSridhar Samudrala 
28500c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
28517d227cd2SSridhar Samudrala 
28521da177e4SLinus Torvalds 	/* If the receiver has shrunk its window and the skb is out of
28531da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
28541da177e4SLinus Torvalds 	 * case when the window has shrunk to zero. In this case
28551da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
28561da177e4SLinus Torvalds 	 */
28579d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
28589d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
28591da177e4SLinus Torvalds 		return -EAGAIN;
28601da177e4SLinus Torvalds 
286110d3be56SEric Dumazet 	len = cur_mss * segs;
286210d3be56SEric Dumazet 	if (skb->len > len) {
286375c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
286475c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
28651da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
286602276f3cSIlpo Järvinen 	} else {
2867c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2868c52e2421SEric Dumazet 			return -ENOMEM;
286910d3be56SEric Dumazet 
287010d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
287110d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
287210d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
287310d3be56SEric Dumazet 		if (diff)
287410d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
287510d3be56SEric Dumazet 		if (skb->len < cur_mss)
287610d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
28771da177e4SLinus Torvalds 	}
28781da177e4SLinus Torvalds 
287949213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
288049213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
288149213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
288249213555SDaniel Borkmann 
2883678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2884678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2885678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2886678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2887678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2888678550c6SYuchung Cheng 	tp->total_retrans += segs;
2889fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2890678550c6SYuchung Cheng 
289150bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
289250bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
289350bceae9SThomas Graf 	 * beyond what csum_start can cover.
289450bceae9SThomas Graf 	 */
289550bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
289650bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
289710a81980SEric Dumazet 		struct sk_buff *nskb;
289810a81980SEric Dumazet 
2899e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
290010a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2901c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2902117632e6SEric Dumazet 				     -ENOBUFS;
2903e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2904e2080072SEric Dumazet 
29055889e2c0SYousuk Seung 		if (!err) {
2906e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
29075889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29085889e2c0SYousuk Seung 		}
2909117632e6SEric Dumazet 	} else {
2910c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2911117632e6SEric Dumazet 	}
2912c84a5711SYuchung Cheng 
2913a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2914a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2915a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2916a31ad29eSLawrence Brakmo 
2917fc9f3501SEric Dumazet 	if (likely(!err)) {
2918c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2919e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2920678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2921678550c6SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2922fc9f3501SEric Dumazet 	}
2923c84a5711SYuchung Cheng 	return err;
292493b174adSYuchung Cheng }
292593b174adSYuchung Cheng 
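/* Illustrative stand-alone sketch, not kernel code: the admission test
 * at the top of __tcp_retransmit_skb() -- in-flight truesize may reach
 * queued bytes plus a 1/4 copying-overhead reserve, capped by sndbuf.
 * Parameter names are assumptions mirroring the socket fields.
 */
static int sketch_rtx_admit(unsigned int wmem_alloc,
			    unsigned int wmem_queued, unsigned int sndbuf)
{
	unsigned int limit = wmem_queued + (wmem_queued >> 2);

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc > limit ? -1 /* would return -EAGAIN */ : 0;
}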
292610d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
292793b174adSYuchung Cheng {
292893b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
292910d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
29301da177e4SLinus Torvalds 
29311da177e4SLinus Torvalds 	if (err == 0) {
29321da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
29331da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2934e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
29351da177e4SLinus Torvalds 		}
29361da177e4SLinus Torvalds #endif
29371da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
29381da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
29391da177e4SLinus Torvalds 
29401da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
29411da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
29427faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
29431da177e4SLinus Torvalds 
29441da177e4SLinus Torvalds 	}
29456e08d5e3SYuchung Cheng 
29466e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
29476e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
29486e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
29491da177e4SLinus Torvalds 	return err;
29501da177e4SLinus Torvalds }
29511da177e4SLinus Torvalds 
29521da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29531da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
29541da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
29551da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
29561da177e4SLinus Torvalds  */
29571da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29581da177e4SLinus Torvalds {
29596687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2960b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
29611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2962840a3cbeSYuchung Cheng 	u32 max_segs;
296361eb55f4SIlpo Järvinen 	int mib_idx;
29646a438bbeSStephen Hemminger 
296545e77d31SIlpo Järvinen 	if (!tp->packets_out)
296645e77d31SIlpo Järvinen 		return;
296745e77d31SIlpo Järvinen 
296875c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
2969b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
2970ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
297175c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
2972dca0aaf8SEric Dumazet 		__u8 sacked;
297310d3be56SEric Dumazet 		int segs;
29741da177e4SLinus Torvalds 
2975218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2976218af599SEric Dumazet 			break;
2977218af599SEric Dumazet 
29786a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
297951456b29SIan Morris 		if (!hole)
29806a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
29816a438bbeSStephen Hemminger 
298210d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
298310d3be56SEric Dumazet 		if (segs <= 0)
29841da177e4SLinus Torvalds 			return;
2985dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
2986a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
2987a3d2e9f8SEric Dumazet 		 * we need to make sure not to send too-big TSO packets
2988a3d2e9f8SEric Dumazet 		 */
2989a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
29900e1c54c2SIlpo Järvinen 
2991840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
2992006f582cSIlpo Järvinen 			break;
29930e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
299451456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
29950e1c54c2SIlpo Järvinen 				hole = skb;
299661eb55f4SIlpo Järvinen 			continue;
29971da177e4SLinus Torvalds 
29980e1c54c2SIlpo Järvinen 		} else {
29990e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30000e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30010e1c54c2SIlpo Järvinen 			else
30020e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30030e1c54c2SIlpo Järvinen 		}
30040e1c54c2SIlpo Järvinen 
30050e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
300661eb55f4SIlpo Järvinen 			continue;
300740b215e5SPavel Emelyanov 
3008f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3009f9616c35SEric Dumazet 			return;
3010f9616c35SEric Dumazet 
301110d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30121da177e4SLinus Torvalds 			return;
301324ab6becSYuchung Cheng 
3014de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30151da177e4SLinus Torvalds 
3016684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3017a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3018a262f0cdSNandita Dukkipati 
301975c119afSEric Dumazet 		if (skb == rtx_head &&
302057dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3021463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30223f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
30233f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
30241da177e4SLinus Torvalds 	}
30251da177e4SLinus Torvalds }
30261da177e4SLinus Torvalds 
3027d83769a5SEric Dumazet /* We allow FIN packets to exceed memory limits, to expedite
3028d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3029845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
3030845704a5SEric Dumazet  * or even be forced to close flow without any FIN.
3031a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3032a6c5ea4cSEric Dumazet  * with edge trigger epoll()
3033d83769a5SEric Dumazet  */
3034a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3035d83769a5SEric Dumazet {
3036e805605cSJohannes Weiner 	int amt;
3037d83769a5SEric Dumazet 
3038d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3039d83769a5SEric Dumazet 		return;
3040d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3041d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3042e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3043e805605cSJohannes Weiner 
3044baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3045baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3046d83769a5SEric Dumazet }
3047d83769a5SEric Dumazet 
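/* Illustrative stand-alone sketch, not kernel code: the page rounding
 * sk_forced_mem_schedule() performs.  SK_MEM_QUANTUM equals the page
 * size; 4096 is assumed here for the example.
 */
#define SKETCH_MEM_QUANTUM 4096

static int sketch_forced_pages(int size, int forward_alloc)
{
	if (size <= forward_alloc)
		return 0;	/* already provisioned, charge nothing */
	/* Round up to whole quanta; limits are deliberately ignored so
	 * a FIN can always be queued.
	 */
	return (size + SKETCH_MEM_QUANTUM - 1) / SKETCH_MEM_QUANTUM;
}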
3048845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3049845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
30501da177e4SLinus Torvalds  */
30511da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30521da177e4SLinus Torvalds {
3053845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30541da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30551da177e4SLinus Torvalds 
3056845704a5SEric Dumazet 	/* Optimization: tack on the FIN if we have one skb in the write queue
3057845704a5SEric Dumazet 	 * and this skb was not yet sent, or if we are under memory pressure.
3058845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3059845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
30601da177e4SLinus Torvalds 	 */
306175c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
306275c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
306375c119afSEric Dumazet 
306475c119afSEric Dumazet 	if (tskb) {
3065845704a5SEric Dumazet coalesce:
3066845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3067845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
30681da177e4SLinus Torvalds 		tp->write_seq++;
306975c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3070845704a5SEric Dumazet 			/* This means tskb was already sent.
3071845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3072845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3073845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3074845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3075845704a5SEric Dumazet 			 */
3076845704a5SEric Dumazet 			tp->snd_nxt++;
3077845704a5SEric Dumazet 			return;
3078845704a5SEric Dumazet 		}
30791da177e4SLinus Torvalds 	} else {
3080845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3081845704a5SEric Dumazet 		if (unlikely(!skb)) {
3082845704a5SEric Dumazet 			if (tskb)
3083845704a5SEric Dumazet 				goto coalesce;
3084845704a5SEric Dumazet 			return;
30851da177e4SLinus Torvalds 		}
3086e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3087d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3088a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
30891da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3090e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3091a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
30921da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
30931da177e4SLinus Torvalds 	}
3094845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
30951da177e4SLinus Torvalds }
30961da177e4SLinus Torvalds 
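/* Illustrative user-space counterpart (hedged sketch, not part of this
 * file): tcp_send_fin() runs, among other places, when an application
 * half-closes an established socket.  The FIN consumes one unit of
 * sequence space, which is why write_seq/end_seq are advanced above.
 *
 *	#include <sys/socket.h>
 *
 *	void finish_sending(int fd)
 *	{
 *		// No more data from us; the peer reads EOF after draining.
 *		// The stack emits a FIN, possibly coalesced with the last
 *		// unsent segment as in the optimization above.
 *		shutdown(fd, SHUT_WR);
 *	}
 */
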
30971da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
30981da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
30991da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
310065bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31011da177e4SLinus Torvalds  */
3102dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31031da177e4SLinus Torvalds {
31041da177e4SLinus Torvalds 	struct sk_buff *skb;
31051da177e4SLinus Torvalds 
31067cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31077cc2b043SGao Feng 
31081da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31091da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31101da177e4SLinus Torvalds 	if (!skb) {
31114e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31121da177e4SLinus Torvalds 		return;
31131da177e4SLinus Torvalds 	}
31141da177e4SLinus Torvalds 
31151da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31161da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3117e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3118a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31199a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
31201da177e4SLinus Torvalds 	/* Send it off. */
3121dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
31224e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3123c24b14c4SSong Liu 
3124c24b14c4SSong Liu 	/* The skb argument of trace_tcp_send_reset() carries the skb that
3125c24b14c4SSong Liu 	 * caused the RST; the skb here is not that one, so pass NULL.
3126c24b14c4SSong Liu 	 */
3127c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
31281da177e4SLinus Torvalds }
31291da177e4SLinus Torvalds 
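/* Illustrative user-space trigger (hedged sketch): besides the
 * unread-data-at-close case described above, a documented way to reach
 * tcp_send_active_reset() is an abortive close via SO_LINGER with a zero
 * timeout, which sends a RST instead of performing the FIN handshake.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void abortive_close(int fd)
 *	{
 *		struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *		setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *		close(fd);	// RST is sent; TIME-WAIT is skipped
 *	}
 */
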
313067edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
313167edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
31321da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
31331da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
31341da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
31351da177e4SLinus Torvalds  */
31361da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
31371da177e4SLinus Torvalds {
31381da177e4SLinus Torvalds 	struct sk_buff *skb;
31391da177e4SLinus Torvalds 
314075c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
314151456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
314275c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31431da177e4SLinus Torvalds 		return -EFAULT;
31441da177e4SLinus Torvalds 	}
31454de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31461da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3147e2080072SEric Dumazet 			struct sk_buff *nskb;
3148e2080072SEric Dumazet 
3149e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3150e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3151e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
315251456b29SIan Morris 			if (!nskb)
31531da177e4SLinus Torvalds 				return -ENOMEM;
3154e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
315575c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3156f4a775d1SEric Dumazet 			__skb_header_release(nskb);
315775c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
31583ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
31593ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
31601da177e4SLinus Torvalds 			skb = nskb;
31611da177e4SLinus Torvalds 		}
31621da177e4SLinus Torvalds 
31634de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3164735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
31651da177e4SLinus Torvalds 	}
3166dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
31671da177e4SLinus Torvalds }
31681da177e4SLinus Torvalds 
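/* Background sketch (hedged): the "crossed SYN" case handled above is TCP
 * simultaneous open.  User space can provoke it by having both ends bind()
 * to well-known ports and connect() to each other at roughly the same time:
 *
 *	// me/peer filled in elsewhere with the agreed addresses and ports
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&me, sizeof(me));
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * If the two SYNs cross in flight, each side answers with a SYN-ACK via
 * this path rather than through the listener path.
 */
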
31694aea39c1SEric Dumazet /**
31704aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
31714aea39c1SEric Dumazet  * @sk: listener socket
31724aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
31734aea39c1SEric Dumazet  * @req: request_sock pointer
 * @foc: cached Fast Open cookie, if any
 * @synack_type: normal SYN-ACK, syncookie variant, or Fast Open variant
31744aea39c1SEric Dumazet  *
31754aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
31764aea39c1SEric Dumazet  * @dst is consumed: caller should not use it again.
31774aea39c1SEric Dumazet  */
31785d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3179e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3180ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3181b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
31821da177e4SLinus Torvalds {
31832e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
31845d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
318580f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
31865d062de7SEric Dumazet 	struct tcp_out_options opts;
31875d062de7SEric Dumazet 	struct sk_buff *skb;
3188bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
31895d062de7SEric Dumazet 	struct tcphdr *th;
3190f5fff5dcSTom Quetchenbach 	int mss;
31911da177e4SLinus Torvalds 
3192ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
31934aea39c1SEric Dumazet 	if (unlikely(!skb)) {
31944aea39c1SEric Dumazet 		dst_release(dst);
31951da177e4SLinus Torvalds 		return NULL;
31964aea39c1SEric Dumazet 	}
31971da177e4SLinus Torvalds 	/* Reserve space for headers. */
31981da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
31991da177e4SLinus Torvalds 
3200b3d05147SEric Dumazet 	switch (synack_type) {
3201b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32029e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3203b3d05147SEric Dumazet 		break;
3204b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3205b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3206b3d05147SEric Dumazet 		 * to avoid false sharing.
3207b3d05147SEric Dumazet 		 */
3208b3d05147SEric Dumazet 		break;
3209b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3210ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3211ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3212ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3213ca6fb065SEric Dumazet 		 */
3214ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3215b3d05147SEric Dumazet 		break;
3216ca6fb065SEric Dumazet 	}
32174aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32181da177e4SLinus Torvalds 
32193541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3220f5fff5dcSTom Quetchenbach 
322133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
32228b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
32238b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
32249a568de4SEric Dumazet 		skb->skb_mstamp = cookie_init_timestamp(req);
32258b5f12d0SFlorian Westphal 	else
32268b5f12d0SFlorian Westphal #endif
32279a568de4SEric Dumazet 		skb->skb_mstamp = tcp_clock_us();
322880f03e27SEric Dumazet 
322980f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
323080f03e27SEric Dumazet 	rcu_read_lock();
3231fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
323280f03e27SEric Dumazet #endif
323358d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
323460e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
323560e2a778SUrsula Braun 					     foc) + sizeof(*th);
323633ad798cSAdam Langley 
3237aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3238aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
32391da177e4SLinus Torvalds 
3240ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32411da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
32421da177e4SLinus Torvalds 	th->syn = 1;
32431da177e4SLinus Torvalds 	th->ack = 1;
32446ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3245b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3246634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3247e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
32483b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
32493b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
32508336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
32518336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32521da177e4SLinus Torvalds 
32531da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3254ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
32555d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
32561da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
325790bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3258cfb6eeb4SYOSHIFUJI Hideaki 
3259cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3260cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
326180f03e27SEric Dumazet 	if (md5)
3262bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
326339f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
326480f03e27SEric Dumazet 	rcu_read_unlock();
3265cfb6eeb4SYOSHIFUJI Hideaki #endif
3266cfb6eeb4SYOSHIFUJI Hideaki 
3267b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any), clean our debris */
32682456e855SThomas Gleixner 	skb->tstamp = 0;
32691da177e4SLinus Torvalds 	return skb;
32701da177e4SLinus Torvalds }
32714bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
32721da177e4SLinus Torvalds 
327381164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
327481164413SDaniel Borkmann {
327581164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
327681164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
327781164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
327881164413SDaniel Borkmann 
327981164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
328081164413SDaniel Borkmann 		return;
328181164413SDaniel Borkmann 
328281164413SDaniel Borkmann 	rcu_read_lock();
328381164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
328481164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
328581164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
328681164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
328781164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
328881164413SDaniel Borkmann 	}
328981164413SDaniel Borkmann 	rcu_read_unlock();
329081164413SDaniel Borkmann }
329181164413SDaniel Borkmann 
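/* Usage note (hedged): the RTAX_CC_ALGO route metric consulted here is
 * typically installed from user space with iproute2, e.g.
 * "ip route replace ... congctl dctcp" (adding "lock" sets the
 * tcp_ca_dst_locked bit).  The per-socket analogue goes through
 * setsockopt():
 *
 *	// hedged sketch; the named module must be available and permitted
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic",
 *		   (socklen_t)strlen("cubic"));
 */
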
329267edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3293f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
32941da177e4SLinus Torvalds {
3295cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
32961da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32971da177e4SLinus Torvalds 	__u8 rcv_wscale;
329813d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
32991da177e4SLinus Torvalds 
33001da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33011da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33021da177e4SLinus Torvalds 	 */
33035d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33045d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33055d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33061da177e4SLinus Torvalds 
3307cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
330800db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3309cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3310cfb6eeb4SYOSHIFUJI Hideaki #endif
3311cfb6eeb4SYOSHIFUJI Hideaki 
33121da177e4SLinus Torvalds 	/* If the user set TCP_MAXSEG, record it as the clamp */
33131da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33141da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33151da177e4SLinus Torvalds 	tp->max_window = 0;
33165d424d5aSJohn Heffner 	tcp_mtup_init(sk);
33171da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
33181da177e4SLinus Torvalds 
331981164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
332081164413SDaniel Borkmann 
33211da177e4SLinus Torvalds 	if (!tp->window_clamp)
33221da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
33233541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3324f5fff5dcSTom Quetchenbach 
33251da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
33261da177e4SLinus Torvalds 
3327e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3328e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3329e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3330e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3331e88c64f0SHagen Paul Pfeifer 
333213d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
333313d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
333413d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
333513d3b1ebSLawrence Brakmo 
3336ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
33371da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33381da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33391da177e4SLinus Torvalds 				  &tp->window_clamp,
33409bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
334131d12926Slaurent chavey 				  &rcv_wscale,
334213d3b1ebSLawrence Brakmo 				  rcv_wnd);
33431da177e4SLinus Torvalds 
33441da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
33451da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
33461da177e4SLinus Torvalds 
33471da177e4SLinus Torvalds 	sk->sk_err = 0;
33481da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
33491da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3350ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
33517f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
33521da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
33531da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
335433f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3355370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3356ee995283SPavel Emelyanov 
3357ee995283SPavel Emelyanov 	if (likely(!tp->repair))
33581da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3359c7781a6eSAndrew Vagin 	else
336070eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3361ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3362ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
33631da177e4SLinus Torvalds 
33648550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3365463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
33661da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
33671da177e4SLinus Torvalds }
33681da177e4SLinus Torvalds 
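/* Illustrative user-space knob (hedged sketch): the tp->rx_opt.user_mss
 * consulted in tcp_connect_init() comes from TCP_MAXSEG set before
 * connect(); it becomes mss_clamp, an upper bound on the MSS we use and
 * advertise.
 *
 *	int mss = 1200;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 */
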
3369783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3370783237e8SYuchung Cheng {
3371783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3372783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3373783237e8SYuchung Cheng 
3374783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3375f4a775d1SEric Dumazet 	__skb_header_release(skb);
3376783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3377783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3378783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3379783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3380783237e8SYuchung Cheng }
3381783237e8SYuchung Cheng 
3382783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3383783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3384783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3385783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3386783237e8SYuchung Cheng  * If cookie is not cached or other error occurs, falls back to send a
3387783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3388783237e8SYuchung Cheng  * a regular SYN with a Fast Open cookie request option.
3389783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3390783237e8SYuchung Cheng {
3391783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3392783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3393065263f4SWei Wang 	int space, err = 0;
3394355a901eSEric Dumazet 	struct sk_buff *syn_data;
3395783237e8SYuchung Cheng 
339667da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3397065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3398783237e8SYuchung Cheng 		goto fallback;
3399783237e8SYuchung Cheng 
3400783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3401783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3402783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3403783237e8SYuchung Cheng 	 */
34043541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34053541f9e8SEric Dumazet 
34061b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3407783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3408783237e8SYuchung Cheng 
3409f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3410f5ddcbbbSEric Dumazet 
3411f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3412f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3413f5ddcbbbSEric Dumazet 
3414eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3415355a901eSEric Dumazet 	if (!syn_data)
3416783237e8SYuchung Cheng 		goto fallback;
3417355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3418355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
341907e100f9SEric Dumazet 	if (space) {
342007e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
342157be5bdaSAl Viro 					    &fo->data->msg_iter);
342257be5bdaSAl Viro 		if (unlikely(!copied)) {
3423ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3424355a901eSEric Dumazet 			kfree_skb(syn_data);
3425783237e8SYuchung Cheng 			goto fallback;
3426783237e8SYuchung Cheng 		}
342757be5bdaSAl Viro 		if (copied != space) {
342857be5bdaSAl Viro 			skb_trim(syn_data, copied);
342957be5bdaSAl Viro 			space = copied;
343057be5bdaSAl Viro 		}
343107e100f9SEric Dumazet 	}
3432355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3433355a901eSEric Dumazet 	if (space == fo->size)
3434355a901eSEric Dumazet 		fo->data = NULL;
3435355a901eSEric Dumazet 	fo->copied = space;
3436783237e8SYuchung Cheng 
3437355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34380f87230dSFrancis Yan 	if (syn_data->len)
34390f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3440355a901eSEric Dumazet 
3441355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3442355a901eSEric Dumazet 
3443355a901eSEric Dumazet 	syn->skb_mstamp = syn_data->skb_mstamp;
3444355a901eSEric Dumazet 
3445355a901eSEric Dumazet 	/* Now the full SYN+DATA has been cloned and sent (or not);
3446355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3447355a901eSEric Dumazet 	 * that we keep queued in case of a retransmit, as we
3448355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3449431a9124SEric Dumazet 	 */
3450355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3451355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3452355a901eSEric Dumazet 	if (!err) {
345367da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
345475c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3455f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3456783237e8SYuchung Cheng 		goto done;
3457783237e8SYuchung Cheng 	}
3458783237e8SYuchung Cheng 
345975c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
346075c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3461b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3462b5b7db8dSEric Dumazet 
3463783237e8SYuchung Cheng fallback:
3464783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3465783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3466783237e8SYuchung Cheng 		fo->cookie.len = 0;
3467783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3468783237e8SYuchung Cheng 	if (err)
3469783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3470783237e8SYuchung Cheng done:
3471783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3472783237e8SYuchung Cheng 	return err;
3473783237e8SYuchung Cheng }
3474783237e8SYuchung Cheng 
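/* Client-side usage (hedged sketch): the SYN+data path above is driven
 * from user space either with sendto(MSG_FASTOPEN) on a not-yet-connected
 * socket, or with the TCP_FASTOPEN_CONNECT socket option followed by a
 * regular connect()/write().  The classic form replaces connect():
 *
 *	// data rides on the SYN when a cookie is cached; otherwise the
 *	// stack falls back to a cookie-requesting SYN, as described above
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&peer, sizeof(peer));
 */
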
347567edfef7SAndi Kleen /* Build a SYN and send it off. */
34761da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
34771da177e4SLinus Torvalds {
34781da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34791da177e4SLinus Torvalds 	struct sk_buff *buff;
3480ee586811SEric Paris 	int err;
34811da177e4SLinus Torvalds 
3482de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
34838ba60924SEric Dumazet 
34848ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
34858ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
34868ba60924SEric Dumazet 
34871da177e4SLinus Torvalds 	tcp_connect_init(sk);
34881da177e4SLinus Torvalds 
34892b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
34902b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
34912b916477SAndrey Vagin 		return 0;
34922b916477SAndrey Vagin 	}
34932b916477SAndrey Vagin 
3494eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3495355a901eSEric Dumazet 	if (unlikely(!buff))
34961da177e4SLinus Torvalds 		return -ENOBUFS;
34971da177e4SLinus Torvalds 
3498a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
34999a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35009a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3501783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3502735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
350375c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35041da177e4SLinus Torvalds 
3505783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3506783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3507783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3508ee586811SEric Paris 	if (err == -ECONNREFUSED)
3509ee586811SEric Paris 		return err;
3510bd37a088SWei Yongjun 
3511bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3512bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3513bd37a088SWei Yongjun 	 */
3514bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3515bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3516b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3517b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3518b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3519b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3520b5b7db8dSEric Dumazet 	}
352181cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
35221da177e4SLinus Torvalds 
35231da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
35243f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35253f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
35261da177e4SLinus Torvalds 	return 0;
35271da177e4SLinus Torvalds }
35284bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
35291da177e4SLinus Torvalds 
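/* Related tunable (hedged sketch): the timer armed at the end of
 * tcp_connect() retransmits the SYN with exponential backoff.  The retry
 * count is bounded by the tcp_syn_retries sysctl, or per socket:
 *
 *	int syn_retries = 2;	// give up after ~2 SYN retransmissions
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &syn_retries,
 *		   sizeof(syn_retries));
 */
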
35301da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
35311da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
35321da177e4SLinus Torvalds  * for details.
35331da177e4SLinus Torvalds  */
35341da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
35351da177e4SLinus Torvalds {
3536463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3537463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35381da177e4SLinus Torvalds 	unsigned long timeout;
35391da177e4SLinus Torvalds 
35401da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3541463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
35421da177e4SLinus Torvalds 		int max_ato = HZ / 2;
35431da177e4SLinus Torvalds 
3544056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3545056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
35461da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
35471da177e4SLinus Torvalds 
35481da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
35491da177e4SLinus Torvalds 
35501da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3551463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use results of rtt measurements
35521da177e4SLinus Torvalds 		 * directly.
35531da177e4SLinus Torvalds 		 */
3554740b0f18SEric Dumazet 		if (tp->srtt_us) {
3555740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3556740b0f18SEric Dumazet 					TCP_DELACK_MIN);
35571da177e4SLinus Torvalds 
35581da177e4SLinus Torvalds 			if (rtt < max_ato)
35591da177e4SLinus Torvalds 				max_ato = rtt;
35601da177e4SLinus Torvalds 		}
35611da177e4SLinus Torvalds 
35621da177e4SLinus Torvalds 		ato = min(ato, max_ato);
35631da177e4SLinus Torvalds 	}
35641da177e4SLinus Torvalds 
35651da177e4SLinus Torvalds 	/* Stay within the limit we were given */
35661da177e4SLinus Torvalds 	timeout = jiffies + ato;
35671da177e4SLinus Torvalds 
35681da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3569463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
35701da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
35711da177e4SLinus Torvalds 		 * send ACK now.
35721da177e4SLinus Torvalds 		 */
3573463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3574463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
35751da177e4SLinus Torvalds 			tcp_send_ack(sk);
35761da177e4SLinus Torvalds 			return;
35771da177e4SLinus Torvalds 		}
35781da177e4SLinus Torvalds 
3579463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3580463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
35811da177e4SLinus Torvalds 	}
3582463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3583463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3584463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
35851da177e4SLinus Torvalds }
35861da177e4SLinus Torvalds 
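/* Related knob (hedged sketch): applications that cannot tolerate the
 * delayed ACKs scheduled above can request immediate ACKs with
 * TCP_QUICKACK.  The setting is not permanent; the stack may fall back
 * into pingpong/delayed mode later.
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */
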
35871da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
35882987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
35891da177e4SLinus Torvalds {
35901da177e4SLinus Torvalds 	struct sk_buff *buff;
35911da177e4SLinus Torvalds 
3592058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3593058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3594058dc334SIlpo Järvinen 		return;
3595058dc334SIlpo Järvinen 
35961da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
35971da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
35981da177e4SLinus Torvalds 	 * sock.
35991da177e4SLinus Torvalds 	 */
36007450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36017450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36027450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3603463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3604463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36053f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36063f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36071da177e4SLinus Torvalds 		return;
36081da177e4SLinus Torvalds 	}
36091da177e4SLinus Torvalds 
36101da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36111da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3612a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36131da177e4SLinus Torvalds 
361498781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
361598781965SEric Dumazet 	 * too much.
361698781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
361798781965SEric Dumazet 	 */
361898781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
361998781965SEric Dumazet 
36201da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
36212987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
36221da177e4SLinus Torvalds }
362327cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
36242987babbSYuchung Cheng 
36252987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
36262987babbSYuchung Cheng {
36272987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
36281da177e4SLinus Torvalds }
36291da177e4SLinus Torvalds 
36301da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
36311da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
36321da177e4SLinus Torvalds  *
36331da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
36341da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
36351da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
36361da177e4SLinus Torvalds  *
36371da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
36381da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
36391da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe the window.
36401da177e4SLinus Torvalds  */
3641e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
36421da177e4SLinus Torvalds {
36431da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36441da177e4SLinus Torvalds 	struct sk_buff *skb;
36451da177e4SLinus Torvalds 
36461da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
36477450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
36487450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
364951456b29SIan Morris 	if (!skb)
36501da177e4SLinus Torvalds 		return -1;
36511da177e4SLinus Torvalds 
36521da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
36531da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36541da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
36551da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
36561da177e4SLinus Torvalds 	 * send it.
36571da177e4SLinus Torvalds 	 */
3658a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3659e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
36607450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
36611da177e4SLinus Torvalds }
36621da177e4SLinus Torvalds 
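/* Worked illustration (hedged) of the probe built above with urgent == 0:
 * the segment is zero length and uses SEG.SEQ = SND.UNA - 1, a sequence
 * number the receiver has already acknowledged.  The receiver drops it as
 * a duplicate but must answer, and the answering ACK carries the current
 * window:
 *
 *	SND.UNA == 1001, peer has advertised a zero window
 *	-> probe:  SEQ = 1000, LEN = 0
 *	<- reply:  ACK = 1001, WND = <current receive window>
 */
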
3663385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3664ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3665ee995283SPavel Emelyanov {
3666ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3667ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
36689a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3669e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3670ee995283SPavel Emelyanov 	}
3671ee995283SPavel Emelyanov }
3672ee995283SPavel Emelyanov 
367367edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3674e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
36751da177e4SLinus Torvalds {
36761da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36771da177e4SLinus Torvalds 	struct sk_buff *skb;
36781da177e4SLinus Torvalds 
3679058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3680058dc334SIlpo Järvinen 		return -1;
3681058dc334SIlpo Järvinen 
368200db4124SIan Morris 	skb = tcp_send_head(sk);
368300db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
36841da177e4SLinus Torvalds 		int err;
36850c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
368690840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
36871da177e4SLinus Torvalds 
36881da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
36891da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
36901da177e4SLinus Torvalds 
36911da177e4SLinus Torvalds 		/* We are probing the opening of a window
36921da177e4SLinus Torvalds 		 * but the window size is != 0;
36931da177e4SLinus Torvalds 		 * this must be a result of sender-side SWS avoidance.
36941da177e4SLinus Torvalds 		 */
36951da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
36961da177e4SLinus Torvalds 		    skb->len > mss) {
36971da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
36984de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
369975c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
370075c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37011da177e4SLinus Torvalds 				return -1;
37021da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37035bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37041da177e4SLinus Torvalds 
37054de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3706dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
370766f5fe62SIlpo Järvinen 		if (!err)
370866f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37091da177e4SLinus Torvalds 		return err;
37101da177e4SLinus Torvalds 	} else {
371133f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3712e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3713e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37141da177e4SLinus Torvalds 	}
37151da177e4SLinus Torvalds }
37161da177e4SLinus Torvalds 
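/* Related user-space configuration (hedged sketch): the keepalive probes
 * issued through tcp_write_wakeup() are enabled per socket with
 * SO_KEEPALIVE; the system-wide timing defaults (tcp_keepalive_time,
 * tcp_keepalive_intvl, tcp_keepalive_probes) can be overridden per socket:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */
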
37171da177e4SLinus Torvalds /* A window probe timeout has occurred.  If window is not closed send
37181da177e4SLinus Torvalds  * a partial packet else a zero probe.
37191da177e4SLinus Torvalds  */
37201da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
37211da177e4SLinus Torvalds {
3722463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
37231da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3724c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3725fcdd1cf4SEric Dumazet 	unsigned long probe_max;
37261da177e4SLinus Torvalds 	int err;
37271da177e4SLinus Torvalds 
3728e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
37291da177e4SLinus Torvalds 
373075c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
37311da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
37326687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3733463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
37341da177e4SLinus Torvalds 		return;
37351da177e4SLinus Torvalds 	}
37361da177e4SLinus Torvalds 
37371da177e4SLinus Torvalds 	if (err <= 0) {
3738c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3739463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
37406687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3741fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
37421da177e4SLinus Torvalds 	} else {
37431da177e4SLinus Torvalds 		/* If packet was not sent due to local congestion,
37446687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
37451da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
37461da177e4SLinus Torvalds 		 *
37471da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
37481da177e4SLinus Torvalds 		 */
37496687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
37506687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3751fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
37521da177e4SLinus Torvalds 	}
3753fcdd1cf4SEric Dumazet 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
375421c8fe99SEric Dumazet 				  tcp_probe0_when(sk, probe_max),
3755fcdd1cf4SEric Dumazet 				  TCP_RTO_MAX);
37561da177e4SLinus Torvalds }
37575db92c99SOctavian Purdila 
3758ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
37595db92c99SOctavian Purdila {
37605db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
37615db92c99SOctavian Purdila 	struct flowi fl;
37625db92c99SOctavian Purdila 	int res;
37635db92c99SOctavian Purdila 
376458d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3765b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
37665db92c99SOctavian Purdila 	if (!res) {
376790bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
376802a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
37697e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
37707e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3771cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
37725db92c99SOctavian Purdila 	}
37735db92c99SOctavian Purdila 	return res;
37745db92c99SOctavian Purdila }
37755db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3776