xref: /linux/net/ipv4/tcp_output.c (revision 2bec445f9bf35e52e395b971df48d3e1e5dc704a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	if (tp->highest_sack == NULL)
		tp->highest_sack = skb;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if the window was not shrunk or the amount shrunk was less than one
 * window scaling factor due to loss of precision.
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
 * invalid. OK, let's do this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

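/* Illustrative sketch (not part of the build): the before()/after() tests
 * used above rely on wraparound-safe 32-bit sequence arithmetic, i.e. a
 * signed comparison of the difference, roughly as below.
 */
#if 0
static inline bool example_seq_before(u32 seq1, u32 seq2)
{
	/* true when seq1 precedes seq2 modulo 2^32 */
	return (s32)(seq1 - seq2) < 0;
}
#endif
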
/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}

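/* Hypothetical standalone demo (not built) of the decay loop above:
 * starting from cwnd = 40 with restart_cwnd = 10, an idle time of three
 * RTOs halves cwnd twice (40 -> 20 -> 10) before the loop stops.
 */
#if 0
static u32 example_cwnd_after_idle(u32 cwnd, u32 restart_cwnd,
				   s32 delta, u32 rto)
{
	while ((delta -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return max(cwnd, restart_cwnd);
}
#endif
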
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* If this is the first data packet sent in response to the
	 * previously received data, and it is sent within ato of the
	 * last received packet, increase the pingpong count.
	 */
	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);

	tp->lsndtime = now;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);

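/* Hypothetical demo (not built) of the scaling math above: with 4 MB of
 * receive space, ilog2(space) = 22, so rcv_wscale = clamp(22 - 15, 0, 14)
 * = 7, and the advertised 16-bit window is interpreted in 128-byte units.
 */
#if 0
static u8 example_rcv_wscale(u32 space)
{
	return clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE);
}
#endif
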
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}

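/* Worked example (hypothetical numbers, not built) of the scaling shift
 * above: with rcv_wscale = 7, an internal window of 1048576 bytes goes on
 * the wire as th->window = 1048576 >> 7 = 8192.
 */
#if 0
static u16 example_wire_window(u32 new_win, u8 rcv_wscale)
{
	return new_win >> rcv_wscale;
}
#endif
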
/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

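/* Hypothetical usage sketch (not built): a SYN or FIN consumes one
 * sequence number, so for a bare SYN at iss the helper above yields
 * seq = iss and end_seq = iss + 1.
 */
#if 0
static void example_init_syn(struct sk_buff *skb, u32 iss)
{
	tcp_init_nondata_skb(skb, iss, TCPHDR_SYN);
	/* TCP_SKB_CB(skb)->end_seq is now iss + 1 */
}
#endif
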
static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);
}

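/* Illustrative sketch (not built): each option word above packs four
 * bytes MSB-first via htonl(). A standalone timestamp option, for
 * instance, starts with NOP (1), NOP (1), kind (8) and length (10),
 * i.e. the bytes 01 01 08 0a on the wire.
 */
#if 0
static __be32 example_tsopt_first_word(void)
{
	return htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
}
#endif
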
static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets (should timestamps be used) must be included in
	 * the MSS advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

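/* Hypothetical budget walk (not built), following the accounting above:
 * out of MAX_TCP_OPTION_SPACE (40) bytes, MSS takes 4, timestamps 12 and
 * window scale 4; SACK_PERM rides in the timestamp words, leaving
 * 40 - 4 - 12 - 4 = 20 bytes for a Fast Open cookie.
 */
#if 0
static unsigned int example_syn_budget(void)
{
	unsigned int remaining = MAX_TCP_OPTION_SPACE;	/* 40 */

	remaining -= TCPOLEN_MSS_ALIGNED;		/* -> 36 */
	remaining -= TCPOLEN_TSTAMP_ALIGNED;		/* -> 24 */
	remaining -= TCPOLEN_WSCALE_ALIGNED;		/* -> 20 */
	return remaining;	/* room left for a TFO cookie */
}
#endif
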
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		if (likely(opts->num_sack_blocks))
			size += TCPOLEN_SACK_BASE_ALIGNED +
				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

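/* Hypothetical sizing (not built), mirroring the math above: with
 * timestamps in use, size = 12, so remaining = 40 - 12 = 28 and at most
 * (28 - 4) / 8 = 3 SACK blocks fit, bringing the total to 40 bytes.
 */
#if 0
static unsigned int example_max_sack_blocks_with_ts(void)
{
	unsigned int remaining = MAX_TCP_OPTION_SPACE - TCPOLEN_TSTAMP_ALIGNED;

	return (remaining - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK;
}
#endif
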
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non-NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but early release socket ownership
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

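/* Minimal sketch (not built) of the lock-free claim performed in
 * tcp_release_cb() above: read the flag word, clear the deferred bits
 * with cmpxchg(), and retry if another context raced with us.
 */
#if 0
static unsigned long example_claim_bits(unsigned long *word,
					unsigned long mask)
{
	unsigned long flags, nflags;

	do {
		flags = *word;
		if (!(flags & mask))
			return 0;
		nflags = flags & ~mask;
	} while (cmpxchg(word, flags, nflags) != flags);

	return flags & mask;	/* bits this caller now owns */
}
#endif
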
void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under soft irq.
 * We can call TCP stack right away, unless socket is owned by user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace first 10 MSS
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}

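/* Worked example (hypothetical numbers, not built) of the pacing advance
 * above: a 64 KB TSO skb at sk_pacing_rate = 125000000 B/s (1 Gbit/s)
 * yields len_ns = 65536 * NSEC_PER_SEC / 125000000 = 524288 ns, so
 * tcp_wstamp_ns moves ~524 us forward, minus any OS-jitter credit.
 */
#if 0
static u64 example_pacing_len_ns(u64 bytes, unsigned long rate)
{
	return div64_ul(bytes * NSEC_PER_SEC, rate);
}
#endif
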
/* This routine actually transmits TCP packets queued up by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct sk_buff *oskb = NULL;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	u64 prior_wstamp;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);
	prior_wstamp = tp->tcp_wstamp_ns;
	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		oskb = skb;

		tcp_skb_tsorted_save(oskb) {
			if (unlikely(skb_cloned(oskb)))
				skb = pskb_copy(oskb, gfp_mask);
			else
				skb = skb_clone(oskb, gfp_mask);
		} tcp_skb_tsorted_restore(oskb);

		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	} else {
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
1062051ba674SEric Dumazet 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1063051ba674SEric Dumazet 		 * at receiver: this slightly improves GRO performance.
1064051ba674SEric Dumazet 		 * Note that we do not force the PSH flag for non GSO packets,
1065051ba674SEric Dumazet 		 * because they might be sent under high congestion events,
1066051ba674SEric Dumazet 		 * and in this case it is better to delay the delivery of 1-MSS
1067051ba674SEric Dumazet 		 * packets and thus the corresponding ACK packet that would
1068051ba674SEric Dumazet 		 * release the following packet.
1069051ba674SEric Dumazet 		 */
1070051ba674SEric Dumazet 		if (tcp_skb_pcount(skb) > 1)
1071051ba674SEric Dumazet 			tcb->tcp_flags |= TCPHDR_PSH;
1072051ba674SEric Dumazet 	}
107333ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10741da177e4SLinus Torvalds 
1075547669d4SEric Dumazet 	/* if no packet is in qdisc/device queue, then allow XPS to select
1076b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
107773a6bab5SEric Dumazet 	 * which holds one reference to sk.
1078b2532eb9SEric Dumazet 	 *
1079b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1080b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1081547669d4SEric Dumazet 	 */
1082b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10831da177e4SLinus Torvalds 
108438ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
108538ab52e8SEric Dumazet 	 * this might cause drops if packet is looped back :
108638ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
108738ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
108838ab52e8SEric Dumazet 	 */
108938ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
109038ab52e8SEric Dumazet 
1091aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1092aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
109346d3ceabSEric Dumazet 
109446d3ceabSEric Dumazet 	skb_orphan(skb);
109546d3ceabSEric Dumazet 	skb->sk = sk;
10961d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1097b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
109814afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
10991da177e4SLinus Torvalds 
1100c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1101c3a2e837SJulian Anastasov 
11021da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1103ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1104c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1105c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
11061da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
11072987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1108df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
11094de075e0SEric Dumazet 					tcb->tcp_flags);
1110dfb4b9dcSDavid S. Miller 
11111da177e4SLinus Torvalds 	th->check		= 0;
11121da177e4SLinus Torvalds 	th->urg_ptr		= 0;
11131da177e4SLinus Torvalds 
111433f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
11157691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11167691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11171da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11181da177e4SLinus Torvalds 			th->urg = 1;
11197691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11200eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11217691367dSHerbert Xu 			th->urg = 1;
11227691367dSHerbert Xu 		}
11231da177e4SLinus Torvalds 	}
11241da177e4SLinus Torvalds 
1125bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
112651466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1127ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1128ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1129ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1130ea1627c2SEric Dumazet 	} else {
1131ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1132ea1627c2SEric Dumazet 		 * is never scaled.
1133ea1627c2SEric Dumazet 		 */
1134ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1135ea1627c2SEric Dumazet 	}
1136cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1137cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1138cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1139a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1140bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
114139f8e58eSEric Dumazet 					       md5, sk, skb);
1142cfb6eeb4SYOSHIFUJI Hideaki 	}
1143cfb6eeb4SYOSHIFUJI Hideaki #endif
1144cfb6eeb4SYOSHIFUJI Hideaki 
1145bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11461da177e4SLinus Torvalds 
11474de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
114827cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11491da177e4SLinus Torvalds 
1150a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1151cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1152a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1153ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1154a44d6eacSMartin KaFai Lau 	}
11551da177e4SLinus Torvalds 
1156bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1157aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1158aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11591da177e4SLinus Torvalds 
11602efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1161f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1162cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1163f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1164cd7d8498SEric Dumazet 
1165d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1166971f10ecSEric Dumazet 
1167971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1168971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1169971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1170971f10ecSEric Dumazet 
1171a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1172a842fe14SEric Dumazet 
1173b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11747faee5c0SEric Dumazet 
11758c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11765ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11778c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11788c72c65bSEric Dumazet 	}
1179fc225799SEric Dumazet 	if (!err && oskb) {
1180a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1181fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1182fc225799SEric Dumazet 	}
11838c72c65bSEric Dumazet 	return err;
11841da177e4SLinus Torvalds }
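
/* Editorial sketch (not kernel code): how __tcp_transmit_skb() packs the
 * data offset and flag bits into the 7th 16-bit word of the TCP header
 * (the "*(((__be16 *)th) + 6)" store above, before byte-swapping). The
 * TCPHDR_* values match the kernel's flag definitions; the header size
 * is a hypothetical example.
 */
#include <stdint.h>
#include <stdio.h>

#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10

int main(void)
{
	unsigned int tcp_header_size = 32;	/* 20B base + 12B of options */
	uint8_t tcp_flags = TCPHDR_ACK | TCPHDR_PSH;

	/* doff counts 32-bit words and occupies the top 4 bits */
	uint16_t word = ((tcp_header_size >> 2) << 12) | tcp_flags;

	printf("doff/flags word: 0x%04x\n", word);	/* prints 0x8018 */
	return 0;
}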
11851da177e4SLinus Torvalds 
11862987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11872987babbSYuchung Cheng 			    gfp_t gfp_mask)
11882987babbSYuchung Cheng {
11892987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11902987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11912987babbSYuchung Cheng }
11922987babbSYuchung Cheng 
119367edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11941da177e4SLinus Torvalds  *
11951da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11961da177e4SLinus Torvalds  * otherwise socket can stall.
11971da177e4SLinus Torvalds  */
11981da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11991da177e4SLinus Torvalds {
12001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12011da177e4SLinus Torvalds 
12021da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
12030f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1204f4a775d1SEric Dumazet 	__skb_header_release(skb);
1205fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
1206ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
12073ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
12081da177e4SLinus Torvalds }
12091da177e4SLinus Torvalds 
121067edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
12115bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1212f6302d1dSDavid S. Miller {
12134a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1214f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1215f6302d1dSDavid S. Miller 		 * non-TSO case.
1216f6302d1dSDavid S. Miller 		 */
1217cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1218f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1219f6302d1dSDavid S. Miller 	} else {
1220cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1221f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12221da177e4SLinus Torvalds 	}
12231da177e4SLinus Torvalds }
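
/* Editorial sketch (not kernel code): the pcount choice made by
 * tcp_set_skb_tso_segs() above. A 4000-byte payload with mss_now = 1448
 * yields DIV_ROUND_UP(4000, 1448) = 3 segments and gso_size = 1448; a
 * packet at or under one MSS keeps pcount 1 and gso_size 0, skipping
 * the costly divide.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 4000, mss_now = 1448;
	unsigned int pcount = len <= mss_now ? 1 : DIV_ROUND_UP(len, mss_now);
	unsigned int gso_size = len <= mss_now ? 0 : mss_now;

	printf("pcount=%u gso_size=%u\n", pcount, gso_size);	/* 3, 1448 */
	return 0;
}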
12241da177e4SLinus Torvalds 
1225797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various
1226797108d1SIlpo Järvinen  * tweaks to fix counters
1227797108d1SIlpo Järvinen  */
1228cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1229797108d1SIlpo Järvinen {
1230797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1231797108d1SIlpo Järvinen 
1232797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1233797108d1SIlpo Järvinen 
1234797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1235797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1236797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1237797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1238797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1239797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1240797108d1SIlpo Järvinen 
1241797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1242797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1243797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1244797108d1SIlpo Järvinen 
1245797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1246797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1247713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1248797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1249797108d1SIlpo Järvinen 
1250797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1251797108d1SIlpo Järvinen }
1252797108d1SIlpo Järvinen 
12530a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12540a2cf20cSSoheil Hassas Yeganeh {
12550a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12560a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12570a2cf20cSSoheil Hassas Yeganeh }
12580a2cf20cSSoheil Hassas Yeganeh 
1259490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1260490cc7d0SWillem de Bruijn {
1261490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1262490cc7d0SWillem de Bruijn 
12630a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1264490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1265490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1266490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1267490cc7d0SWillem de Bruijn 
1268490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1269490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1270490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1271b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1272b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1273490cc7d0SWillem de Bruijn 	}
1274490cc7d0SWillem de Bruijn }
1275490cc7d0SWillem de Bruijn 
1276a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1277a166140eSMartin KaFai Lau {
1278a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1279a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1280a166140eSMartin KaFai Lau }
1281a166140eSMartin KaFai Lau 
128275c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
128375c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
128475c119afSEric Dumazet 					 struct sk_buff *buff,
128575c119afSEric Dumazet 					 struct sock *sk,
128675c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
128775c119afSEric Dumazet {
128875c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
128975c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
129075c119afSEric Dumazet 	else
129175c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
129275c119afSEric Dumazet }
129375c119afSEric Dumazet 
12941da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12951da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12961da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12971da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12981da177e4SLinus Torvalds  */
129975c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
130075c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
13016cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
13021da177e4SLinus Torvalds {
13031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13041da177e4SLinus Torvalds 	struct sk_buff *buff;
13056475be16SDavid S. Miller 	int nsize, old_factor;
1306b617158dSEric Dumazet 	long limit;
1307b60b49eaSHerbert Xu 	int nlen;
13089ce01461SIlpo Järvinen 	u8 flags;
13091da177e4SLinus Torvalds 
13102fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
13112fceec13SIlpo Järvinen 		return -EINVAL;
13126a438bbeSStephen Hemminger 
13131da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
13141da177e4SLinus Torvalds 	if (nsize < 0)
13151da177e4SLinus Torvalds 		nsize = 0;
13161da177e4SLinus Torvalds 
1317b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1318b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1319b617158dSEric Dumazet 	 * SO_SNDBUF values.
1320b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1321b617158dSEric Dumazet 	 */
1322b617158dSEric Dumazet 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1323b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1324b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1325b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1326b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1327f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1328f070ef2aSEric Dumazet 		return -ENOMEM;
1329f070ef2aSEric Dumazet 	}
1330f070ef2aSEric Dumazet 
13316cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13321da177e4SLinus Torvalds 		return -ENOMEM;
13331da177e4SLinus Torvalds 
13341da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1335eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
133651456b29SIan Morris 	if (!buff)
13371da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
133841477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
1339ef5cb973SHerbert Xu 
1340ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
13413ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1342b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1343b60b49eaSHerbert Xu 	buff->truesize += nlen;
1344b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13451da177e4SLinus Torvalds 
13461da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13471da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13481da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13491da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13501da177e4SLinus Torvalds 
13511da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13524de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13534de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13544de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1355e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1356a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13571da177e4SLinus Torvalds 
13581da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13591da177e4SLinus Torvalds 
136098be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13611da177e4SLinus Torvalds 
1362a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1363490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13641da177e4SLinus Torvalds 
13656475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13666475be16SDavid S. Miller 
13671da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13685bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13695bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13701da177e4SLinus Torvalds 
1371b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1372b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1373b9f64820SYuchung Cheng 
13746475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13756475be16SDavid S. Miller 	 * adjust the various packet counters.
13766475be16SDavid S. Miller 	 */
1377cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13786475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13796475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13801da177e4SLinus Torvalds 
1381797108d1SIlpo Järvinen 		if (diff)
1382797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13831da177e4SLinus Torvalds 	}
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1386f4a775d1SEric Dumazet 	__skb_header_release(buff);
138775c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1388f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1389e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13901da177e4SLinus Torvalds 
13911da177e4SLinus Torvalds 	return 0;
13921da177e4SLinus Torvalds }
13931da177e4SLinus Torvalds 
1394f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1395f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13961da177e4SLinus Torvalds  */
13977162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13981da177e4SLinus Torvalds {
13997b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
14001da177e4SLinus Torvalds 	int i, k, eat;
14011da177e4SLinus Torvalds 
14024fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
14034fa48bf3SEric Dumazet 	if (eat) {
14044fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
14054fa48bf3SEric Dumazet 		len -= eat;
14064fa48bf3SEric Dumazet 		if (!len)
14077162fb24SEric Dumazet 			return 0;
14084fa48bf3SEric Dumazet 	}
14091da177e4SLinus Torvalds 	eat = len;
14101da177e4SLinus Torvalds 	k = 0;
14117b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
14127b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
14137b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
14149e903e08SEric Dumazet 
14159e903e08SEric Dumazet 		if (size <= eat) {
1416aff65da0SIan Campbell 			skb_frag_unref(skb, i);
14179e903e08SEric Dumazet 			eat -= size;
14181da177e4SLinus Torvalds 		} else {
14197b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
14201da177e4SLinus Torvalds 			if (eat) {
1421b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[k], eat);
14227b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
14231da177e4SLinus Torvalds 				eat = 0;
14241da177e4SLinus Torvalds 			}
14251da177e4SLinus Torvalds 			k++;
14261da177e4SLinus Torvalds 		}
14271da177e4SLinus Torvalds 	}
14287b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds 	skb->data_len -= len;
14311da177e4SLinus Torvalds 	skb->len = skb->data_len;
14327162fb24SEric Dumazet 	return len;
14331da177e4SLinus Torvalds }
14341da177e4SLinus Torvalds 
143567edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14361da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14371da177e4SLinus Torvalds {
14387162fb24SEric Dumazet 	u32 delta_truesize;
14397162fb24SEric Dumazet 
144014bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14411da177e4SLinus Torvalds 		return -ENOMEM;
14421da177e4SLinus Torvalds 
14437162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
144684fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14471da177e4SLinus Torvalds 
14487162fb24SEric Dumazet 	if (delta_truesize) {
14497162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
1450ab4e846aSEric Dumazet 		sk_wmem_queued_add(sk, -delta_truesize);
14517162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14521da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14537162fb24SEric Dumazet 	}
14541da177e4SLinus Torvalds 
14555b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14561da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14575bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14581da177e4SLinus Torvalds 
14591da177e4SLinus Torvalds 	return 0;
14601da177e4SLinus Torvalds }
14611da177e4SLinus Torvalds 
14621b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
14631b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14645d424d5aSJohn Heffner {
1465cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1466cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14675d424d5aSJohn Heffner 	int mss_now;
14685d424d5aSJohn Heffner 
14695d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14705d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
14715d424d5aSJohn Heffner 	 */
14725d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14735d424d5aSJohn Heffner 
147467469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
147567469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
147667469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
147767469601SEric Dumazet 
147867469601SEric Dumazet 		if (dst && dst_allfrag(dst))
147967469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
148067469601SEric Dumazet 	}
148167469601SEric Dumazet 
14825d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14835d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14845d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14855d424d5aSJohn Heffner 
14865d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14875d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14885d424d5aSJohn Heffner 
14895d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14905f3e2bf0SEric Dumazet 	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
14915d424d5aSJohn Heffner 	return mss_now;
14925d424d5aSJohn Heffner }
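
/* Editorial sketch (not kernel code): the base-MSS arithmetic of
 * __tcp_mtu_to_mss() above for plain IPv4 with no extension headers.
 * The 20-byte IPv4 and TCP header sizes are standard; mss_clamp and the
 * min_snd_mss floor are hypothetical example settings.
 */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500, net_header_len = 20, tcphdr_len = 20;
	int ext_hdr_len = 0, mss_clamp = 65535, min_snd_mss = 48;
	int mss_now = pmtu - net_header_len - tcphdr_len;

	if (mss_now > mss_clamp)
		mss_now = mss_clamp;
	mss_now -= ext_hdr_len;
	if (mss_now < min_snd_mss)
		mss_now = min_snd_mss;

	printf("mss_now=%d\n", mss_now);	/* prints 1460 */
	return 0;
}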
14935d424d5aSJohn Heffner 
14941b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14951b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14961b63edd6SYuchung Cheng {
14971b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14981b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14991b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
15001b63edd6SYuchung Cheng }
15011b63edd6SYuchung Cheng 
15025d424d5aSJohn Heffner /* Inverse of above */
150367469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
15045d424d5aSJohn Heffner {
1505cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1506cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
15075d424d5aSJohn Heffner 	int mtu;
15085d424d5aSJohn Heffner 
15095d424d5aSJohn Heffner 	mtu = mss +
15105d424d5aSJohn Heffner 	      tp->tcp_header_len +
15115d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
15125d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
15135d424d5aSJohn Heffner 
151467469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
151567469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
151667469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
151767469601SEric Dumazet 
151867469601SEric Dumazet 		if (dst && dst_allfrag(dst))
151967469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
152067469601SEric Dumazet 	}
15215d424d5aSJohn Heffner 	return mtu;
15225d424d5aSJohn Heffner }
1523556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
15245d424d5aSJohn Heffner 
152567edfef7SAndi Kleen /* MTU probing init per socket */
15265d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
15275d424d5aSJohn Heffner {
15285d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15295d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1530b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15315d424d5aSJohn Heffner 
1532b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15335d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15345d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1535b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15365d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
153705cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1538c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15395d424d5aSJohn Heffner }
15404bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15415d424d5aSJohn Heffner 
15421da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15451da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1548caa20d9aSStephen Hemminger    It is minimum of user_mss and mss received with SYN.
15491da177e4SLinus Torvalds    It also does not include TCP options.
15501da177e4SLinus Torvalds 
1551d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15541da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15551da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15561da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15591da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15601da177e4SLinus Torvalds 
1561d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1562d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15631da177e4SLinus Torvalds  */
15641da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15651da177e4SLinus Torvalds {
15661da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1567d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15685d424d5aSJohn Heffner 	int mss_now;
15691da177e4SLinus Torvalds 
15705d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15715d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15721da177e4SLinus Torvalds 
15735d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1574409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds 	/* And store cached results */
1577d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15785d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15795d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1580c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15811da177e4SLinus Torvalds 
15821da177e4SLinus Torvalds 	return mss_now;
15831da177e4SLinus Torvalds }
15844bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15871da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15881da177e4SLinus Torvalds  */
15890c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15901da177e4SLinus Torvalds {
1591cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1592cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1593c1b4a7e6SDavid S. Miller 	u32 mss_now;
159495c96174SEric Dumazet 	unsigned int header_len;
159533ad798cSAdam Langley 	struct tcp_out_options opts;
159633ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15971da177e4SLinus Torvalds 
1598c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1599c1b4a7e6SDavid S. Miller 
16001da177e4SLinus Torvalds 	if (dst) {
16011da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1602d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
16031da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
16041da177e4SLinus Torvalds 	}
16051da177e4SLinus Torvalds 
160633ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
160733ad798cSAdam Langley 		     sizeof(struct tcphdr);
160833ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
160933ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
161033ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
161133ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
161233ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
161333ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
161433ad798cSAdam Langley 		mss_now -= delta;
161533ad798cSAdam Langley 	}
1616cfb6eeb4SYOSHIFUJI Hideaki 
16171da177e4SLinus Torvalds 	return mss_now;
16181da177e4SLinus Torvalds }
16191da177e4SLinus Torvalds 
162086fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
162186fd14adSWeiping Pan  * As additional protection, we do not touch cwnd during retransmission
162286fd14adSWeiping Pan  * phases, or if the application hit its sndbuf limit recently.
162386fd14adSWeiping Pan  */
162486fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1625a762a980SDavid S. Miller {
16269e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1627a762a980SDavid S. Miller 
162886fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
162986fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
163086fd14adSWeiping Pan 		/* Limited by application or receiver window. */
163186fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
163286fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
163386fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
163486fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
163586fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
163686fd14adSWeiping Pan 		}
163786fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
163886fd14adSWeiping Pan 	}
1639c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
164086fd14adSWeiping Pan }
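
/* Editorial sketch (not kernel code): the RFC2861 decay applied above.
 * With hypothetical numbers cwnd = 100 but at most 12 packets ever in
 * flight (and init_win = 10), cwnd shrinks halfway toward actual usage:
 * (100 + 12) / 2 = 56.
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 100, snd_cwnd_used = 12, init_win = 10;
	unsigned int win_used = snd_cwnd_used > init_win ? snd_cwnd_used
							 : init_win;

	if (win_used < snd_cwnd)
		snd_cwnd = (snd_cwnd + win_used) >> 1;

	printf("new cwnd=%u\n", snd_cwnd);	/* prints 56 */
	return 0;
}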
164186fd14adSWeiping Pan 
1642ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1643a762a980SDavid S. Miller {
16441b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1645a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1646a762a980SDavid S. Miller 
1647ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1648ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1649ca8a2263SNeal Cardwell 	 */
1650ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1651ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1652ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1653ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1654ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1655ca8a2263SNeal Cardwell 	}
1656e114a710SEric Dumazet 
165724901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1658a762a980SDavid S. Miller 		/* Network is fed fully. */
1659a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1660c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1661a762a980SDavid S. Miller 	} else {
1662a762a980SDavid S. Miller 		/* Network starves. */
1663a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1664a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1665a762a980SDavid S. Miller 
1666b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1667c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16681b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1669a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1670b0f71bd3SFrancis Yan 
1671b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1672b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1673b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1674b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
167575c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1676b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1677b0f71bd3SFrancis Yan 		 */
167875c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1679b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1680b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1681b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1682a762a980SDavid S. Miller 	}
1683a762a980SDavid S. Miller }
1684a762a980SDavid S. Miller 
1685d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1686d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1687d4589926SEric Dumazet {
1688d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1689d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1690d4589926SEric Dumazet }
1691d4589926SEric Dumazet 
1692d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1693d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1694d4589926SEric Dumazet  * The test is really :
1695d4589926SEric Dumazet  * The test is really:
1696d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1697d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1698d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16990e3a4803SIlpo Järvinen  */
1700d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1701d4589926SEric Dumazet 				const struct sk_buff *skb)
1702d4589926SEric Dumazet {
1703d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1704d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1705d4589926SEric Dumazet }
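
/* Editorial sketch (not kernel code): the divide-free sub-mss test used
 * by tcp_minshall_update() above. skb->len = 3000 with mss_now = 1448
 * gives pcount = 3, and 3000 < 3 * 1448 reveals the sub-mss tail
 * without ever computing 3000 % 1448.
 */
#include <stdio.h>

int main(void)
{
	unsigned int len = 3000, mss_now = 1448;
	unsigned int pcount = (len + mss_now - 1) / mss_now;

	printf("sub-mss tail: %s\n", len < pcount * mss_now ? "yes" : "no");
	return 0;
}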
1706d4589926SEric Dumazet 
1707d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1708d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1709d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1710d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1711d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1712d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1713d4589926SEric Dumazet  */
1714d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1715cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1716d4589926SEric Dumazet {
1717d4589926SEric Dumazet 	return partial &&
1718d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1719d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1720d4589926SEric Dumazet }
1721605ad7f1SEric Dumazet 
1722605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1723605ad7f1SEric Dumazet  * to send one TSO packet per ms
1724605ad7f1SEric Dumazet  */
1725dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
17261b3878caSNeal Cardwell 			    int min_tso_segs)
1727605ad7f1SEric Dumazet {
1728605ad7f1SEric Dumazet 	u32 bytes, segs;
1729605ad7f1SEric Dumazet 
173076a9ebe8SEric Dumazet 	bytes = min_t(unsigned long,
17317c68fa2bSEric Dumazet 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
1732605ad7f1SEric Dumazet 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1733605ad7f1SEric Dumazet 
1734605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1735605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1736605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1737605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1738605ad7f1SEric Dumazet 	 */
17391b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1740605ad7f1SEric Dumazet 
1741350c9f48SEric Dumazet 	return segs;
1742605ad7f1SEric Dumazet }
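
/* Editorial sketch (not kernel code): the ~1 ms burst budget computed by
 * tcp_tso_autosize() above. A 12.5 MB/s (100 Mbit/s) pacing rate with
 * the default pacing shift of 10 budgets 12207 bytes, i.e. 8 segments
 * of mss 1448; min_tso_segs and the size cap are hypothetical example
 * settings.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pacing_rate = 12500000;	/* bytes per second */
	unsigned int pacing_shift = 10, mss_now = 1448, min_tso_segs = 2;
	unsigned long cap = 65536 - 1 - 320;	/* ~ gso_max_size - 1 - MAX_TCP_HEADER */
	unsigned long bytes = pacing_rate >> pacing_shift;
	unsigned int segs;

	if (bytes > cap)
		bytes = cap;
	segs = bytes / mss_now;
	if (segs < min_tso_segs)
		segs = min_tso_segs;

	printf("tso segs=%u\n", segs);	/* prints 8 */
	return 0;
}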
1743605ad7f1SEric Dumazet 
1744ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1745ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1746ed6e7268SNeal Cardwell  */
1747ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1748ed6e7268SNeal Cardwell {
1749ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1750dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1751ed6e7268SNeal Cardwell 
1752dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1753dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1754dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1755dcb8c9b4SEric Dumazet 
1756dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1757350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1758ed6e7268SNeal Cardwell }
1759ed6e7268SNeal Cardwell 
1760d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1761d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1762d4589926SEric Dumazet 					const struct sk_buff *skb,
1763d4589926SEric Dumazet 					unsigned int mss_now,
1764d4589926SEric Dumazet 					unsigned int max_segs,
1765d4589926SEric Dumazet 					int nonagle)
1766c1b4a7e6SDavid S. Miller {
1767cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1768d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1769c1b4a7e6SDavid S. Miller 
177090840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17711485348dSBen Hutchings 	max_len = mss_now * max_segs;
17720e3a4803SIlpo Järvinen 
17731485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17741485348dSBen Hutchings 		return max_len;
17750e3a4803SIlpo Järvinen 
17765ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17775ea3a748SIlpo Järvinen 
17781485348dSBen Hutchings 	if (max_len <= needed)
17791485348dSBen Hutchings 		return max_len;
17800e3a4803SIlpo Järvinen 
1781d4589926SEric Dumazet 	partial = needed % mss_now;
1782d4589926SEric Dumazet 	/* If the last segment is not a full MSS, check if Nagle rules allow us
1783d4589926SEric Dumazet 	 * to include this last segment in this skb.
1784d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary.
1785d4589926SEric Dumazet 	 */
1786cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1787d4589926SEric Dumazet 		return needed - partial;
1788d4589926SEric Dumazet 
1789d4589926SEric Dumazet 	return needed;
1790c1b4a7e6SDavid S. Miller }
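
/* Editorial sketch (not kernel code): the split-point trimming done by
 * tcp_mss_split_point() above. needed = min(skb->len, window) = 5000
 * with mss_now = 1448 leaves a partial tail of 5000 % 1448 = 656 bytes;
 * when the Nagle check forbids sending it, the split point falls back
 * to 5000 - 656 = 4344 bytes. The boolean below is a hypothetical
 * stand-in for tcp_nagle_check().
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int needed = 5000, mss_now = 1448;
	unsigned int partial = needed % mss_now;
	bool nagle_forbids_partial = true;	/* hypothetical outcome */

	printf("send %u bytes\n",
	       partial && nagle_forbids_partial ? needed - partial : needed);
	return 0;
}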
1791c1b4a7e6SDavid S. Miller 
1792c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1793c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1794c1b4a7e6SDavid S. Miller  */
1795cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1796cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1797c1b4a7e6SDavid S. Miller {
1798d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1799c1b4a7e6SDavid S. Miller 
1800c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
18014de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
18024de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1803c1b4a7e6SDavid S. Miller 		return 1;
1804c1b4a7e6SDavid S. Miller 
1805c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1806c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1807d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1808c1b4a7e6SDavid S. Miller 		return 0;
1809d649a7a8SEric Dumazet 
1810d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1811d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1812d649a7a8SEric Dumazet 	 */
1813d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1814d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1815c1b4a7e6SDavid S. Miller }
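
/* Editorial sketch (not kernel code): the per-call budget computed by
 * tcp_cwnd_test() above. With hypothetical cwnd = 10 and 7 packets in
 * flight, halfcwnd = 5 bounds the answer to min(5, 10 - 7) = 3
 * sendable segments, keeping at least two GSO packets in flight.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 10, in_flight = 7;
	unsigned int halfcwnd, room;

	if (in_flight >= cwnd) {
		printf("0 segments\n");
		return 0;
	}
	halfcwnd = (cwnd >> 1) > 1 ? (cwnd >> 1) : 1;
	room = cwnd - in_flight;
	printf("%u segments\n", halfcwnd < room ? halfcwnd : room);	/* 3 */
	return 0;
}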
1816c1b4a7e6SDavid S. Miller 
1817b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
181867edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1819c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1820c1b4a7e6SDavid S. Miller  */
18215bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1822c1b4a7e6SDavid S. Miller {
1823c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1824c1b4a7e6SDavid S. Miller 
1825f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
18265bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1827c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1828c1b4a7e6SDavid S. Miller 	}
1829c1b4a7e6SDavid S. Miller 	return tso_segs;
1830c1b4a7e6SDavid S. Miller }
1831c1b4a7e6SDavid S. Miller 
1832c1b4a7e6SDavid S. Miller 
1833a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1834c1b4a7e6SDavid S. Miller  * sent now.
1835c1b4a7e6SDavid S. Miller  */
1836a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1837c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1838c1b4a7e6SDavid S. Miller {
1839c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
1840c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1841c1b4a7e6SDavid S. Miller 	 *
1842c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1843c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1844c1b4a7e6SDavid S. Miller 	 */
1845c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1846a2a385d6SEric Dumazet 		return true;
1847c1b4a7e6SDavid S. Miller 
18489b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18499b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1850a2a385d6SEric Dumazet 		return true;
1851c1b4a7e6SDavid S. Miller 
1852cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1853a2a385d6SEric Dumazet 		return true;
1854c1b4a7e6SDavid S. Miller 
1855a2a385d6SEric Dumazet 	return false;
1856c1b4a7e6SDavid S. Miller }
1857c1b4a7e6SDavid S. Miller 
1858c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1859a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1860a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1861056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1862c1b4a7e6SDavid S. Miller {
1863c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1864c1b4a7e6SDavid S. Miller 
1865c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1866c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1867c1b4a7e6SDavid S. Miller 
186890840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1869c1b4a7e6SDavid S. Miller }
1870c1b4a7e6SDavid S. Miller 
1871c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1872c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1873c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1874c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1875c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1876c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1877c1b4a7e6SDavid S. Miller  */
187856483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1879c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1880c1b4a7e6SDavid S. Miller {
1881c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
188256483341SEric Dumazet 	struct sk_buff *buff;
18839ce01461SIlpo Järvinen 	u8 flags;
1884c1b4a7e6SDavid S. Miller 
1885c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1886c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
188756483341SEric Dumazet 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
188856483341SEric Dumazet 				    skb, len, mss_now, gfp);
1889c1b4a7e6SDavid S. Miller 
1890eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
189151456b29SIan Morris 	if (unlikely(!buff))
1892c1b4a7e6SDavid S. Miller 		return -ENOMEM;
189341477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
1894c1b4a7e6SDavid S. Miller 
1895ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
18963ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1897b60b49eaSHerbert Xu 	buff->truesize += nlen;
1898c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1899c1b4a7e6SDavid S. Miller 
1900c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1901c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1902c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1903c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1904c1b4a7e6SDavid S. Miller 
1905c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
19064de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
19074de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
19084de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1909c1b4a7e6SDavid S. Miller 
1910c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1911c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1912c1b4a7e6SDavid S. Miller 
1913a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1914a166140eSMartin KaFai Lau 
191598be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1916c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1917490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1918c1b4a7e6SDavid S. Miller 
1919c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
19205bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
19215bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1922c1b4a7e6SDavid S. Miller 
1923c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1924f4a775d1SEric Dumazet 	__skb_header_release(buff);
192556483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
1926c1b4a7e6SDavid S. Miller 
1927c1b4a7e6SDavid S. Miller 	return 0;
1928c1b4a7e6SDavid S. Miller }
1929c1b4a7e6SDavid S. Miller 
1930c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1931c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1932c1b4a7e6SDavid S. Miller  *
1933c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1934c1b4a7e6SDavid S. Miller  */
1935ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1936f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
1937f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
1938f9bfe4e6SEric Dumazet 				 u32 max_segs)
1939c1b4a7e6SDavid S. Miller {
19406687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1941f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
194250c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
194350c8339eSEric Dumazet 	struct sk_buff *head;
1944ad9f4f50SEric Dumazet 	int win_divisor;
1945f1c6ea38SEric Dumazet 	s64 delta;
1946c1b4a7e6SDavid S. Miller 
194799d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1948ae8064acSJohn Heffner 		goto send_now;
1949ae8064acSJohn Heffner 
19505f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
1951a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
1952a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
1953a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
19545f852eb5SEric Dumazet 	 */
1955a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1956a682850aSEric Dumazet 	if (delta > 0)
1957ae8064acSJohn Heffner 		goto send_now;
1958908a75c1SDavid S. Miller 
1959c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1960c1b4a7e6SDavid S. Miller 
1961c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1962c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1963c1b4a7e6SDavid S. Miller 
196490840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1965c1b4a7e6SDavid S. Miller 
1966c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1967c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1968c1b4a7e6SDavid S. Miller 
1969c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1970c1b4a7e6SDavid S. Miller 
1971ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1972605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1973ae8064acSJohn Heffner 		goto send_now;
1974ba244fe9SDavid S. Miller 
197562ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
197662ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
197762ad2761SIlpo Järvinen 		goto send_now;
197862ad2761SIlpo Järvinen 
19795bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1980ad9f4f50SEric Dumazet 	if (win_divisor) {
1981c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1982c1b4a7e6SDavid S. Miller 
1983c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1984c1b4a7e6SDavid S. Miller 		 * just use it.
1985c1b4a7e6SDavid S. Miller 		 */
1986ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1987c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1988ae8064acSJohn Heffner 			goto send_now;
1989c1b4a7e6SDavid S. Miller 	} else {
1990c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1991c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1992c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1993c1b4a7e6SDavid S. Miller 		 * then send now.
1994c1b4a7e6SDavid S. Miller 		 */
19956b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1996ae8064acSJohn Heffner 			goto send_now;
1997c1b4a7e6SDavid S. Miller 	}
1998c1b4a7e6SDavid S. Miller 
199975c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
200075c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
200175c119afSEric Dumazet 	if (!head)
200275c119afSEric Dumazet 		goto send_now;
2003f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
200450c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
2005f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
200650c8339eSEric Dumazet 		goto send_now;
200750c8339eSEric Dumazet 
2008f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
2009f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
2010f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
2011f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
2012f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
2013f9bfe4e6SEric Dumazet 	 */
2014f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
2015f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
2016ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2017f9bfe4e6SEric Dumazet 			return true;
2018f9bfe4e6SEric Dumazet 		}
2019f9bfe4e6SEric Dumazet 	} else {
2020f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2021f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2022f9bfe4e6SEric Dumazet 			return true;
2023f9bfe4e6SEric Dumazet 		}
2024f9bfe4e6SEric Dumazet 	}
2025f9bfe4e6SEric Dumazet 
2026f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2027d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2028d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2029f9bfe4e6SEric Dumazet 		goto send_now;
2030ca8a2263SNeal Cardwell 
2031a2a385d6SEric Dumazet 	return true;
2032ae8064acSJohn Heffner 
2033ae8064acSJohn Heffner send_now:
2034a2a385d6SEric Dumazet 	return false;
2035c1b4a7e6SDavid S. Miller }
2036c1b4a7e6SDavid S. Miller 
203705cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
203805cbc0dbSFan Du {
203905cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
204005cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
204105cbc0dbSFan Du 	struct net *net = sock_net(sk);
204205cbc0dbSFan Du 	u32 interval;
204305cbc0dbSFan Du 	s32 delta;
204405cbc0dbSFan Du 
204505cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
2046c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
204705cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
204805cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
204905cbc0dbSFan Du 
205005cbc0dbSFan Du 		/* Update current search range */
205105cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
205205cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
205305cbc0dbSFan Du 			sizeof(struct tcphdr) +
205405cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
205505cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
205605cbc0dbSFan Du 
205705cbc0dbSFan Du 		/* Update probe time stamp */
2058c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
205905cbc0dbSFan Du 	}
206005cbc0dbSFan Du }
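
/* Editor's illustration: sysctl_tcp_probe_interval is in seconds
 * (600 by default) while probe_timestamp is in jiffies, hence the
 * interval * HZ comparison above. With defaults, the search range is
 * therefore re-opened at most once every 10 minutes, with search_high
 * rebuilt from the peer's advertised MSS (mss_clamp) plus the TCP and
 * network headers.
 */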
206105cbc0dbSFan Du 
2062808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2063808cf9e3SIlya Lesokhin {
2064808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2065808cf9e3SIlya Lesokhin 
2066808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2067808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2068808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2069808cf9e3SIlya Lesokhin 			break;
2070808cf9e3SIlya Lesokhin 
2071888a5c53SWillem de Bruijn 		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2072808cf9e3SIlya Lesokhin 			return false;
2073808cf9e3SIlya Lesokhin 
2074808cf9e3SIlya Lesokhin 		len -= skb->len;
2075808cf9e3SIlya Lesokhin 	}
2076808cf9e3SIlya Lesokhin 
2077808cf9e3SIlya Lesokhin 	return true;
2078808cf9e3SIlya Lesokhin }
2079808cf9e3SIlya Lesokhin 
20805d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
208167edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
208267edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
208367edfef7SAndi Kleen  * changes resulting in larger path MTUs.
208467edfef7SAndi Kleen  *
20855d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20865d424d5aSJohn Heffner  *         1 if a probe was sent,
2087056834d9SIlpo Järvinen  *         -1 otherwise
2088056834d9SIlpo Järvinen  */
20895d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20905d424d5aSJohn Heffner {
20915d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
209212a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20935d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20946b58e0a5SFan Du 	struct net *net = sock_net(sk);
20955d424d5aSJohn Heffner 	int probe_size;
209691cc17c0SIlpo Järvinen 	int size_needed;
209712a59abcSEric Dumazet 	int copy, len;
20985d424d5aSJohn Heffner 	int mss_now;
20996b58e0a5SFan Du 	int interval;
21005d424d5aSJohn Heffner 
21015d424d5aSJohn Heffner 	/* Not currently probing/verifying,
21025d424d5aSJohn Heffner 	 * not in recovery,
21035d424d5aSJohn Heffner 	 * have enough cwnd, and
210412a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
210512a59abcSEric Dumazet 	 */
210612a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
21075d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
21085d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
21095d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
211012a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
21115d424d5aSJohn Heffner 		return -1;
21125d424d5aSJohn Heffner 
21136b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_base_mss
21146b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
21156b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
21166b58e0a5SFan Du 	 */
21170c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
21186b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
21196b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
212091cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
21216b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
212205cbc0dbSFan Du 	/* When misfortune happens, we are reprobing actively,
212305cbc0dbSFan Du 	 * and the reprobe timer has expired. We stick with the current
212405cbc0dbSFan Du 	 * probing process by not resetting the search range to its original.
212505cbc0dbSFan Du 	 */
21266b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
212705cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
212805cbc0dbSFan Du 		/* Check whether enough time has elapsed for
212905cbc0dbSFan Du 		 * another round of probing.
213005cbc0dbSFan Du 		 */
213105cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
21325d424d5aSJohn Heffner 		return -1;
21335d424d5aSJohn Heffner 	}
21345d424d5aSJohn Heffner 
21355d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
21367f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
21375d424d5aSJohn Heffner 		return -1;
21385d424d5aSJohn Heffner 
213991cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
21405d424d5aSJohn Heffner 		return -1;
214190840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21425d424d5aSJohn Heffner 		return 0;
21435d424d5aSJohn Heffner 
2144d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2145d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2146d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
21475d424d5aSJohn Heffner 			return -1;
21485d424d5aSJohn Heffner 		else
21495d424d5aSJohn Heffner 			return 0;
21505d424d5aSJohn Heffner 	}
21515d424d5aSJohn Heffner 
2152808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2153808cf9e3SIlya Lesokhin 		return -1;
2154808cf9e3SIlya Lesokhin 
21555d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2156eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
215751456b29SIan Morris 	if (!nskb)
21585d424d5aSJohn Heffner 		return -1;
2159ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, nskb->truesize);
21603ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21615d424d5aSJohn Heffner 
2162fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
216341477662SJakub Kicinski 	skb_copy_decrypted(nskb, skb);
21645d424d5aSJohn Heffner 
21655d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21665d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21674de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21685d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21695d424d5aSJohn Heffner 	nskb->csum = 0;
217098be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21715d424d5aSJohn Heffner 
217250c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21732b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
217450c4817eSIlpo Järvinen 
21755d424d5aSJohn Heffner 	len = 0;
2176234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21775d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21785d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21795d424d5aSJohn Heffner 
21805d424d5aSJohn Heffner 		if (skb->len <= copy) {
21815d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21825d424d5aSJohn Heffner 			 * Throw it away. */
21834de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2184808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2185808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2186808cf9e3SIlya Lesokhin 			 */
2187808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2188888a5c53SWillem de Bruijn 			tcp_skb_collapse_tstamp(nskb, skb);
2189fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21903ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21915d424d5aSJohn Heffner 		} else {
21924de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2193a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21945d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21955d424d5aSJohn Heffner 				skb_pull(skb, copy);
21965d424d5aSJohn Heffner 			} else {
21975d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21985bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21995d424d5aSJohn Heffner 			}
22005d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
22015d424d5aSJohn Heffner 		}
22025d424d5aSJohn Heffner 
22035d424d5aSJohn Heffner 		len += copy;
2204234b6860SIlpo Järvinen 
2205234b6860SIlpo Järvinen 		if (len >= probe_size)
2206234b6860SIlpo Järvinen 			break;
22075d424d5aSJohn Heffner 	}
22085bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
22095d424d5aSJohn Heffner 
22105d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
22117faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
22127faee5c0SEric Dumazet 	 */
22135d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
22145d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
22155d424d5aSJohn Heffner 		 * effectively two packets. */
22165d424d5aSJohn Heffner 		tp->snd_cwnd--;
221766f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
22185d424d5aSJohn Heffner 
22195d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
22200e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
22210e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
22225d424d5aSJohn Heffner 
22235d424d5aSJohn Heffner 		return 1;
22245d424d5aSJohn Heffner 	}
22255d424d5aSJohn Heffner 
22265d424d5aSJohn Heffner 	return -1;
22275d424d5aSJohn Heffner }
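
/* Editor's sketch of the binary search above, with assumed MTU bounds:
 * search_low = 1100 and search_high = 1500 give a probe MTU of
 * (1100 + 1500) / 2 = 1300, converted to an MSS-sized payload by
 * tcp_mtu_to_mss(). The success/failure handling elsewhere (tcp_input.c)
 * moves search_low up or search_high down, and once the interval drops
 * below sysctl_tcp_probe_threshold (8 by default) probing backs off via
 * tcp_mtu_check_reprobe().
 */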
22285d424d5aSJohn Heffner 
2229864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2230218af599SEric Dumazet {
2231864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2232864e5c09SEric Dumazet 
2233864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2234864e5c09SEric Dumazet 		return false;
2235864e5c09SEric Dumazet 
2236864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2237864e5c09SEric Dumazet 		return false;
2238864e5c09SEric Dumazet 
2239864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2240864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2241864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2242864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2243864e5c09SEric Dumazet 		sock_hold(sk);
2244864e5c09SEric Dumazet 	}
2245864e5c09SEric Dumazet 	return true;
2246218af599SEric Dumazet }
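
/* Editor's note: tp->tcp_wstamp_ns is the Earliest Departure Time of
 * the next packet and tp->tcp_clock_cache is the cached tcp_clock_ns().
 * If the EDT is still in the future, the pacing hrtimer is armed to
 * fire at that absolute time and transmission is suppressed until the
 * timer callback resumes it; sock_hold() keeps the socket alive until
 * the reference is dropped when the timer fires.
 */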
2247218af599SEric Dumazet 
2248f9616c35SEric Dumazet /* TCP Small Queues:
2249f9616c35SEric Dumazet  * Control the number of packets in qdisc/device queues to two packets or ~1 ms worth.
2250f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2251f9616c35SEric Dumazet  * This allows for :
2252f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2253f9616c35SEric Dumazet  *  - faster recovery
2254f9616c35SEric Dumazet  *  - high rates
2255f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2256f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2257f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2258f9616c35SEric Dumazet  */
2259f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2260f9616c35SEric Dumazet 				  unsigned int factor)
2261f9616c35SEric Dumazet {
226276a9ebe8SEric Dumazet 	unsigned long limit;
2263f9616c35SEric Dumazet 
226476a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
226576a9ebe8SEric Dumazet 		      2 * skb->truesize,
22667c68fa2bSEric Dumazet 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2267c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
226876a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
22699184d8bbSEric Dumazet 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2270f9616c35SEric Dumazet 	limit <<= factor;
2271f9616c35SEric Dumazet 
2272a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2273a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
2274a842fe14SEric Dumazet 		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2275a842fe14SEric Dumazet 
2276a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2277a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2278a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2279a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2280a842fe14SEric Dumazet 		 */
2281a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2282a842fe14SEric Dumazet 		limit += extra_bytes;
2283a842fe14SEric Dumazet 	}
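
	/* Editor's arithmetic check for the shift above: with a pacing
	 * rate of 12,500,000 B/s and tcp_tx_delay = 1000 usec, the exact
	 * value 2 * rate * delay / USEC_PER_SEC is 25000 bytes, while
	 * (rate * delay) >> 19 gives 23841 -- within ~5%, since 2^20 only
	 * approximates 10^6.
	 */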
228414afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
228575c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
228675eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
228775eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
228875eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
228975eefc6cSEric Dumazet 		 */
229075c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
229175eefc6cSEric Dumazet 			return false;
229275eefc6cSEric Dumazet 
22937aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2294f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2295f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2296f9616c35SEric Dumazet 		 * test again the condition.
2297f9616c35SEric Dumazet 		 */
2298f9616c35SEric Dumazet 		smp_mb__after_atomic();
229914afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2300f9616c35SEric Dumazet 			return true;
2301f9616c35SEric Dumazet 	}
2302f9616c35SEric Dumazet 	return false;
2303f9616c35SEric Dumazet }
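
/* Worked example (editor's illustration, assuming the default
 * sk_pacing_shift of 10): at sk_pacing_rate = 125,000,000 B/s
 * (1 Gbit/s), limit = max(2 * truesize, 125000000 >> 10) = 122070
 * bytes, i.e. roughly 1 ms worth of traffic at line rate. The
 * retransmit path passes factor = 1, doubling this budget.
 */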
2304f9616c35SEric Dumazet 
230505b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
230605b055e8SFrancis Yan {
2307628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2308efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
230905b055e8SFrancis Yan 
2310efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2311efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
231205b055e8SFrancis Yan 	tp->chrono_start = now;
231305b055e8SFrancis Yan 	tp->chrono_type = new;
231405b055e8SFrancis Yan }
231505b055e8SFrancis Yan 
231605b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
231705b055e8SFrancis Yan {
231805b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
231905b055e8SFrancis Yan 
232005b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
23210f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
23220f87230dSFrancis Yan 	 * over the other conditions. So if something "more interesting"
232305b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
232405b055e8SFrancis Yan 	 */
232505b055e8SFrancis Yan 	if (type > tp->chrono_type)
232605b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
232705b055e8SFrancis Yan }
232805b055e8SFrancis Yan 
232905b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
233005b055e8SFrancis Yan {
233105b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
233205b055e8SFrancis Yan 
23340f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
23350f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
23360f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
23370f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
23380f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
23390f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
23400f87230dSFrancis Yan 	 */
234175c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
234205b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
23430f87230dSFrancis Yan 	else if (type == tp->chrono_type)
23440f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
234505b055e8SFrancis Yan }
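
/* Editor's note: the chrono_stat[] buckets accumulated above are
 * exported to userspace through tcp_get_info() as tcpi_busy_time,
 * tcpi_rwnd_limited and tcpi_sndbuf_limited, so tools in the style of
 * "ss -ti" can show where a flow spent its time.
 */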
234605b055e8SFrancis Yan 
23471da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
23481da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
23491da177e4SLinus Torvalds  * window for us.
23501da177e4SLinus Torvalds  *
2351f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2352f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, given the
2353f8269a49SIlpo Järvinen  * rare use of URG, this is not a big flaw.
2354f8269a49SIlpo Järvinen  *
23556ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
23566ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
23576ba8a3b1SNandita Dukkipati  *
2358a2a385d6SEric Dumazet  * Returns true, if no segments are in flight and we have queued segments,
2359a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
23601da177e4SLinus Torvalds  */
2361a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2362d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
23631da177e4SLinus Torvalds {
23641da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
236592df7b51SDavid S. Miller 	struct sk_buff *skb;
2366c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2367c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23685d424d5aSJohn Heffner 	int result;
23695615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2370605ad7f1SEric Dumazet 	u32 max_segs;
23711da177e4SLinus Torvalds 
2372c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23735d424d5aSJohn Heffner 
2374ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2375d5dd9175SIlpo Järvinen 	if (!push_one) {
23765d424d5aSJohn Heffner 		/* Do MTU probing. */
2377d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2378d5dd9175SIlpo Järvinen 		if (!result) {
2379a2a385d6SEric Dumazet 			return false;
23805d424d5aSJohn Heffner 		} else if (result > 0) {
23815d424d5aSJohn Heffner 			sent_pkts = 1;
23825d424d5aSJohn Heffner 		}
2383d5dd9175SIlpo Järvinen 	}
23845d424d5aSJohn Heffner 
2385ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2386fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2387c8ac3774SHerbert Xu 		unsigned int limit;
2388c8ac3774SHerbert Xu 
238979861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
239079861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
239179861919SEric Dumazet 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
239279861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2393bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
239479861919SEric Dumazet 			goto repair; /* Skip network transmission */
239579861919SEric Dumazet 		}
239679861919SEric Dumazet 
2397218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2398218af599SEric Dumazet 			break;
2399218af599SEric Dumazet 
24005bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2401c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2402c1b4a7e6SDavid S. Miller 
2403b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
24046ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
24056ba8a3b1SNandita Dukkipati 			if (push_one == 2)
24066ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
24076ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
24086ba8a3b1SNandita Dukkipati 			else
2409b68e9f85SHerbert Xu 				break;
24106ba8a3b1SNandita Dukkipati 		}
2411b68e9f85SHerbert Xu 
24125615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
24135615f886SFrancis Yan 			is_rwnd_limited = true;
2414b68e9f85SHerbert Xu 			break;
24155615f886SFrancis Yan 		}
2416b68e9f85SHerbert Xu 
2417d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2418aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2419aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2420aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2421aa93466bSDavid S. Miller 				break;
2422c1b4a7e6SDavid S. Miller 		} else {
2423ca8a2263SNeal Cardwell 			if (!push_one &&
2424605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2425f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2426aa93466bSDavid S. Miller 				break;
2427c1b4a7e6SDavid S. Miller 		}
2428aa93466bSDavid S. Miller 
2429605ad7f1SEric Dumazet 		limit = mss_now;
2430d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2431605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2432605ad7f1SEric Dumazet 						    min_t(unsigned int,
2433605ad7f1SEric Dumazet 							  cwnd_quota,
2434605ad7f1SEric Dumazet 							  max_segs),
2435605ad7f1SEric Dumazet 						    nonagle);
2436605ad7f1SEric Dumazet 
2437605ad7f1SEric Dumazet 		if (skb->len > limit &&
243856483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2439605ad7f1SEric Dumazet 			break;
2440605ad7f1SEric Dumazet 
2441f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
244246d3ceabSEric Dumazet 			break;
2443c9eeec26SEric Dumazet 
24441f85e626SEric Dumazet 		/* Argh, we hit an empty skb, presumably a thread
24451f85e626SEric Dumazet 		 * is sleeping in sendmsg()/sk_stream_wait_memory().
24461f85e626SEric Dumazet 		 * We do not want to send a pure-ack packet and have
24471f85e626SEric Dumazet 		 * a strange looking rtx queue with empty packet(s).
24481f85e626SEric Dumazet 		 */
24491f85e626SEric Dumazet 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
24501f85e626SEric Dumazet 			break;
24511f85e626SEric Dumazet 
2452d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
24531da177e4SLinus Torvalds 			break;
24541da177e4SLinus Torvalds 
2455ec342325SAndrew Vagin repair:
24561da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
24571da177e4SLinus Torvalds 		 * This call will increment packets_out.
24581da177e4SLinus Torvalds 		 */
245966f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
24601da177e4SLinus Torvalds 
24611da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2462a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2463d5dd9175SIlpo Järvinen 
2464d5dd9175SIlpo Järvinen 		if (push_one)
2465d5dd9175SIlpo Järvinen 			break;
24661da177e4SLinus Torvalds 	}
24671da177e4SLinus Torvalds 
24685615f886SFrancis Yan 	if (is_rwnd_limited)
24695615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
24705615f886SFrancis Yan 	else
24715615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
24725615f886SFrancis Yan 
2473aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2474684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2475684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24766ba8a3b1SNandita Dukkipati 
24776ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24786ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2479ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2480d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2481ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2482a2a385d6SEric Dumazet 		return false;
24831da177e4SLinus Torvalds 	}
248475c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24856ba8a3b1SNandita Dukkipati }
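
/* Editor's summary of the gates a packet passes in tcp_write_xmit()
 * above, in order: repair short-circuit, pacing check, cwnd quota,
 * receive window test, Nagle or TSO deferral, split to the
 * mss/cwnd-derived limit, TCP Small Queues, and finally
 * tcp_transmit_skb(); the chrono accounting at the end records why the
 * loop stopped.
 */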
24866ba8a3b1SNandita Dukkipati 
2487ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24886ba8a3b1SNandita Dukkipati {
24896ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24906ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2491a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24922ae21cf5SEric Dumazet 	int early_retrans;
24936ba8a3b1SNandita Dukkipati 
24946ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24956ba8a3b1SNandita Dukkipati 	 * finishes.
24966ba8a3b1SNandita Dukkipati 	 */
2497d983ea6fSEric Dumazet 	if (rcu_access_pointer(tp->fastopen_rsk))
24986ba8a3b1SNandita Dukkipati 		return false;
24996ba8a3b1SNandita Dukkipati 
25002ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
25016ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2502b4f70c3dSNeal Cardwell 	 * not in loss recovery, that are either cwnd-limited or application-limited.
25036ba8a3b1SNandita Dukkipati 	 */
25042ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2505bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2506b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2507b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
25086ba8a3b1SNandita Dukkipati 		return false;
25096ba8a3b1SNandita Dukkipati 
2510bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2511f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2512f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
25136ba8a3b1SNandita Dukkipati 	 */
2514bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2515bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
25166ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2517bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2518bb4d991aSYuchung Cheng 		else
2519bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2520bb4d991aSYuchung Cheng 	} else {
2521bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2522bb4d991aSYuchung Cheng 	}
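
	/* Editor's example: srtt_us stores 8 * SRTT in usec, so
	 * srtt_us >> 2 is 2 * SRTT. With SRTT = 25 ms the base timeout is
	 * 50 ms; with exactly one packet in flight, TCP_RTO_MIN (200 ms)
	 * is added to absorb a delayed ACK, giving a 250 ms probe timeout.
	 */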
25236ba8a3b1SNandita Dukkipati 
2524a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2525ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2526ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2527ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2528a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2529a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
25306ba8a3b1SNandita Dukkipati 
25313f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
25323f80e08fSEric Dumazet 			     TCP_RTO_MAX, NULL);
25336ba8a3b1SNandita Dukkipati 	return true;
25346ba8a3b1SNandita Dukkipati }
25356ba8a3b1SNandita Dukkipati 
25361f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
25371f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
25381f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
25391f3279aeSEric Dumazet  */
25401f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
25411f3279aeSEric Dumazet 				    const struct sk_buff *skb)
25421f3279aeSEric Dumazet {
254339bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2544c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
25451f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
25461f3279aeSEric Dumazet 		return true;
25471f3279aeSEric Dumazet 	}
25481f3279aeSEric Dumazet 	return false;
25491f3279aeSEric Dumazet }
25501f3279aeSEric Dumazet 
2551b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try send a new segment if possible, else
25526ba8a3b1SNandita Dukkipati  * retransmit the last segment.
25536ba8a3b1SNandita Dukkipati  */
25546ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
25556ba8a3b1SNandita Dukkipati {
25569b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
25576ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
25586ba8a3b1SNandita Dukkipati 	int pcount;
25596ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
25606ba8a3b1SNandita Dukkipati 
2561b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
256275c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2563b340b264SYuchung Cheng 		pcount = tp->packets_out;
2564b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2565b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2566b340b264SYuchung Cheng 			goto probe_sent;
25676ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25686ba8a3b1SNandita Dukkipati 	}
256975c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2570b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2571b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2572b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2573b2b7af86SYuchung Cheng 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2574b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2575b2b7af86SYuchung Cheng 		return;
2576b2b7af86SYuchung Cheng 	}
25776ba8a3b1SNandita Dukkipati 
25789b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
25799b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25809b717a8dSNandita Dukkipati 		goto rearm_timer;
25819b717a8dSNandita Dukkipati 
25821f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25831f3279aeSEric Dumazet 		goto rearm_timer;
25841f3279aeSEric Dumazet 
25856ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25866ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25876ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25886ba8a3b1SNandita Dukkipati 
25896ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
259075c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
259175c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25926cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25936ba8a3b1SNandita Dukkipati 			goto rearm_timer;
259475c119afSEric Dumazet 		skb = skb_rb_next(skb);
25956ba8a3b1SNandita Dukkipati 	}
25966ba8a3b1SNandita Dukkipati 
25976ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25986ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25996ba8a3b1SNandita Dukkipati 
260010d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2601b340b264SYuchung Cheng 		goto rearm_timer;
26026ba8a3b1SNandita Dukkipati 
26039b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
26049b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
26059b717a8dSNandita Dukkipati 
2606b340b264SYuchung Cheng probe_sent:
2607c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2608fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2609fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2610b340b264SYuchung Cheng rearm_timer:
2611fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
26121da177e4SLinus Torvalds }
26131da177e4SLinus Torvalds 
2614a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2615a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2616a762a980SDavid S. Miller  * The socket must be locked by the caller.
2617a762a980SDavid S. Miller  */
26189e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
26199e412ba7SIlpo Järvinen 			       int nonagle)
2620a762a980SDavid S. Miller {
2621726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2622726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2623726e07a8SIlpo Järvinen 	 * all will be happy.
2624726e07a8SIlpo Järvinen 	 */
2625726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2626726e07a8SIlpo Järvinen 		return;
2627726e07a8SIlpo Järvinen 
262899a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
26297450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
26309e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2631a762a980SDavid S. Miller }
2632a762a980SDavid S. Miller 
2633c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2634c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2635c1b4a7e6SDavid S. Miller  */
2636c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2637c1b4a7e6SDavid S. Miller {
2638fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2639c1b4a7e6SDavid S. Miller 
2640c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2641c1b4a7e6SDavid S. Miller 
2642d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2643c1b4a7e6SDavid S. Miller }
2644c1b4a7e6SDavid S. Miller 
26451da177e4SLinus Torvalds /* This function returns the amount that we can raise the
26461da177e4SLinus Torvalds  * usable window based on the following constraints
26471da177e4SLinus Torvalds  *
26481da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
26491da177e4SLinus Torvalds  * 2. We limit memory per socket
26501da177e4SLinus Torvalds  *
26511da177e4SLinus Torvalds  * RFC 1122:
26521da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
26531da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
26541da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
26551da177e4SLinus Torvalds  *
26561da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
26571da177e4SLinus Torvalds  * it at least MSS bytes.
26581da177e4SLinus Torvalds  *
26591da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
26601da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
26611da177e4SLinus Torvalds  *
26621da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
26631da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
26641da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
26651da177e4SLinus Torvalds  * window to always advance by a single byte.
26661da177e4SLinus Torvalds  *
26671da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
26681da177e4SLinus Torvalds  * then this will not be a problem.
26691da177e4SLinus Torvalds  *
26701da177e4SLinus Torvalds  * BSD seems to make the following compromise:
26711da177e4SLinus Torvalds  *
26721da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
26731da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
26741da177e4SLinus Torvalds  *	then set the window to 0.
26751da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
26761da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
26771da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26781da177e4SLinus Torvalds  *
26791da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26801da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26811da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26821da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26831da177e4SLinus Torvalds  * because the pipeline is full.
26841da177e4SLinus Torvalds  *
26851da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26861da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26871da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26881da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26891da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26901da177e4SLinus Torvalds  *
26911da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26921da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26931da177e4SLinus Torvalds  *
26941da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26951da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26961da177e4SLinus Torvalds  */
26971da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26981da177e4SLinus Torvalds {
2699463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
27001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2701caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
27021da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
27031da177e4SLinus Torvalds 	 * of peer's MSS is better for performance.  It's more correct
27041da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
27051da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
27061da177e4SLinus Torvalds 	 */
2707463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
27081da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
270986c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
271086c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
27111da177e4SLinus Torvalds 	int window;
27121da177e4SLinus Torvalds 
271306425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
27141da177e4SLinus Torvalds 		mss = full_space;
271506425c30SEric Dumazet 		if (mss <= 0)
271606425c30SEric Dumazet 			return 0;
271706425c30SEric Dumazet 	}
2718b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2719463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
27201da177e4SLinus Torvalds 
2721b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2722056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2723056834d9SIlpo Järvinen 					       4U * tp->advmss);
27241da177e4SLinus Torvalds 
272586c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
272686c1a045SFlorian Westphal 		 * increase it due to wscale.
272786c1a045SFlorian Westphal 		 */
272886c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
272986c1a045SFlorian Westphal 
273086c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
273186c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
273286c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
273386c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
273486c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
273586c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
273686c1a045SFlorian Westphal 		 */
273786c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
27381da177e4SLinus Torvalds 			return 0;
27391da177e4SLinus Torvalds 	}
27401da177e4SLinus Torvalds 
27411da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
27421da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
27431da177e4SLinus Torvalds 
27441da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
27451da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
27461da177e4SLinus Torvalds 	 */
27471da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
27481da177e4SLinus Torvalds 		window = free_space;
27491da177e4SLinus Torvalds 
27501da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
27511da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
27521da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
27531da177e4SLinus Torvalds 		 */
27541935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
27551da177e4SLinus Torvalds 	} else {
27561935299dSGao Feng 		window = tp->rcv_wnd;
27571da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
27581da177e4SLinus Torvalds 		 * Window clamp already applied above.
27591da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
27601da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
27611da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
27621da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
27631da177e4SLinus Torvalds 		 * is too small.
27641da177e4SLinus Torvalds 		 */
27651da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
27661935299dSGao Feng 			window = rounddown(free_space, mss);
276784565070SJohn Heffner 		else if (mss == full_space &&
2768b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
276984565070SJohn Heffner 			window = free_space;
27701da177e4SLinus Torvalds 	}
27711da177e4SLinus Torvalds 
27721da177e4SLinus Torvalds 	return window;
27731da177e4SLinus Torvalds }
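
/* Worked example (editor's illustration): with free_space = 10000,
 * mss = 1448 and no window scaling, rounddown() advertises
 * 6 * 1448 = 8688 bytes. With rcv_wscale = 7, the field sent on the
 * wire is window >> 7, so ALIGN(window, 1 << 7) rounds up instead,
 * ensuring the offer is not silently truncated below a full MSS by
 * the scaling.
 */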
27741da177e4SLinus Torvalds 
2775cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2776082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2777082ac2d5SMartin KaFai Lau {
27780a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27790a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27800a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2781082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2782082ac2d5SMartin KaFai Lau 
27830a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2784082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27852de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27862de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2787082ac2d5SMartin KaFai Lau 	}
2788082ac2d5SMartin KaFai Lau }
2789082ac2d5SMartin KaFai Lau 
27904a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2791f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27921da177e4SLinus Torvalds {
27931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
279475c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
279513dde04fSWei Yongjun 	int next_skb_size;
27961da177e4SLinus Torvalds 
2797058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27981da177e4SLinus Torvalds 
2799058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
28001da177e4SLinus Torvalds 
2801f8071cdeSEric Dumazet 	if (next_skb_size) {
2802f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2803f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2804f8071cdeSEric Dumazet 				      next_skb_size);
28053b4929f6SEric Dumazet 		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
2806f8071cdeSEric Dumazet 			return false;
2807f8071cdeSEric Dumazet 	}
28082b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2809a6963a6bSIlpo Järvinen 
28101da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
28111da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
28121da177e4SLinus Torvalds 
2813e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
28144de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
28151da177e4SLinus Torvalds 
28161da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
28171da177e4SLinus Torvalds 	 * packet counting does not break.
28181da177e4SLinus Torvalds 	 */
28194828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2820a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2821b7689205SIlpo Järvinen 
2822b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2823ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2824ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2825ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2826b7689205SIlpo Järvinen 
2827797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2828797108d1SIlpo Järvinen 
2829082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2830082ac2d5SMartin KaFai Lau 
283175c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2832f8071cdeSEric Dumazet 	return true;
28331da177e4SLinus Torvalds }
28341da177e4SLinus Torvalds 
283567edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2836a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
28374a17fc3aSIlpo Järvinen {
28384a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2839a2a385d6SEric Dumazet 		return false;
28404a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2841a2a385d6SEric Dumazet 		return false;
28422331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
28434a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2844a2a385d6SEric Dumazet 		return false;
28454a17fc3aSIlpo Järvinen 
2846a2a385d6SEric Dumazet 	return true;
28474a17fc3aSIlpo Järvinen }
28484a17fc3aSIlpo Järvinen 
284967edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
285067edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
285167edfef7SAndi Kleen  */
28524a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
28534a17fc3aSIlpo Järvinen 				     int space)
28544a17fc3aSIlpo Järvinen {
28554a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
28564a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2857a2a385d6SEric Dumazet 	bool first = true;
28584a17fc3aSIlpo Järvinen 
2859e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
28604a17fc3aSIlpo Järvinen 		return;
28614de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
28624a17fc3aSIlpo Järvinen 		return;
28634a17fc3aSIlpo Järvinen 
286475c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
28654a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
28664a17fc3aSIlpo Järvinen 			break;
28674a17fc3aSIlpo Järvinen 
2868a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2869a643b5d4SMartin KaFai Lau 			break;
2870a643b5d4SMartin KaFai Lau 
28714a17fc3aSIlpo Järvinen 		space -= skb->len;
28724a17fc3aSIlpo Järvinen 
28734a17fc3aSIlpo Järvinen 		if (first) {
2874a2a385d6SEric Dumazet 			first = false;
28754a17fc3aSIlpo Järvinen 			continue;
28764a17fc3aSIlpo Järvinen 		}
28774a17fc3aSIlpo Järvinen 
28784a17fc3aSIlpo Järvinen 		if (space < 0)
28794a17fc3aSIlpo Järvinen 			break;
28804a17fc3aSIlpo Järvinen 
28814a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28824a17fc3aSIlpo Järvinen 			break;
28834a17fc3aSIlpo Järvinen 
2884f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2885f8071cdeSEric Dumazet 			break;
28864a17fc3aSIlpo Järvinen 	}
28874a17fc3aSIlpo Järvinen }
28884a17fc3aSIlpo Järvinen 
28891da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28901da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28911da177e4SLinus Torvalds  * error occurred which prevented the send.
28921da177e4SLinus Torvalds  */
289310d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28941da177e4SLinus Torvalds {
28955d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
289610d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28977d227cd2SSridhar Samudrala 	unsigned int cur_mss;
289810d3be56SEric Dumazet 	int diff, len, err;
28991da177e4SLinus Torvalds 
290010d3be56SEric Dumazet 
290210d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
29035d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
29045d424d5aSJohn Heffner 
29051da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2906caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
29071da177e4SLinus Torvalds 	 */
290814afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2909ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2910ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
29111da177e4SLinus Torvalds 		return -EAGAIN;
29121da177e4SLinus Torvalds 
29131f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
29141f3279aeSEric Dumazet 		return -EBUSY;
29151f3279aeSEric Dumazet 
29161da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
29177f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
29187f582b24SEric Dumazet 			WARN_ON_ONCE(1);
29197f582b24SEric Dumazet 			return -EINVAL;
29207f582b24SEric Dumazet 		}
29211da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
29221da177e4SLinus Torvalds 			return -ENOMEM;
29231da177e4SLinus Torvalds 	}
29241da177e4SLinus Torvalds 
29257d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
29267d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
29277d227cd2SSridhar Samudrala 
29280c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
29297d227cd2SSridhar Samudrala 
29301da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
29311da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
29321da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
29331da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
29341da177e4SLinus Torvalds 	 */
29359d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
29369d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
29371da177e4SLinus Torvalds 		return -EAGAIN;
29381da177e4SLinus Torvalds 
293910d3be56SEric Dumazet 	len = cur_mss * segs;
294010d3be56SEric Dumazet 	if (skb->len > len) {
294175c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
294275c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
29431da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
294402276f3cSIlpo Järvinen 	} else {
2945c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2946c52e2421SEric Dumazet 			return -ENOMEM;
294710d3be56SEric Dumazet 
294810d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
294910d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
295010d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
295110d3be56SEric Dumazet 		if (diff)
295210d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
295310d3be56SEric Dumazet 		if (skb->len < cur_mss)
295410d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
29551da177e4SLinus Torvalds 	}
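
	/* Editor's example: the caller sizes the retransmit to the cwnd
	 * headroom, e.g. segs = 4 and cur_mss = 1448 cap it at
	 * len = 5792 bytes, so a 10-segment skb is split by tcp_fragment()
	 * above; a smaller skb instead has its pcount refreshed for the
	 * current MSS and may be collapsed with its neighbours by
	 * tcp_retrans_try_collapse().
	 */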
29561da177e4SLinus Torvalds 
295749213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
295849213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
295949213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
296049213555SDaniel Borkmann 
2961678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2962678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2963678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2964678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2965678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2966678550c6SYuchung Cheng 	tp->total_retrans += segs;
2967fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2968678550c6SYuchung Cheng 
296950bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
297050bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
297150bceae9SThomas Graf 	 * beyond what csum_start can cover.
297250bceae9SThomas Graf 	 */
297350bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
297450bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
297510a81980SEric Dumazet 		struct sk_buff *nskb;
297610a81980SEric Dumazet 
2977e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
297810a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2979c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2980117632e6SEric Dumazet 				     -ENOBUFS;
2981e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2982e2080072SEric Dumazet 
29835889e2c0SYousuk Seung 		if (!err) {
2984a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
29855889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29865889e2c0SYousuk Seung 		}
2987117632e6SEric Dumazet 	} else {
2988c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2989117632e6SEric Dumazet 	}
2990c84a5711SYuchung Cheng 
29917f12422cSYuchung Cheng 	/* To avoid taking spuriously low RTT samples based on a timestamp
29927f12422cSYuchung Cheng 	 * for a transmit that never happened, always mark EVER_RETRANS
29937f12422cSYuchung Cheng 	 */
29947f12422cSYuchung Cheng 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
29957f12422cSYuchung Cheng 
2996a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2997a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2998a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2999a31ad29eSLawrence Brakmo 
3000fc9f3501SEric Dumazet 	if (likely(!err)) {
3001e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
3002678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
3003ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3004fc9f3501SEric Dumazet 	}
3005c84a5711SYuchung Cheng 	return err;
300693b174adSYuchung Cheng }
300793b174adSYuchung Cheng 
300810d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
300993b174adSYuchung Cheng {
301093b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
301110d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds 	if (err == 0) {
30141da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
30151da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3016e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
30171da177e4SLinus Torvalds 		}
30181da177e4SLinus Torvalds #endif
30191da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
30201da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
30217ae18975SYuchung Cheng 	}
30221da177e4SLinus Torvalds 
30237ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
30241da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
30257faee5c0SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp(skb);
30261da177e4SLinus Torvalds 
30276e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
30286e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
30296e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
30301da177e4SLinus Torvalds 	return err;
30311da177e4SLinus Torvalds }
30321da177e4SLinus Torvalds 
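/* A worked sketch of the bookkeeping above (illustrative numbers only):
 * retransmitting one two-segment skb (tcp_skb_pcount() == 2) raises
 * tp->retrans_out and tp->undo_retrans by 2.  Each later DSACK showing
 * the receiver already had a retransmitted segment decrements
 * tp->undo_retrans; once it hits zero all retransmits are presumed
 * spurious and the cwnd reduction can be undone (see tcp_input.c).
 */
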
30331da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and when the initially
30341da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
30351da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
30361da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
30371da177e4SLinus Torvalds  */
30381da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
30391da177e4SLinus Torvalds {
30406687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3041b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
30421da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3043840a3cbeSYuchung Cheng 	u32 max_segs;
304461eb55f4SIlpo Järvinen 	int mib_idx;
30456a438bbeSStephen Hemminger 
304645e77d31SIlpo Järvinen 	if (!tp->packets_out)
304745e77d31SIlpo Järvinen 		return;
304845e77d31SIlpo Järvinen 
304975c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3050b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3051ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
305275c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3053dca0aaf8SEric Dumazet 		__u8 sacked;
305410d3be56SEric Dumazet 		int segs;
30551da177e4SLinus Torvalds 
3056218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3057218af599SEric Dumazet 			break;
3058218af599SEric Dumazet 
30596a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
306051456b29SIan Morris 		if (!hole)
30616a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
30626a438bbeSStephen Hemminger 
306310d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
306410d3be56SEric Dumazet 		if (segs <= 0)
30651da177e4SLinus Torvalds 			return;
3066dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3067a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3068a3d2e9f8SEric Dumazet 		 * we need to make sure we do not send too big a TSO packet.
3069a3d2e9f8SEric Dumazet 		 */
3070a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
30710e1c54c2SIlpo Järvinen 
3072840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3073006f582cSIlpo Järvinen 			break;
30740e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
307551456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
30760e1c54c2SIlpo Järvinen 				hole = skb;
307761eb55f4SIlpo Järvinen 			continue;
30781da177e4SLinus Torvalds 
30790e1c54c2SIlpo Järvinen 		} else {
30800e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30810e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30820e1c54c2SIlpo Järvinen 			else
30830e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30840e1c54c2SIlpo Järvinen 		}
30850e1c54c2SIlpo Järvinen 
30860e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
308761eb55f4SIlpo Järvinen 			continue;
308840b215e5SPavel Emelyanov 
3089f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3090f9616c35SEric Dumazet 			return;
3091f9616c35SEric Dumazet 
309210d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30931da177e4SLinus Torvalds 			return;
309424ab6becSYuchung Cheng 
3095de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30961da177e4SLinus Torvalds 
3097684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3098a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3099a262f0cdSNandita Dukkipati 
310075c119afSEric Dumazet 		if (skb == rtx_head &&
310157dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
31023f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
31033f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
31043f80e08fSEric Dumazet 					     TCP_RTO_MAX,
31053f80e08fSEric Dumazet 					     skb);
31061da177e4SLinus Torvalds 	}
31071da177e4SLinus Torvalds }
31081da177e4SLinus Torvalds 
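/* Sketch of the per-skb budget computed in the loop above, with
 * illustrative numbers: if tp->snd_cwnd == 10 while 7 segments are in
 * flight, segs == 3; min_t(int, segs, max_segs) then caps that against
 * the TSO sizing from tcp_tso_segs(), and tcp_retransmit_skb() is
 * expected to trim a larger skb so at most that many segments go out.
 */
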
3109d83769a5SEric Dumazet /* We allow FIN packets to exceed memory limits, to expedite
3110d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3111845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay the FIN
3112845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3113a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3114a6c5ea4cSEric Dumazet  * with edge-triggered epoll().
3115d83769a5SEric Dumazet  */
3116a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3117d83769a5SEric Dumazet {
3118e805605cSJohannes Weiner 	int amt;
3119d83769a5SEric Dumazet 
3120d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3121d83769a5SEric Dumazet 		return;
3122d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3123d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3124e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3125e805605cSJohannes Weiner 
3126baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3127baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3128d83769a5SEric Dumazet }
3129d83769a5SEric Dumazet 
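/* Illustrative arithmetic for the forced charge above, assuming 4KB
 * pages (SK_MEM_QUANTUM == PAGE_SIZE): a FIN skb with truesize == 5000
 * and sk_forward_alloc == 0 gives sk_mem_pages(5000) == 2, so
 * sk_forward_alloc grows by 2 * 4096 == 8192 bytes and two pages are
 * charged to the protocol (and, if enabled, memcg) counters.
 */
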
3130845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3131845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
31321da177e4SLinus Torvalds  */
31331da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
31341da177e4SLinus Torvalds {
3135ee2aabd3SEric Dumazet 	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
31361da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31371da177e4SLinus Torvalds 
3138845704a5SEric Dumazet 	/* Optimization: tack on the FIN if we have an skb in the write queue
3139845704a5SEric Dumazet 	 * that was not yet sent, or if we are under memory pressure.
3140845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3141845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
31421da177e4SLinus Torvalds 	 */
3143ee2aabd3SEric Dumazet 	tskb = tail;
314475c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
314575c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
314675c119afSEric Dumazet 
314775c119afSEric Dumazet 	if (tskb) {
3148845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3149845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
31501da177e4SLinus Torvalds 		tp->write_seq++;
3151ee2aabd3SEric Dumazet 		if (!tail) {
3152845704a5SEric Dumazet 			/* This means tskb was already sent.
3153845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3154845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3155845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3156845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3157845704a5SEric Dumazet 			 */
3158e0d694d6SEric Dumazet 			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3159845704a5SEric Dumazet 			return;
3160845704a5SEric Dumazet 		}
31611da177e4SLinus Torvalds 	} else {
3162845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3163d1edc085SColin Ian King 		if (unlikely(!skb))
3164845704a5SEric Dumazet 			return;
3165d1edc085SColin Ian King 
3166e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3167d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3168a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
31691da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3170e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3171a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
31721da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
31731da177e4SLinus Torvalds 	}
3174845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
31751da177e4SLinus Torvalds }
31761da177e4SLinus Torvalds 
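/* Sequence accounting sketch for the FIN above (illustrative values):
 * a FIN consumes one sequence number but carries no payload, so with
 * tp->write_seq == 1001 the FIN occupies sequence 1001 and write_seq
 * advances to 1002; that is why the piggyback path only increments
 * end_seq and write_seq instead of queueing any data.
 */
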
31771da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
31781da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
31791da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
318065bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31811da177e4SLinus Torvalds  */
3182dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31831da177e4SLinus Torvalds {
31841da177e4SLinus Torvalds 	struct sk_buff *skb;
31851da177e4SLinus Torvalds 
31867cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31877cc2b043SGao Feng 
31881da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31891da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31901da177e4SLinus Torvalds 	if (!skb) {
31914e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31921da177e4SLinus Torvalds 		return;
31931da177e4SLinus Torvalds 	}
31941da177e4SLinus Torvalds 
31951da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31961da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3197e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3198a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31999a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
32001da177e4SLinus Torvalds 	/* Send it off. */
3201dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
32024e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3203c24b14c4SSong Liu 
3204c24b14c4SSong Liu 	/* skb of trace_tcp_send_reset() keeps the skb that caused RST,
3205c24b14c4SSong Liu 	 * skb here is different to the troublesome skb, so use NULL
3206c24b14c4SSong Liu 	 */
3207c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
32081da177e4SLinus Torvalds }
32091da177e4SLinus Torvalds 
321067edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
321167edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
32121da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
32131da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
32141da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
32151da177e4SLinus Torvalds  */
32161da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
32171da177e4SLinus Torvalds {
32181da177e4SLinus Torvalds 	struct sk_buff *skb;
32191da177e4SLinus Torvalds 
322075c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
322151456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
322275c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
32231da177e4SLinus Torvalds 		return -EFAULT;
32241da177e4SLinus Torvalds 	}
32254de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
32261da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3227e2080072SEric Dumazet 			struct sk_buff *nskb;
3228e2080072SEric Dumazet 
3229e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3230e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3231e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
323251456b29SIan Morris 			if (!nskb)
32331da177e4SLinus Torvalds 				return -ENOMEM;
3234e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3235*2bec445fSEric Dumazet 			tcp_highest_sack_replace(sk, skb, nskb);
323675c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3237f4a775d1SEric Dumazet 			__skb_header_release(nskb);
323875c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3239ab4e846aSEric Dumazet 			sk_wmem_queued_add(sk, nskb->truesize);
32403ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
32411da177e4SLinus Torvalds 			skb = nskb;
32421da177e4SLinus Torvalds 		}
32431da177e4SLinus Torvalds 
32444de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3245735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
32461da177e4SLinus Torvalds 	}
3247dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
32481da177e4SLinus Torvalds }
32491da177e4SLinus Torvalds 
32504aea39c1SEric Dumazet /**
32514aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
32524aea39c1SEric Dumazet  * @sk: listener socket
32534aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
32544aea39c1SEric Dumazet  * @req: request_sock pointer
32554aea39c1SEric Dumazet  *
32564aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
32574aea39c1SEric Dumazet  * @dst is consumed: caller should not use it again.
32584aea39c1SEric Dumazet  */
32595d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3260e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3261ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3262b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
32631da177e4SLinus Torvalds {
32642e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
32655d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
326680f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
32675d062de7SEric Dumazet 	struct tcp_out_options opts;
32685d062de7SEric Dumazet 	struct sk_buff *skb;
3269bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
32705d062de7SEric Dumazet 	struct tcphdr *th;
3271f5fff5dcSTom Quetchenbach 	int mss;
3272a842fe14SEric Dumazet 	u64 now;
32731da177e4SLinus Torvalds 
3274ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
32754aea39c1SEric Dumazet 	if (unlikely(!skb)) {
32764aea39c1SEric Dumazet 		dst_release(dst);
32771da177e4SLinus Torvalds 		return NULL;
32784aea39c1SEric Dumazet 	}
32791da177e4SLinus Torvalds 	/* Reserve space for headers. */
32801da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32811da177e4SLinus Torvalds 
3282b3d05147SEric Dumazet 	switch (synack_type) {
3283b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32849e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3285b3d05147SEric Dumazet 		break;
3286b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3287b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3288b3d05147SEric Dumazet 		 * to avoid false sharing.
3289b3d05147SEric Dumazet 		 */
3290b3d05147SEric Dumazet 		break;
3291b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3292ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3293ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3294ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3295ca6fb065SEric Dumazet 		 */
3296ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3297b3d05147SEric Dumazet 		break;
3298ca6fb065SEric Dumazet 	}
32994aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
33001da177e4SLinus Torvalds 
33013541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3302f5fff5dcSTom Quetchenbach 
330333ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3304a842fe14SEric Dumazet 	now = tcp_clock_ns();
33058b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
33068b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3307200ecef6SEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
33088b5f12d0SFlorian Westphal 	else
33098b5f12d0SFlorian Westphal #endif
33109e450c1eSYuchung Cheng 	{
3311a842fe14SEric Dumazet 		skb->skb_mstamp_ns = now;
33129e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
33139e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
33149e450c1eSYuchung Cheng 	}
331580f03e27SEric Dumazet 
331680f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
331780f03e27SEric Dumazet 	rcu_read_lock();
3318fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
331980f03e27SEric Dumazet #endif
332058d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
332160e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
332260e2a778SUrsula Braun 					     foc) + sizeof(*th);
332333ad798cSAdam Langley 
3324aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3325aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
33261da177e4SLinus Torvalds 
3327ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
33281da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
33291da177e4SLinus Torvalds 	th->syn = 1;
33301da177e4SLinus Torvalds 	th->ack = 1;
33316ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3332b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3333634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3334e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
33353b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
33363b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
33378336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
33388336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
33391da177e4SLinus Torvalds 
33401da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3341ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
33425d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
33431da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
334490bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3345cfb6eeb4SYOSHIFUJI Hideaki 
3346cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3347cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
334880f03e27SEric Dumazet 	if (md5)
3349bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
335039f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
335180f03e27SEric Dumazet 	rcu_read_unlock();
3352cfb6eeb4SYOSHIFUJI Hideaki #endif
3353cfb6eeb4SYOSHIFUJI Hideaki 
3354a842fe14SEric Dumazet 	skb->skb_mstamp_ns = now;
3355a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3356a842fe14SEric Dumazet 
33571da177e4SLinus Torvalds 	return skb;
33581da177e4SLinus Torvalds }
33594bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
33601da177e4SLinus Torvalds 
336181164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
336281164413SDaniel Borkmann {
336381164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
336481164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
336581164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
336681164413SDaniel Borkmann 
336781164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
336881164413SDaniel Borkmann 		return;
336981164413SDaniel Borkmann 
337081164413SDaniel Borkmann 	rcu_read_lock();
337181164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
337281164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
337381164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
337481164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
337581164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
337681164413SDaniel Borkmann 	}
337781164413SDaniel Borkmann 	rcu_read_unlock();
337881164413SDaniel Borkmann }
337981164413SDaniel Borkmann 
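/* The RTAX_CC_ALGO metric consulted above is typically installed from
 * user space, e.g. with iproute2 (assuming the module for the chosen
 * algorithm is loadable):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 congctl bbr
 *
 * makes connections to that prefix start with BBR, while
 * "congctl lock bbr" additionally sets the bit read by
 * tcp_ca_dst_locked().
 */
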
338067edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3381f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
33821da177e4SLinus Torvalds {
3383cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
33841da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33851da177e4SLinus Torvalds 	__u8 rcv_wscale;
338613d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33871da177e4SLinus Torvalds 
33881da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33891da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33901da177e4SLinus Torvalds 	 */
33915d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33925d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33935d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33941da177e4SLinus Torvalds 
3395cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
339600db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3397cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3398cfb6eeb4SYOSHIFUJI Hideaki #endif
3399cfb6eeb4SYOSHIFUJI Hideaki 
34001da177e4SLinus Torvalds 	/* If the user gave us a TCP_MAXSEG, record it as the clamp */
34011da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
34021da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
34031da177e4SLinus Torvalds 	tp->max_window = 0;
34045d424d5aSJohn Heffner 	tcp_mtup_init(sk);
34051da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
34061da177e4SLinus Torvalds 
340781164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
340881164413SDaniel Borkmann 
34091da177e4SLinus Torvalds 	if (!tp->window_clamp)
34101da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
34113541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3412f5fff5dcSTom Quetchenbach 
34131da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
34141da177e4SLinus Torvalds 
3415e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3416e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3417e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3418e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3419e88c64f0SHagen Paul Pfeifer 
342013d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
342113d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
342213d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
342313d3b1ebSLawrence Brakmo 
3424ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
34251da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
34261da177e4SLinus Torvalds 				  &tp->rcv_wnd,
34271da177e4SLinus Torvalds 				  &tp->window_clamp,
34289bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
342931d12926Slaurent chavey 				  &rcv_wscale,
343013d3b1ebSLawrence Brakmo 				  rcv_wnd);
34311da177e4SLinus Torvalds 
34321da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
34331da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
34341da177e4SLinus Torvalds 
34351da177e4SLinus Torvalds 	sk->sk_err = 0;
34361da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
34371da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3438ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
34397f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
34401da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
34411da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
344233f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3443e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3444ee995283SPavel Emelyanov 
3445ee995283SPavel Emelyanov 	if (likely(!tp->repair))
34461da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3447c7781a6eSAndrew Vagin 	else
344870eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3449ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
34507db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
34511da177e4SLinus Torvalds 
34528550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3453463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
34541da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
34551da177e4SLinus Torvalds }
34561da177e4SLinus Torvalds 
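/* Worked example for the initial window selection above (illustrative):
 * a 1 MiB window_clamp does not fit the 16-bit window field, and
 * tcp_select_initial_window() would pick rcv_wscale == 5 for it, since
 * 65535 << 4 == 1048560 still falls 16 bytes short of 1 MiB; subsequent
 * window advertisements are then sent right-shifted by rcv_wscale.
 */
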
3457783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3458783237e8SYuchung Cheng {
3459783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3460783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3461783237e8SYuchung Cheng 
3462783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3463f4a775d1SEric Dumazet 	__skb_header_release(skb);
3464ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
3465783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
34660f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3467783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3468783237e8SYuchung Cheng }
3469783237e8SYuchung Cheng 
3470783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3471783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3472783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3473783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3474783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3475783237e8SYuchung Cheng  * a regular SYN with a Fast Open cookie request option.
3476783237e8SYuchung Cheng  */
3477783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3478783237e8SYuchung Cheng {
3479783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3480783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3481065263f4SWei Wang 	int space, err = 0;
3482355a901eSEric Dumazet 	struct sk_buff *syn_data;
3483783237e8SYuchung Cheng 
348467da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3485065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3486783237e8SYuchung Cheng 		goto fallback;
3487783237e8SYuchung Cheng 
3488783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3489783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3490783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3491783237e8SYuchung Cheng 	 */
34923541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34933541f9e8SEric Dumazet 
34941b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3495783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3496783237e8SYuchung Cheng 
3497f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3498f5ddcbbbSEric Dumazet 
3499f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3500f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3501f5ddcbbbSEric Dumazet 
3502eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3503355a901eSEric Dumazet 	if (!syn_data)
3504783237e8SYuchung Cheng 		goto fallback;
3505355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3506355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
350707e100f9SEric Dumazet 	if (space) {
350807e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
350957be5bdaSAl Viro 					    &fo->data->msg_iter);
351057be5bdaSAl Viro 		if (unlikely(!copied)) {
3511ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3512355a901eSEric Dumazet 			kfree_skb(syn_data);
3513783237e8SYuchung Cheng 			goto fallback;
3514783237e8SYuchung Cheng 		}
351557be5bdaSAl Viro 		if (copied != space) {
351657be5bdaSAl Viro 			skb_trim(syn_data, copied);
351757be5bdaSAl Viro 			space = copied;
351857be5bdaSAl Viro 		}
3519f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
352007e100f9SEric Dumazet 	}
3521355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3522355a901eSEric Dumazet 	if (space == fo->size)
3523355a901eSEric Dumazet 		fo->data = NULL;
3524355a901eSEric Dumazet 	fo->copied = space;
3525783237e8SYuchung Cheng 
3526355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
35270f87230dSFrancis Yan 	if (syn_data->len)
35280f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3529355a901eSEric Dumazet 
3530355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3531355a901eSEric Dumazet 
3532d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3533355a901eSEric Dumazet 
3534355a901eSEric Dumazet 	/* Now that the full SYN+DATA was cloned and sent (or not),
3535355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data),
3536355a901eSEric Dumazet 	 * which we keep in the write queue in case of a retransmit, as we
3537355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3538431a9124SEric Dumazet 	 */
3539355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3540355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3541355a901eSEric Dumazet 	if (!err) {
354267da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
354375c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3544f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3545783237e8SYuchung Cheng 		goto done;
3546783237e8SYuchung Cheng 	}
3547783237e8SYuchung Cheng 
354875c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
354975c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3550b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3551b5b7db8dSEric Dumazet 
3552783237e8SYuchung Cheng fallback:
3553783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3554783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3555783237e8SYuchung Cheng 		fo->cookie.len = 0;
3556783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3557783237e8SYuchung Cheng 	if (err)
3558783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3559783237e8SYuchung Cheng done:
3560783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3561783237e8SYuchung Cheng 	return err;
3562783237e8SYuchung Cheng }
3563783237e8SYuchung Cheng 
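/* Illustrative sizing for the SYN payload above: with a cached PMTU of
 * 1500 on Ethernet, __tcp_mtu_to_mss() yields roughly 1460 bytes;
 * reserving MAX_TCP_OPTION_SPACE (40 bytes) for middlebox options
 * leaves about 1420 bytes of user data that can ride on the SYN, and
 * anything beyond that stays queued until the handshake completes.
 */
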
356467edfef7SAndi Kleen /* Build a SYN and send it off. */
35651da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
35661da177e4SLinus Torvalds {
35671da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35681da177e4SLinus Torvalds 	struct sk_buff *buff;
3569ee586811SEric Paris 	int err;
35701da177e4SLinus Torvalds 
3571de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
35728ba60924SEric Dumazet 
35738ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
35748ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
35758ba60924SEric Dumazet 
35761da177e4SLinus Torvalds 	tcp_connect_init(sk);
35771da177e4SLinus Torvalds 
35782b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
35792b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
35802b916477SAndrey Vagin 		return 0;
35812b916477SAndrey Vagin 	}
35822b916477SAndrey Vagin 
3583eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3584355a901eSEric Dumazet 	if (unlikely(!buff))
35851da177e4SLinus Torvalds 		return -ENOBUFS;
35861da177e4SLinus Torvalds 
3587a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35889a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35899a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3590783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3591735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
359275c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35931da177e4SLinus Torvalds 
3594783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3595783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3596783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3597ee586811SEric Paris 	if (err == -ECONNREFUSED)
3598ee586811SEric Paris 		return err;
3599bd37a088SWei Yongjun 
3600bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3601bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3602bd37a088SWei Yongjun 	 */
3603e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3604bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3605b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3606b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3607e0d694d6SEric Dumazet 		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3608b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3609b5b7db8dSEric Dumazet 	}
361081cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
36111da177e4SLinus Torvalds 
36121da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
36133f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
36143f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
36151da177e4SLinus Torvalds 	return 0;
36161da177e4SLinus Torvalds }
36174bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
36181da177e4SLinus Torvalds 
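/* tcp_connect() is reached from the address-family connect handlers
 * (tcp_v4_connect()/tcp_v6_connect()) once the route is resolved, so a
 * connect(2) on a TCP socket ends up here to emit the first SYN and to
 * arm the retransmit timer shown above.
 */
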
36191da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
36201da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
36211da177e4SLinus Torvalds  * for details.
36221da177e4SLinus Torvalds  */
36231da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
36241da177e4SLinus Torvalds {
3625463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3626463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
36271da177e4SLinus Torvalds 	unsigned long timeout;
36281da177e4SLinus Torvalds 
36291da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3630463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
36311da177e4SLinus Torvalds 		int max_ato = HZ / 2;
36321da177e4SLinus Torvalds 
363331954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
3634056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
36351da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
36361da177e4SLinus Torvalds 
36371da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
36381da177e4SLinus Torvalds 
36391da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3640463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
36411da177e4SLinus Torvalds 		 * directly.
36421da177e4SLinus Torvalds 		 */
3643740b0f18SEric Dumazet 		if (tp->srtt_us) {
3644740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3645740b0f18SEric Dumazet 					TCP_DELACK_MIN);
36461da177e4SLinus Torvalds 
36471da177e4SLinus Torvalds 			if (rtt < max_ato)
36481da177e4SLinus Torvalds 				max_ato = rtt;
36491da177e4SLinus Torvalds 		}
36501da177e4SLinus Torvalds 
36511da177e4SLinus Torvalds 		ato = min(ato, max_ato);
36521da177e4SLinus Torvalds 	}
36531da177e4SLinus Torvalds 
36541da177e4SLinus Torvalds 	/* Stay within the limit we were given */
36551da177e4SLinus Torvalds 	timeout = jiffies + ato;
36561da177e4SLinus Torvalds 
36571da177e4SLinus Torvalds 	/* Use new timeout only if there wasn't an older one earlier. */
3658463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
36591da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
36601da177e4SLinus Torvalds 		 * send ACK now.
36611da177e4SLinus Torvalds 		 */
3662463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3663463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
36641da177e4SLinus Torvalds 			tcp_send_ack(sk);
36651da177e4SLinus Torvalds 			return;
36661da177e4SLinus Torvalds 		}
36671da177e4SLinus Torvalds 
3668463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3669463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
36701da177e4SLinus Torvalds 	}
3671463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3672463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3673463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
36741da177e4SLinus Torvalds }
36751da177e4SLinus Torvalds 
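/* Worked example of the clamping above, assuming HZ == 1000: with a
 * smoothed RTT of 40 ms, tp->srtt_us == 8 * 40000, so
 * usecs_to_jiffies(tp->srtt_us >> 3) == 40 jiffies; max_ato drops from
 * HZ/2 (500) to 40 and the delayed ACK fires within roughly one RTT.
 */
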
36761da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
36772987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
36781da177e4SLinus Torvalds {
36791da177e4SLinus Torvalds 	struct sk_buff *buff;
36801da177e4SLinus Torvalds 
3681058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3682058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3683058dc334SIlpo Järvinen 		return;
3684058dc334SIlpo Järvinen 
36851da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36861da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36871da177e4SLinus Torvalds 	 * sock.
36881da177e4SLinus Torvalds 	 */
36897450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36907450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36917450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3692463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3693463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36943f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36953f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36961da177e4SLinus Torvalds 		return;
36971da177e4SLinus Torvalds 	}
36981da177e4SLinus Torvalds 
36991da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
37001da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3701a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
37021da177e4SLinus Torvalds 
370398781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
370498781965SEric Dumazet 	 * too much.
370598781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
370698781965SEric Dumazet 	 */
370798781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
370898781965SEric Dumazet 
37091da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
37102987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
37111da177e4SLinus Torvalds }
371227cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
37132987babbSYuchung Cheng 
37142987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
37152987babbSYuchung Cheng {
37162987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
37171da177e4SLinus Torvalds }
37181da177e4SLinus Torvalds 
37191da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
37201da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
37211da177e4SLinus Torvalds  *
37221da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
37231da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
37241da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
37251da177e4SLinus Torvalds  *
37261da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
37271da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
37281da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
37291da177e4SLinus Torvalds  */
3730e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
37311da177e4SLinus Torvalds {
37321da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37331da177e4SLinus Torvalds 	struct sk_buff *skb;
37341da177e4SLinus Torvalds 
37351da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
37367450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
37377450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
373851456b29SIan Morris 	if (!skb)
37391da177e4SLinus Torvalds 		return -1;
37401da177e4SLinus Torvalds 
37411da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
37421da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
37431da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
37441da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
37451da177e4SLinus Torvalds 	 * send it.
37461da177e4SLinus Torvalds 	 */
3747a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3748e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
37497450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
37501da177e4SLinus Torvalds }
37511da177e4SLinus Torvalds 
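/* Note on the sequence choice above: tp->snd_una - !urgent means a
 * non-urgent probe uses SND.UNA - 1, an already-acknowledged sequence
 * the peer is expected to answer with an ACK carrying its current
 * window, while the urgent-mode variant uses SND.UNA itself.
 */
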
3752385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3753ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3754ee995283SPavel Emelyanov {
3755ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3756ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
37579a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3758e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3759ee995283SPavel Emelyanov 	}
3760ee995283SPavel Emelyanov }
3761ee995283SPavel Emelyanov 
376267edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3763e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
37641da177e4SLinus Torvalds {
37651da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37661da177e4SLinus Torvalds 	struct sk_buff *skb;
37671da177e4SLinus Torvalds 
3768058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3769058dc334SIlpo Järvinen 		return -1;
3770058dc334SIlpo Järvinen 
377100db4124SIan Morris 	skb = tcp_send_head(sk);
377200db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
37731da177e4SLinus Torvalds 		int err;
37740c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
377590840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
37761da177e4SLinus Torvalds 
37771da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
37781da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
37791da177e4SLinus Torvalds 
37801da177e4SLinus Torvalds 		/* We are probing the opening of a window
37811da177e4SLinus Torvalds 		 * but the window size is != 0;
37821da177e4SLinus Torvalds 		 * this must have been a result of SWS avoidance (sender side).
37831da177e4SLinus Torvalds 		 */
37841da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
37851da177e4SLinus Torvalds 		    skb->len > mss) {
37861da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37874de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
378875c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
378975c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37901da177e4SLinus Torvalds 				return -1;
37911da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37925bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37931da177e4SLinus Torvalds 
37944de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3795dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
379666f5fe62SIlpo Järvinen 		if (!err)
379766f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37981da177e4SLinus Torvalds 		return err;
37991da177e4SLinus Torvalds 	} else {
380033f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3801e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3802e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
38031da177e4SLinus Torvalds 	}
38041da177e4SLinus Torvalds }
38051da177e4SLinus Torvalds 
38061da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
38071da177e4SLinus Torvalds  * a partial packet, else a zero-window probe.
38081da177e4SLinus Torvalds  */
38091da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
38101da177e4SLinus Torvalds {
3811463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
38121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3813c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3814c1d5674fSYuchung Cheng 	unsigned long timeout;
38151da177e4SLinus Torvalds 	int err;
38161da177e4SLinus Torvalds 
3817e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
38181da177e4SLinus Torvalds 
381975c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
38201da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
38216687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3822463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
38231da177e4SLinus Torvalds 		return;
38241da177e4SLinus Torvalds 	}
38251da177e4SLinus Torvalds 
3826c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
38271da177e4SLinus Torvalds 	if (err <= 0) {
3828c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3829463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
3830c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
38311da177e4SLinus Torvalds 	} else {
38321da177e4SLinus Torvalds 		/* If packet was not sent due to local congestion,
3833c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
38341da177e4SLinus Torvalds 		 */
3835c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
38361da177e4SLinus Torvalds 	}
3837c1d5674fSYuchung Cheng 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
38381da177e4SLinus Torvalds }
38395db92c99SOctavian Purdila 
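/* Illustrative backoff for the probe timer above: tcp_probe0_when()
 * scales the base RTO by 1 << icsk_backoff, capped at TCP_RTO_MAX, so
 * with a 200 ms RTO the probes fire after roughly 200 ms, 400 ms,
 * 800 ms, ... until the cap (120 s) is hit or the window opens.
 */
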
3840ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
38415db92c99SOctavian Purdila {
38425db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
38435db92c99SOctavian Purdila 	struct flowi fl;
38445db92c99SOctavian Purdila 	int res;
38455db92c99SOctavian Purdila 
384658d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3847b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
38485db92c99SOctavian Purdila 	if (!res) {
384990bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
385002a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
38517e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
38527e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3853cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
38545db92c99SOctavian Purdila 	}
38555db92c99SOctavian Purdila 	return res;
38565db92c99SOctavian Purdila }
38575db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3858