xref: /linux/net/ipv4/tcp_output.c (revision 1f85e6267caca44b30c54711652b0726fadbb131)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
121da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
131da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
141da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
151da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
161da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
171da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
181da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
191da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds /*
231da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
241da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
251da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
261da177e4SLinus Torvalds  *				:	AF independence
271da177e4SLinus Torvalds  *
281da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
291da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
301da177e4SLinus Torvalds  *					during syn/ack processing.
311da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
321da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
331da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
341da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
351da177e4SLinus Torvalds  *
361da177e4SLinus Torvalds  */
371da177e4SLinus Torvalds 
3891df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3991df42beSJoe Perches 
401da177e4SLinus Torvalds #include <net/tcp.h>
411da177e4SLinus Torvalds 
421da177e4SLinus Torvalds #include <linux/compiler.h>
435a0e3ad6STejun Heo #include <linux/gfp.h>
441da177e4SLinus Torvalds #include <linux/module.h>
4560e2a778SUrsula Braun #include <linux/static_key.h>
461da177e4SLinus Torvalds 
47e086101bSCong Wang #include <trace/events/tcp.h>
4835089bb2SDavid S. Miller 
499799ccb0SEric Dumazet /* Refresh the clocks of a TCP socket,
509799ccb0SEric Dumazet  * ensuring monotonically increasing values.
519799ccb0SEric Dumazet  */
529799ccb0SEric Dumazet void tcp_mstamp_refresh(struct tcp_sock *tp)
539799ccb0SEric Dumazet {
549799ccb0SEric Dumazet 	u64 val = tcp_clock_ns();
559799ccb0SEric Dumazet 
565f6188a8SEric Dumazet 	tp->tcp_clock_cache = val;
57e6d14070SEric Dumazet 	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
589799ccb0SEric Dumazet }
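/* Editor's annotation (not in the original source): tcp_clock_cache keeps the
 * raw nanosecond clock, while tcp_mstamp is the same instant in microseconds;
 * e.g. val = 5,000,123,456 ns gives tcp_mstamp = 5,000,123 us.
 */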
599799ccb0SEric Dumazet 
6046d3ceabSEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6146d3ceabSEric Dumazet 			   int push_one, gfp_t gfp);
62519855c5SWilliam Allen Simpson 
6367edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
6475c119afSEric Dumazet static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
656ff03ac3SIlpo Järvinen {
666ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
676ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
6866f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
699e412ba7SIlpo Järvinen 
70e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
718512430eSIlpo Järvinen 
7275c119afSEric Dumazet 	__skb_unlink(skb, &sk->sk_write_queue);
7375c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
7475c119afSEric Dumazet 
7566f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
76bec41a11SYuchung Cheng 	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
77750ea2baSYuchung Cheng 		tcp_rearm_rto(sk);
78f19c29e3SYuchung Cheng 
79f7324acdSDavid S. Miller 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
80f19c29e3SYuchung Cheng 		      tcp_skb_pcount(skb));
816a5dc9e5SEric Dumazet }
821da177e4SLinus Torvalds 
83a4ecb15aSCui, Cheng /* SND.NXT, if the window was not shrunk or the amount shrunk was less than one
84a4ecb15aSCui, Cheng  * window scaling factor, due to loss of precision.
851da177e4SLinus Torvalds  * If the window has been shrunk, what should we do? It is not clear at all.
861da177e4SLinus Torvalds  * Using SND.UNA we will fail to open the window; SND.NXT is out of window. :-(
871da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
881da177e4SLinus Torvalds  * invalid. OK, let's settle on this for now:
891da177e4SLinus Torvalds  */
90cf533ea5SEric Dumazet static inline __u32 tcp_acceptable_seq(const struct sock *sk)
911da177e4SLinus Torvalds {
92cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
939e412ba7SIlpo Järvinen 
94a4ecb15aSCui, Cheng 	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
95a4ecb15aSCui, Cheng 	    (tp->rx_opt.wscale_ok &&
96a4ecb15aSCui, Cheng 	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
971da177e4SLinus Torvalds 		return tp->snd_nxt;
981da177e4SLinus Torvalds 	else
9990840defSIlpo Järvinen 		return tcp_wnd_end(tp);
1001da177e4SLinus Torvalds }
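/* Editor's annotation, worked example assuming rcv_wscale == 7: the window is
 * advertised in units of 1 << 7 = 128 bytes, so when snd_nxt overshoots the
 * window end by fewer than 128 bytes the overshoot is below scaling precision
 * and snd_nxt is still treated as acceptable.
 */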
1011da177e4SLinus Torvalds 
1021da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1031da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1041da177e4SLinus Torvalds  *
1051da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1061da177e4SLinus Torvalds  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
1071da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1081da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1091da177e4SLinus Torvalds  *    large MSS.
1101da177e4SLinus Torvalds  * 4. We do not do 3; we advertise an MSS calculated from the first
1111da177e4SLinus Torvalds  *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
1121da177e4SLinus Torvalds  *    This may be overridden via information stored in routing table.
1131da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1141da177e4SLinus Torvalds  *    probably even Jumbo".
1151da177e4SLinus Torvalds  */
1161da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1171da177e4SLinus Torvalds {
1181da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
119cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1201da177e4SLinus Torvalds 	int mss = tp->advmss;
1211da177e4SLinus Torvalds 
1220dbaee3bSDavid S. Miller 	if (dst) {
1230dbaee3bSDavid S. Miller 		unsigned int metric = dst_metric_advmss(dst);
1240dbaee3bSDavid S. Miller 
1250dbaee3bSDavid S. Miller 		if (metric < mss) {
1260dbaee3bSDavid S. Miller 			mss = metric;
1271da177e4SLinus Torvalds 			tp->advmss = mss;
1281da177e4SLinus Torvalds 		}
1290dbaee3bSDavid S. Miller 	}
1301da177e4SLinus Torvalds 
1311da177e4SLinus Torvalds 	return (__u16)mss;
1321da177e4SLinus Torvalds }
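/* Editor's annotation: on a plain Ethernet path (1500-byte MTU) the
 * advertised MSS typically works out to 1460 (1500 - 20 IPv4 - 20 TCP).
 * Note the dst metric can only lower tp->advmss here, never raise it.
 */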
1331da177e4SLinus Torvalds 
1341da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
1356f021c62SEric Dumazet  * This is the first part of cwnd validation mechanism.
1366f021c62SEric Dumazet  */
1376f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta)
1381da177e4SLinus Torvalds {
139463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1406f021c62SEric Dumazet 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
1411da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1421da177e4SLinus Torvalds 
1436687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1441da177e4SLinus Torvalds 
1456687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1461da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1471da177e4SLinus Torvalds 
148463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1491da177e4SLinus Torvalds 		cwnd >>= 1;
1501da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
151c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
1521da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1531da177e4SLinus Torvalds }
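/* Editor's annotation, worked example: with cwnd = 40, restart_cwnd = 10 and
 * an idle delta of 3 * RTO, the loop halves cwnd twice (40 -> 20 -> 10) and
 * stops at restart_cwnd, so transmission restarts from a 10-segment window.
 */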
1541da177e4SLinus Torvalds 
15567edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
15640efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
157cf533ea5SEric Dumazet 				struct sock *sk)
1581da177e4SLinus Torvalds {
159463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
160d635fbe2SEric Dumazet 	const u32 now = tcp_jiffies32;
1611da177e4SLinus Torvalds 
16205c5a46dSNeal Cardwell 	if (tcp_packets_in_flight(tp) == 0)
16305c5a46dSNeal Cardwell 		tcp_ca_event(sk, CA_EVENT_TX_START);
16405c5a46dSNeal Cardwell 
1654a41f453SWei Wang 	/* If this is the first data packet sent in response to the
1664a41f453SWei Wang 	 * previously received data,
1674a41f453SWei Wang 	 * and it is sent within ato of the last received packet,
1684a41f453SWei Wang 	 * increase the pingpong count.
1691da177e4SLinus Torvalds 	 */
1704a41f453SWei Wang 	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
1714a41f453SWei Wang 	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
1724a41f453SWei Wang 		inet_csk_inc_pingpong_cnt(sk);
1734a41f453SWei Wang 
1744a41f453SWei Wang 	tp->lsndtime = now;
1751da177e4SLinus Torvalds }
1761da177e4SLinus Torvalds 
17767edfef7SAndi Kleen /* Account for an ACK we sent. */
17827cde44aSYuchung Cheng static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
17927cde44aSYuchung Cheng 				      u32 rcv_nxt)
1801da177e4SLinus Torvalds {
1815d9f4262SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1825d9f4262SEric Dumazet 
18386de5921SEric Dumazet 	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
184200d95f4SEric Dumazet 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
18586de5921SEric Dumazet 			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
18686de5921SEric Dumazet 		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
1875d9f4262SEric Dumazet 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
1885d9f4262SEric Dumazet 			__sock_put(sk);
1895d9f4262SEric Dumazet 	}
19027cde44aSYuchung Cheng 
19127cde44aSYuchung Cheng 	if (unlikely(rcv_nxt != tp->rcv_nxt))
19227cde44aSYuchung Cheng 		return;  /* Special ACK sent by DCTCP to reflect ECN */
193463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
194463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1951da177e4SLinus Torvalds }
1961da177e4SLinus Torvalds 
1971da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1981da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1991da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
2001da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
2011da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
2021da177e4SLinus Torvalds  * This MUST be enforced by all callers.
2031da177e4SLinus Torvalds  */
204ceef9ab6SEric Dumazet void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
2051da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
20631d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
20731d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2081da177e4SLinus Torvalds {
2091da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
2101da177e4SLinus Torvalds 
2111da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
2121da177e4SLinus Torvalds 	if (*window_clamp == 0)
213589c49cbSGao Feng 		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
2141da177e4SLinus Torvalds 	space = min(*window_clamp, space);
2151da177e4SLinus Torvalds 
2161da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2171da177e4SLinus Torvalds 	if (space > mss)
218589c49cbSGao Feng 		space = rounddown(space, mss);
2191da177e4SLinus Torvalds 
2201da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
22115d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
22215d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
22315d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
22415d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
22515d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
22615d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2271da177e4SLinus Torvalds 	 */
228ceef9ab6SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
2291da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
23015d99e02SRick Jones 	else
231a337531bSYuchung Cheng 		(*rcv_wnd) = min_t(u32, space, U16_MAX);
232a337531bSYuchung Cheng 
233a337531bSYuchung Cheng 	if (init_rcv_wnd)
234a337531bSYuchung Cheng 		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
23515d99e02SRick Jones 
23619bf6261SEric Dumazet 	*rcv_wscale = 0;
2371da177e4SLinus Torvalds 	if (wscale_ok) {
238589c49cbSGao Feng 		/* Set window scaling on max possible window */
239356d1833SEric Dumazet 		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
240f626300aSSoheil Hassas Yeganeh 		space = max_t(u32, space, sysctl_rmem_max);
241316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
24219bf6261SEric Dumazet 		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
24319bf6261SEric Dumazet 				      0, TCP_MAX_WSCALE);
2441da177e4SLinus Torvalds 	}
2451da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
246589c49cbSGao Feng 	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
2471da177e4SLinus Torvalds }
2484bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
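/* Editor's annotation, worked example: for space = 4 MiB, ilog2(space) = 22,
 * so *rcv_wscale = clamp(22 - 15, 0, TCP_MAX_WSCALE) = 7; the 16-bit window
 * field shifted left by 7 can then cover the whole 4 MiB of receive space.
 */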
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2511da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2521da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2531da177e4SLinus Torvalds  * frame.
2541da177e4SLinus Torvalds  */
25540efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2561da177e4SLinus Torvalds {
2571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2588e165e20SFlorian Westphal 	u32 old_win = tp->rcv_wnd;
2591da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2601da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2611da177e4SLinus Torvalds 
2621da177e4SLinus Torvalds 	/* Never shrink the offered window */
2631da177e4SLinus Torvalds 	if (new_win < cur_win) {
2641da177e4SLinus Torvalds 		/* Danger Will Robinson!
2651da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2661da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2671da177e4SLinus Torvalds 		 * window in time.  --DaveM
2681da177e4SLinus Torvalds 		 *
2691da177e4SLinus Torvalds 		 * Relax Will Robinson.
2701da177e4SLinus Torvalds 		 */
2718e165e20SFlorian Westphal 		if (new_win == 0)
2728e165e20SFlorian Westphal 			NET_INC_STATS(sock_net(sk),
2738e165e20SFlorian Westphal 				      LINUX_MIB_TCPWANTZEROWINDOWADV);
274607bfbf2SPatrick McHardy 		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2751da177e4SLinus Torvalds 	}
2761da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2771da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2781da177e4SLinus Torvalds 
2791da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2801da177e4SLinus Torvalds 	 * scaled window.
2811da177e4SLinus Torvalds 	 */
282ceef9ab6SEric Dumazet 	if (!tp->rx_opt.rcv_wscale &&
283ceef9ab6SEric Dumazet 	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
2841da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2851da177e4SLinus Torvalds 	else
2861da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2871da177e4SLinus Torvalds 
2881da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2891da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2901da177e4SLinus Torvalds 
29131770e34SFlorian Westphal 	/* If we advertise zero window, disable fast path. */
2928e165e20SFlorian Westphal 	if (new_win == 0) {
29331770e34SFlorian Westphal 		tp->pred_flags = 0;
2948e165e20SFlorian Westphal 		if (old_win)
2958e165e20SFlorian Westphal 			NET_INC_STATS(sock_net(sk),
2968e165e20SFlorian Westphal 				      LINUX_MIB_TCPTOZEROWINDOWADV);
2978e165e20SFlorian Westphal 	} else if (old_win == 0) {
2988e165e20SFlorian Westphal 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
2998e165e20SFlorian Westphal 	}
3001da177e4SLinus Torvalds 
3011da177e4SLinus Torvalds 	return new_win;
3021da177e4SLinus Torvalds }
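/* Editor's annotation: when __tcp_select_window() suggests shrinking, the code
 * above instead re-advertises cur_win rounded up (ALIGN) to a multiple of
 * 1 << rcv_wscale, so the value stays exactly representable after scaling and
 * the offered window's right edge never moves left.
 */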
3031da177e4SLinus Torvalds 
30467edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
305735d3831SFlorian Westphal static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
306bdf1ee5dSIlpo Järvinen {
30730e502a3SDaniel Borkmann 	const struct tcp_sock *tp = tcp_sk(sk);
30830e502a3SDaniel Borkmann 
3094de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
310bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
3114de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
31291b5b21cSLawrence Brakmo 	else if (tcp_ca_needs_ecn(sk) ||
31391b5b21cSLawrence Brakmo 		 tcp_bpf_ca_needs_ecn(sk))
31430e502a3SDaniel Borkmann 		INET_ECN_xmit(sk);
315bdf1ee5dSIlpo Järvinen }
316bdf1ee5dSIlpo Järvinen 
31767edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
318735d3831SFlorian Westphal static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
319bdf1ee5dSIlpo Järvinen {
320bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
32191b5b21cSLawrence Brakmo 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
322f7b3bec6SFlorian Westphal 	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
32391b5b21cSLawrence Brakmo 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
324f7b3bec6SFlorian Westphal 
325f7b3bec6SFlorian Westphal 	if (!use_ecn) {
326f7b3bec6SFlorian Westphal 		const struct dst_entry *dst = __sk_dst_get(sk);
327f7b3bec6SFlorian Westphal 
328f7b3bec6SFlorian Westphal 		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
329f7b3bec6SFlorian Westphal 			use_ecn = true;
330f7b3bec6SFlorian Westphal 	}
331bdf1ee5dSIlpo Järvinen 
332bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
333f7b3bec6SFlorian Westphal 
334f7b3bec6SFlorian Westphal 	if (use_ecn) {
3354de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
336bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
33791b5b21cSLawrence Brakmo 		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
33830e502a3SDaniel Borkmann 			INET_ECN_xmit(sk);
339bdf1ee5dSIlpo Järvinen 	}
340bdf1ee5dSIlpo Järvinen }
341bdf1ee5dSIlpo Järvinen 
34249213555SDaniel Borkmann static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
34349213555SDaniel Borkmann {
34449213555SDaniel Borkmann 	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
34549213555SDaniel Borkmann 		/* tp->ecn_flags are cleared at a later point in time when
34649213555SDaniel Borkmann 		 * the SYN-ACK is ultimately received.
34749213555SDaniel Borkmann 		 */
34849213555SDaniel Borkmann 		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
34949213555SDaniel Borkmann }
35049213555SDaniel Borkmann 
351735d3831SFlorian Westphal static void
3526ac705b1SEric Dumazet tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
353bdf1ee5dSIlpo Järvinen {
3546ac705b1SEric Dumazet 	if (inet_rsk(req)->ecn_ok)
355bdf1ee5dSIlpo Järvinen 		th->ece = 1;
356bdf1ee5dSIlpo Järvinen }
357bdf1ee5dSIlpo Järvinen 
35867edfef7SAndi Kleen /* Set up ECN state for a packet on an ESTABLISHED socket that is about to
35967edfef7SAndi Kleen  * be sent.
36067edfef7SAndi Kleen  */
361735d3831SFlorian Westphal static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
362ea1627c2SEric Dumazet 			 struct tcphdr *th, int tcp_header_len)
363bdf1ee5dSIlpo Järvinen {
364bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
365bdf1ee5dSIlpo Järvinen 
366bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
367bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
368bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
369bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
370bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
371bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
372bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
373ea1627c2SEric Dumazet 				th->cwr = 1;
374bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
375bdf1ee5dSIlpo Järvinen 			}
37630e502a3SDaniel Borkmann 		} else if (!tcp_ca_needs_ecn(sk)) {
377bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
378bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
379bdf1ee5dSIlpo Järvinen 		}
380bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
381ea1627c2SEric Dumazet 			th->ece = 1;
382bdf1ee5dSIlpo Järvinen 	}
383bdf1ee5dSIlpo Järvinen }
384bdf1ee5dSIlpo Järvinen 
385e870a8efSIlpo Järvinen /* Constructs common control bits of non-data skb. If SYN/FIN is present,
386e870a8efSIlpo Järvinen  * auto increment end seqno.
387e870a8efSIlpo Järvinen  */
388e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
389e870a8efSIlpo Järvinen {
3902e8e18efSDavid S. Miller 	skb->ip_summed = CHECKSUM_PARTIAL;
391e870a8efSIlpo Järvinen 
3924de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags;
393e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->sacked = 0;
394e870a8efSIlpo Järvinen 
395cd7d8498SEric Dumazet 	tcp_skb_pcount_set(skb, 1);
396e870a8efSIlpo Järvinen 
397e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
398a3433f35SChangli Gao 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
399e870a8efSIlpo Järvinen 		seq++;
400e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
401e870a8efSIlpo Järvinen }
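/* Editor's annotation: SYN and FIN each consume one unit of sequence space,
 * hence end_seq = seq + 1 for those flags; e.g. a bare SYN at seq 100 yields
 * [seq 100, end_seq 101], while a pure ACK keeps end_seq == seq.
 */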
402e870a8efSIlpo Järvinen 
403a2a385d6SEric Dumazet static inline bool tcp_urg_mode(const struct tcp_sock *tp)
40433f5f57eSIlpo Järvinen {
40533f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
40633f5f57eSIlpo Järvinen }
40733f5f57eSIlpo Järvinen 
40833ad798cSAdam Langley #define OPTION_SACK_ADVERTISE	(1 << 0)
40933ad798cSAdam Langley #define OPTION_TS		(1 << 1)
41033ad798cSAdam Langley #define OPTION_MD5		(1 << 2)
41189e95a61SOri Finkelman #define OPTION_WSCALE		(1 << 3)
4122100c8d2SYuchung Cheng #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
41360e2a778SUrsula Braun #define OPTION_SMC		(1 << 9)
41460e2a778SUrsula Braun 
41560e2a778SUrsula Braun static void smc_options_write(__be32 *ptr, u16 *options)
41660e2a778SUrsula Braun {
41760e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
41860e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
41960e2a778SUrsula Braun 		if (unlikely(OPTION_SMC & *options)) {
42060e2a778SUrsula Braun 			*ptr++ = htonl((TCPOPT_NOP  << 24) |
42160e2a778SUrsula Braun 				       (TCPOPT_NOP  << 16) |
42260e2a778SUrsula Braun 				       (TCPOPT_EXP <<  8) |
42360e2a778SUrsula Braun 				       (TCPOLEN_EXP_SMC_BASE));
42460e2a778SUrsula Braun 			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
42560e2a778SUrsula Braun 		}
42660e2a778SUrsula Braun 	}
42760e2a778SUrsula Braun #endif
42860e2a778SUrsula Braun }
42933ad798cSAdam Langley 
43033ad798cSAdam Langley struct tcp_out_options {
4312100c8d2SYuchung Cheng 	u16 options;		/* bit field of OPTION_* */
4322100c8d2SYuchung Cheng 	u16 mss;		/* 0 to disable */
43333ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
43433ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
435bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
436bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
4372100c8d2SYuchung Cheng 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
4382100c8d2SYuchung Cheng 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
43933ad798cSAdam Langley };
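/* Editor's annotation: the OPTION_* bits above are internal selectors telling
 * tcp_options_write() what to emit; they are not the on-the-wire option kind
 * numbers (TCPOPT_*).
 */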
44033ad798cSAdam Langley 
44167edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
44267edfef7SAndi Kleen  *
44367edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
444fd6149d3SIlpo Järvinen  * TCP options; we learned this the hard way, so be careful here.
445fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance, but from
4468e3bff96Sstephen hemminger  * an inter-operability perspective it seems that we're somewhat stuck with
447fd6149d3SIlpo Järvinen  * the ordering which we have been using if we want to keep working with
448fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody as there isn't
449fd6149d3SIlpo Järvinen  * particular reason why the ordering would need to be changed).
450fd6149d3SIlpo Järvinen  *
451fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
452fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
453fd6149d3SIlpo Järvinen  */
45433ad798cSAdam Langley static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
455bd0388aeSWilliam Allen Simpson 			      struct tcp_out_options *opts)
456bd0388aeSWilliam Allen Simpson {
4572100c8d2SYuchung Cheng 	u16 options = opts->options;	/* mungable copy */
458bd0388aeSWilliam Allen Simpson 
459bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_MD5 & options)) {
4601a2c6181SChristoph Paasch 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
4611a2c6181SChristoph Paasch 			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
462bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
463bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
46433ad798cSAdam Langley 		ptr += 4;
46533ad798cSAdam Langley 	}
46633ad798cSAdam Langley 
467fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
468fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
469fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
470fd6149d3SIlpo Järvinen 			       opts->mss);
471fd6149d3SIlpo Järvinen 	}
472fd6149d3SIlpo Järvinen 
473bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
474bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
47533ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
47633ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
47733ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
47833ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
479bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
48033ad798cSAdam Langley 		} else {
481496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
48240efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
48340efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
48440efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
48540efc6faSStephen Hemminger 		}
48633ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
48733ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
48833ad798cSAdam Langley 	}
48933ad798cSAdam Langley 
490bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
49133ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
49233ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
49333ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
49433ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
49533ad798cSAdam Langley 	}
49633ad798cSAdam Langley 
497bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
49833ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
49933ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
50033ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
50133ad798cSAdam Langley 			       opts->ws);
50233ad798cSAdam Langley 	}
50333ad798cSAdam Langley 
50433ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
50533ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
50633ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
50740efc6faSStephen Hemminger 		int this_sack;
50840efc6faSStephen Hemminger 
50940efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
51040efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
51140efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
51233ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
51340efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
5142de979bdSStephen Hemminger 
51533ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
51633ad798cSAdam Langley 		     ++this_sack) {
51740efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
51840efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
51940efc6faSStephen Hemminger 		}
5202de979bdSStephen Hemminger 
52140efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
52240efc6faSStephen Hemminger 	}
5232100c8d2SYuchung Cheng 
5242100c8d2SYuchung Cheng 	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
5252100c8d2SYuchung Cheng 		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
5267f9b838bSDaniel Lee 		u8 *p = (u8 *)ptr;
5277f9b838bSDaniel Lee 		u32 len; /* Fast Open option length */
5282100c8d2SYuchung Cheng 
5297f9b838bSDaniel Lee 		if (foc->exp) {
5307f9b838bSDaniel Lee 			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
5317f9b838bSDaniel Lee 			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
5322100c8d2SYuchung Cheng 				     TCPOPT_FASTOPEN_MAGIC);
5337f9b838bSDaniel Lee 			p += TCPOLEN_EXP_FASTOPEN_BASE;
5347f9b838bSDaniel Lee 		} else {
5357f9b838bSDaniel Lee 			len = TCPOLEN_FASTOPEN_BASE + foc->len;
5367f9b838bSDaniel Lee 			*p++ = TCPOPT_FASTOPEN;
5377f9b838bSDaniel Lee 			*p++ = len;
5382100c8d2SYuchung Cheng 		}
5397f9b838bSDaniel Lee 
5407f9b838bSDaniel Lee 		memcpy(p, foc->val, foc->len);
5417f9b838bSDaniel Lee 		if ((len & 3) == 2) {
5427f9b838bSDaniel Lee 			p[foc->len] = TCPOPT_NOP;
5437f9b838bSDaniel Lee 			p[foc->len + 1] = TCPOPT_NOP;
5447f9b838bSDaniel Lee 		}
5457f9b838bSDaniel Lee 		ptr += (len + 3) >> 2;
5462100c8d2SYuchung Cheng 	}
54760e2a778SUrsula Braun 
54860e2a778SUrsula Braun 	smc_options_write(ptr, &options);
54960e2a778SUrsula Braun }
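/* Editor's annotation, wire-format example: a timestamp option without SACK
 * advertisement is emitted as one word of NOP, NOP, kind 8 (TCPOPT_TIMESTAMP),
 * length 10 (TCPOLEN_TIMESTAMP), followed by the 32-bit tsval and tsecr:
 * 12 bytes total, matching TCPOLEN_TSTAMP_ALIGNED.
 */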
55060e2a778SUrsula Braun 
55160e2a778SUrsula Braun static void smc_set_option(const struct tcp_sock *tp,
55260e2a778SUrsula Braun 			   struct tcp_out_options *opts,
55360e2a778SUrsula Braun 			   unsigned int *remaining)
55460e2a778SUrsula Braun {
55560e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
55660e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
55760e2a778SUrsula Braun 		if (tp->syn_smc) {
55860e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
55960e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
56060e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
56160e2a778SUrsula Braun 			}
56260e2a778SUrsula Braun 		}
56360e2a778SUrsula Braun 	}
56460e2a778SUrsula Braun #endif
56560e2a778SUrsula Braun }
56660e2a778SUrsula Braun 
56760e2a778SUrsula Braun static void smc_set_option_cond(const struct tcp_sock *tp,
56860e2a778SUrsula Braun 				const struct inet_request_sock *ireq,
56960e2a778SUrsula Braun 				struct tcp_out_options *opts,
57060e2a778SUrsula Braun 				unsigned int *remaining)
57160e2a778SUrsula Braun {
57260e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
57360e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
57460e2a778SUrsula Braun 		if (tp->syn_smc && ireq->smc_ok) {
57560e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
57660e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
57760e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
57860e2a778SUrsula Braun 			}
57960e2a778SUrsula Braun 		}
58060e2a778SUrsula Braun 	}
58160e2a778SUrsula Braun #endif
58240efc6faSStephen Hemminger }
58340efc6faSStephen Hemminger 
58467edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
58567edfef7SAndi Kleen  * network wire format yet.
58667edfef7SAndi Kleen  */
58795c96174SEric Dumazet static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
58833ad798cSAdam Langley 				struct tcp_out_options *opts,
589cf533ea5SEric Dumazet 				struct tcp_md5sig_key **md5)
590cf533ea5SEric Dumazet {
59133ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
59295c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
593783237e8SYuchung Cheng 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
59433ad798cSAdam Langley 
5958c2320e8SEric Dumazet 	*md5 = NULL;
596cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
597921f9a0fSEric Dumazet 	if (static_branch_unlikely(&tcp_md5_needed) &&
5986015c71eSEric Dumazet 	    rcu_access_pointer(tp->md5sig_info)) {
59933ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
60033ad798cSAdam Langley 		if (*md5) {
60133ad798cSAdam Langley 			opts->options |= OPTION_MD5;
602bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_MD5SIG_ALIGNED;
603cfb6eeb4SYOSHIFUJI Hideaki 		}
6048c2320e8SEric Dumazet 	}
605cfb6eeb4SYOSHIFUJI Hideaki #endif
60633ad798cSAdam Langley 
60733ad798cSAdam Langley 	/* We always get an MSS option.  The option bytes which will be seen in
60833ad798cSAdam Langley 	/* We always send an MSS option.  The option bytes which will be seen in
60933ad798cSAdam Langley 	 * normal data packets, should timestamps be used, must be covered by the
61033ad798cSAdam Langley 	 * MSS advertised.  But we subtract them from tp->mss_cache so that
61133ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
61233ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
61333ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
61433ad798cSAdam Langley 	 * SACKs don't matter, we never delay an ACK when we have any of those
61533ad798cSAdam Langley 	 * going out.  */
61633ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
617bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
61833ad798cSAdam Langley 
6195d2ed052SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
62033ad798cSAdam Langley 		opts->options |= OPTION_TS;
6217faee5c0SEric Dumazet 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
62233ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
623bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
62433ad798cSAdam Langley 	}
6259bb37ef0SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
62633ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
62789e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
628bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
62933ad798cSAdam Langley 	}
630f9301034SEric Dumazet 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
63133ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
632b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
633bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
63433ad798cSAdam Langley 	}
63533ad798cSAdam Langley 
636783237e8SYuchung Cheng 	if (fastopen && fastopen->cookie.len >= 0) {
6372646c831SDaniel Lee 		u32 need = fastopen->cookie.len;
6382646c831SDaniel Lee 
6392646c831SDaniel Lee 		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
6402646c831SDaniel Lee 					       TCPOLEN_FASTOPEN_BASE;
641783237e8SYuchung Cheng 		need = (need + 3) & ~3U;  /* Align to 32 bits */
642783237e8SYuchung Cheng 		if (remaining >= need) {
643783237e8SYuchung Cheng 			opts->options |= OPTION_FAST_OPEN_COOKIE;
644783237e8SYuchung Cheng 			opts->fastopen_cookie = &fastopen->cookie;
645783237e8SYuchung Cheng 			remaining -= need;
646783237e8SYuchung Cheng 			tp->syn_fastopen = 1;
6472646c831SDaniel Lee 			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
648783237e8SYuchung Cheng 		}
649783237e8SYuchung Cheng 	}
650bd0388aeSWilliam Allen Simpson 
65160e2a778SUrsula Braun 	smc_set_option(tp, opts, &remaining);
65260e2a778SUrsula Braun 
653bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
65433ad798cSAdam Langley }
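/* Editor's annotation, option-space budget example: of MAX_TCP_OPTION_SPACE
 * (40 bytes), a typical SYN spends 4 on MSS, 12 on timestamps and 4 on window
 * scaling; SACK-permitted is folded into the timestamp word for free, leaving
 * 20 bytes for e.g. a Fast Open cookie.  MD5 (20 bytes) excludes timestamps
 * here, as the code above only enables TS when no MD5 key is in use.
 */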
65533ad798cSAdam Langley 
65667edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
65760e2a778SUrsula Braun static unsigned int tcp_synack_options(const struct sock *sk,
65860e2a778SUrsula Braun 				       struct request_sock *req,
65995c96174SEric Dumazet 				       unsigned int mss, struct sk_buff *skb,
66033ad798cSAdam Langley 				       struct tcp_out_options *opts,
66180f03e27SEric Dumazet 				       const struct tcp_md5sig_key *md5,
6628336886fSJerry Chu 				       struct tcp_fastopen_cookie *foc)
6634957faadSWilliam Allen Simpson {
66433ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
66595c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
66633ad798cSAdam Langley 
66733ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
66880f03e27SEric Dumazet 	if (md5) {
66933ad798cSAdam Langley 		opts->options |= OPTION_MD5;
6704957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
6714957faadSWilliam Allen Simpson 
6724957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
6734957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
6744957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
6754957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
6764957faadSWilliam Allen Simpson 		 */
677de213e5eSEric Dumazet 		ireq->tstamp_ok &= !ireq->sack_ok;
67833ad798cSAdam Langley 	}
67933ad798cSAdam Langley #endif
68033ad798cSAdam Langley 
6814957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
68233ad798cSAdam Langley 	opts->mss = mss;
6834957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
68433ad798cSAdam Langley 
68533ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
68633ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
68789e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
6884957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
68933ad798cSAdam Langley 	}
690de213e5eSEric Dumazet 	if (likely(ireq->tstamp_ok)) {
69133ad798cSAdam Langley 		opts->options |= OPTION_TS;
69295a22caeSFlorian Westphal 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
69333ad798cSAdam Langley 		opts->tsecr = req->ts_recent;
6944957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
69533ad798cSAdam Langley 	}
69633ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
69733ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
698de213e5eSEric Dumazet 		if (unlikely(!ireq->tstamp_ok))
6994957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
70033ad798cSAdam Langley 	}
7017f9b838bSDaniel Lee 	if (foc != NULL && foc->len >= 0) {
7027f9b838bSDaniel Lee 		u32 need = foc->len;
7037f9b838bSDaniel Lee 
7047f9b838bSDaniel Lee 		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
7057f9b838bSDaniel Lee 				   TCPOLEN_FASTOPEN_BASE;
7068336886fSJerry Chu 		need = (need + 3) & ~3U;  /* Align to 32 bits */
7078336886fSJerry Chu 		if (remaining >= need) {
7088336886fSJerry Chu 			opts->options |= OPTION_FAST_OPEN_COOKIE;
7098336886fSJerry Chu 			opts->fastopen_cookie = foc;
7108336886fSJerry Chu 			remaining -= need;
7118336886fSJerry Chu 		}
7128336886fSJerry Chu 	}
7134957faadSWilliam Allen Simpson 
71460e2a778SUrsula Braun 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
71560e2a778SUrsula Braun 
7164957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
71733ad798cSAdam Langley }
71833ad798cSAdam Langley 
71967edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
72067edfef7SAndi Kleen  * final wire format yet.
72167edfef7SAndi Kleen  */
72295c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
72333ad798cSAdam Langley 					struct tcp_out_options *opts,
724cf533ea5SEric Dumazet 					struct tcp_md5sig_key **md5)
725cf533ea5SEric Dumazet {
72633ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
72795c96174SEric Dumazet 	unsigned int size = 0;
728cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
72933ad798cSAdam Langley 
7305843ef42SAndi Kleen 	opts->options = 0;
7315843ef42SAndi Kleen 
7328c2320e8SEric Dumazet 	*md5 = NULL;
73333ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
734921f9a0fSEric Dumazet 	if (static_branch_unlikely(&tcp_md5_needed) &&
7356015c71eSEric Dumazet 	    rcu_access_pointer(tp->md5sig_info)) {
73633ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
7378c2320e8SEric Dumazet 		if (*md5) {
73833ad798cSAdam Langley 			opts->options |= OPTION_MD5;
73933ad798cSAdam Langley 			size += TCPOLEN_MD5SIG_ALIGNED;
74033ad798cSAdam Langley 		}
7418c2320e8SEric Dumazet 	}
74233ad798cSAdam Langley #endif
74333ad798cSAdam Langley 
74433ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
74533ad798cSAdam Langley 		opts->options |= OPTION_TS;
7467faee5c0SEric Dumazet 		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
74733ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
74833ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
74933ad798cSAdam Langley 	}
75033ad798cSAdam Langley 
751cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
752cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
75395c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
75433ad798cSAdam Langley 		opts->num_sack_blocks =
75595c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
75633ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
75733ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
7589424e2e7SEric Dumazet 		if (likely(opts->num_sack_blocks))
75933ad798cSAdam Langley 			size += TCPOLEN_SACK_BASE_ALIGNED +
76033ad798cSAdam Langley 				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
76133ad798cSAdam Langley 	}
76233ad798cSAdam Langley 
76333ad798cSAdam Langley 	return size;
76440efc6faSStephen Hemminger }
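/* Editor's annotation, worked example: with timestamps in use, size starts at
 * 12, so remaining = 40 - 12 = 28 bytes; at most (28 - 4) / 8 = 3 SACK blocks
 * fit (4 bytes of SACK option header, 8 bytes per block).
 */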
7651da177e4SLinus Torvalds 
76646d3ceabSEric Dumazet 
76746d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
76846d3ceabSEric Dumazet  *
76946d3ceabSEric Dumazet  * TSQ's goal is to keep a small number of skbs per TCP flow in tx queues (qdisc+dev)
77046d3ceabSEric Dumazet  * to reduce RTT and bufferbloat.
77146d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
77246d3ceabSEric Dumazet  *
77346d3ceabSEric Dumazet  * It's important that tcp_wfree() can be replaced by sock_wfree() in the event an skb
77446d3ceabSEric Dumazet  * needs to be reallocated in a driver.
7758e3bff96Sstephen hemminger  * The invariant is that skb->truesize gets subtracted from sk->sk_wmem_alloc.
77646d3ceabSEric Dumazet  *
77746d3ceabSEric Dumazet  * Since transmit from skb destructor is forbidden, we use a tasklet
77846d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
77946d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
78046d3ceabSEric Dumazet  */
78146d3ceabSEric Dumazet struct tsq_tasklet {
78246d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
78346d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
78446d3ceabSEric Dumazet };
78546d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
78646d3ceabSEric Dumazet 
78773a6bab5SEric Dumazet static void tcp_tsq_write(struct sock *sk)
7886f458dfbSEric Dumazet {
7896f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
7906f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
791f9616c35SEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
792f9616c35SEric Dumazet 		struct tcp_sock *tp = tcp_sk(sk);
793f9616c35SEric Dumazet 
794f9616c35SEric Dumazet 		if (tp->lost_out > tp->retrans_out &&
7953a91d29fSKoichiro Den 		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
7963a91d29fSKoichiro Den 			tcp_mstamp_refresh(tp);
797f9616c35SEric Dumazet 			tcp_xmit_retransmit_queue(sk);
7983a91d29fSKoichiro Den 		}
799f9616c35SEric Dumazet 
800f9616c35SEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
801bf06200eSJohn Ogness 			       0, GFP_ATOMIC);
8026f458dfbSEric Dumazet 	}
803f9616c35SEric Dumazet }
80473a6bab5SEric Dumazet 
80573a6bab5SEric Dumazet static void tcp_tsq_handler(struct sock *sk)
80673a6bab5SEric Dumazet {
80773a6bab5SEric Dumazet 	bh_lock_sock(sk);
80873a6bab5SEric Dumazet 	if (!sock_owned_by_user(sk))
80973a6bab5SEric Dumazet 		tcp_tsq_write(sk);
81073a6bab5SEric Dumazet 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
81173a6bab5SEric Dumazet 		sock_hold(sk);
81273a6bab5SEric Dumazet 	bh_unlock_sock(sk);
81373a6bab5SEric Dumazet }
81446d3ceabSEric Dumazet /*
8158e3bff96Sstephen hemminger  * One tasklet per cpu tries to send more skbs.
81646d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
8178e3bff96Sstephen hemminger  * transferring tsq->head because tcp_wfree() might
81846d3ceabSEric Dumazet  * interrupt us (non-NAPI drivers).
81946d3ceabSEric Dumazet  */
82046d3ceabSEric Dumazet static void tcp_tasklet_func(unsigned long data)
82146d3ceabSEric Dumazet {
82246d3ceabSEric Dumazet 	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
82346d3ceabSEric Dumazet 	LIST_HEAD(list);
82446d3ceabSEric Dumazet 	unsigned long flags;
82546d3ceabSEric Dumazet 	struct list_head *q, *n;
82646d3ceabSEric Dumazet 	struct tcp_sock *tp;
82746d3ceabSEric Dumazet 	struct sock *sk;
82846d3ceabSEric Dumazet 
82946d3ceabSEric Dumazet 	local_irq_save(flags);
83046d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
83146d3ceabSEric Dumazet 	local_irq_restore(flags);
83246d3ceabSEric Dumazet 
83346d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
83446d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
83546d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
83646d3ceabSEric Dumazet 
83746d3ceabSEric Dumazet 		sk = (struct sock *)tp;
8380a9648f1SEric Dumazet 		smp_mb__before_atomic();
8397aa5470cSEric Dumazet 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
8407aa5470cSEric Dumazet 
8416f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
84246d3ceabSEric Dumazet 		sk_free(sk);
84346d3ceabSEric Dumazet 	}
84446d3ceabSEric Dumazet }
84546d3ceabSEric Dumazet 
84640fc3423SEric Dumazet #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
84740fc3423SEric Dumazet 			  TCPF_WRITE_TIMER_DEFERRED |	\
84840fc3423SEric Dumazet 			  TCPF_DELACK_TIMER_DEFERRED |	\
84940fc3423SEric Dumazet 			  TCPF_MTU_REDUCED_DEFERRED)
85046d3ceabSEric Dumazet /**
85146d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
85246d3ceabSEric Dumazet  * @sk: socket
85346d3ceabSEric Dumazet  *
85446d3ceabSEric Dumazet  * called from release_sock() to perform protocol dependent
85546d3ceabSEric Dumazet  * actions before socket release.
85646d3ceabSEric Dumazet  */
85746d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
85846d3ceabSEric Dumazet {
8596f458dfbSEric Dumazet 	unsigned long flags, nflags;
86046d3ceabSEric Dumazet 
8616f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
8626f458dfbSEric Dumazet 	do {
8637aa5470cSEric Dumazet 		flags = sk->sk_tsq_flags;
8646f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
8656f458dfbSEric Dumazet 			return;
8666f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
8677aa5470cSEric Dumazet 	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
8686f458dfbSEric Dumazet 
86973a6bab5SEric Dumazet 	if (flags & TCPF_TSQ_DEFERRED) {
87073a6bab5SEric Dumazet 		tcp_tsq_write(sk);
87173a6bab5SEric Dumazet 		__sock_put(sk);
87273a6bab5SEric Dumazet 	}
873c3f9b018SEric Dumazet 	/* Here begins the tricky part :
874c3f9b018SEric Dumazet 	 * We are called from release_sock() with :
875c3f9b018SEric Dumazet 	 * 1) BH disabled
876c3f9b018SEric Dumazet 	 * 2) sk_lock.slock spinlock held
877c3f9b018SEric Dumazet 	 * 3) socket owned by us (sk->sk_lock.owned == 1)
878c3f9b018SEric Dumazet 	 *
879c3f9b018SEric Dumazet 	 * But following code is meant to be called from BH handlers,
880c3f9b018SEric Dumazet 	 * so we should keep BH disabled, but release socket ownership early
881c3f9b018SEric Dumazet 	 */
882c3f9b018SEric Dumazet 	sock_release_ownership(sk);
883c3f9b018SEric Dumazet 
88440fc3423SEric Dumazet 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
8856f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
886144d56e9SEric Dumazet 		__sock_put(sk);
887144d56e9SEric Dumazet 	}
88840fc3423SEric Dumazet 	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
8896f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
890144d56e9SEric Dumazet 		__sock_put(sk);
891144d56e9SEric Dumazet 	}
89240fc3423SEric Dumazet 	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
8934fab9071SNeal Cardwell 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
894144d56e9SEric Dumazet 		__sock_put(sk);
895144d56e9SEric Dumazet 	}
89646d3ceabSEric Dumazet }
89746d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
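/* Editor's annotation: each TCPF_*_DEFERRED bit is set by a timer or softirq
 * handler that found the socket owned by user context; tcp_release_cb() then
 * runs the deferred work, and __sock_put() drops the reference taken when the
 * bit was set.
 */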
89846d3ceabSEric Dumazet 
89946d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
90046d3ceabSEric Dumazet {
90146d3ceabSEric Dumazet 	int i;
90246d3ceabSEric Dumazet 
90346d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
90446d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
90546d3ceabSEric Dumazet 
90646d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
90746d3ceabSEric Dumazet 		tasklet_init(&tsq->tasklet,
90846d3ceabSEric Dumazet 			     tcp_tasklet_func,
90946d3ceabSEric Dumazet 			     (unsigned long)tsq);
91046d3ceabSEric Dumazet 	}
91146d3ceabSEric Dumazet }
91246d3ceabSEric Dumazet 
91346d3ceabSEric Dumazet /*
91446d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
9158e3bff96Sstephen hemminger  * We can't xmit new skbs from this context, as we might already
91646d3ceabSEric Dumazet  * hold qdisc lock.
91746d3ceabSEric Dumazet  */
918d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb)
91946d3ceabSEric Dumazet {
92046d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
92146d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
922408f0a6cSEric Dumazet 	unsigned long flags, nval, oval;
9239b462d02SEric Dumazet 
9249b462d02SEric Dumazet 	/* Keep one reference on sk_wmem_alloc.
9259b462d02SEric Dumazet 	 * Will be released by sk_free() from here or tcp_tasklet_func()
9269b462d02SEric Dumazet 	 */
92714afee4bSReshetova, Elena 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
9289b462d02SEric Dumazet 
9299b462d02SEric Dumazet 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
9309b462d02SEric Dumazet 	 * Wait until our queues (qdisc + devices) are drained.
9319b462d02SEric Dumazet 	 * This gives :
9329b462d02SEric Dumazet 	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
9339b462d02SEric Dumazet 	 * - chance for incoming ACK (processed by another cpu maybe)
9349b462d02SEric Dumazet 	 *   to migrate this flow (skb->ooo_okay will eventually be set)
9359b462d02SEric Dumazet 	 */
93614afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
9379b462d02SEric Dumazet 		goto out;
93846d3ceabSEric Dumazet 
9397aa5470cSEric Dumazet 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
94046d3ceabSEric Dumazet 		struct tsq_tasklet *tsq;
941a9b204d1SEric Dumazet 		bool empty;
94246d3ceabSEric Dumazet 
943408f0a6cSEric Dumazet 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
944408f0a6cSEric Dumazet 			goto out;
945408f0a6cSEric Dumazet 
94673a6bab5SEric Dumazet 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
9477aa5470cSEric Dumazet 		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
948408f0a6cSEric Dumazet 		if (nval != oval)
949408f0a6cSEric Dumazet 			continue;
950408f0a6cSEric Dumazet 
95146d3ceabSEric Dumazet 		/* queue this socket to tasklet queue */
95246d3ceabSEric Dumazet 		local_irq_save(flags);
953903ceff7SChristoph Lameter 		tsq = this_cpu_ptr(&tsq_tasklet);
954a9b204d1SEric Dumazet 		empty = list_empty(&tsq->head);
95546d3ceabSEric Dumazet 		list_add(&tp->tsq_node, &tsq->head);
956a9b204d1SEric Dumazet 		if (empty)
95746d3ceabSEric Dumazet 			tasklet_schedule(&tsq->tasklet);
95846d3ceabSEric Dumazet 		local_irq_restore(flags);
9599b462d02SEric Dumazet 		return;
96046d3ceabSEric Dumazet 	}
9619b462d02SEric Dumazet out:
9629b462d02SEric Dumazet 	sk_free(sk);
96346d3ceabSEric Dumazet }
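/* Editor's annotation: the cmpxchg loop above clears TSQF_THROTTLED and sets
 * TSQF_QUEUED atomically; only the caller that wins this transition links the
 * socket onto the per-cpu tasklet list, so a flow is queued at most once.
 */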
96446d3ceabSEric Dumazet 
96573a6bab5SEric Dumazet /* Note: Called under soft irq.
96673a6bab5SEric Dumazet  * We can call TCP stack right away, unless socket is owned by user.
967218af599SEric Dumazet  */
968218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
969218af599SEric Dumazet {
970218af599SEric Dumazet 	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
971218af599SEric Dumazet 	struct sock *sk = (struct sock *)tp;
972218af599SEric Dumazet 
97373a6bab5SEric Dumazet 	tcp_tsq_handler(sk);
97473a6bab5SEric Dumazet 	sock_put(sk);
975218af599SEric Dumazet 
976218af599SEric Dumazet 	return HRTIMER_NORESTART;
977218af599SEric Dumazet }
978218af599SEric Dumazet 
979a7a25630SEric Dumazet static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
980a7a25630SEric Dumazet 				      u64 prior_wstamp)
981e2080072SEric Dumazet {
982ab408b6dSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
983ab408b6dSEric Dumazet 
984ab408b6dSEric Dumazet 	if (sk->sk_pacing_status != SK_PACING_NONE) {
98576a9ebe8SEric Dumazet 		unsigned long rate = sk->sk_pacing_rate;
986ab408b6dSEric Dumazet 
987ab408b6dSEric Dumazet 		/* Original sch_fq does not pace first 10 MSS
988ab408b6dSEric Dumazet 		 * Note that tp->data_segs_out overflows after 2^32 packets,
989ab408b6dSEric Dumazet 		 * this is a minor annoyance.
990ab408b6dSEric Dumazet 		 */
99176a9ebe8SEric Dumazet 		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
992a7a25630SEric Dumazet 			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
993a7a25630SEric Dumazet 			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
994a7a25630SEric Dumazet 
995a7a25630SEric Dumazet 			/* take into account OS jitter */
996a7a25630SEric Dumazet 			len_ns -= min_t(u64, len_ns / 2, credit);
997a7a25630SEric Dumazet 			tp->tcp_wstamp_ns += len_ns;
998ab408b6dSEric Dumazet 		}
999ab408b6dSEric Dumazet 	}
1000e2080072SEric Dumazet 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1001e2080072SEric Dumazet }
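/* Editor's annotation, worked example: at sk_pacing_rate = 12 MB/s, a
 * 1500-byte skb nominally occupies 1500 * NSEC_PER_SEC / 12e6 = 125,000 ns
 * on the wire; up to half of that is forgiven as "credit" when the stack is
 * already running behind the previously scheduled tcp_wstamp_ns (OS jitter).
 */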
1002e2080072SEric Dumazet 
10031da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
10041da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
10051da177e4SLinus Torvalds  * transmission and possible later retransmissions.
10061da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
10071da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
10081da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
10091da177e4SLinus Torvalds  * device.
10101da177e4SLinus Torvalds  *
10111da177e4SLinus Torvalds  * We are working here with either a clone of the original
10121da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
10131da177e4SLinus Torvalds  */
10142987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
10152987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
10161da177e4SLinus Torvalds {
10176687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1018dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1019dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1020dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
102133ad798cSAdam Langley 	struct tcp_out_options opts;
102295c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
10238c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1024cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10251da177e4SLinus Torvalds 	struct tcphdr *th;
1026a7a25630SEric Dumazet 	u64 prior_wstamp;
10271da177e4SLinus Torvalds 	int err;
10281da177e4SLinus Torvalds 
1029dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
10306f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
10317f12422cSYuchung Cheng 	prior_wstamp = tp->tcp_wstamp_ns;
10327f12422cSYuchung Cheng 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
10337f12422cSYuchung Cheng 	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
1034ccdbb6e9SEric Dumazet 	if (clone_it) {
10356f094b9eSLawrence Brakmo 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
10366f094b9eSLawrence Brakmo 			- tp->snd_una;
10378c72c65bSEric Dumazet 		oskb = skb;
1038e2080072SEric Dumazet 
1039e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1040e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1041e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1042dfb4b9dcSDavid S. Miller 			else
1043e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1044e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1045e2080072SEric Dumazet 
1046dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1047dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1048dfb4b9dcSDavid S. Miller 	}
10495f6188a8SEric Dumazet 
1050dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1051dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
105233ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10531da177e4SLinus Torvalds 
1054051ba674SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
105533ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
1056051ba674SEric Dumazet 	} else {
105733ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
105833ad798cSAdam Langley 							   &md5);
1059051ba674SEric Dumazet 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1060051ba674SEric Dumazet 		 * at the receiver: this slightly improves GRO performance.
1061051ba674SEric Dumazet 		 * Note that we do not force the PSH flag for non-GSO packets,
1062051ba674SEric Dumazet 		 * because they might be sent under high congestion events,
1063051ba674SEric Dumazet 		 * and in this case it is better to delay the delivery of 1-MSS
1064051ba674SEric Dumazet 		 * packets and thus the corresponding ACK packet that would
1065051ba674SEric Dumazet 		 * release the following packet.
1066051ba674SEric Dumazet 		 */
1067051ba674SEric Dumazet 		if (tcp_skb_pcount(skb) > 1)
1068051ba674SEric Dumazet 			tcb->tcp_flags |= TCPHDR_PSH;
1069051ba674SEric Dumazet 	}
107033ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10711da177e4SLinus Torvalds 
1072547669d4SEric Dumazet 	/* if no packet is in qdisc/device queue, then allow XPS to select
1073b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
107473a6bab5SEric Dumazet 	 * which holds one reference to sk.
1075b2532eb9SEric Dumazet 	 *
1076b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1077b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1078547669d4SEric Dumazet 	 */
1079b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10801da177e4SLinus Torvalds 
108138ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
108238ab52e8SEric Dumazet 	 * this might cause drops if packet is looped back :
108338ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
108438ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
108538ab52e8SEric Dumazet 	 */
108638ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
108738ab52e8SEric Dumazet 
1088aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1089aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
109046d3ceabSEric Dumazet 
109146d3ceabSEric Dumazet 	skb_orphan(skb);
109246d3ceabSEric Dumazet 	skb->sk = sk;
10931d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1094b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
109514afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
10961da177e4SLinus Torvalds 
1097c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1098c3a2e837SJulian Anastasov 
10991da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1100ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1101c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1102c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
11031da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
11042987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1105df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
11064de075e0SEric Dumazet 					tcb->tcp_flags);
1107dfb4b9dcSDavid S. Miller 
11081da177e4SLinus Torvalds 	th->check		= 0;
11091da177e4SLinus Torvalds 	th->urg_ptr		= 0;
11101da177e4SLinus Torvalds 
111133f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
11127691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11137691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11141da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11151da177e4SLinus Torvalds 			th->urg = 1;
11167691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11170eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11187691367dSHerbert Xu 			th->urg = 1;
11197691367dSHerbert Xu 		}
11201da177e4SLinus Torvalds 	}
11211da177e4SLinus Torvalds 
1122bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
112351466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1124ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1125ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1126ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1127ea1627c2SEric Dumazet 	} else {
1128ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1129ea1627c2SEric Dumazet 		 * is never scaled.
1130ea1627c2SEric Dumazet 		 */
1131ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1132ea1627c2SEric Dumazet 	}
1133cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1134cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1135cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1136a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1137bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
113839f8e58eSEric Dumazet 					       md5, sk, skb);
1139cfb6eeb4SYOSHIFUJI Hideaki 	}
1140cfb6eeb4SYOSHIFUJI Hideaki #endif
1141cfb6eeb4SYOSHIFUJI Hideaki 
1142bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11431da177e4SLinus Torvalds 
11444de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
114527cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11461da177e4SLinus Torvalds 
1147a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1148cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1149a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1150ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1151a44d6eacSMartin KaFai Lau 	}
11521da177e4SLinus Torvalds 
1153bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1154aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1155aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11561da177e4SLinus Torvalds 
11572efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1158f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1159cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1160f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1161cd7d8498SEric Dumazet 
1162d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1163971f10ecSEric Dumazet 
1164971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1165971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1166971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1167971f10ecSEric Dumazet 
1168a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1169a842fe14SEric Dumazet 
1170b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11717faee5c0SEric Dumazet 
11728c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11735ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11748c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11758c72c65bSEric Dumazet 	}
1176fc225799SEric Dumazet 	if (!err && oskb) {
1177a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1178fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1179fc225799SEric Dumazet 	}
11808c72c65bSEric Dumazet 	return err;
11811da177e4SLinus Torvalds }
11821da177e4SLinus Torvalds 
11832987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11842987babbSYuchung Cheng 			    gfp_t gfp_mask)
11852987babbSYuchung Cheng {
11862987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11872987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11882987babbSYuchung Cheng }
11892987babbSYuchung Cheng 
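/*
 * Illustrative sketch, not part of the kernel source: the 16-bit word that
 * __tcp_transmit_skb() stores at offset 12 of the TCP header packs the 4-bit
 * data offset (header length in 32-bit words) with the flag bits.  A 32-byte
 * header gives doff = 32 >> 2 = 8, so the word is (8 << 12) | flags.
 * The helper name is hypothetical.
 */
static __be16 example_doff_flags_word(unsigned int tcp_header_size, u8 tcp_flags)
{
	return htons(((tcp_header_size >> 2) << 12) | tcp_flags);
}
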
119067edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11911da177e4SLinus Torvalds  *
11921da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11931da177e4SLinus Torvalds  * otherwise socket can stall.
11941da177e4SLinus Torvalds  */
11951da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11961da177e4SLinus Torvalds {
11971da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11981da177e4SLinus Torvalds 
11991da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
12000f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1201f4a775d1SEric Dumazet 	__skb_header_release(skb);
1202fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
1203ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
12043ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
12051da177e4SLinus Torvalds }
12061da177e4SLinus Torvalds 
120767edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
12085bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1209f6302d1dSDavid S. Miller {
12104a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1211f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1212f6302d1dSDavid S. Miller 		 * non-TSO case.
1213f6302d1dSDavid S. Miller 		 */
1214cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1215f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1216f6302d1dSDavid S. Miller 	} else {
1217cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1218f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12191da177e4SLinus Torvalds 	}
12201da177e4SLinus Torvalds }
12211da177e4SLinus Torvalds 
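/*
 * Illustrative sketch, not part of the kernel source: the pcount rule used
 * by tcp_set_skb_tso_segs() above.  A 4000-byte skb with mss_now = 1448
 * yields DIV_ROUND_UP(4000, 1448) = 3 segments and tcp_gso_size = 1448;
 * anything <= one MSS is a single segment with tcp_gso_size = 0.
 * The helper name is hypothetical.
 */
static u32 example_tso_pcount(unsigned int len, unsigned int mss_now)
{
	return len <= mss_now ? 1 : DIV_ROUND_UP(len, mss_now);
}
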
1222797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed; we need to do various
1223797108d1SIlpo Järvinen  * tweaks to fix the counters.
1224797108d1SIlpo Järvinen  */
1225cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1226797108d1SIlpo Järvinen {
1227797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1228797108d1SIlpo Järvinen 
1229797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1230797108d1SIlpo Järvinen 
1231797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1232797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1233797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1234797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1235797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1236797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1237797108d1SIlpo Järvinen 
1238797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1239797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1240797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1241797108d1SIlpo Järvinen 
1242797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1243797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1244713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1245797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1246797108d1SIlpo Järvinen 
1247797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1248797108d1SIlpo Järvinen }
1249797108d1SIlpo Järvinen 
12500a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12510a2cf20cSSoheil Hassas Yeganeh {
12520a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12530a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12540a2cf20cSSoheil Hassas Yeganeh }
12550a2cf20cSSoheil Hassas Yeganeh 
1256490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1257490cc7d0SWillem de Bruijn {
1258490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1259490cc7d0SWillem de Bruijn 
12600a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1261490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1262490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1263490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1264490cc7d0SWillem de Bruijn 
1265490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1266490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1267490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1268b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1269b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1270490cc7d0SWillem de Bruijn 	}
1271490cc7d0SWillem de Bruijn }
1272490cc7d0SWillem de Bruijn 
1273a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1274a166140eSMartin KaFai Lau {
1275a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1276a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1277a166140eSMartin KaFai Lau }
1278a166140eSMartin KaFai Lau 
127975c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
128075c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
128175c119afSEric Dumazet 					 struct sk_buff *buff,
128275c119afSEric Dumazet 					 struct sock *sk,
128375c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
128475c119afSEric Dumazet {
128575c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
128675c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
128775c119afSEric Dumazet 	else
128875c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
128975c119afSEric Dumazet }
129075c119afSEric Dumazet 
12911da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12921da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12931da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12941da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12951da177e4SLinus Torvalds  */
129675c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
129775c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12986cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12991da177e4SLinus Torvalds {
13001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13011da177e4SLinus Torvalds 	struct sk_buff *buff;
13026475be16SDavid S. Miller 	int nsize, old_factor;
1303b617158dSEric Dumazet 	long limit;
1304b60b49eaSHerbert Xu 	int nlen;
13059ce01461SIlpo Järvinen 	u8 flags;
13061da177e4SLinus Torvalds 
13072fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
13082fceec13SIlpo Järvinen 		return -EINVAL;
13096a438bbeSStephen Hemminger 
13101da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
13111da177e4SLinus Torvalds 	if (nsize < 0)
13121da177e4SLinus Torvalds 		nsize = 0;
13131da177e4SLinus Torvalds 
1314b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1315b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1316b617158dSEric Dumazet 	 * SO_SNDBUF values.
1317b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1318b617158dSEric Dumazet 	 */
1319b617158dSEric Dumazet 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1320b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1321b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1322b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1323b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1324f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1325f070ef2aSEric Dumazet 		return -ENOMEM;
1326f070ef2aSEric Dumazet 	}
1327f070ef2aSEric Dumazet 
13286cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13291da177e4SLinus Torvalds 		return -ENOMEM;
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1332eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
133351456b29SIan Morris 	if (!buff)
13341da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
133541477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
1336ef5cb973SHerbert Xu 
1337ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
13383ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1339b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1340b60b49eaSHerbert Xu 	buff->truesize += nlen;
1341b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13441da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13451da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13461da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13494de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13504de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13514de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1352e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1353a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13561da177e4SLinus Torvalds 
135798be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13581da177e4SLinus Torvalds 
1359a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1360490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13611da177e4SLinus Torvalds 
13626475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13636475be16SDavid S. Miller 
13641da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13655bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13665bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13671da177e4SLinus Torvalds 
1368b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1369b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1370b9f64820SYuchung Cheng 
13716475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13726475be16SDavid S. Miller 	 * adjust the various packet counters.
13736475be16SDavid S. Miller 	 */
1374cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13756475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13766475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13771da177e4SLinus Torvalds 
1378797108d1SIlpo Järvinen 		if (diff)
1379797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13801da177e4SLinus Torvalds 	}
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1383f4a775d1SEric Dumazet 	__skb_header_release(buff);
138475c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1385f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1386e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	return 0;
13891da177e4SLinus Torvalds }
13901da177e4SLinus Torvalds 
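/*
 * Illustrative sketch, not part of the kernel source: the sequence
 * bookkeeping tcp_fragment() performs above.  Splitting an skb covering
 * [1000, 4000) at len = 1448 leaves the original with [1000, 2448) and the
 * new buff with [2448, 4000); the end_seq of the pair is unchanged.
 * The helper name is hypothetical.
 */
static void example_fragment_seqs(u32 seq, u32 len,
				  u32 *first_end_seq, u32 *buff_seq)
{
	*first_end_seq = seq + len;	/* original skb now ends here */
	*buff_seq = seq + len;		/* new buff starts here */
}
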
1391f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1392f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13931da177e4SLinus Torvalds  */
13947162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13951da177e4SLinus Torvalds {
13967b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13971da177e4SLinus Torvalds 	int i, k, eat;
13981da177e4SLinus Torvalds 
13994fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
14004fa48bf3SEric Dumazet 	if (eat) {
14014fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
14024fa48bf3SEric Dumazet 		len -= eat;
14034fa48bf3SEric Dumazet 		if (!len)
14047162fb24SEric Dumazet 			return 0;
14054fa48bf3SEric Dumazet 	}
14061da177e4SLinus Torvalds 	eat = len;
14071da177e4SLinus Torvalds 	k = 0;
14087b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
14097b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
14107b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
14119e903e08SEric Dumazet 
14129e903e08SEric Dumazet 		if (size <= eat) {
1413aff65da0SIan Campbell 			skb_frag_unref(skb, i);
14149e903e08SEric Dumazet 			eat -= size;
14151da177e4SLinus Torvalds 		} else {
14167b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
14171da177e4SLinus Torvalds 			if (eat) {
1418b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[k], eat);
14197b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
14201da177e4SLinus Torvalds 				eat = 0;
14211da177e4SLinus Torvalds 			}
14221da177e4SLinus Torvalds 			k++;
14231da177e4SLinus Torvalds 		}
14241da177e4SLinus Torvalds 	}
14257b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14261da177e4SLinus Torvalds 
14271da177e4SLinus Torvalds 	skb->data_len -= len;
14281da177e4SLinus Torvalds 	skb->len = skb->data_len;
14297162fb24SEric Dumazet 	return len;
14301da177e4SLinus Torvalds }
14311da177e4SLinus Torvalds 
143267edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14331da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14341da177e4SLinus Torvalds {
14357162fb24SEric Dumazet 	u32 delta_truesize;
14367162fb24SEric Dumazet 
143714bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14381da177e4SLinus Torvalds 		return -ENOMEM;
14391da177e4SLinus Torvalds 
14407162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14411da177e4SLinus Torvalds 
14421da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
144384fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14441da177e4SLinus Torvalds 
14457162fb24SEric Dumazet 	if (delta_truesize) {
14467162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
1447ab4e846aSEric Dumazet 		sk_wmem_queued_add(sk, -delta_truesize);
14487162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14491da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14507162fb24SEric Dumazet 	}
14511da177e4SLinus Torvalds 
14525b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14531da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14545bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds 	return 0;
14571da177e4SLinus Torvalds }
14581da177e4SLinus Torvalds 
14591b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
14601b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14615d424d5aSJohn Heffner {
1462cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1463cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14645d424d5aSJohn Heffner 	int mss_now;
14655d424d5aSJohn Heffner 
14665d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14675d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
14685d424d5aSJohn Heffner 	 */
14695d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14705d424d5aSJohn Heffner 
147167469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
147267469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
147367469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
147467469601SEric Dumazet 
147567469601SEric Dumazet 		if (dst && dst_allfrag(dst))
147667469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
147767469601SEric Dumazet 	}
147867469601SEric Dumazet 
14795d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14805d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14815d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14825d424d5aSJohn Heffner 
14835d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14845d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14855d424d5aSJohn Heffner 
14865d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14875f3e2bf0SEric Dumazet 	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
14885d424d5aSJohn Heffner 	return mss_now;
14895d424d5aSJohn Heffner }
14905d424d5aSJohn Heffner 
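/*
 * Illustrative sketch, not part of the kernel source: the arithmetic of
 * __tcp_mtu_to_mss() above for a plain IPv4 case.  With pmtu = 1500, a
 * 20-byte IP header and no extension headers, mss = 1500 - 20 - 20 = 1460,
 * then clamped by mss_clamp and floored at sysctl_tcp_min_snd_mss.
 * The helper name and the fixed header sizes are assumptions.
 */
static int example_mtu_to_mss_v4(int pmtu, int mss_clamp, int min_snd_mss)
{
	int mss = pmtu - 20 /* iphdr */ - 20 /* tcphdr */;

	if (mss > mss_clamp)
		mss = mss_clamp;
	return max(mss, min_snd_mss);
}
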
14911b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14921b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14931b63edd6SYuchung Cheng {
14941b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14951b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14961b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14971b63edd6SYuchung Cheng }
14981b63edd6SYuchung Cheng 
14995d424d5aSJohn Heffner /* Inverse of above */
150067469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
15015d424d5aSJohn Heffner {
1502cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1503cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
15045d424d5aSJohn Heffner 	int mtu;
15055d424d5aSJohn Heffner 
15065d424d5aSJohn Heffner 	mtu = mss +
15075d424d5aSJohn Heffner 	      tp->tcp_header_len +
15085d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
15095d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
15105d424d5aSJohn Heffner 
151167469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
151267469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
151367469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
151467469601SEric Dumazet 
151567469601SEric Dumazet 		if (dst && dst_allfrag(dst))
151667469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
151767469601SEric Dumazet 	}
15185d424d5aSJohn Heffner 	return mtu;
15195d424d5aSJohn Heffner }
1520556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
15215d424d5aSJohn Heffner 
152267edfef7SAndi Kleen /* MTU probing init per socket */
15235d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
15245d424d5aSJohn Heffner {
15255d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15265d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1527b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15285d424d5aSJohn Heffner 
1529b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15305d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15315d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1532b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15335d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
153405cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1535c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15365d424d5aSJohn Heffner }
15374bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15385d424d5aSJohn Heffner 
15391da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15421da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1545caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
15461da177e4SLinus Torvalds    It also does not include TCP options.
15471da177e4SLinus Torvalds 
1548d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15491da177e4SLinus Torvalds 
15501da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15511da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15521da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15531da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15541da177e4SLinus Torvalds 
15551da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15561da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15571da177e4SLinus Torvalds 
1558d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1559d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15601da177e4SLinus Torvalds  */
15611da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15621da177e4SLinus Torvalds {
15631da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1564d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15655d424d5aSJohn Heffner 	int mss_now;
15661da177e4SLinus Torvalds 
15675d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15685d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15691da177e4SLinus Torvalds 
15705d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1571409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds 	/* And store cached results */
1574d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15755d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15765d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1577c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15781da177e4SLinus Torvalds 
15791da177e4SLinus Torvalds 	return mss_now;
15801da177e4SLinus Torvalds }
15814bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15821da177e4SLinus Torvalds 
15831da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15841da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15851da177e4SLinus Torvalds  */
15860c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15871da177e4SLinus Torvalds {
1588cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1589cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1590c1b4a7e6SDavid S. Miller 	u32 mss_now;
159195c96174SEric Dumazet 	unsigned int header_len;
159233ad798cSAdam Langley 	struct tcp_out_options opts;
159333ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15941da177e4SLinus Torvalds 
1595c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1596c1b4a7e6SDavid S. Miller 
15971da177e4SLinus Torvalds 	if (dst) {
15981da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1599d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
16001da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
16011da177e4SLinus Torvalds 	}
16021da177e4SLinus Torvalds 
160333ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
160433ad798cSAdam Langley 		     sizeof(struct tcphdr);
160533ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
160633ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
160733ad798cSAdam Langley 	 * blocks, etc.) then our calculated header_len will be different, and
160833ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly. */
160933ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
161033ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
161133ad798cSAdam Langley 		mss_now -= delta;
161233ad798cSAdam Langley 	}
1613cfb6eeb4SYOSHIFUJI Hideaki 
16141da177e4SLinus Torvalds 	return mss_now;
16151da177e4SLinus Torvalds }
16161da177e4SLinus Torvalds 
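/*
 * Illustrative sketch, not part of the kernel source: the delta adjustment
 * in tcp_current_mss() above.  If tp->tcp_header_len assumes 20 + 12
 * (timestamps) = 32 bytes but the next packet also carries one SACK block
 * (12 more bytes, padded), header_len becomes 44 and mss_now shrinks by 12.
 * The option sizes quoted here are the usual aligned values, stated as an
 * assumption; the helper name is hypothetical.
 */
static u32 example_current_mss(u32 mss_cache, int header_len, int cached_len)
{
	return mss_cache - (header_len - cached_len);
}
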
161786fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
161886fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
161986fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
162086fd14adSWeiping Pan  */
162186fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1622a762a980SDavid S. Miller {
16239e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1624a762a980SDavid S. Miller 
162586fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
162686fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
162786fd14adSWeiping Pan 		/* Limited by application or receiver window. */
162886fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
162986fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
163086fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
163186fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
163286fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
163386fd14adSWeiping Pan 		}
163486fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
163586fd14adSWeiping Pan 	}
1636c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
163786fd14adSWeiping Pan }
163886fd14adSWeiping Pan 
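/*
 * Illustrative sketch, not part of the kernel source: the RFC2861 decay
 * performed by tcp_cwnd_application_limited() above.  With snd_cwnd = 100
 * but at most win_used = 20 packets outstanding during the last window, the
 * new cwnd is (100 + 20) >> 1 = 60.  The helper name is hypothetical.
 */
static u32 example_cwnd_decay(u32 snd_cwnd, u32 win_used)
{
	return win_used < snd_cwnd ? (snd_cwnd + win_used) >> 1 : snd_cwnd;
}
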
1639ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1640a762a980SDavid S. Miller {
16411b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1642a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1643a762a980SDavid S. Miller 
1644ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1645ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1646ca8a2263SNeal Cardwell 	 */
1647ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1648ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1649ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1650ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1651ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1652ca8a2263SNeal Cardwell 	}
1653e114a710SEric Dumazet 
165424901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1655a762a980SDavid S. Miller 		/* Network is fed fully. */
1656a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1657c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1658a762a980SDavid S. Miller 	} else {
1659a762a980SDavid S. Miller 		/* Network starves. */
1660a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1661a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1662a762a980SDavid S. Miller 
1663b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1664c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16651b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1666a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1667b0f71bd3SFrancis Yan 
1668b0f71bd3SFrancis Yan 		/* The following conditions together indicate that the starvation
1669b0f71bd3SFrancis Yan 		 * is caused by an insufficient sender buffer:
1670b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1671b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
167275c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1673b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1674b0f71bd3SFrancis Yan 		 */
167575c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1676b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1677b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1678b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1679a762a980SDavid S. Miller 	}
1680a762a980SDavid S. Miller }
1681a762a980SDavid S. Miller 
1682d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1683d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1684d4589926SEric Dumazet {
1685d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1686d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1687d4589926SEric Dumazet }
1688d4589926SEric Dumazet 
1689d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1690d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1691d4589926SEric Dumazet  * The test is really :
1692d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1693d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1694d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1695d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16960e3a4803SIlpo Järvinen  */
1697d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1698d4589926SEric Dumazet 				const struct sk_buff *skb)
1699d4589926SEric Dumazet {
1700d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1701d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1702d4589926SEric Dumazet }
1703d4589926SEric Dumazet 
1704d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1705d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1706d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1707d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1708d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1709d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1710d4589926SEric Dumazet  */
1711d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1712cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1713d4589926SEric Dumazet {
1714d4589926SEric Dumazet 	return partial &&
1715d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1716d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1717d4589926SEric Dumazet }
1718605ad7f1SEric Dumazet 
1719605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1720605ad7f1SEric Dumazet  * to send one TSO packet per ms
1721605ad7f1SEric Dumazet  */
1722dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
17231b3878caSNeal Cardwell 			    int min_tso_segs)
1724605ad7f1SEric Dumazet {
1725605ad7f1SEric Dumazet 	u32 bytes, segs;
1726605ad7f1SEric Dumazet 
172776a9ebe8SEric Dumazet 	bytes = min_t(unsigned long,
172876a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift,
1729605ad7f1SEric Dumazet 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1730605ad7f1SEric Dumazet 
1731605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1732605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1733605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1734605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1735605ad7f1SEric Dumazet 	 */
17361b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1737605ad7f1SEric Dumazet 
1738350c9f48SEric Dumazet 	return segs;
1739605ad7f1SEric Dumazet }
1740605ad7f1SEric Dumazet 
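/*
 * Illustrative sketch, not part of the kernel source: the sizing done by
 * tcp_tso_autosize() above, without the gso_max_size cap.  With
 * sk_pacing_rate = 125,000,000 bytes/sec (1 Gbit/s) and the default
 * sk_pacing_shift of 10, bytes = rate >> 10 ~= 122 KB (about 1 ms of data);
 * at mss_now = 1448 that is ~84 segments, floored at min_tso_segs.
 * The helper name is hypothetical.
 */
static u32 example_tso_autosize(unsigned long pacing_rate, int pacing_shift,
				unsigned int mss_now, u32 min_tso_segs)
{
	unsigned long bytes = pacing_rate >> pacing_shift;

	return max_t(u32, bytes / mss_now, min_tso_segs);
}
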
1741ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1742ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1743ed6e7268SNeal Cardwell  */
1744ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1745ed6e7268SNeal Cardwell {
1746ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1747dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1748ed6e7268SNeal Cardwell 
1749dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1750dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1751dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1752dcb8c9b4SEric Dumazet 
1753dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1754350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1755ed6e7268SNeal Cardwell }
1756ed6e7268SNeal Cardwell 
1757d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1758d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1759d4589926SEric Dumazet 					const struct sk_buff *skb,
1760d4589926SEric Dumazet 					unsigned int mss_now,
1761d4589926SEric Dumazet 					unsigned int max_segs,
1762d4589926SEric Dumazet 					int nonagle)
1763c1b4a7e6SDavid S. Miller {
1764cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1765d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1766c1b4a7e6SDavid S. Miller 
176790840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17681485348dSBen Hutchings 	max_len = mss_now * max_segs;
17690e3a4803SIlpo Järvinen 
17701485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17711485348dSBen Hutchings 		return max_len;
17720e3a4803SIlpo Järvinen 
17735ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17745ea3a748SIlpo Järvinen 
17751485348dSBen Hutchings 	if (max_len <= needed)
17761485348dSBen Hutchings 		return max_len;
17770e3a4803SIlpo Järvinen 
1778d4589926SEric Dumazet 	partial = needed % mss_now;
1779d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1780d4589926SEric Dumazet 	 * to include this last segment in this skb.
1781d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1782d4589926SEric Dumazet 	 */
1783cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1784d4589926SEric Dumazet 		return needed - partial;
1785d4589926SEric Dumazet 
1786d4589926SEric Dumazet 	return needed;
1787c1b4a7e6SDavid S. Miller }
1788c1b4a7e6SDavid S. Miller 
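/*
 * Illustrative sketch, not part of the kernel source: the split-point math
 * of tcp_mss_split_point() above.  With skb->len = 4000, window = 3000 and
 * mss_now = 1448, needed = 3000 and partial = 3000 % 1448 = 104; if the
 * Nagle check rejects the 104-byte tail, only 2896 bytes (two full
 * segments) are sent now.  The helper name is hypothetical.
 */
static unsigned int example_split_point(unsigned int skb_len,
					unsigned int window,
					unsigned int mss_now,
					bool nagle_blocks_tail)
{
	unsigned int needed = min(skb_len, window);
	unsigned int partial = needed % mss_now;

	if (partial && nagle_blocks_tail)
		return needed - partial;
	return needed;
}
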
1789c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1790c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1791c1b4a7e6SDavid S. Miller  */
1792cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1793cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1794c1b4a7e6SDavid S. Miller {
1795d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1796c1b4a7e6SDavid S. Miller 
1797c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17984de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17994de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1800c1b4a7e6SDavid S. Miller 		return 1;
1801c1b4a7e6SDavid S. Miller 
1802c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1803c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1804d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1805c1b4a7e6SDavid S. Miller 		return 0;
1806d649a7a8SEric Dumazet 
1807d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1808d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1809d649a7a8SEric Dumazet 	 */
1810d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1811d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1812c1b4a7e6SDavid S. Miller }
1813c1b4a7e6SDavid S. Miller 
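/*
 * Illustrative sketch, not part of the kernel source: the budget computed by
 * tcp_cwnd_test() above.  With snd_cwnd = 10 and in_flight = 7, halfcwnd = 5
 * and the result is min(5, 10 - 7) = 3 sendable segments; once in_flight
 * reaches cwnd the budget is 0.  The helper name is hypothetical.
 */
static u32 example_cwnd_budget(u32 cwnd, u32 in_flight)
{
	u32 halfcwnd;

	if (in_flight >= cwnd)
		return 0;
	halfcwnd = max(cwnd >> 1, 1U);
	return min(halfcwnd, cwnd - in_flight);
}
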
1814b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
181567edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1816c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1817c1b4a7e6SDavid S. Miller  */
18185bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1819c1b4a7e6SDavid S. Miller {
1820c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1821c1b4a7e6SDavid S. Miller 
1822f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
18235bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1824c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1825c1b4a7e6SDavid S. Miller 	}
1826c1b4a7e6SDavid S. Miller 	return tso_segs;
1827c1b4a7e6SDavid S. Miller }
1828c1b4a7e6SDavid S. Miller 
1829c1b4a7e6SDavid S. Miller 
1830a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1831c1b4a7e6SDavid S. Miller  * sent now.
1832c1b4a7e6SDavid S. Miller  */
1833a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1834c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1835c1b4a7e6SDavid S. Miller {
1836c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
1837c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1838c1b4a7e6SDavid S. Miller 	 *
1839c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1840c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1841c1b4a7e6SDavid S. Miller 	 */
1842c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1843a2a385d6SEric Dumazet 		return true;
1844c1b4a7e6SDavid S. Miller 
18459b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18469b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1847a2a385d6SEric Dumazet 		return true;
1848c1b4a7e6SDavid S. Miller 
1849cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1850a2a385d6SEric Dumazet 		return true;
1851c1b4a7e6SDavid S. Miller 
1852a2a385d6SEric Dumazet 	return false;
1853c1b4a7e6SDavid S. Miller }
1854c1b4a7e6SDavid S. Miller 
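/*
 * Illustrative sketch, not part of the kernel source: the decision of
 * tcp_nagle_test() above, specialized to a connection that has packets in
 * flight.  A full-sized segment, a FIN, urgent data or TCP_NAGLE_PUSH sends
 * immediately; a sub-MSS tail is held back under TCP_CORK, or under plain
 * Nagle while earlier small packets are unacked (the Minshall check).
 * The helper name is hypothetical.
 */
static bool example_nagle_allows_send(bool push_or_fin, bool full_sized,
				      bool corked, bool small_unacked)
{
	if (push_or_fin || full_sized)
		return true;
	if (corked)
		return false;
	return !small_unacked;	/* assumes Nagle enabled, packets_out > 0 */
}
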
1855c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1856a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1857a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1858056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1859c1b4a7e6SDavid S. Miller {
1860c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1861c1b4a7e6SDavid S. Miller 
1862c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1863c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1864c1b4a7e6SDavid S. Miller 
186590840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1866c1b4a7e6SDavid S. Miller }
1867c1b4a7e6SDavid S. Miller 
1868c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1869c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1870c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1871c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1872c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1873c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1874c1b4a7e6SDavid S. Miller  */
187556483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1876c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1877c1b4a7e6SDavid S. Miller {
1878c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
187956483341SEric Dumazet 	struct sk_buff *buff;
18809ce01461SIlpo Järvinen 	u8 flags;
1881c1b4a7e6SDavid S. Miller 
1882c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1883c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
188456483341SEric Dumazet 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
188556483341SEric Dumazet 				    skb, len, mss_now, gfp);
1886c1b4a7e6SDavid S. Miller 
1887eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
188851456b29SIan Morris 	if (unlikely(!buff))
1889c1b4a7e6SDavid S. Miller 		return -ENOMEM;
189041477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
1891c1b4a7e6SDavid S. Miller 
1892ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
18933ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1894b60b49eaSHerbert Xu 	buff->truesize += nlen;
1895c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1896c1b4a7e6SDavid S. Miller 
1897c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1898c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1899c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1900c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1901c1b4a7e6SDavid S. Miller 
1902c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
19034de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
19044de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
19054de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1906c1b4a7e6SDavid S. Miller 
1907c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1908c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1909c1b4a7e6SDavid S. Miller 
1910a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1911a166140eSMartin KaFai Lau 
191298be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1913c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1914490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1915c1b4a7e6SDavid S. Miller 
1916c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
19175bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
19185bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1919c1b4a7e6SDavid S. Miller 
1920c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1921f4a775d1SEric Dumazet 	__skb_header_release(buff);
192256483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
1923c1b4a7e6SDavid S. Miller 
1924c1b4a7e6SDavid S. Miller 	return 0;
1925c1b4a7e6SDavid S. Miller }
1926c1b4a7e6SDavid S. Miller 
1927c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1928c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1929c1b4a7e6SDavid S. Miller  *
1930c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1931c1b4a7e6SDavid S. Miller  */
1932ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1933f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
1934f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
1935f9bfe4e6SEric Dumazet 				 u32 max_segs)
1936c1b4a7e6SDavid S. Miller {
19376687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1938f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
193950c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
194050c8339eSEric Dumazet 	struct sk_buff *head;
1941ad9f4f50SEric Dumazet 	int win_divisor;
1942f1c6ea38SEric Dumazet 	s64 delta;
1943c1b4a7e6SDavid S. Miller 
194499d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1945ae8064acSJohn Heffner 		goto send_now;
1946ae8064acSJohn Heffner 
19475f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
1948a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
1949a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
1950a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
19515f852eb5SEric Dumazet 	 */
1952a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1953a682850aSEric Dumazet 	if (delta > 0)
1954ae8064acSJohn Heffner 		goto send_now;
1955908a75c1SDavid S. Miller 
1956c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1957c1b4a7e6SDavid S. Miller 
1958c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1959c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1960c1b4a7e6SDavid S. Miller 
196190840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1962c1b4a7e6SDavid S. Miller 
1963c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1964c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1965c1b4a7e6SDavid S. Miller 
1966c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1967c1b4a7e6SDavid S. Miller 
1968ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1969605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1970ae8064acSJohn Heffner 		goto send_now;
1971ba244fe9SDavid S. Miller 
197262ad2761SIlpo Järvinen 	/* A frame in the middle of the queue won't get more data; fully sendable already? */
197362ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
197462ad2761SIlpo Järvinen 		goto send_now;
197562ad2761SIlpo Järvinen 
19765bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1977ad9f4f50SEric Dumazet 	if (win_divisor) {
1978c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1979c1b4a7e6SDavid S. Miller 
1980c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1981c1b4a7e6SDavid S. Miller 		 * just use it.
1982c1b4a7e6SDavid S. Miller 		 */
1983ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1984c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1985ae8064acSJohn Heffner 			goto send_now;
1986c1b4a7e6SDavid S. Miller 	} else {
1987c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1988c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1989c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1990c1b4a7e6SDavid S. Miller 		 * then send now.
1991c1b4a7e6SDavid S. Miller 		 */
19926b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1993ae8064acSJohn Heffner 			goto send_now;
1994c1b4a7e6SDavid S. Miller 	}
1995c1b4a7e6SDavid S. Miller 
199675c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
199775c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
199875c119afSEric Dumazet 	if (!head)
199975c119afSEric Dumazet 		goto send_now;
2000f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
200150c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
2002f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
200350c8339eSEric Dumazet 		goto send_now;
200450c8339eSEric Dumazet 
2005f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
2006f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
2007f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
2008f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
2009f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
2010f9bfe4e6SEric Dumazet 	 */
2011f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
2012f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
2013ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2014f9bfe4e6SEric Dumazet 			return true;
2015f9bfe4e6SEric Dumazet 		}
2016f9bfe4e6SEric Dumazet 	} else {
2017f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2018f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2019f9bfe4e6SEric Dumazet 			return true;
2020f9bfe4e6SEric Dumazet 		}
2021f9bfe4e6SEric Dumazet 	}
2022f9bfe4e6SEric Dumazet 
2023f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2024d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2025d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2026f9bfe4e6SEric Dumazet 		goto send_now;
2027ca8a2263SNeal Cardwell 
2028a2a385d6SEric Dumazet 	return true;
2029ae8064acSJohn Heffner 
2030ae8064acSJohn Heffner send_now:
2031a2a385d6SEric Dumazet 	return false;
2032c1b4a7e6SDavid S. Miller }
2033c1b4a7e6SDavid S. Miller 
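/* Illustrative sketch, not part of the kernel source: the deferral
 * arithmetic above, with assumed example values.  Every number below is
 * hypothetical.
 */
static inline unsigned int tso_defer_chunk_example(void)
{
	unsigned int mss = 1448, snd_cwnd = 40, in_flight = 30;
	unsigned int snd_wnd = 65535, win_divisor = 3;
	unsigned int cong_win = (snd_cwnd - in_flight) * mss;	/* 14480 */
	unsigned int chunk = min(snd_wnd, snd_cwnd * mss);	/* 57920 */

	chunk /= win_divisor;					/* 19306 */
	/* limit = min(send_win, cong_win) = 14480 < 19306, so the
	 * win_divisor test fails and the skb is deferred.
	 */
	return chunk;
}
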
203405cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
203505cbc0dbSFan Du {
203605cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
203705cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
203805cbc0dbSFan Du 	struct net *net = sock_net(sk);
203905cbc0dbSFan Du 	u32 interval;
204005cbc0dbSFan Du 	s32 delta;
204105cbc0dbSFan Du 
204205cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
2043c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
204405cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
204505cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
204605cbc0dbSFan Du 
204705cbc0dbSFan Du 		/* Update current search range */
204805cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
204905cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
205005cbc0dbSFan Du 			sizeof(struct tcphdr) +
205105cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
205205cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
205305cbc0dbSFan Du 
205405cbc0dbSFan Du 		/* Update probe time stamp */
2055c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
205605cbc0dbSFan Du 	}
205705cbc0dbSFan Du }
205805cbc0dbSFan Du 
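/* Illustrative sketch, not part of the kernel source: the wrap-safe
 * reprobe test above, reduced to plain jiffies arithmetic.  The
 * 600-second figure assumes the usual default of
 * net.ipv4.tcp_probe_interval.
 */
static inline bool mtu_reprobe_due_example(u32 now, u32 probe_stamp)
{
	s32 delta = (s32)(now - probe_stamp);	/* safe across wraparound */

	return delta >= (s32)(600 * HZ);	/* ten minutes */
}
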
2059808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2060808cf9e3SIlya Lesokhin {
2061808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2062808cf9e3SIlya Lesokhin 
2063808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2064808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2065808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2066808cf9e3SIlya Lesokhin 			break;
2067808cf9e3SIlya Lesokhin 
2068888a5c53SWillem de Bruijn 		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2069808cf9e3SIlya Lesokhin 			return false;
2070808cf9e3SIlya Lesokhin 
2071808cf9e3SIlya Lesokhin 		len -= skb->len;
2072808cf9e3SIlya Lesokhin 	}
2073808cf9e3SIlya Lesokhin 
2074808cf9e3SIlya Lesokhin 	return true;
2075808cf9e3SIlya Lesokhin }
2076808cf9e3SIlya Lesokhin 
20775d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
207867edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
207967edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
208067edfef7SAndi Kleen  * changes that result in larger path MTUs.
208167edfef7SAndi Kleen  *
20825d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20835d424d5aSJohn Heffner  *         1 if a probe was sent,
2084056834d9SIlpo Järvinen  *         -1 otherwise
2085056834d9SIlpo Järvinen  */
20865d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20875d424d5aSJohn Heffner {
20885d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
208912a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20905d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20916b58e0a5SFan Du 	struct net *net = sock_net(sk);
20925d424d5aSJohn Heffner 	int probe_size;
209391cc17c0SIlpo Järvinen 	int size_needed;
209412a59abcSEric Dumazet 	int copy, len;
20955d424d5aSJohn Heffner 	int mss_now;
20966b58e0a5SFan Du 	int interval;
20975d424d5aSJohn Heffner 
20985d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20995d424d5aSJohn Heffner 	 * not in recovery,
21005d424d5aSJohn Heffner 	 * have enough cwnd, and
210112a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
210212a59abcSEric Dumazet 	 */
210312a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
21045d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
21055d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
21065d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
210712a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
21085d424d5aSJohn Heffner 		return -1;
21095d424d5aSJohn Heffner 
21106b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
21116b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
21126b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
21136b58e0a5SFan Du 	 */
21140c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
21156b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
21166b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
211791cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
21186b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
211905cbc0dbSFan Du 	/* When misfortune happens, we are reprobing actively, and the
212005cbc0dbSFan Du 	 * reprobe timer has expired. We stick with the current probing
212105cbc0dbSFan Du 	 * process by not resetting the search range to its original values.
212205cbc0dbSFan Du 	 */
21236b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
212405cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
212505cbc0dbSFan Du 		/* Check whether enough time has elapsed for
212605cbc0dbSFan Du 		 * another round of probing.
212705cbc0dbSFan Du 		 */
212805cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
21295d424d5aSJohn Heffner 		return -1;
21305d424d5aSJohn Heffner 	}
21315d424d5aSJohn Heffner 
21325d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
21337f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
21345d424d5aSJohn Heffner 		return -1;
21355d424d5aSJohn Heffner 
213691cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
21375d424d5aSJohn Heffner 		return -1;
213890840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21395d424d5aSJohn Heffner 		return 0;
21405d424d5aSJohn Heffner 
2141d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2142d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2143d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
21445d424d5aSJohn Heffner 			return -1;
21455d424d5aSJohn Heffner 		else
21465d424d5aSJohn Heffner 			return 0;
21475d424d5aSJohn Heffner 	}
21485d424d5aSJohn Heffner 
2149808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2150808cf9e3SIlya Lesokhin 		return -1;
2151808cf9e3SIlya Lesokhin 
21525d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2153eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
215451456b29SIan Morris 	if (!nskb)
21555d424d5aSJohn Heffner 		return -1;
2156ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, nskb->truesize);
21573ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21585d424d5aSJohn Heffner 
2159fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
216041477662SJakub Kicinski 	skb_copy_decrypted(nskb, skb);
21615d424d5aSJohn Heffner 
21625d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21635d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21644de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21655d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21665d424d5aSJohn Heffner 	nskb->csum = 0;
216798be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21685d424d5aSJohn Heffner 
216950c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21702b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
217150c4817eSIlpo Järvinen 
21725d424d5aSJohn Heffner 	len = 0;
2173234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21745d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21755d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21765d424d5aSJohn Heffner 
21775d424d5aSJohn Heffner 		if (skb->len <= copy) {
21785d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21795d424d5aSJohn Heffner 			 * Throw it away. */
21804de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2181808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2182808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2183808cf9e3SIlya Lesokhin 			 */
2184808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2185888a5c53SWillem de Bruijn 			tcp_skb_collapse_tstamp(nskb, skb);
2186fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21873ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21885d424d5aSJohn Heffner 		} else {
21894de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2190a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21915d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21925d424d5aSJohn Heffner 				skb_pull(skb, copy);
21935d424d5aSJohn Heffner 			} else {
21945d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21955bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21965d424d5aSJohn Heffner 			}
21975d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21985d424d5aSJohn Heffner 		}
21995d424d5aSJohn Heffner 
22005d424d5aSJohn Heffner 		len += copy;
2201234b6860SIlpo Järvinen 
2202234b6860SIlpo Järvinen 		if (len >= probe_size)
2203234b6860SIlpo Järvinen 			break;
22045d424d5aSJohn Heffner 	}
22055bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
22065d424d5aSJohn Heffner 
22075d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
22087faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
22097faee5c0SEric Dumazet 	 */
22105d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
22115d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
22125d424d5aSJohn Heffner 		 * effectively two packets. */
22135d424d5aSJohn Heffner 		tp->snd_cwnd--;
221466f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
22155d424d5aSJohn Heffner 
22165d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
22170e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
22180e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
22195d424d5aSJohn Heffner 
22205d424d5aSJohn Heffner 		return 1;
22215d424d5aSJohn Heffner 	}
22225d424d5aSJohn Heffner 
22235d424d5aSJohn Heffner 	return -1;
22245d424d5aSJohn Heffner }
22255d424d5aSJohn Heffner 
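/* Illustrative sketch, not part of the kernel source: one step of the
 * probe_size binary search above, assuming IPv4, no TCP options, a
 * 1448-byte mss_cache and the default reordering degree of 3.
 */
static inline int mtu_probe_size_example(void)
{
	int search_low = 1280, search_high = 1500;	 /* MTU bounds */
	int probe_mtu = (search_high + search_low) >> 1; /* midpoint: 1390 */
	int probe_mss = probe_mtu - 40;	/* v4 + TCP headers: 1350 */

	/* size_needed = 1350 + (3 + 1) * 1448 = 7142 bytes: the probe
	 * plus enough trailing segments for fast retransmit to recover
	 * from a lost probe.
	 */
	return probe_mss;
}
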
2226864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2227218af599SEric Dumazet {
2228864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2229864e5c09SEric Dumazet 
2230864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2231864e5c09SEric Dumazet 		return false;
2232864e5c09SEric Dumazet 
2233864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2234864e5c09SEric Dumazet 		return false;
2235864e5c09SEric Dumazet 
2236864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2237864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2238864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2239864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2240864e5c09SEric Dumazet 		sock_hold(sk);
2241864e5c09SEric Dumazet 	}
2242864e5c09SEric Dumazet 	return true;
2243218af599SEric Dumazet }
2244218af599SEric Dumazet 
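/* Illustrative sketch, not part of the kernel source: the pacing gate
 * above simply compares the precomputed earliest departure time with
 * the cached clock.
 */
static inline bool pacing_gate_example(u64 wstamp_ns, u64 clock_ns)
{
	/* e.g. wstamp_ns = clock_ns + 50000: hold off for 50 us; the
	 * hrtimer armed above fires at that absolute timestamp and
	 * transmission resumes from the timer callback.
	 */
	return wstamp_ns > clock_ns;
}
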
2245f9616c35SEric Dumazet /* TCP Small Queues:
2246f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets, or ~1 ms worth.
2247f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2248f9616c35SEric Dumazet  * This allows for :
2249f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2250f9616c35SEric Dumazet  *  - faster recovery
2251f9616c35SEric Dumazet  *  - high rates
2252f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2253f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2254f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2255f9616c35SEric Dumazet  */
2256f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2257f9616c35SEric Dumazet 				  unsigned int factor)
2258f9616c35SEric Dumazet {
225976a9ebe8SEric Dumazet 	unsigned long limit;
2260f9616c35SEric Dumazet 
226176a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
226276a9ebe8SEric Dumazet 		      2 * skb->truesize,
226376a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
2264c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
226576a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
22669184d8bbSEric Dumazet 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2267f9616c35SEric Dumazet 	limit <<= factor;
2268f9616c35SEric Dumazet 
2269a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2270a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
2271a842fe14SEric Dumazet 		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2272a842fe14SEric Dumazet 
2273a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2274a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2275a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2276a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2277a842fe14SEric Dumazet 		 */
2278a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2279a842fe14SEric Dumazet 		limit += extra_bytes;
2280a842fe14SEric Dumazet 	}
228114afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
228275c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
228375eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
228475eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
228575eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
228675eefc6cSEric Dumazet 		 */
228775c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
228875eefc6cSEric Dumazet 			return false;
228975eefc6cSEric Dumazet 
22907aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2291f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2292f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2293f9616c35SEric Dumazet 		 * test again the condition.
2294f9616c35SEric Dumazet 		 */
2295f9616c35SEric Dumazet 		smp_mb__after_atomic();
229614afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2297f9616c35SEric Dumazet 			return true;
2298f9616c35SEric Dumazet 	}
2299f9616c35SEric Dumazet 	return false;
2300f9616c35SEric Dumazet }
2301f9616c35SEric Dumazet 
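/* Illustrative sketch, not part of the kernel source: the TSQ limit
 * above with assumed numbers, ignoring the 2 * skb->truesize floor and
 * the tcp_limit_output_bytes cap.  sk_pacing_shift defaults to 10, so
 * pacing_rate >> 10 is roughly one millisecond worth of bytes.
 */
static inline unsigned long tsq_limit_example(unsigned int factor)
{
	unsigned long pacing_rate = 125000000;	 /* 1 Gbit/s, in bytes/sec */
	unsigned long limit = pacing_rate >> 10; /* ~122 KB, ~1 ms of data */

	return limit << factor;	/* factor = 1 for retransmits: ~244 KB */
}
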
230205b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
230305b055e8SFrancis Yan {
2304628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2305efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
230605b055e8SFrancis Yan 
2307efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2308efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
230905b055e8SFrancis Yan 	tp->chrono_start = now;
231005b055e8SFrancis Yan 	tp->chrono_type = new;
231105b055e8SFrancis Yan }
231205b055e8SFrancis Yan 
231305b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
231405b055e8SFrancis Yan {
231505b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
231605b055e8SFrancis Yan 
231705b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
23180f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
23190f87230dSFrancis Yan 	 * over the other conditions. So if something "more interesting"
232005b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
232105b055e8SFrancis Yan 	 */
232205b055e8SFrancis Yan 	if (type > tp->chrono_type)
232305b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
232405b055e8SFrancis Yan }
232505b055e8SFrancis Yan 
232605b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
232705b055e8SFrancis Yan {
232805b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
232905b055e8SFrancis Yan 
23310f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
23320f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
23330f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
23340f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking when that
23350f87230dSFrancis Yan 	 * condition is the "most interesting", i.e. currently tracked,
23360f87230dSFrancis Yan 	 * chrono, and we start the busy chrono if we have pending data.
23370f87230dSFrancis Yan 	 */
233875c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
233905b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
23400f87230dSFrancis Yan 	else if (type == tp->chrono_type)
23410f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
234205b055e8SFrancis Yan }
234305b055e8SFrancis Yan 
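/* Worked example of the priority scheme, for illustration only: the
 * enum is ordered UNSPEC < BUSY < RWND_LIMITED < SNDBUF_LIMITED.  A
 * socket currently accounted as RWND_LIMITED is not demoted by
 * tcp_chrono_start(sk, TCP_CHRONO_BUSY), but is upgraded by
 * TCP_CHRONO_SNDBUF_LIMITED.  When the tracked condition itself stops
 * while data is still queued, accounting falls back to TCP_CHRONO_BUSY
 * as in tcp_chrono_stop() above.
 */
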
23441da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
23451da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
23461da177e4SLinus Torvalds  * window for us.
23471da177e4SLinus Torvalds  *
2348f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2349f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2350f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2351f8269a49SIlpo Järvinen  *
23526ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
23536ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
23546ba8a3b1SNandita Dukkipati  *
2355a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2356a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
23571da177e4SLinus Torvalds  */
2358a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2359d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
23601da177e4SLinus Torvalds {
23611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
236292df7b51SDavid S. Miller 	struct sk_buff *skb;
2363c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2364c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23655d424d5aSJohn Heffner 	int result;
23665615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2367605ad7f1SEric Dumazet 	u32 max_segs;
23681da177e4SLinus Torvalds 
2369c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23705d424d5aSJohn Heffner 
2371ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2372d5dd9175SIlpo Järvinen 	if (!push_one) {
23735d424d5aSJohn Heffner 		/* Do MTU probing. */
2374d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2375d5dd9175SIlpo Järvinen 		if (!result) {
2376a2a385d6SEric Dumazet 			return false;
23775d424d5aSJohn Heffner 		} else if (result > 0) {
23785d424d5aSJohn Heffner 			sent_pkts = 1;
23795d424d5aSJohn Heffner 		}
2380d5dd9175SIlpo Järvinen 	}
23815d424d5aSJohn Heffner 
2382ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2383fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2384c8ac3774SHerbert Xu 		unsigned int limit;
2385c8ac3774SHerbert Xu 
238679861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
238779861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
238879861919SEric Dumazet 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
238979861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2390bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
239179861919SEric Dumazet 			goto repair; /* Skip network transmission */
239279861919SEric Dumazet 		}
239379861919SEric Dumazet 
2394218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2395218af599SEric Dumazet 			break;
2396218af599SEric Dumazet 
23975bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2398c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2399c1b4a7e6SDavid S. Miller 
2400b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
24016ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
24026ba8a3b1SNandita Dukkipati 			if (push_one == 2)
24036ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
24046ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
24056ba8a3b1SNandita Dukkipati 			else
2406b68e9f85SHerbert Xu 				break;
24076ba8a3b1SNandita Dukkipati 		}
2408b68e9f85SHerbert Xu 
24095615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
24105615f886SFrancis Yan 			is_rwnd_limited = true;
2411b68e9f85SHerbert Xu 			break;
24125615f886SFrancis Yan 		}
2413b68e9f85SHerbert Xu 
2414d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2415aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2416aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2417aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2418aa93466bSDavid S. Miller 				break;
2419c1b4a7e6SDavid S. Miller 		} else {
2420ca8a2263SNeal Cardwell 			if (!push_one &&
2421605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2422f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2423aa93466bSDavid S. Miller 				break;
2424c1b4a7e6SDavid S. Miller 		}
2425aa93466bSDavid S. Miller 
2426605ad7f1SEric Dumazet 		limit = mss_now;
2427d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2428605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2429605ad7f1SEric Dumazet 						    min_t(unsigned int,
2430605ad7f1SEric Dumazet 							  cwnd_quota,
2431605ad7f1SEric Dumazet 							  max_segs),
2432605ad7f1SEric Dumazet 						    nonagle);
2433605ad7f1SEric Dumazet 
2434605ad7f1SEric Dumazet 		if (skb->len > limit &&
243556483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2436605ad7f1SEric Dumazet 			break;
2437605ad7f1SEric Dumazet 
2438f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
243946d3ceabSEric Dumazet 			break;
2440c9eeec26SEric Dumazet 
2441*1f85e626SEric Dumazet 		/* Argh, we hit an empty skb, presumably a thread
2442*1f85e626SEric Dumazet 		 * is sleeping in sendmsg()/sk_stream_wait_memory().
2443*1f85e626SEric Dumazet 		 * We do not want to send a pure-ack packet and have
2444*1f85e626SEric Dumazet 		 * a strange looking rtx queue with empty packet(s).
2445*1f85e626SEric Dumazet 		 */
2446*1f85e626SEric Dumazet 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2447*1f85e626SEric Dumazet 			break;
2448*1f85e626SEric Dumazet 
2449d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
24501da177e4SLinus Torvalds 			break;
24511da177e4SLinus Torvalds 
2452ec342325SAndrew Vagin repair:
24531da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
24541da177e4SLinus Torvalds 		 * This call will increment packets_out.
24551da177e4SLinus Torvalds 		 */
245666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
24571da177e4SLinus Torvalds 
24581da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2459a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2460d5dd9175SIlpo Järvinen 
2461d5dd9175SIlpo Järvinen 		if (push_one)
2462d5dd9175SIlpo Järvinen 			break;
24631da177e4SLinus Torvalds 	}
24641da177e4SLinus Torvalds 
24655615f886SFrancis Yan 	if (is_rwnd_limited)
24665615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
24675615f886SFrancis Yan 	else
24685615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
24695615f886SFrancis Yan 
2470aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2471684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2472684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24736ba8a3b1SNandita Dukkipati 
24746ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24756ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2476ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2477d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2478ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2479a2a385d6SEric Dumazet 		return false;
24801da177e4SLinus Torvalds 	}
248175c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24826ba8a3b1SNandita Dukkipati }
24836ba8a3b1SNandita Dukkipati 
2484ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24856ba8a3b1SNandita Dukkipati {
24866ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24876ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2488a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24892ae21cf5SEric Dumazet 	int early_retrans;
24906ba8a3b1SNandita Dukkipati 
24916ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24926ba8a3b1SNandita Dukkipati 	 * finishes.
24936ba8a3b1SNandita Dukkipati 	 */
2494d983ea6fSEric Dumazet 	if (rcu_access_pointer(tp->fastopen_rsk))
24956ba8a3b1SNandita Dukkipati 		return false;
24966ba8a3b1SNandita Dukkipati 
24972ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
24986ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK-capable connections
2499b4f70c3dSNeal Cardwell 	 * not in loss recovery that are limited by either cwnd or the application.
25006ba8a3b1SNandita Dukkipati 	 */
25012ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2502bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2503b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2504b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
25056ba8a3b1SNandita Dukkipati 		return false;
25066ba8a3b1SNandita Dukkipati 
2507bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2508f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2509f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
25106ba8a3b1SNandita Dukkipati 	 */
2511bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2512bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
25136ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2514bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2515bb4d991aSYuchung Cheng 		else
2516bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2517bb4d991aSYuchung Cheng 	} else {
2518bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2519bb4d991aSYuchung Cheng 	}
25206ba8a3b1SNandita Dukkipati 
2521a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2522ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2523ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2524ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2525a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2526a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
25276ba8a3b1SNandita Dukkipati 
25283f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
25293f80e08fSEric Dumazet 			     TCP_RTO_MAX, NULL);
25306ba8a3b1SNandita Dukkipati 	return true;
25316ba8a3b1SNandita Dukkipati }
25326ba8a3b1SNandita Dukkipati 
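/* Illustrative sketch, not part of the kernel source: the probe timeout
 * arithmetic above, assuming a 50 ms smoothed RTT.  tp->srtt_us stores
 * 8 * srtt, so srtt_us >> 2 equals 2 * srtt.
 */
static inline u32 tlp_timeout_usecs_example(void)
{
	u32 srtt_us = 8 * 50000;	/* 50 ms srtt, stored <<3 */
	u32 timeout = srtt_us >> 2;	/* 2 * srtt = 100000 us */

	/* With exactly one packet in flight, TCP_RTO_MIN (200 ms) is
	 * added to absorb a delayed ACK: a 300 ms probe timeout, still
	 * clamped by any earlier RTO deadline.
	 */
	return timeout + 200000;
}
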
25331f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
25341f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
25351f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
25361f3279aeSEric Dumazet  */
25371f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
25381f3279aeSEric Dumazet 				    const struct sk_buff *skb)
25391f3279aeSEric Dumazet {
254039bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2541c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
25421f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
25431f3279aeSEric Dumazet 		return true;
25441f3279aeSEric Dumazet 	}
25451f3279aeSEric Dumazet 	return false;
25461f3279aeSEric Dumazet }
25471f3279aeSEric Dumazet 
2548b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
25496ba8a3b1SNandita Dukkipati  * retransmit the last segment.
25506ba8a3b1SNandita Dukkipati  */
25516ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
25526ba8a3b1SNandita Dukkipati {
25539b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
25546ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
25556ba8a3b1SNandita Dukkipati 	int pcount;
25566ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
25576ba8a3b1SNandita Dukkipati 
2558b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
255975c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2560b340b264SYuchung Cheng 		pcount = tp->packets_out;
2561b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2562b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2563b340b264SYuchung Cheng 			goto probe_sent;
25646ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25656ba8a3b1SNandita Dukkipati 	}
256675c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2567b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2568b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2569b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2570b2b7af86SYuchung Cheng 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2571b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2572b2b7af86SYuchung Cheng 		return;
2573b2b7af86SYuchung Cheng 	}
25746ba8a3b1SNandita Dukkipati 
25759b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
25769b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25779b717a8dSNandita Dukkipati 		goto rearm_timer;
25789b717a8dSNandita Dukkipati 
25791f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25801f3279aeSEric Dumazet 		goto rearm_timer;
25811f3279aeSEric Dumazet 
25826ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25836ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25846ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25856ba8a3b1SNandita Dukkipati 
25866ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
258775c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
258875c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25896cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25906ba8a3b1SNandita Dukkipati 			goto rearm_timer;
259175c119afSEric Dumazet 		skb = skb_rb_next(skb);
25926ba8a3b1SNandita Dukkipati 	}
25936ba8a3b1SNandita Dukkipati 
25946ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25956ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25966ba8a3b1SNandita Dukkipati 
259710d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2598b340b264SYuchung Cheng 		goto rearm_timer;
25996ba8a3b1SNandita Dukkipati 
26009b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
26019b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
26029b717a8dSNandita Dukkipati 
2603b340b264SYuchung Cheng probe_sent:
2604c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2605fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2606fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2607b340b264SYuchung Cheng rearm_timer:
2608fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
26091da177e4SLinus Torvalds }
26101da177e4SLinus Torvalds 
2611a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2612a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2613a762a980SDavid S. Miller  * The socket must be locked by the caller.
2614a762a980SDavid S. Miller  */
26159e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
26169e412ba7SIlpo Järvinen 			       int nonagle)
2617a762a980SDavid S. Miller {
2618726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2619726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2620726e07a8SIlpo Järvinen 	 * all will be happy.
2621726e07a8SIlpo Järvinen 	 */
2622726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2623726e07a8SIlpo Järvinen 		return;
2624726e07a8SIlpo Järvinen 
262599a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
26267450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
26279e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2628a762a980SDavid S. Miller }
2629a762a980SDavid S. Miller 
2630c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2631c1b4a7e6SDavid S. Miller  * a true push of pending frames to set up the probe timer etc.
2632c1b4a7e6SDavid S. Miller  */
2633c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2634c1b4a7e6SDavid S. Miller {
2635fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2636c1b4a7e6SDavid S. Miller 
2637c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2638c1b4a7e6SDavid S. Miller 
2639d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2640c1b4a7e6SDavid S. Miller }
2641c1b4a7e6SDavid S. Miller 
26421da177e4SLinus Torvalds /* This function returns the amount that we can raise the
26431da177e4SLinus Torvalds  * usable window based on the following constraints
26441da177e4SLinus Torvalds  *
26451da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
26461da177e4SLinus Torvalds  * 2. We limit memory per socket
26471da177e4SLinus Torvalds  *
26481da177e4SLinus Torvalds  * RFC 1122:
26491da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
26501da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
26511da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
26521da177e4SLinus Torvalds  *
26531da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
26541da177e4SLinus Torvalds  * it at least MSS bytes.
26551da177e4SLinus Torvalds  *
26561da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
26571da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
26581da177e4SLinus Torvalds  *
26591da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
26601da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
26611da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
26621da177e4SLinus Torvalds  * window to always advance by a single byte.
26631da177e4SLinus Torvalds  *
26641da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
26651da177e4SLinus Torvalds  * then this will not be a problem.
26661da177e4SLinus Torvalds  *
26671da177e4SLinus Torvalds  * BSD seems to make the following compromise:
26681da177e4SLinus Torvalds  *
26691da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
26701da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
26711da177e4SLinus Torvalds  *	then set the window to 0.
26721da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
26731da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
26741da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26751da177e4SLinus Torvalds  *
26761da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26771da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26781da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26791da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26801da177e4SLinus Torvalds  * because the pipeline is full.
26811da177e4SLinus Torvalds  *
26821da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26831da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26841da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26851da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26861da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26871da177e4SLinus Torvalds  *
26881da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26891da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26901da177e4SLinus Torvalds  *
26911da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26921da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26931da177e4SLinus Torvalds  */
26941da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26951da177e4SLinus Torvalds {
2696463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
26971da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2698caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
26991da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
27001da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
27011da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
27021da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
27031da177e4SLinus Torvalds 	 */
2704463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
27051da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
270686c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
270786c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
27081da177e4SLinus Torvalds 	int window;
27091da177e4SLinus Torvalds 
271006425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
27111da177e4SLinus Torvalds 		mss = full_space;
271206425c30SEric Dumazet 		if (mss <= 0)
271306425c30SEric Dumazet 			return 0;
271406425c30SEric Dumazet 	}
2715b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2716463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
27171da177e4SLinus Torvalds 
2718b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2719056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2720056834d9SIlpo Järvinen 					       4U * tp->advmss);
27211da177e4SLinus Torvalds 
272286c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
272386c1a045SFlorian Westphal 		 * increase it due to wscale.
272486c1a045SFlorian Westphal 		 */
272586c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
272686c1a045SFlorian Westphal 
272786c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
272886c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
272986c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
273086c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
273186c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
273286c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
273386c1a045SFlorian Westphal 		 */
273486c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
27351da177e4SLinus Torvalds 			return 0;
27361da177e4SLinus Torvalds 	}
27371da177e4SLinus Torvalds 
27381da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
27391da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
27401da177e4SLinus Torvalds 
27411da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
27421da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
27431da177e4SLinus Torvalds 	 */
27441da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
27451da177e4SLinus Torvalds 		window = free_space;
27461da177e4SLinus Torvalds 
27471da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
27481da177e4SLinus Torvalds 		 * Import case: prevent zero window announcement if
27491da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
27501da177e4SLinus Torvalds 		 */
27511935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
27521da177e4SLinus Torvalds 	} else {
27531935299dSGao Feng 		window = tp->rcv_wnd;
27541da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
27551da177e4SLinus Torvalds 		 * Window clamp already applied above.
27561da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
27571da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
27581da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
27591da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
27601da177e4SLinus Torvalds 		 * is too small.
27611da177e4SLinus Torvalds 		 */
27621da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
27631935299dSGao Feng 			window = rounddown(free_space, mss);
276484565070SJohn Heffner 		else if (mss == full_space &&
2765b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
276684565070SJohn Heffner 			window = free_space;
27671da177e4SLinus Torvalds 	}
27681da177e4SLinus Torvalds 
27691da177e4SLinus Torvalds 	return window;
27701da177e4SLinus Torvalds }
27711da177e4SLinus Torvalds 
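/* Illustrative sketch, not part of the kernel source: the rounding in
 * the fallback branch above, with assumed numbers and no window scaling.
 */
static inline u32 select_window_example(void)
{
	u32 free_space = 25000, mss = 1448;

	/* 17 full segments; the window then stays put while it remains
	 * within one MSS of free_space, avoiding the divide and multiply
	 * on most calls.
	 */
	return rounddown(free_space, mss);	/* 24616 */
}
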
2772cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2773082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2774082ac2d5SMartin KaFai Lau {
27750a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27760a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27770a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2778082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2779082ac2d5SMartin KaFai Lau 
27800a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2781082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27822de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27832de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2784082ac2d5SMartin KaFai Lau 	}
2785082ac2d5SMartin KaFai Lau }
2786082ac2d5SMartin KaFai Lau 
27874a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2788f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27891da177e4SLinus Torvalds {
27901da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
279175c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
279213dde04fSWei Yongjun 	int next_skb_size;
27931da177e4SLinus Torvalds 
2794058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27951da177e4SLinus Torvalds 
2796058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
27971da177e4SLinus Torvalds 
2798f8071cdeSEric Dumazet 	if (next_skb_size) {
2799f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2800f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2801f8071cdeSEric Dumazet 				      next_skb_size);
28023b4929f6SEric Dumazet 		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
2803f8071cdeSEric Dumazet 			return false;
2804f8071cdeSEric Dumazet 	}
28052b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2806a6963a6bSIlpo Järvinen 
28071da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
28081da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
28091da177e4SLinus Torvalds 
2810e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
28114de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
28121da177e4SLinus Torvalds 
28131da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
28141da177e4SLinus Torvalds 	 * packet counting does not break.
28151da177e4SLinus Torvalds 	 */
28164828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2817a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2818b7689205SIlpo Järvinen 
2819b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2820ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2821ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2822ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2823b7689205SIlpo Järvinen 
2824797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2825797108d1SIlpo Järvinen 
2826082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2827082ac2d5SMartin KaFai Lau 
282875c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2829f8071cdeSEric Dumazet 	return true;
28301da177e4SLinus Torvalds }
28311da177e4SLinus Torvalds 
283267edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2833a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
28344a17fc3aSIlpo Järvinen {
28354a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2836a2a385d6SEric Dumazet 		return false;
28374a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2838a2a385d6SEric Dumazet 		return false;
28392331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
28404a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2841a2a385d6SEric Dumazet 		return false;
28424a17fc3aSIlpo Järvinen 
2843a2a385d6SEric Dumazet 	return true;
28444a17fc3aSIlpo Järvinen }
28454a17fc3aSIlpo Järvinen 
284667edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
284767edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
284867edfef7SAndi Kleen  */
28494a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
28504a17fc3aSIlpo Järvinen 				     int space)
28514a17fc3aSIlpo Järvinen {
28524a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
28534a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2854a2a385d6SEric Dumazet 	bool first = true;
28554a17fc3aSIlpo Järvinen 
2856e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
28574a17fc3aSIlpo Järvinen 		return;
28584de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
28594a17fc3aSIlpo Järvinen 		return;
28604a17fc3aSIlpo Järvinen 
286175c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
28624a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
28634a17fc3aSIlpo Järvinen 			break;
28644a17fc3aSIlpo Järvinen 
2865a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2866a643b5d4SMartin KaFai Lau 			break;
2867a643b5d4SMartin KaFai Lau 
28684a17fc3aSIlpo Järvinen 		space -= skb->len;
28694a17fc3aSIlpo Järvinen 
28704a17fc3aSIlpo Järvinen 		if (first) {
2871a2a385d6SEric Dumazet 			first = false;
28724a17fc3aSIlpo Järvinen 			continue;
28734a17fc3aSIlpo Järvinen 		}
28744a17fc3aSIlpo Järvinen 
28754a17fc3aSIlpo Järvinen 		if (space < 0)
28764a17fc3aSIlpo Järvinen 			break;
28774a17fc3aSIlpo Järvinen 
28784a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28794a17fc3aSIlpo Järvinen 			break;
28804a17fc3aSIlpo Järvinen 
2881f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2882f8071cdeSEric Dumazet 			break;
28834a17fc3aSIlpo Järvinen 	}
28844a17fc3aSIlpo Järvinen }
28854a17fc3aSIlpo Järvinen 
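/* Worked example with assumed sizes, for illustration only:
 * __tcp_retransmit_skb() calls tcp_retrans_try_collapse() with
 * space = cur_mss when the skb to resend is smaller than one MSS.
 * With cur_mss = 1448 and queued retransmit skbs of 500 and 400 bytes,
 * the remaining budget after the first skb is 948; the 400-byte
 * successor fits and is folded in, so one 900-byte segment goes out
 * instead of two small ones.
 */
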
28861da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28871da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28881da177e4SLinus Torvalds  * error occurred which prevented the send.
28891da177e4SLinus Torvalds  */
289010d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28911da177e4SLinus Torvalds {
28925d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
289310d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28947d227cd2SSridhar Samudrala 	unsigned int cur_mss;
289510d3be56SEric Dumazet 	int diff, len, err;
28961da177e4SLinus Torvalds 
289810d3be56SEric Dumazet 	/* Inconclusive MTU probe */
289910d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
29005d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
29015d424d5aSJohn Heffner 
29021da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2903caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
29041da177e4SLinus Torvalds 	 */
290514afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2906ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2907ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
29081da177e4SLinus Torvalds 		return -EAGAIN;
29091da177e4SLinus Torvalds 
29101f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
29111f3279aeSEric Dumazet 		return -EBUSY;
29121f3279aeSEric Dumazet 
29131da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
29147f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
29157f582b24SEric Dumazet 			WARN_ON_ONCE(1);
29167f582b24SEric Dumazet 			return -EINVAL;
29177f582b24SEric Dumazet 		}
29181da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
29191da177e4SLinus Torvalds 			return -ENOMEM;
29201da177e4SLinus Torvalds 	}
29211da177e4SLinus Torvalds 
29227d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
29237d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
29247d227cd2SSridhar Samudrala 
29250c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
29267d227cd2SSridhar Samudrala 
29271da177e4SLinus Torvalds 	/* If the receiver has shrunk its window and the skb is out of
29281da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
29291da177e4SLinus Torvalds 	 * case when the window is shrunk to zero: then
29301da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
29311da177e4SLinus Torvalds 	 */
29329d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
29339d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
29341da177e4SLinus Torvalds 		return -EAGAIN;
29351da177e4SLinus Torvalds 
293610d3be56SEric Dumazet 	len = cur_mss * segs;
293710d3be56SEric Dumazet 	if (skb->len > len) {
293875c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
293975c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
29401da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
294102276f3cSIlpo Järvinen 	} else {
2942c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2943c52e2421SEric Dumazet 			return -ENOMEM;
294410d3be56SEric Dumazet 
294510d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
294610d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
294710d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
294810d3be56SEric Dumazet 		if (diff)
294910d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
295010d3be56SEric Dumazet 		if (skb->len < cur_mss)
295110d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
29521da177e4SLinus Torvalds 	}
29531da177e4SLinus Torvalds 
295449213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
295549213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
295649213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
295749213555SDaniel Borkmann 
2958678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2959678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2960678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2961678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2962678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2963678550c6SYuchung Cheng 	tp->total_retrans += segs;
2964fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2965678550c6SYuchung Cheng 
296650bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
296750bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
296850bceae9SThomas Graf 	 * beyond what csum_start can cover.
296950bceae9SThomas Graf 	 */
297050bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
297150bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
297210a81980SEric Dumazet 		struct sk_buff *nskb;
297310a81980SEric Dumazet 
2974e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
297510a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2976c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2977117632e6SEric Dumazet 				     -ENOBUFS;
2978e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2979e2080072SEric Dumazet 
29805889e2c0SYousuk Seung 		if (!err) {
2981a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
29825889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29835889e2c0SYousuk Seung 		}
2984117632e6SEric Dumazet 	} else {
2985c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2986117632e6SEric Dumazet 	}
2987c84a5711SYuchung Cheng 
29887f12422cSYuchung Cheng 	/* To avoid taking spuriously low RTT samples based on a timestamp
29897f12422cSYuchung Cheng 	 * for a transmit that never happened, always mark EVER_RETRANS
29907f12422cSYuchung Cheng 	 */
29917f12422cSYuchung Cheng 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
29927f12422cSYuchung Cheng 
2993a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2994a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2995a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2996a31ad29eSLawrence Brakmo 
2997fc9f3501SEric Dumazet 	if (likely(!err)) {
2998e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2999678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
3000ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3001fc9f3501SEric Dumazet 	}
3002c84a5711SYuchung Cheng 	return err;
300393b174adSYuchung Cheng }
300493b174adSYuchung Cheng 
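/* Illustrative sketch, not part of the kernel source: the segment
 * budgeting in __tcp_retransmit_skb() above, with assumed numbers.
 */
static inline int retrans_len_example(int segs)
{
	int cur_mss = 1448;
	int len = cur_mss * segs;	/* segs = 2: 2896 bytes allowed */

	/* A 4000-byte skb exceeds len, so tcp_fragment() splits it at
	 * 2896 bytes and only the first part is retransmitted now.
	 */
	return len;
}
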
300510d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
300693b174adSYuchung Cheng {
300793b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
300810d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds 	if (err == 0) {
30111da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
30121da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3013e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
30141da177e4SLinus Torvalds 		}
30151da177e4SLinus Torvalds #endif
30161da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
30171da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
30187ae18975SYuchung Cheng 	}
30191da177e4SLinus Torvalds 
30207ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
30211da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
30227faee5c0SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp(skb);
30231da177e4SLinus Torvalds 
30246e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
30256e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
30266e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
30271da177e4SLinus Torvalds 	return err;
30281da177e4SLinus Torvalds }
30291da177e4SLinus Torvalds 
30301da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
30311da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
30321da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
30331da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
30341da177e4SLinus Torvalds  */
30351da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
30361da177e4SLinus Torvalds {
30376687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3038b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
30391da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3040840a3cbeSYuchung Cheng 	u32 max_segs;
304161eb55f4SIlpo Järvinen 	int mib_idx;
30426a438bbeSStephen Hemminger 
304345e77d31SIlpo Järvinen 	if (!tp->packets_out)
304445e77d31SIlpo Järvinen 		return;
304545e77d31SIlpo Järvinen 
304675c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3047b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3048ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
304975c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3050dca0aaf8SEric Dumazet 		__u8 sacked;
305110d3be56SEric Dumazet 		int segs;
30521da177e4SLinus Torvalds 
3053218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3054218af599SEric Dumazet 			break;
3055218af599SEric Dumazet 
30566a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
305751456b29SIan Morris 		if (!hole)
30586a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
30596a438bbeSStephen Hemminger 
306010d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
306110d3be56SEric Dumazet 		if (segs <= 0)
30621da177e4SLinus Torvalds 			return;
3063dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3064a3d2e9f8SEric Dumazet 	/* In case tcp_shift_skb_data() has aggregated large skbs,
3065a3d2e9f8SEric Dumazet 	 * we need to make sure we do not send too big TSO packets
3066a3d2e9f8SEric Dumazet 		 */
3067a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
30680e1c54c2SIlpo Järvinen 
3069840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3070006f582cSIlpo Järvinen 			break;
30710e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
307251456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
30730e1c54c2SIlpo Järvinen 				hole = skb;
307461eb55f4SIlpo Järvinen 			continue;
30751da177e4SLinus Torvalds 
30760e1c54c2SIlpo Järvinen 		} else {
30770e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30780e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30790e1c54c2SIlpo Järvinen 			else
30800e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30810e1c54c2SIlpo Järvinen 		}
30820e1c54c2SIlpo Järvinen 
30830e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
308461eb55f4SIlpo Järvinen 			continue;
308540b215e5SPavel Emelyanov 
3086f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3087f9616c35SEric Dumazet 			return;
3088f9616c35SEric Dumazet 
308910d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30901da177e4SLinus Torvalds 			return;
309124ab6becSYuchung Cheng 
3092de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30931da177e4SLinus Torvalds 
3094684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3095a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3096a262f0cdSNandita Dukkipati 
309775c119afSEric Dumazet 		if (skb == rtx_head &&
309857dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
30993f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
31003f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
31013f80e08fSEric Dumazet 					     TCP_RTO_MAX,
31023f80e08fSEric Dumazet 					     skb);
31031da177e4SLinus Torvalds 	}
31041da177e4SLinus Torvalds }
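
/* Illustrative sketch (hypothetical helper, not in this file) of the
 * per-skb budget computed in the loop above: we may retransmit only
 * what is left of the congestion window after subtracting packets
 * already in flight, further capped by the TSO sizing limit so that
 * large aggregated skbs are not sent as oversized bursts.
 */
static int rtx_segs_budget(u32 cwnd, u32 in_flight, u32 max_segs)
{
	int segs = (int)cwnd - (int)in_flight;

	if (segs <= 0)
		return 0;	/* window exhausted: stop walking the queue */
	return min_t(int, segs, max_segs);
}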
31051da177e4SLinus Torvalds 
3106d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3107d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3108845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay the FIN
3109845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3110a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3111a6c5ea4cSEric Dumazet  * with edge-triggered epoll()
3112d83769a5SEric Dumazet  */
3113a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3114d83769a5SEric Dumazet {
3115e805605cSJohannes Weiner 	int amt;
3116d83769a5SEric Dumazet 
3117d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3118d83769a5SEric Dumazet 		return;
3119d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3120d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3121e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3122e805605cSJohannes Weiner 
3123baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3124baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3125d83769a5SEric Dumazet }
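
/* Worked example (sketch, assuming the usual SK_MEM_QUANTUM ==
 * PAGE_SIZE == 4096): a FIN skb with truesize 1280 on a socket with no
 * forward allocation needs one quantum, so sk_forward_alloc is
 * force-credited 4096 bytes even if this overshoots the tcp_mem
 * limits. The helper below only mirrors the sk_mem_pages() rounding;
 * it is not used by this file.
 */
static int forced_quanta_needed(int size)
{
	return (size + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}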
3126d83769a5SEric Dumazet 
3127845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3128845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
31291da177e4SLinus Torvalds  */
31301da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
31311da177e4SLinus Torvalds {
3132845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
31331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31341da177e4SLinus Torvalds 
3135845704a5SEric Dumazet 	/* Optimization: tack the FIN onto the last skb in the write queue
3136845704a5SEric Dumazet 	 * if that skb was not yet sent, or if we are under memory pressure.
3137845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a
3138845704a5SEric Dumazet 	 * timeout, as the TCP stack thinks it has already been transmitted.
31391da177e4SLinus Torvalds 	 */
314075c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
314175c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
314275c119afSEric Dumazet 
314375c119afSEric Dumazet 	if (tskb) {
3144845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3145845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
31461da177e4SLinus Torvalds 		tp->write_seq++;
314775c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3148845704a5SEric Dumazet 			/* This means tskb was already sent.
3149845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3150845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3151845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3152845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3153845704a5SEric Dumazet 			 */
3154e0d694d6SEric Dumazet 			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3155845704a5SEric Dumazet 			return;
3156845704a5SEric Dumazet 		}
31571da177e4SLinus Torvalds 	} else {
3158845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3159d1edc085SColin Ian King 		if (unlikely(!skb))
3160845704a5SEric Dumazet 			return;
3161d1edc085SColin Ian King 
3162e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3163d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3164a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
31651da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3166e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3167a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
31681da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
31691da177e4SLinus Torvalds 	}
3170845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
31711da177e4SLinus Torvalds }
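
/* Decision sketch (hypothetical helper, not in this file) mirroring
 * the branch above: piggy-back the FIN on the write-queue tail when
 * one exists; under memory pressure, even a tail already sitting in
 * the retransmit queue will do. Only when neither exists must a fresh
 * skb be allocated.
 */
static bool fin_can_piggyback(struct sock *sk)
{
	struct sk_buff *tskb = tcp_write_queue_tail(sk);

	if (!tskb && tcp_under_memory_pressure(sk))
		tskb = skb_rb_last(&sk->tcp_rtx_queue);
	return tskb != NULL;
}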
31721da177e4SLinus Torvalds 
31731da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
31741da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
31751da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
317665bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31771da177e4SLinus Torvalds  */
3178dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31791da177e4SLinus Torvalds {
31801da177e4SLinus Torvalds 	struct sk_buff *skb;
31811da177e4SLinus Torvalds 
31827cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31837cc2b043SGao Feng 
31841da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31851da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31861da177e4SLinus Torvalds 	if (!skb) {
31874e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31881da177e4SLinus Torvalds 		return;
31891da177e4SLinus Torvalds 	}
31901da177e4SLinus Torvalds 
31911da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31921da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3193e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3194a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31959a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
31961da177e4SLinus Torvalds 	/* Send it off. */
3197dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
31984e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3199c24b14c4SSong Liu 
3200c24b14c4SSong Liu 	/* trace_tcp_send_reset() expects the skb that caused the RST;
3201c24b14c4SSong Liu 	 * the skb built here is not that troublesome skb, so pass NULL
3202c24b14c4SSong Liu 	 */
3203c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
32041da177e4SLinus Torvalds }
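
/* Usage sketch (assumption, heavily simplified from the close() path):
 * when a socket is torn down with unread data still queued, the stack
 * aborts with a RST rather than a graceful FIN, per RFC 2525, 2.17.
 * unread_data_queued() is a hypothetical predicate standing in for the
 * receive-queue scan done in tcp_close().
 */
static void close_with_unread_data(struct sock *sk, gfp_t gfp)
{
	if (unread_data_queued(sk))		/* hypothetical */
		tcp_send_active_reset(sk, gfp);
}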
32051da177e4SLinus Torvalds 
320667edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
320767edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
32081da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
32091da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
32101da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
32111da177e4SLinus Torvalds  */
32121da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
32131da177e4SLinus Torvalds {
32141da177e4SLinus Torvalds 	struct sk_buff *skb;
32151da177e4SLinus Torvalds 
321675c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
321751456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
321875c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
32191da177e4SLinus Torvalds 		return -EFAULT;
32201da177e4SLinus Torvalds 	}
32214de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
32221da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3223e2080072SEric Dumazet 			struct sk_buff *nskb;
3224e2080072SEric Dumazet 
3225e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3226e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3227e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
322851456b29SIan Morris 			if (!nskb)
32291da177e4SLinus Torvalds 				return -ENOMEM;
3230e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
323175c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3232f4a775d1SEric Dumazet 			__skb_header_release(nskb);
323375c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3234ab4e846aSEric Dumazet 			sk_wmem_queued_add(sk, nskb->truesize);
32353ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
32361da177e4SLinus Torvalds 			skb = nskb;
32371da177e4SLinus Torvalds 		}
32381da177e4SLinus Torvalds 
32394de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3240735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
32411da177e4SLinus Torvalds 	}
3242dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
32431da177e4SLinus Torvalds }
32441da177e4SLinus Torvalds 
32454aea39c1SEric Dumazet /**
32464aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
32474aea39c1SEric Dumazet  * @sk: listener socket
32484aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
32494aea39c1SEric Dumazet  * @req: request_sock pointer
32504aea39c1SEric Dumazet  *
32514aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
32524aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
32534aea39c1SEric Dumazet  */
32545d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3255e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3256ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3257b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
32581da177e4SLinus Torvalds {
32592e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
32605d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
326180f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
32625d062de7SEric Dumazet 	struct tcp_out_options opts;
32635d062de7SEric Dumazet 	struct sk_buff *skb;
3264bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
32655d062de7SEric Dumazet 	struct tcphdr *th;
3266f5fff5dcSTom Quetchenbach 	int mss;
3267a842fe14SEric Dumazet 	u64 now;
32681da177e4SLinus Torvalds 
3269ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
32704aea39c1SEric Dumazet 	if (unlikely(!skb)) {
32714aea39c1SEric Dumazet 		dst_release(dst);
32721da177e4SLinus Torvalds 		return NULL;
32734aea39c1SEric Dumazet 	}
32741da177e4SLinus Torvalds 	/* Reserve space for headers. */
32751da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32761da177e4SLinus Torvalds 
3277b3d05147SEric Dumazet 	switch (synack_type) {
3278b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32799e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3280b3d05147SEric Dumazet 		break;
3281b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3282b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3283b3d05147SEric Dumazet 		 * to avoid false sharing.
3284b3d05147SEric Dumazet 		 */
3285b3d05147SEric Dumazet 		break;
3286b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3287ca6fb065SEric Dumazet 	/* sk is a const pointer, because we want to express that
3288ca6fb065SEric Dumazet 	 * multiple cpus might call us concurrently.
3289ca6fb065SEric Dumazet 	 * sk->sk_wmem_alloc is an atomic, so we can safely promote to rw.
3290ca6fb065SEric Dumazet 		 */
3291ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3292b3d05147SEric Dumazet 		break;
3293ca6fb065SEric Dumazet 	}
32944aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32951da177e4SLinus Torvalds 
32963541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3297f5fff5dcSTom Quetchenbach 
329833ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3299a842fe14SEric Dumazet 	now = tcp_clock_ns();
33008b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
33018b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3302200ecef6SEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
33038b5f12d0SFlorian Westphal 	else
33048b5f12d0SFlorian Westphal #endif
33059e450c1eSYuchung Cheng 	{
3306a842fe14SEric Dumazet 		skb->skb_mstamp_ns = now;
33079e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
33089e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
33099e450c1eSYuchung Cheng 	}
331080f03e27SEric Dumazet 
331180f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
331280f03e27SEric Dumazet 	rcu_read_lock();
3313fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
331480f03e27SEric Dumazet #endif
331558d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
331660e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
331760e2a778SUrsula Braun 					     foc) + sizeof(*th);
331833ad798cSAdam Langley 
3319aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3320aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
33211da177e4SLinus Torvalds 
3322ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
33231da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
33241da177e4SLinus Torvalds 	th->syn = 1;
33251da177e4SLinus Torvalds 	th->ack = 1;
33266ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3327b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3328634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3329e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
33303b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
33313b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
33328336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
33338336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
33341da177e4SLinus Torvalds 
33351da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3336ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
33375d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
33381da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
333990bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3340cfb6eeb4SYOSHIFUJI Hideaki 
3341cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3342cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
334380f03e27SEric Dumazet 	if (md5)
3344bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
334539f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
334680f03e27SEric Dumazet 	rcu_read_unlock();
3347cfb6eeb4SYOSHIFUJI Hideaki #endif
3348cfb6eeb4SYOSHIFUJI Hideaki 
3349a842fe14SEric Dumazet 	skb->skb_mstamp_ns = now;
3350a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3351a842fe14SEric Dumazet 
33521da177e4SLinus Torvalds 	return skb;
33531da177e4SLinus Torvalds }
33544bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
33551da177e4SLinus Torvalds 
335681164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
335781164413SDaniel Borkmann {
335881164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
335981164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
336081164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
336181164413SDaniel Borkmann 
336281164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
336381164413SDaniel Borkmann 		return;
336481164413SDaniel Borkmann 
336581164413SDaniel Borkmann 	rcu_read_lock();
336681164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
336781164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
336881164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
336981164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
337081164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
337181164413SDaniel Borkmann 	}
337281164413SDaniel Borkmann 	rcu_read_unlock();
337381164413SDaniel Borkmann }
337481164413SDaniel Borkmann 
337567edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3376f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
33771da177e4SLinus Torvalds {
3378cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
33791da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33801da177e4SLinus Torvalds 	__u8 rcv_wscale;
338113d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33821da177e4SLinus Torvalds 
33831da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33841da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33851da177e4SLinus Torvalds 	 */
33865d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33875d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33885d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33891da177e4SLinus Torvalds 
3390cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
339100db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3392cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3393cfb6eeb4SYOSHIFUJI Hideaki #endif
3394cfb6eeb4SYOSHIFUJI Hideaki 
33951da177e4SLinus Torvalds 	/* If the user set TCP_MAXSEG, record it as the MSS clamp */
33961da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33971da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33981da177e4SLinus Torvalds 	tp->max_window = 0;
33995d424d5aSJohn Heffner 	tcp_mtup_init(sk);
34001da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
34011da177e4SLinus Torvalds 
340281164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
340381164413SDaniel Borkmann 
34041da177e4SLinus Torvalds 	if (!tp->window_clamp)
34051da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
34063541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3407f5fff5dcSTom Quetchenbach 
34081da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
34091da177e4SLinus Torvalds 
3410e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3411e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3412e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3413e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3414e88c64f0SHagen Paul Pfeifer 
341513d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
341613d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
341713d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
341813d3b1ebSLawrence Brakmo 
3419ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
34201da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
34211da177e4SLinus Torvalds 				  &tp->rcv_wnd,
34221da177e4SLinus Torvalds 				  &tp->window_clamp,
34239bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
342431d12926Slaurent chavey 				  &rcv_wscale,
342513d3b1ebSLawrence Brakmo 				  rcv_wnd);
34261da177e4SLinus Torvalds 
34271da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
34281da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
34291da177e4SLinus Torvalds 
34301da177e4SLinus Torvalds 	sk->sk_err = 0;
34311da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
34321da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3433ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
34347f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
34351da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
34361da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
343733f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3438e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3439ee995283SPavel Emelyanov 
3440ee995283SPavel Emelyanov 	if (likely(!tp->repair))
34411da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3442c7781a6eSAndrew Vagin 	else
344370eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3444ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
34457db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
34461da177e4SLinus Torvalds 
34478550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3448463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
34491da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
34501da177e4SLinus Torvalds }
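
/* Arithmetic sketch of the MSS argument passed to
 * tcp_select_initial_window() above: when a timestamp was cached for
 * the peer (ts_recent_stamp), the per-segment payload shrinks by the
 * extra aligned option space, e.g. advmss 1460 with the 12-byte
 * timestamp option becomes 1460 - 12 = 1448. Illustrative helper only,
 * not used by this file.
 */
static int initial_window_mss(const struct tcp_sock *tp)
{
	int opt_len = tp->rx_opt.ts_recent_stamp ?
		      tp->tcp_header_len - sizeof(struct tcphdr) : 0;

	return tp->advmss - opt_len;
}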
34511da177e4SLinus Torvalds 
3452783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3453783237e8SYuchung Cheng {
3454783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3455783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3456783237e8SYuchung Cheng 
3457783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3458f4a775d1SEric Dumazet 	__skb_header_release(skb);
3459ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
3460783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
34610f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3462783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3463783237e8SYuchung Cheng }
3464783237e8SYuchung Cheng 
3465783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3466783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3467783237e8SYuchung Cheng  * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
3468783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3469783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to
3470783237e8SYuchung Cheng  * sending a regular SYN with the Fast Open cookie request option.
3471783237e8SYuchung Cheng  */
3472783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3473783237e8SYuchung Cheng {
3474783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3475783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3476065263f4SWei Wang 	int space, err = 0;
3477355a901eSEric Dumazet 	struct sk_buff *syn_data;
3478783237e8SYuchung Cheng 
347967da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3480065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3481783237e8SYuchung Cheng 		goto fallback;
3482783237e8SYuchung Cheng 
3483783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3484783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3485783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3486783237e8SYuchung Cheng 	 */
34873541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34883541f9e8SEric Dumazet 
34891b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3490783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3491783237e8SYuchung Cheng 
3492f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3493f5ddcbbbSEric Dumazet 
3494f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3495f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3496f5ddcbbbSEric Dumazet 
3497eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3498355a901eSEric Dumazet 	if (!syn_data)
3499783237e8SYuchung Cheng 		goto fallback;
3500355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3501355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
350207e100f9SEric Dumazet 	if (space) {
350307e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
350457be5bdaSAl Viro 					    &fo->data->msg_iter);
350557be5bdaSAl Viro 		if (unlikely(!copied)) {
3506ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3507355a901eSEric Dumazet 			kfree_skb(syn_data);
3508783237e8SYuchung Cheng 			goto fallback;
3509783237e8SYuchung Cheng 		}
351057be5bdaSAl Viro 		if (copied != space) {
351157be5bdaSAl Viro 			skb_trim(syn_data, copied);
351257be5bdaSAl Viro 			space = copied;
351357be5bdaSAl Viro 		}
3514f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
351507e100f9SEric Dumazet 	}
3516355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3517355a901eSEric Dumazet 	if (space == fo->size)
3518355a901eSEric Dumazet 		fo->data = NULL;
3519355a901eSEric Dumazet 	fo->copied = space;
3520783237e8SYuchung Cheng 
3521355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
35220f87230dSFrancis Yan 	if (syn_data->len)
35230f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3524355a901eSEric Dumazet 
3525355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3526355a901eSEric Dumazet 
3527d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3528355a901eSEric Dumazet 
3529355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3530355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3531355a901eSEric Dumazet 	 * we keep in the write queue in case of a retransmit, as we
3532355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3533431a9124SEric Dumazet 	 */
3534355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3535355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3536355a901eSEric Dumazet 	if (!err) {
353767da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
353875c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3539f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3540783237e8SYuchung Cheng 		goto done;
3541783237e8SYuchung Cheng 	}
3542783237e8SYuchung Cheng 
354375c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
354475c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3545b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3546b5b7db8dSEric Dumazet 
3547783237e8SYuchung Cheng fallback:
3548783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3549783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3550783237e8SYuchung Cheng 		fo->cookie.len = 0;
3551783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3552783237e8SYuchung Cheng 	if (err)
3553783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3554783237e8SYuchung Cheng done:
3555783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3556783237e8SYuchung Cheng 	return err;
3557783237e8SYuchung Cheng }
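
/* Sizing sketch of the SYN-data payload built above: the usable Fast
 * Open space is the PMTU-derived MSS minus the full option space
 * reserved for middleboxes, bounded by what the caller actually queued
 * and by an order-0 allocation. Illustrative helper, not used by this
 * file.
 */
static size_t syn_data_space(unsigned int mtu_mss, size_t queued)
{
	size_t space = mtu_mss - MAX_TCP_OPTION_SPACE;

	space = min(space, queued);
	return min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
}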
3558783237e8SYuchung Cheng 
355967edfef7SAndi Kleen /* Build a SYN and send it off. */
35601da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
35611da177e4SLinus Torvalds {
35621da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35631da177e4SLinus Torvalds 	struct sk_buff *buff;
3564ee586811SEric Paris 	int err;
35651da177e4SLinus Torvalds 
3566de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
35678ba60924SEric Dumazet 
35688ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
35698ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
35708ba60924SEric Dumazet 
35711da177e4SLinus Torvalds 	tcp_connect_init(sk);
35721da177e4SLinus Torvalds 
35732b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
35742b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
35752b916477SAndrey Vagin 		return 0;
35762b916477SAndrey Vagin 	}
35772b916477SAndrey Vagin 
3578eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3579355a901eSEric Dumazet 	if (unlikely(!buff))
35801da177e4SLinus Torvalds 		return -ENOBUFS;
35811da177e4SLinus Torvalds 
3582a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35839a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35849a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3585783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3586735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
358775c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35881da177e4SLinus Torvalds 
3589783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3590783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3591783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3592ee586811SEric Paris 	if (err == -ECONNREFUSED)
3593ee586811SEric Paris 		return err;
3594bd37a088SWei Yongjun 
3595bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3596bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3597bd37a088SWei Yongjun 	 */
3598e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3599bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3600b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3601b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3602e0d694d6SEric Dumazet 		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3603b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3604b5b7db8dSEric Dumazet 	}
360581cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
36061da177e4SLinus Torvalds 
36071da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
36083f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
36093f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
36101da177e4SLinus Torvalds 	return 0;
36111da177e4SLinus Torvalds }
36124bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
36131da177e4SLinus Torvalds 
36141da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
36151da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
36161da177e4SLinus Torvalds  * for details.
36171da177e4SLinus Torvalds  */
36181da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
36191da177e4SLinus Torvalds {
3620463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3621463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
36221da177e4SLinus Torvalds 	unsigned long timeout;
36231da177e4SLinus Torvalds 
36241da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3625463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
36261da177e4SLinus Torvalds 		int max_ato = HZ / 2;
36271da177e4SLinus Torvalds 
362831954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
3629056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
36301da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
36311da177e4SLinus Torvalds 
36321da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
36331da177e4SLinus Torvalds 
36341da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3635463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
36361da177e4SLinus Torvalds 		 * directly.
36371da177e4SLinus Torvalds 		 */
3638740b0f18SEric Dumazet 		if (tp->srtt_us) {
3639740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3640740b0f18SEric Dumazet 					TCP_DELACK_MIN);
36411da177e4SLinus Torvalds 
36421da177e4SLinus Torvalds 			if (rtt < max_ato)
36431da177e4SLinus Torvalds 				max_ato = rtt;
36441da177e4SLinus Torvalds 		}
36451da177e4SLinus Torvalds 
36461da177e4SLinus Torvalds 		ato = min(ato, max_ato);
36471da177e4SLinus Torvalds 	}
36481da177e4SLinus Torvalds 
36491da177e4SLinus Torvalds 	/* Stay within the limit we were given */
36501da177e4SLinus Torvalds 	timeout = jiffies + ato;
36511da177e4SLinus Torvalds 
36521da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3653463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
36541da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
36551da177e4SLinus Torvalds 		 * send ACK now.
36561da177e4SLinus Torvalds 		 */
3657463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3658463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
36591da177e4SLinus Torvalds 			tcp_send_ack(sk);
36601da177e4SLinus Torvalds 			return;
36611da177e4SLinus Torvalds 		}
36621da177e4SLinus Torvalds 
3663463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3664463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
36651da177e4SLinus Torvalds 	}
3666463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3667463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3668463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
36691da177e4SLinus Torvalds }
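
/* Worked example of the ato clamping above (sketch, assuming HZ=1000):
 * in pingpong mode max_ato is raised to TCP_DELACK_MAX (200ms); a
 * measured srtt of 40ms then lowers that bound, so an ato of 200ms is
 * clamped to min(200, 40) = 40ms before the timer is armed.
 * Hypothetical helper, not used by this file.
 */
static int delack_ato(int ato, int max_ato, int srtt_jiffies)
{
	if (srtt_jiffies) {
		int rtt = max_t(int, srtt_jiffies, TCP_DELACK_MIN);

		if (rtt < max_ato)
			max_ato = rtt;
	}
	return min(ato, max_ato);
}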
36701da177e4SLinus Torvalds 
36711da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
36722987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
36731da177e4SLinus Torvalds {
36741da177e4SLinus Torvalds 	struct sk_buff *buff;
36751da177e4SLinus Torvalds 
3676058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3677058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3678058dc334SIlpo Järvinen 		return;
3679058dc334SIlpo Järvinen 
36801da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36811da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36821da177e4SLinus Torvalds 	 * sock.
36831da177e4SLinus Torvalds 	 */
36847450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36857450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36867450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3687463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3688463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36893f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36903f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36911da177e4SLinus Torvalds 		return;
36921da177e4SLinus Torvalds 	}
36931da177e4SLinus Torvalds 
36941da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36951da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3696a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36971da177e4SLinus Torvalds 
369898781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
369998781965SEric Dumazet 	 * too much.
370098781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
370198781965SEric Dumazet 	 */
370298781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
370398781965SEric Dumazet 
37041da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
37052987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
37061da177e4SLinus Torvalds }
370727cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
37082987babbSYuchung Cheng 
37092987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
37102987babbSYuchung Cheng {
37112987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
37121da177e4SLinus Torvalds }
37131da177e4SLinus Torvalds 
37141da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
37151da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
37161da177e4SLinus Torvalds  *
37171da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
37181da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
37191da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
37201da177e4SLinus Torvalds  *
37211da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
37221da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
37231da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
37241da177e4SLinus Torvalds  */
3725e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
37261da177e4SLinus Torvalds {
37271da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37281da177e4SLinus Torvalds 	struct sk_buff *skb;
37291da177e4SLinus Torvalds 
37301da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
37317450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
37327450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
373351456b29SIan Morris 	if (!skb)
37341da177e4SLinus Torvalds 		return -1;
37351da177e4SLinus Torvalds 
37361da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
37371da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
37381da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
37391da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
37401da177e4SLinus Torvalds 	 * send it.
37411da177e4SLinus Torvalds 	 */
3742a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3743e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
37447450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
37451da177e4SLinus Torvalds }
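
/* Sequence-choice sketch for the probe built above: a window probe
 * uses SND.UNA-1, a byte the receiver has already acknowledged, so it
 * provokes an immediate ACK (carrying a fresh window) without ever
 * being accepted as data; urgent mode probes with SND.UNA itself.
 * Hypothetical helper mirroring the tcp_init_nondata_skb() call.
 */
static u32 probe_seq(const struct tcp_sock *tp, int urgent)
{
	return tp->snd_una - !urgent;
}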
37461da177e4SLinus Torvalds 
3747385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3748ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3749ee995283SPavel Emelyanov {
3750ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3751ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
37529a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3753e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3754ee995283SPavel Emelyanov 	}
3755ee995283SPavel Emelyanov }
3756ee995283SPavel Emelyanov 
375767edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3758e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
37591da177e4SLinus Torvalds {
37601da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37611da177e4SLinus Torvalds 	struct sk_buff *skb;
37621da177e4SLinus Torvalds 
3763058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3764058dc334SIlpo Järvinen 		return -1;
3765058dc334SIlpo Järvinen 
376600db4124SIan Morris 	skb = tcp_send_head(sk);
376700db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
37681da177e4SLinus Torvalds 		int err;
37690c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
377090840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
37711da177e4SLinus Torvalds 
37721da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
37731da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
37741da177e4SLinus Torvalds 
37751da177e4SLinus Torvalds 		/* We are probing the opening of a window
37761da177e4SLinus Torvalds 		 * but the window size is != 0;
37771da177e4SLinus Torvalds 		 * this must be a result of sender-side SWS avoidance.
37781da177e4SLinus Torvalds 		 */
37791da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
37801da177e4SLinus Torvalds 		    skb->len > mss) {
37811da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37824de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
378375c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
378475c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37851da177e4SLinus Torvalds 				return -1;
37861da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37875bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37881da177e4SLinus Torvalds 
37894de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3790dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
379166f5fe62SIlpo Järvinen 		if (!err)
379266f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37931da177e4SLinus Torvalds 		return err;
37941da177e4SLinus Torvalds 	} else {
379533f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3796e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3797e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37981da177e4SLinus Torvalds 	}
37991da177e4SLinus Torvalds }
38001da177e4SLinus Torvalds 
38011da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
38021da177e4SLinus Torvalds  * send a partial packet, else send a zero-window probe.
38031da177e4SLinus Torvalds  */
38041da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
38051da177e4SLinus Torvalds {
3806463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
38071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3808c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3809c1d5674fSYuchung Cheng 	unsigned long timeout;
38101da177e4SLinus Torvalds 	int err;
38111da177e4SLinus Torvalds 
3812e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
38131da177e4SLinus Torvalds 
381475c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
38151da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
38166687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3817463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
38181da177e4SLinus Torvalds 		return;
38191da177e4SLinus Torvalds 	}
38201da177e4SLinus Torvalds 
3821c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
38221da177e4SLinus Torvalds 	if (err <= 0) {
3823c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3824463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
3825c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
38261da177e4SLinus Torvalds 	} else {
38271da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
3828c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
38291da177e4SLinus Torvalds 		 */
3830c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
38311da177e4SLinus Torvalds 	}
3832c1d5674fSYuchung Cheng 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
38331da177e4SLinus Torvalds }
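
/* Backoff sketch for the probe0 timer armed above, loosely mirroring
 * tcp_probe0_when(): each unanswered probe doubles the timeout (the
 * backoff counter stops growing at sysctl_tcp_retries2), clamped to
 * TCP_RTO_MAX; a locally-congested send instead retries at the fixed
 * resource-probe interval. Illustrative helper, not used by this file.
 */
static unsigned long probe0_timeout(unsigned long rto, u8 backoff, bool sent)
{
	if (!sent)
		return TCP_RESOURCE_PROBE_INTERVAL;
	return min_t(unsigned long, rto << backoff, TCP_RTO_MAX);
}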
38345db92c99SOctavian Purdila 
3835ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
38365db92c99SOctavian Purdila {
38375db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
38385db92c99SOctavian Purdila 	struct flowi fl;
38395db92c99SOctavian Purdila 	int res;
38405db92c99SOctavian Purdila 
384158d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3842b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
38435db92c99SOctavian Purdila 	if (!res) {
384490bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
384502a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
38467e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
38477e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3848cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
38495db92c99SOctavian Purdila 	}
38505db92c99SOctavian Purdila 	return res;
38515db92c99SOctavian Purdila }
38525db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3853