xref: /linux/net/ipv4/tcp_output.c (revision 0aadc73995d08f6b0dc061c14a564ffa46f5914e)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
121da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
131da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
141da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
151da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
161da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
171da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
181da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
191da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds /*
231da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
241da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
251da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
261da177e4SLinus Torvalds  *				:	AF independence
271da177e4SLinus Torvalds  *
281da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
291da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
301da177e4SLinus Torvalds  *					during syn/ack processing.
311da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
321da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
331da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
341da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
351da177e4SLinus Torvalds  *
361da177e4SLinus Torvalds  */
371da177e4SLinus Torvalds 
3891df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3991df42beSJoe Perches 
401da177e4SLinus Torvalds #include <net/tcp.h>
41eda7acddSPeter Krystad #include <net/mptcp.h>
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds #include <linux/compiler.h>
445a0e3ad6STejun Heo #include <linux/gfp.h>
451da177e4SLinus Torvalds #include <linux/module.h>
4660e2a778SUrsula Braun #include <linux/static_key.h>
471da177e4SLinus Torvalds 
48e086101bSCong Wang #include <trace/events/tcp.h>
4935089bb2SDavid S. Miller 
509799ccb0SEric Dumazet /* Refresh clocks of a TCP socket,
519799ccb0SEric Dumazet  * ensuring monotically increasing values.
529799ccb0SEric Dumazet  */
539799ccb0SEric Dumazet void tcp_mstamp_refresh(struct tcp_sock *tp)
549799ccb0SEric Dumazet {
559799ccb0SEric Dumazet 	u64 val = tcp_clock_ns();
569799ccb0SEric Dumazet 
575f6188a8SEric Dumazet 	tp->tcp_clock_cache = val;
58e6d14070SEric Dumazet 	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
599799ccb0SEric Dumazet }
609799ccb0SEric Dumazet 
6146d3ceabSEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6246d3ceabSEric Dumazet 			   int push_one, gfp_t gfp);
63519855c5SWilliam Allen Simpson 
6467edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
6575c119afSEric Dumazet static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
666ff03ac3SIlpo Järvinen {
676ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
686ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
6966f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
709e412ba7SIlpo Järvinen 
71e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
728512430eSIlpo Järvinen 
7375c119afSEric Dumazet 	__skb_unlink(skb, &sk->sk_write_queue);
7475c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
7575c119afSEric Dumazet 
7685369750SCambda Zhu 	if (tp->highest_sack == NULL)
7785369750SCambda Zhu 		tp->highest_sack = skb;
7885369750SCambda Zhu 
7966f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
80bec41a11SYuchung Cheng 	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
81750ea2baSYuchung Cheng 		tcp_rearm_rto(sk);
82f19c29e3SYuchung Cheng 
83f7324acdSDavid S. Miller 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
84f19c29e3SYuchung Cheng 		      tcp_skb_pcount(skb));
854bfe744fSEric Dumazet 	tcp_check_space(sk);
866a5dc9e5SEric Dumazet }
871da177e4SLinus Torvalds 
88a4ecb15aSCui, Cheng /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
89a4ecb15aSCui, Cheng  * window scaling factor due to loss of precision.
901da177e4SLinus Torvalds  * If window has been shrunk, what should we make? It is not clear at all.
911da177e4SLinus Torvalds  * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
921da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
931da177e4SLinus Torvalds  * invalid. OK, let's make this for now:
941da177e4SLinus Torvalds  */
95cf533ea5SEric Dumazet static inline __u32 tcp_acceptable_seq(const struct sock *sk)
961da177e4SLinus Torvalds {
97cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
989e412ba7SIlpo Järvinen 
99a4ecb15aSCui, Cheng 	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
100a4ecb15aSCui, Cheng 	    (tp->rx_opt.wscale_ok &&
101a4ecb15aSCui, Cheng 	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
1021da177e4SLinus Torvalds 		return tp->snd_nxt;
1031da177e4SLinus Torvalds 	else
10490840defSIlpo Järvinen 		return tcp_wnd_end(tp);
1051da177e4SLinus Torvalds }
1061da177e4SLinus Torvalds 
1071da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1081da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1091da177e4SLinus Torvalds  *
1101da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1111da177e4SLinus Torvalds  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
1121da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1131da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1141da177e4SLinus Torvalds  *    large MSS.
1151da177e4SLinus Torvalds  * 4. We do not make 3, we advertise MSS, calculated from first
1161da177e4SLinus Torvalds  *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
1171da177e4SLinus Torvalds  *    This may be overridden via information stored in routing table.
1181da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1191da177e4SLinus Torvalds  *    probably even Jumbo".
1201da177e4SLinus Torvalds  */
1211da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1221da177e4SLinus Torvalds {
1231da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
124cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1251da177e4SLinus Torvalds 	int mss = tp->advmss;
1261da177e4SLinus Torvalds 
1270dbaee3bSDavid S. Miller 	if (dst) {
1280dbaee3bSDavid S. Miller 		unsigned int metric = dst_metric_advmss(dst);
1290dbaee3bSDavid S. Miller 
1300dbaee3bSDavid S. Miller 		if (metric < mss) {
1310dbaee3bSDavid S. Miller 			mss = metric;
1321da177e4SLinus Torvalds 			tp->advmss = mss;
1331da177e4SLinus Torvalds 		}
1340dbaee3bSDavid S. Miller 	}
1351da177e4SLinus Torvalds 
1361da177e4SLinus Torvalds 	return (__u16)mss;
1371da177e4SLinus Torvalds }
1381da177e4SLinus Torvalds 
1391da177e4SLinus Torvalds /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
1406f021c62SEric Dumazet  * This is the first part of cwnd validation mechanism.
1416f021c62SEric Dumazet  */
1426f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta)
1431da177e4SLinus Torvalds {
144463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1456f021c62SEric Dumazet 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
14640570375SEric Dumazet 	u32 cwnd = tcp_snd_cwnd(tp);
1471da177e4SLinus Torvalds 
1486687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1491da177e4SLinus Torvalds 
1506687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1511da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1521da177e4SLinus Torvalds 
153463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1541da177e4SLinus Torvalds 		cwnd >>= 1;
15540570375SEric Dumazet 	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
156c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
1571da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1581da177e4SLinus Torvalds }
1591da177e4SLinus Torvalds 
16067edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
16140efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
162cf533ea5SEric Dumazet 				struct sock *sk)
1631da177e4SLinus Torvalds {
164463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
165d635fbe2SEric Dumazet 	const u32 now = tcp_jiffies32;
1661da177e4SLinus Torvalds 
16705c5a46dSNeal Cardwell 	if (tcp_packets_in_flight(tp) == 0)
16805c5a46dSNeal Cardwell 		tcp_ca_event(sk, CA_EVENT_TX_START);
16905c5a46dSNeal Cardwell 
1704a41f453SWei Wang 	tp->lsndtime = now;
1714d8f24eeSWei Wang 
1724d8f24eeSWei Wang 	/* If it is a reply for ato after last received
173562b1fdfSHaiyang Zhang 	 * packet, increase pingpong count.
1744d8f24eeSWei Wang 	 */
1754d8f24eeSWei Wang 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
176562b1fdfSHaiyang Zhang 		inet_csk_inc_pingpong_cnt(sk);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
17967edfef7SAndi Kleen /* Account for an ACK we sent. */
180059217c1SNeal Cardwell static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
1811da177e4SLinus Torvalds {
1825d9f4262SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1835d9f4262SEric Dumazet 
1842b195850SEric Dumazet 	if (unlikely(tp->compressed_ack)) {
185200d95f4SEric Dumazet 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
1862b195850SEric Dumazet 			      tp->compressed_ack);
1872b195850SEric Dumazet 		tp->compressed_ack = 0;
1885d9f4262SEric Dumazet 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
1895d9f4262SEric Dumazet 			__sock_put(sk);
1905d9f4262SEric Dumazet 	}
19127cde44aSYuchung Cheng 
19227cde44aSYuchung Cheng 	if (unlikely(rcv_nxt != tp->rcv_nxt))
19327cde44aSYuchung Cheng 		return;  /* Special ACK sent by DCTCP to reflect ECN */
194059217c1SNeal Cardwell 	tcp_dec_quickack_mode(sk);
195463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1961da177e4SLinus Torvalds }
1971da177e4SLinus Torvalds 
1981da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1991da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
2001da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
2011da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
2021da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
2031da177e4SLinus Torvalds  * This MUST be enforced by all callers.
2041da177e4SLinus Torvalds  */
205ceef9ab6SEric Dumazet void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
2061da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
20731d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
20831d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2091da177e4SLinus Torvalds {
2101da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
2131da177e4SLinus Torvalds 	if (*window_clamp == 0)
214589c49cbSGao Feng 		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
2151da177e4SLinus Torvalds 	space = min(*window_clamp, space);
2161da177e4SLinus Torvalds 
2171da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2181da177e4SLinus Torvalds 	if (space > mss)
219589c49cbSGao Feng 		space = rounddown(space, mss);
2201da177e4SLinus Torvalds 
2211da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
22215d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
22315d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
22415d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
22515d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
22615d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
22715d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2281da177e4SLinus Torvalds 	 */
2290f1e4d06SKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
2301da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
23115d99e02SRick Jones 	else
232a337531bSYuchung Cheng 		(*rcv_wnd) = min_t(u32, space, U16_MAX);
233a337531bSYuchung Cheng 
234a337531bSYuchung Cheng 	if (init_rcv_wnd)
235a337531bSYuchung Cheng 		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
23615d99e02SRick Jones 
23719bf6261SEric Dumazet 	*rcv_wscale = 0;
2381da177e4SLinus Torvalds 	if (wscale_ok) {
239589c49cbSGao Feng 		/* Set window scaling on max possible window */
24002739545SKuniyuki Iwashima 		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
2411227c177SKuniyuki Iwashima 		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
242316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
24319bf6261SEric Dumazet 		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
24419bf6261SEric Dumazet 				      0, TCP_MAX_WSCALE);
2451da177e4SLinus Torvalds 	}
2461da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
247589c49cbSGao Feng 	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
2481da177e4SLinus Torvalds }
2494bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
2501da177e4SLinus Torvalds 
2511da177e4SLinus Torvalds /* Chose a new window to advertise, update state in tcp_sock for the
2521da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2531da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2541da177e4SLinus Torvalds  * frame.
2551da177e4SLinus Torvalds  */
25640efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2571da177e4SLinus Torvalds {
2581da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
259b650d953Smfreemon@cloudflare.com 	struct net *net = sock_net(sk);
260e2142825SMenglong Dong 	u32 old_win = tp->rcv_wnd;
261e2142825SMenglong Dong 	u32 cur_win, new_win;
2621da177e4SLinus Torvalds 
263e2142825SMenglong Dong 	/* Make the window 0 if we failed to queue the data because we
264e2142825SMenglong Dong 	 * are out of memory. The window is temporary, so we don't store
265e2142825SMenglong Dong 	 * it on the socket.
266e2142825SMenglong Dong 	 */
267e2142825SMenglong Dong 	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
268e2142825SMenglong Dong 		return 0;
269e2142825SMenglong Dong 
270e2142825SMenglong Dong 	cur_win = tcp_receive_window(tp);
271e2142825SMenglong Dong 	new_win = __tcp_select_window(sk);
2721da177e4SLinus Torvalds 	if (new_win < cur_win) {
2731da177e4SLinus Torvalds 		/* Danger Will Robinson!
2741da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2751da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2761da177e4SLinus Torvalds 		 * window in time.  --DaveM
2771da177e4SLinus Torvalds 		 *
2781da177e4SLinus Torvalds 		 * Relax Will Robinson.
2791da177e4SLinus Torvalds 		 */
280b650d953Smfreemon@cloudflare.com 		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
281b650d953Smfreemon@cloudflare.com 			/* Never shrink the offered window */
2828e165e20SFlorian Westphal 			if (new_win == 0)
283b650d953Smfreemon@cloudflare.com 				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
284607bfbf2SPatrick McHardy 			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2851da177e4SLinus Torvalds 		}
286b650d953Smfreemon@cloudflare.com 	}
287b650d953Smfreemon@cloudflare.com 
2881da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2891da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2901da177e4SLinus Torvalds 
2911da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2921da177e4SLinus Torvalds 	 * scaled window.
2931da177e4SLinus Torvalds 	 */
294ceef9ab6SEric Dumazet 	if (!tp->rx_opt.rcv_wscale &&
295b650d953Smfreemon@cloudflare.com 	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
2961da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2971da177e4SLinus Torvalds 	else
2981da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2991da177e4SLinus Torvalds 
3001da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
3011da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
3021da177e4SLinus Torvalds 
30331770e34SFlorian Westphal 	/* If we advertise zero window, disable fast path. */
3048e165e20SFlorian Westphal 	if (new_win == 0) {
30531770e34SFlorian Westphal 		tp->pred_flags = 0;
3068e165e20SFlorian Westphal 		if (old_win)
307b650d953Smfreemon@cloudflare.com 			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
3088e165e20SFlorian Westphal 	} else if (old_win == 0) {
309b650d953Smfreemon@cloudflare.com 		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
3108e165e20SFlorian Westphal 	}
3111da177e4SLinus Torvalds 
3121da177e4SLinus Torvalds 	return new_win;
3131da177e4SLinus Torvalds }
3141da177e4SLinus Torvalds 
31567edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
316735d3831SFlorian Westphal static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
317bdf1ee5dSIlpo Järvinen {
31830e502a3SDaniel Borkmann 	const struct tcp_sock *tp = tcp_sk(sk);
31930e502a3SDaniel Borkmann 
3204de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
321bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
3224de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
32391b5b21cSLawrence Brakmo 	else if (tcp_ca_needs_ecn(sk) ||
32491b5b21cSLawrence Brakmo 		 tcp_bpf_ca_needs_ecn(sk))
32530e502a3SDaniel Borkmann 		INET_ECN_xmit(sk);
326bdf1ee5dSIlpo Järvinen }
327bdf1ee5dSIlpo Järvinen 
32867edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
329735d3831SFlorian Westphal static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
330bdf1ee5dSIlpo Järvinen {
331bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
33291b5b21cSLawrence Brakmo 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
3334785a667SKuniyuki Iwashima 	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
33491b5b21cSLawrence Brakmo 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
335f7b3bec6SFlorian Westphal 
336f7b3bec6SFlorian Westphal 	if (!use_ecn) {
337f7b3bec6SFlorian Westphal 		const struct dst_entry *dst = __sk_dst_get(sk);
338f7b3bec6SFlorian Westphal 
339f7b3bec6SFlorian Westphal 		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
340f7b3bec6SFlorian Westphal 			use_ecn = true;
341f7b3bec6SFlorian Westphal 	}
342bdf1ee5dSIlpo Järvinen 
343bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
344f7b3bec6SFlorian Westphal 
345f7b3bec6SFlorian Westphal 	if (use_ecn) {
3464de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
347bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
34891b5b21cSLawrence Brakmo 		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
34930e502a3SDaniel Borkmann 			INET_ECN_xmit(sk);
350bdf1ee5dSIlpo Järvinen 	}
351bdf1ee5dSIlpo Järvinen }
352bdf1ee5dSIlpo Järvinen 
35349213555SDaniel Borkmann static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
35449213555SDaniel Borkmann {
35512b8d9caSKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
35649213555SDaniel Borkmann 		/* tp->ecn_flags are cleared at a later point in time when
35749213555SDaniel Borkmann 		 * SYN ACK is ultimatively being received.
35849213555SDaniel Borkmann 		 */
35949213555SDaniel Borkmann 		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
36049213555SDaniel Borkmann }
36149213555SDaniel Borkmann 
362735d3831SFlorian Westphal static void
3636ac705b1SEric Dumazet tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
364bdf1ee5dSIlpo Järvinen {
3656ac705b1SEric Dumazet 	if (inet_rsk(req)->ecn_ok)
366bdf1ee5dSIlpo Järvinen 		th->ece = 1;
367bdf1ee5dSIlpo Järvinen }
368bdf1ee5dSIlpo Järvinen 
36967edfef7SAndi Kleen /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
37067edfef7SAndi Kleen  * be sent.
37167edfef7SAndi Kleen  */
372735d3831SFlorian Westphal static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
373ea1627c2SEric Dumazet 			 struct tcphdr *th, int tcp_header_len)
374bdf1ee5dSIlpo Järvinen {
375bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
376bdf1ee5dSIlpo Järvinen 
377bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
378bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
379bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
380bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
381bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
382bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
383bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
384ea1627c2SEric Dumazet 				th->cwr = 1;
385bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
386bdf1ee5dSIlpo Järvinen 			}
38730e502a3SDaniel Borkmann 		} else if (!tcp_ca_needs_ecn(sk)) {
388bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
389bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
390bdf1ee5dSIlpo Järvinen 		}
391bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
392ea1627c2SEric Dumazet 			th->ece = 1;
393bdf1ee5dSIlpo Järvinen 	}
394bdf1ee5dSIlpo Järvinen }
395bdf1ee5dSIlpo Järvinen 
396e870a8efSIlpo Järvinen /* Constructs common control bits of non-data skb. If SYN/FIN is present,
397e870a8efSIlpo Järvinen  * auto increment end seqno.
398e870a8efSIlpo Järvinen  */
399e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
400e870a8efSIlpo Järvinen {
4012e8e18efSDavid S. Miller 	skb->ip_summed = CHECKSUM_PARTIAL;
402e870a8efSIlpo Järvinen 
4034de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags;
404e870a8efSIlpo Järvinen 
405cd7d8498SEric Dumazet 	tcp_skb_pcount_set(skb, 1);
406e870a8efSIlpo Järvinen 
407e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
408a3433f35SChangli Gao 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
409e870a8efSIlpo Järvinen 		seq++;
410e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
411e870a8efSIlpo Järvinen }
412e870a8efSIlpo Järvinen 
413a2a385d6SEric Dumazet static inline bool tcp_urg_mode(const struct tcp_sock *tp)
41433f5f57eSIlpo Järvinen {
41533f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
41633f5f57eSIlpo Järvinen }
41733f5f57eSIlpo Järvinen 
4183b65abb8SLeonard Crestez #define OPTION_SACK_ADVERTISE	BIT(0)
4193b65abb8SLeonard Crestez #define OPTION_TS		BIT(1)
4203b65abb8SLeonard Crestez #define OPTION_MD5		BIT(2)
4213b65abb8SLeonard Crestez #define OPTION_WSCALE		BIT(3)
4223b65abb8SLeonard Crestez #define OPTION_FAST_OPEN_COOKIE	BIT(8)
4233b65abb8SLeonard Crestez #define OPTION_SMC		BIT(9)
4243b65abb8SLeonard Crestez #define OPTION_MPTCP		BIT(10)
42560e2a778SUrsula Braun 
42660e2a778SUrsula Braun static void smc_options_write(__be32 *ptr, u16 *options)
42760e2a778SUrsula Braun {
42860e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
42960e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
43060e2a778SUrsula Braun 		if (unlikely(OPTION_SMC & *options)) {
43160e2a778SUrsula Braun 			*ptr++ = htonl((TCPOPT_NOP  << 24) |
43260e2a778SUrsula Braun 				       (TCPOPT_NOP  << 16) |
43360e2a778SUrsula Braun 				       (TCPOPT_EXP <<  8) |
43460e2a778SUrsula Braun 				       (TCPOLEN_EXP_SMC_BASE));
43560e2a778SUrsula Braun 			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
43660e2a778SUrsula Braun 		}
43760e2a778SUrsula Braun 	}
43860e2a778SUrsula Braun #endif
43960e2a778SUrsula Braun }
44033ad798cSAdam Langley 
44133ad798cSAdam Langley struct tcp_out_options {
4422100c8d2SYuchung Cheng 	u16 options;		/* bit field of OPTION_* */
4432100c8d2SYuchung Cheng 	u16 mss;		/* 0 to disable */
44433ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
44533ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
446bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
447331fca43SMartin KaFai Lau 	u8 bpf_opt_len;		/* length of BPF hdr option */
448bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
4492100c8d2SYuchung Cheng 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
4502100c8d2SYuchung Cheng 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
451eda7acddSPeter Krystad 	struct mptcp_out_options mptcp;
45233ad798cSAdam Langley };
45333ad798cSAdam Langley 
454ea66758cSPaolo Abeni static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
455ea66758cSPaolo Abeni 				struct tcp_sock *tp,
456fa3fe2b1SFlorian Westphal 				struct tcp_out_options *opts)
457eda7acddSPeter Krystad {
458eda7acddSPeter Krystad #if IS_ENABLED(CONFIG_MPTCP)
459eda7acddSPeter Krystad 	if (unlikely(OPTION_MPTCP & opts->options))
460ea66758cSPaolo Abeni 		mptcp_write_options(th, ptr, tp, &opts->mptcp);
461eda7acddSPeter Krystad #endif
462eda7acddSPeter Krystad }
463eda7acddSPeter Krystad 
464331fca43SMartin KaFai Lau #ifdef CONFIG_CGROUP_BPF
4650813a841SMartin KaFai Lau static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
4660813a841SMartin KaFai Lau 					enum tcp_synack_type synack_type)
4670813a841SMartin KaFai Lau {
4680813a841SMartin KaFai Lau 	if (unlikely(!skb))
4690813a841SMartin KaFai Lau 		return BPF_WRITE_HDR_TCP_CURRENT_MSS;
4700813a841SMartin KaFai Lau 
4710813a841SMartin KaFai Lau 	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
4720813a841SMartin KaFai Lau 		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
4730813a841SMartin KaFai Lau 
4740813a841SMartin KaFai Lau 	return 0;
4750813a841SMartin KaFai Lau }
4760813a841SMartin KaFai Lau 
477331fca43SMartin KaFai Lau /* req, syn_skb and synack_type are used when writing synack */
478331fca43SMartin KaFai Lau static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
479331fca43SMartin KaFai Lau 				  struct request_sock *req,
480331fca43SMartin KaFai Lau 				  struct sk_buff *syn_skb,
481331fca43SMartin KaFai Lau 				  enum tcp_synack_type synack_type,
482331fca43SMartin KaFai Lau 				  struct tcp_out_options *opts,
483331fca43SMartin KaFai Lau 				  unsigned int *remaining)
484331fca43SMartin KaFai Lau {
4850813a841SMartin KaFai Lau 	struct bpf_sock_ops_kern sock_ops;
4860813a841SMartin KaFai Lau 	int err;
4870813a841SMartin KaFai Lau 
488331fca43SMartin KaFai Lau 	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
489331fca43SMartin KaFai Lau 					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
490331fca43SMartin KaFai Lau 	    !*remaining)
491331fca43SMartin KaFai Lau 		return;
492331fca43SMartin KaFai Lau 
4930813a841SMartin KaFai Lau 	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
4940813a841SMartin KaFai Lau 
4950813a841SMartin KaFai Lau 	/* init sock_ops */
4960813a841SMartin KaFai Lau 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
4970813a841SMartin KaFai Lau 
4980813a841SMartin KaFai Lau 	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
4990813a841SMartin KaFai Lau 
5000813a841SMartin KaFai Lau 	if (req) {
5010813a841SMartin KaFai Lau 		/* The listen "sk" cannot be passed here because
5020813a841SMartin KaFai Lau 		 * it is not locked.  It would not make too much
5030813a841SMartin KaFai Lau 		 * sense to do bpf_setsockopt(listen_sk) based
5040813a841SMartin KaFai Lau 		 * on individual connection request also.
5050813a841SMartin KaFai Lau 		 *
5060813a841SMartin KaFai Lau 		 * Thus, "req" is passed here and the cgroup-bpf-progs
5070813a841SMartin KaFai Lau 		 * of the listen "sk" will be run.
5080813a841SMartin KaFai Lau 		 *
5090813a841SMartin KaFai Lau 		 * "req" is also used here for fastopen even the "sk" here is
5100813a841SMartin KaFai Lau 		 * a fullsock "child" sk.  It is to keep the behavior
5110813a841SMartin KaFai Lau 		 * consistent between fastopen and non-fastopen on
5120813a841SMartin KaFai Lau 		 * the bpf programming side.
513331fca43SMartin KaFai Lau 		 */
5140813a841SMartin KaFai Lau 		sock_ops.sk = (struct sock *)req;
5150813a841SMartin KaFai Lau 		sock_ops.syn_skb = syn_skb;
5160813a841SMartin KaFai Lau 	} else {
5170813a841SMartin KaFai Lau 		sock_owned_by_me(sk);
5180813a841SMartin KaFai Lau 
5190813a841SMartin KaFai Lau 		sock_ops.is_fullsock = 1;
5200813a841SMartin KaFai Lau 		sock_ops.sk = sk;
5210813a841SMartin KaFai Lau 	}
5220813a841SMartin KaFai Lau 
5230813a841SMartin KaFai Lau 	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
5240813a841SMartin KaFai Lau 	sock_ops.remaining_opt_len = *remaining;
5250813a841SMartin KaFai Lau 	/* tcp_current_mss() does not pass a skb */
5260813a841SMartin KaFai Lau 	if (skb)
5270813a841SMartin KaFai Lau 		bpf_skops_init_skb(&sock_ops, skb, 0);
5280813a841SMartin KaFai Lau 
5290813a841SMartin KaFai Lau 	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
5300813a841SMartin KaFai Lau 
5310813a841SMartin KaFai Lau 	if (err || sock_ops.remaining_opt_len == *remaining)
5320813a841SMartin KaFai Lau 		return;
5330813a841SMartin KaFai Lau 
5340813a841SMartin KaFai Lau 	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
5350813a841SMartin KaFai Lau 	/* round up to 4 bytes */
5360813a841SMartin KaFai Lau 	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
5370813a841SMartin KaFai Lau 
5380813a841SMartin KaFai Lau 	*remaining -= opts->bpf_opt_len;
539331fca43SMartin KaFai Lau }
540331fca43SMartin KaFai Lau 
541331fca43SMartin KaFai Lau static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
542331fca43SMartin KaFai Lau 				    struct request_sock *req,
543331fca43SMartin KaFai Lau 				    struct sk_buff *syn_skb,
544331fca43SMartin KaFai Lau 				    enum tcp_synack_type synack_type,
545331fca43SMartin KaFai Lau 				    struct tcp_out_options *opts)
546331fca43SMartin KaFai Lau {
5470813a841SMartin KaFai Lau 	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
5480813a841SMartin KaFai Lau 	struct bpf_sock_ops_kern sock_ops;
5490813a841SMartin KaFai Lau 	int err;
5500813a841SMartin KaFai Lau 
5510813a841SMartin KaFai Lau 	if (likely(!max_opt_len))
552331fca43SMartin KaFai Lau 		return;
553331fca43SMartin KaFai Lau 
5540813a841SMartin KaFai Lau 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
5550813a841SMartin KaFai Lau 
5560813a841SMartin KaFai Lau 	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
5570813a841SMartin KaFai Lau 
5580813a841SMartin KaFai Lau 	if (req) {
5590813a841SMartin KaFai Lau 		sock_ops.sk = (struct sock *)req;
5600813a841SMartin KaFai Lau 		sock_ops.syn_skb = syn_skb;
5610813a841SMartin KaFai Lau 	} else {
5620813a841SMartin KaFai Lau 		sock_owned_by_me(sk);
5630813a841SMartin KaFai Lau 
5640813a841SMartin KaFai Lau 		sock_ops.is_fullsock = 1;
5650813a841SMartin KaFai Lau 		sock_ops.sk = sk;
5660813a841SMartin KaFai Lau 	}
5670813a841SMartin KaFai Lau 
5680813a841SMartin KaFai Lau 	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
5690813a841SMartin KaFai Lau 	sock_ops.remaining_opt_len = max_opt_len;
5700813a841SMartin KaFai Lau 	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
5710813a841SMartin KaFai Lau 	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
5720813a841SMartin KaFai Lau 
5730813a841SMartin KaFai Lau 	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
5740813a841SMartin KaFai Lau 
5750813a841SMartin KaFai Lau 	if (err)
5760813a841SMartin KaFai Lau 		nr_written = 0;
5770813a841SMartin KaFai Lau 	else
5780813a841SMartin KaFai Lau 		nr_written = max_opt_len - sock_ops.remaining_opt_len;
5790813a841SMartin KaFai Lau 
5800813a841SMartin KaFai Lau 	if (nr_written < max_opt_len)
5810813a841SMartin KaFai Lau 		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
5820813a841SMartin KaFai Lau 		       max_opt_len - nr_written);
583331fca43SMartin KaFai Lau }
584331fca43SMartin KaFai Lau #else
585331fca43SMartin KaFai Lau static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
586331fca43SMartin KaFai Lau 				  struct request_sock *req,
587331fca43SMartin KaFai Lau 				  struct sk_buff *syn_skb,
588331fca43SMartin KaFai Lau 				  enum tcp_synack_type synack_type,
589331fca43SMartin KaFai Lau 				  struct tcp_out_options *opts,
590331fca43SMartin KaFai Lau 				  unsigned int *remaining)
591331fca43SMartin KaFai Lau {
592331fca43SMartin KaFai Lau }
593331fca43SMartin KaFai Lau 
594331fca43SMartin KaFai Lau static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
595331fca43SMartin KaFai Lau 				    struct request_sock *req,
596331fca43SMartin KaFai Lau 				    struct sk_buff *syn_skb,
597331fca43SMartin KaFai Lau 				    enum tcp_synack_type synack_type,
598331fca43SMartin KaFai Lau 				    struct tcp_out_options *opts)
599331fca43SMartin KaFai Lau {
600331fca43SMartin KaFai Lau }
601331fca43SMartin KaFai Lau #endif
602331fca43SMartin KaFai Lau 
60367edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
60467edfef7SAndi Kleen  *
60567edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
606fd6149d3SIlpo Järvinen  * TCP options, we learned this through the hard way, so be careful here.
607fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance but from
6088e3bff96Sstephen hemminger  * inter-operability perspective it seems that we're somewhat stuck with
609fd6149d3SIlpo Järvinen  * the ordering which we have been using if we want to keep working with
610fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody as there isn't
611fd6149d3SIlpo Järvinen  * particular reason why the ordering would need to be changed).
612fd6149d3SIlpo Järvinen  *
613fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
614fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
615fd6149d3SIlpo Järvinen  */
616ea66758cSPaolo Abeni static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
617bd0388aeSWilliam Allen Simpson 			      struct tcp_out_options *opts)
618bd0388aeSWilliam Allen Simpson {
619ea66758cSPaolo Abeni 	__be32 *ptr = (__be32 *)(th + 1);
6202100c8d2SYuchung Cheng 	u16 options = opts->options;	/* mungable copy */
621bd0388aeSWilliam Allen Simpson 
622bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_MD5 & options)) {
6231a2c6181SChristoph Paasch 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
6241a2c6181SChristoph Paasch 			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
625bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
626bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
62733ad798cSAdam Langley 		ptr += 4;
62833ad798cSAdam Langley 	}
62933ad798cSAdam Langley 
630fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
631fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
632fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
633fd6149d3SIlpo Järvinen 			       opts->mss);
634fd6149d3SIlpo Järvinen 	}
635fd6149d3SIlpo Järvinen 
636bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
637bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
63833ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
63933ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
64033ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
64133ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
642bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
64333ad798cSAdam Langley 		} else {
644496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
64540efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
64640efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
64740efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
64840efc6faSStephen Hemminger 		}
64933ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
65033ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
65133ad798cSAdam Langley 	}
65233ad798cSAdam Langley 
653bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
65433ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
65533ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
65633ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
65733ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
65833ad798cSAdam Langley 	}
65933ad798cSAdam Langley 
660bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
66133ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
66233ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
66333ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
66433ad798cSAdam Langley 			       opts->ws);
66533ad798cSAdam Langley 	}
66633ad798cSAdam Langley 
66733ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
66833ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
66933ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
67040efc6faSStephen Hemminger 		int this_sack;
67140efc6faSStephen Hemminger 
67240efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
67340efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
67440efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
67533ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
67640efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
6772de979bdSStephen Hemminger 
67833ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
67933ad798cSAdam Langley 		     ++this_sack) {
68040efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
68140efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
68240efc6faSStephen Hemminger 		}
6832de979bdSStephen Hemminger 
68440efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
68540efc6faSStephen Hemminger 	}
6862100c8d2SYuchung Cheng 
6872100c8d2SYuchung Cheng 	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
6882100c8d2SYuchung Cheng 		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
6897f9b838bSDaniel Lee 		u8 *p = (u8 *)ptr;
6907f9b838bSDaniel Lee 		u32 len; /* Fast Open option length */
6912100c8d2SYuchung Cheng 
6927f9b838bSDaniel Lee 		if (foc->exp) {
6937f9b838bSDaniel Lee 			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
6947f9b838bSDaniel Lee 			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
6952100c8d2SYuchung Cheng 				     TCPOPT_FASTOPEN_MAGIC);
6967f9b838bSDaniel Lee 			p += TCPOLEN_EXP_FASTOPEN_BASE;
6977f9b838bSDaniel Lee 		} else {
6987f9b838bSDaniel Lee 			len = TCPOLEN_FASTOPEN_BASE + foc->len;
6997f9b838bSDaniel Lee 			*p++ = TCPOPT_FASTOPEN;
7007f9b838bSDaniel Lee 			*p++ = len;
7012100c8d2SYuchung Cheng 		}
7027f9b838bSDaniel Lee 
7037f9b838bSDaniel Lee 		memcpy(p, foc->val, foc->len);
7047f9b838bSDaniel Lee 		if ((len & 3) == 2) {
7057f9b838bSDaniel Lee 			p[foc->len] = TCPOPT_NOP;
7067f9b838bSDaniel Lee 			p[foc->len + 1] = TCPOPT_NOP;
7077f9b838bSDaniel Lee 		}
7087f9b838bSDaniel Lee 		ptr += (len + 3) >> 2;
7092100c8d2SYuchung Cheng 	}
71060e2a778SUrsula Braun 
71160e2a778SUrsula Braun 	smc_options_write(ptr, &options);
712eda7acddSPeter Krystad 
713ea66758cSPaolo Abeni 	mptcp_options_write(th, ptr, tp, opts);
71460e2a778SUrsula Braun }
71560e2a778SUrsula Braun 
71660e2a778SUrsula Braun static void smc_set_option(const struct tcp_sock *tp,
71760e2a778SUrsula Braun 			   struct tcp_out_options *opts,
71860e2a778SUrsula Braun 			   unsigned int *remaining)
71960e2a778SUrsula Braun {
72060e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
72160e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
72260e2a778SUrsula Braun 		if (tp->syn_smc) {
72360e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
72460e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
72560e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
72660e2a778SUrsula Braun 			}
72760e2a778SUrsula Braun 		}
72860e2a778SUrsula Braun 	}
72960e2a778SUrsula Braun #endif
73060e2a778SUrsula Braun }
73160e2a778SUrsula Braun 
73260e2a778SUrsula Braun static void smc_set_option_cond(const struct tcp_sock *tp,
73360e2a778SUrsula Braun 				const struct inet_request_sock *ireq,
73460e2a778SUrsula Braun 				struct tcp_out_options *opts,
73560e2a778SUrsula Braun 				unsigned int *remaining)
73660e2a778SUrsula Braun {
73760e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
73860e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
73960e2a778SUrsula Braun 		if (tp->syn_smc && ireq->smc_ok) {
74060e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
74160e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
74260e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
74360e2a778SUrsula Braun 			}
74460e2a778SUrsula Braun 		}
74560e2a778SUrsula Braun 	}
74660e2a778SUrsula Braun #endif
74740efc6faSStephen Hemminger }
74840efc6faSStephen Hemminger 
749cec37a6eSPeter Krystad static void mptcp_set_option_cond(const struct request_sock *req,
750cec37a6eSPeter Krystad 				  struct tcp_out_options *opts,
751cec37a6eSPeter Krystad 				  unsigned int *remaining)
752cec37a6eSPeter Krystad {
753cec37a6eSPeter Krystad 	if (rsk_is_mptcp(req)) {
754cec37a6eSPeter Krystad 		unsigned int size;
755cec37a6eSPeter Krystad 
756cec37a6eSPeter Krystad 		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
757cec37a6eSPeter Krystad 			if (*remaining >= size) {
758cec37a6eSPeter Krystad 				opts->options |= OPTION_MPTCP;
759cec37a6eSPeter Krystad 				*remaining -= size;
760cec37a6eSPeter Krystad 			}
761cec37a6eSPeter Krystad 		}
762cec37a6eSPeter Krystad 	}
763cec37a6eSPeter Krystad }
764cec37a6eSPeter Krystad 
76567edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
76667edfef7SAndi Kleen  * network wire format yet.
76767edfef7SAndi Kleen  */
76895c96174SEric Dumazet static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
76933ad798cSAdam Langley 				struct tcp_out_options *opts,
770cf533ea5SEric Dumazet 				struct tcp_md5sig_key **md5)
771cf533ea5SEric Dumazet {
77233ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
77395c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
774783237e8SYuchung Cheng 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
77533ad798cSAdam Langley 
7768c2320e8SEric Dumazet 	*md5 = NULL;
777cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
778459837b5SDmitry Safonov 	if (static_branch_unlikely(&tcp_md5_needed.key) &&
7796015c71eSEric Dumazet 	    rcu_access_pointer(tp->md5sig_info)) {
78033ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
78133ad798cSAdam Langley 		if (*md5) {
78233ad798cSAdam Langley 			opts->options |= OPTION_MD5;
783bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_MD5SIG_ALIGNED;
784cfb6eeb4SYOSHIFUJI Hideaki 		}
7858c2320e8SEric Dumazet 	}
786cfb6eeb4SYOSHIFUJI Hideaki #endif
78733ad798cSAdam Langley 
78833ad798cSAdam Langley 	/* We always get an MSS option.  The option bytes which will be seen in
78933ad798cSAdam Langley 	 * normal data packets should timestamps be used, must be in the MSS
79033ad798cSAdam Langley 	 * advertised.  But we subtract them from tp->mss_cache so that
79133ad798cSAdam Langley 	 * calculations in tcp_sendmsg are simpler etc.  So account for this
79233ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
79333ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
79433ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
79533ad798cSAdam Langley 	 * SACKs don't matter, we never delay an ACK when we have any of those
79633ad798cSAdam Langley 	 * going out.  */
79733ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
798bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
79933ad798cSAdam Langley 
8003666f666SKuniyuki Iwashima 	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
80133ad798cSAdam Langley 		opts->options |= OPTION_TS;
802614e8316SEric Dumazet 		opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
80333ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
804bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
80533ad798cSAdam Langley 	}
8063666f666SKuniyuki Iwashima 	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
80733ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
80889e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
809bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
81033ad798cSAdam Langley 	}
8113666f666SKuniyuki Iwashima 	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
81233ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
813b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
814bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
81533ad798cSAdam Langley 	}
81633ad798cSAdam Langley 
817783237e8SYuchung Cheng 	if (fastopen && fastopen->cookie.len >= 0) {
8182646c831SDaniel Lee 		u32 need = fastopen->cookie.len;
8192646c831SDaniel Lee 
8202646c831SDaniel Lee 		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
8212646c831SDaniel Lee 					       TCPOLEN_FASTOPEN_BASE;
822783237e8SYuchung Cheng 		need = (need + 3) & ~3U;  /* Align to 32 bits */
823783237e8SYuchung Cheng 		if (remaining >= need) {
824783237e8SYuchung Cheng 			opts->options |= OPTION_FAST_OPEN_COOKIE;
825783237e8SYuchung Cheng 			opts->fastopen_cookie = &fastopen->cookie;
826783237e8SYuchung Cheng 			remaining -= need;
827783237e8SYuchung Cheng 			tp->syn_fastopen = 1;
8282646c831SDaniel Lee 			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
829783237e8SYuchung Cheng 		}
830783237e8SYuchung Cheng 	}
831bd0388aeSWilliam Allen Simpson 
83260e2a778SUrsula Braun 	smc_set_option(tp, opts, &remaining);
83360e2a778SUrsula Braun 
834cec37a6eSPeter Krystad 	if (sk_is_mptcp(sk)) {
835cec37a6eSPeter Krystad 		unsigned int size;
836cec37a6eSPeter Krystad 
837cc7972eaSChristoph Paasch 		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
838cec37a6eSPeter Krystad 			opts->options |= OPTION_MPTCP;
839cec37a6eSPeter Krystad 			remaining -= size;
840cec37a6eSPeter Krystad 		}
841cec37a6eSPeter Krystad 	}
842cec37a6eSPeter Krystad 
843331fca43SMartin KaFai Lau 	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
844331fca43SMartin KaFai Lau 
845bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
84633ad798cSAdam Langley }
84733ad798cSAdam Langley 
84867edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
84960e2a778SUrsula Braun static unsigned int tcp_synack_options(const struct sock *sk,
85060e2a778SUrsula Braun 				       struct request_sock *req,
85195c96174SEric Dumazet 				       unsigned int mss, struct sk_buff *skb,
85233ad798cSAdam Langley 				       struct tcp_out_options *opts,
85380f03e27SEric Dumazet 				       const struct tcp_md5sig_key *md5,
854e114e1e8SEric Dumazet 				       struct tcp_fastopen_cookie *foc,
855331fca43SMartin KaFai Lau 				       enum tcp_synack_type synack_type,
856331fca43SMartin KaFai Lau 				       struct sk_buff *syn_skb)
8574957faadSWilliam Allen Simpson {
85833ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
85995c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
86033ad798cSAdam Langley 
86133ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
86280f03e27SEric Dumazet 	if (md5) {
86333ad798cSAdam Langley 		opts->options |= OPTION_MD5;
8644957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
8654957faadSWilliam Allen Simpson 
8664957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
8674957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
8684957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
8694957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
8704957faadSWilliam Allen Simpson 		 */
871e114e1e8SEric Dumazet 		if (synack_type != TCP_SYNACK_COOKIE)
872de213e5eSEric Dumazet 			ireq->tstamp_ok &= !ireq->sack_ok;
87333ad798cSAdam Langley 	}
87433ad798cSAdam Langley #endif
87533ad798cSAdam Langley 
8764957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
87733ad798cSAdam Langley 	opts->mss = mss;
8784957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
87933ad798cSAdam Langley 
88033ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
88133ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
88289e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
8834957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
88433ad798cSAdam Langley 	}
885de213e5eSEric Dumazet 	if (likely(ireq->tstamp_ok)) {
88633ad798cSAdam Langley 		opts->options |= OPTION_TS;
887614e8316SEric Dumazet 		opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
888614e8316SEric Dumazet 			      tcp_rsk(req)->ts_off;
889eba20811SEric Dumazet 		opts->tsecr = READ_ONCE(req->ts_recent);
8904957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
89133ad798cSAdam Langley 	}
89233ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
89333ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
894de213e5eSEric Dumazet 		if (unlikely(!ireq->tstamp_ok))
8954957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
89633ad798cSAdam Langley 	}
8977f9b838bSDaniel Lee 	if (foc != NULL && foc->len >= 0) {
8987f9b838bSDaniel Lee 		u32 need = foc->len;
8997f9b838bSDaniel Lee 
9007f9b838bSDaniel Lee 		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
9017f9b838bSDaniel Lee 				   TCPOLEN_FASTOPEN_BASE;
9028336886fSJerry Chu 		need = (need + 3) & ~3U;  /* Align to 32 bits */
9038336886fSJerry Chu 		if (remaining >= need) {
9048336886fSJerry Chu 			opts->options |= OPTION_FAST_OPEN_COOKIE;
9058336886fSJerry Chu 			opts->fastopen_cookie = foc;
9068336886fSJerry Chu 			remaining -= need;
9078336886fSJerry Chu 		}
9088336886fSJerry Chu 	}
9094957faadSWilliam Allen Simpson 
910cec37a6eSPeter Krystad 	mptcp_set_option_cond(req, opts, &remaining);
911cec37a6eSPeter Krystad 
91260e2a778SUrsula Braun 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
91360e2a778SUrsula Braun 
914331fca43SMartin KaFai Lau 	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
915331fca43SMartin KaFai Lau 			      synack_type, opts, &remaining);
916331fca43SMartin KaFai Lau 
9174957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
91833ad798cSAdam Langley }
91933ad798cSAdam Langley 
92067edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
92167edfef7SAndi Kleen  * final wire format yet.
92267edfef7SAndi Kleen  */
92395c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
92433ad798cSAdam Langley 					struct tcp_out_options *opts,
925cf533ea5SEric Dumazet 					struct tcp_md5sig_key **md5)
926cf533ea5SEric Dumazet {
92733ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
92895c96174SEric Dumazet 	unsigned int size = 0;
929cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
93033ad798cSAdam Langley 
9315843ef42SAndi Kleen 	opts->options = 0;
9325843ef42SAndi Kleen 
9338c2320e8SEric Dumazet 	*md5 = NULL;
93433ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
935459837b5SDmitry Safonov 	if (static_branch_unlikely(&tcp_md5_needed.key) &&
9366015c71eSEric Dumazet 	    rcu_access_pointer(tp->md5sig_info)) {
93733ad798cSAdam Langley 		*md5 = tp->af_specific->md5_lookup(sk, sk);
9388c2320e8SEric Dumazet 		if (*md5) {
93933ad798cSAdam Langley 			opts->options |= OPTION_MD5;
94033ad798cSAdam Langley 			size += TCPOLEN_MD5SIG_ALIGNED;
94133ad798cSAdam Langley 		}
9428c2320e8SEric Dumazet 	}
94333ad798cSAdam Langley #endif
94433ad798cSAdam Langley 
94533ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
94633ad798cSAdam Langley 		opts->options |= OPTION_TS;
947614e8316SEric Dumazet 		opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
948614e8316SEric Dumazet 				tp->tsoffset : 0;
94933ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
95033ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
95133ad798cSAdam Langley 	}
95233ad798cSAdam Langley 
953cec37a6eSPeter Krystad 	/* MPTCP options have precedence over SACK for the limited TCP
954cec37a6eSPeter Krystad 	 * option space because a MPTCP connection would be forced to
955cec37a6eSPeter Krystad 	 * fall back to regular TCP if a required multipath option is
956cec37a6eSPeter Krystad 	 * missing. SACK still gets a chance to use whatever space is
957cec37a6eSPeter Krystad 	 * left.
958cec37a6eSPeter Krystad 	 */
959cec37a6eSPeter Krystad 	if (sk_is_mptcp(sk)) {
960cec37a6eSPeter Krystad 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
961cec37a6eSPeter Krystad 		unsigned int opt_size = 0;
962cec37a6eSPeter Krystad 
963cec37a6eSPeter Krystad 		if (mptcp_established_options(sk, skb, &opt_size, remaining,
964cec37a6eSPeter Krystad 					      &opts->mptcp)) {
965cec37a6eSPeter Krystad 			opts->options |= OPTION_MPTCP;
966cec37a6eSPeter Krystad 			size += opt_size;
967cec37a6eSPeter Krystad 		}
968cec37a6eSPeter Krystad 	}
969cec37a6eSPeter Krystad 
970cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
971cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
97295c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
9739cfcca23SMat Martineau 		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
9749cfcca23SMat Martineau 					 TCPOLEN_SACK_PERBLOCK))
9759cfcca23SMat Martineau 			return size;
9769cfcca23SMat Martineau 
97733ad798cSAdam Langley 		opts->num_sack_blocks =
97895c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
97933ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
98033ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
9819cfcca23SMat Martineau 
98233ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
98333ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
98433ad798cSAdam Langley 	}
98533ad798cSAdam Langley 
986331fca43SMartin KaFai Lau 	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
987331fca43SMartin KaFai Lau 					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
988331fca43SMartin KaFai Lau 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
989331fca43SMartin KaFai Lau 
990331fca43SMartin KaFai Lau 		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
991331fca43SMartin KaFai Lau 
992331fca43SMartin KaFai Lau 		size = MAX_TCP_OPTION_SPACE - remaining;
993331fca43SMartin KaFai Lau 	}
994331fca43SMartin KaFai Lau 
99533ad798cSAdam Langley 	return size;
99640efc6faSStephen Hemminger }
9971da177e4SLinus Torvalds 
99846d3ceabSEric Dumazet 
99946d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
100046d3ceabSEric Dumazet  *
100146d3ceabSEric Dumazet  * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev)
100246d3ceabSEric Dumazet  * to reduce RTT and bufferbloat.
100346d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
100446d3ceabSEric Dumazet  *
100546d3ceabSEric Dumazet  * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
100646d3ceabSEric Dumazet  * needs to be reallocated in a driver.
10078e3bff96Sstephen hemminger  * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
100846d3ceabSEric Dumazet  *
100946d3ceabSEric Dumazet  * Since transmit from skb destructor is forbidden, we use a tasklet
101046d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
101146d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
101246d3ceabSEric Dumazet  */
101346d3ceabSEric Dumazet struct tsq_tasklet {
101446d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
101546d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
101646d3ceabSEric Dumazet };
101746d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
101846d3ceabSEric Dumazet 
101973a6bab5SEric Dumazet static void tcp_tsq_write(struct sock *sk)
10206f458dfbSEric Dumazet {
10216f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
10226f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1023f9616c35SEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1024f9616c35SEric Dumazet 		struct tcp_sock *tp = tcp_sk(sk);
1025f9616c35SEric Dumazet 
1026f9616c35SEric Dumazet 		if (tp->lost_out > tp->retrans_out &&
102740570375SEric Dumazet 		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
10283a91d29fSKoichiro Den 			tcp_mstamp_refresh(tp);
1029f9616c35SEric Dumazet 			tcp_xmit_retransmit_queue(sk);
10303a91d29fSKoichiro Den 		}
1031f9616c35SEric Dumazet 
1032f9616c35SEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1033bf06200eSJohn Ogness 			       0, GFP_ATOMIC);
10346f458dfbSEric Dumazet 	}
1035f9616c35SEric Dumazet }
103673a6bab5SEric Dumazet 
103773a6bab5SEric Dumazet static void tcp_tsq_handler(struct sock *sk)
103873a6bab5SEric Dumazet {
103973a6bab5SEric Dumazet 	bh_lock_sock(sk);
104073a6bab5SEric Dumazet 	if (!sock_owned_by_user(sk))
104173a6bab5SEric Dumazet 		tcp_tsq_write(sk);
104273a6bab5SEric Dumazet 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
104373a6bab5SEric Dumazet 		sock_hold(sk);
104473a6bab5SEric Dumazet 	bh_unlock_sock(sk);
104573a6bab5SEric Dumazet }
104646d3ceabSEric Dumazet /*
10478e3bff96Sstephen hemminger  * One tasklet per cpu tries to send more skbs.
104846d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
10498e3bff96Sstephen hemminger  * transferring tsq->head because tcp_wfree() might
105046d3ceabSEric Dumazet  * interrupt us (non NAPI drivers)
105146d3ceabSEric Dumazet  */
1052c6533ca8SAllen Pais static void tcp_tasklet_func(struct tasklet_struct *t)
105346d3ceabSEric Dumazet {
1054c6533ca8SAllen Pais 	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
105546d3ceabSEric Dumazet 	LIST_HEAD(list);
105646d3ceabSEric Dumazet 	unsigned long flags;
105746d3ceabSEric Dumazet 	struct list_head *q, *n;
105846d3ceabSEric Dumazet 	struct tcp_sock *tp;
105946d3ceabSEric Dumazet 	struct sock *sk;
106046d3ceabSEric Dumazet 
106146d3ceabSEric Dumazet 	local_irq_save(flags);
106246d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
106346d3ceabSEric Dumazet 	local_irq_restore(flags);
106446d3ceabSEric Dumazet 
106546d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
106646d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
106746d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
106846d3ceabSEric Dumazet 
106946d3ceabSEric Dumazet 		sk = (struct sock *)tp;
10700a9648f1SEric Dumazet 		smp_mb__before_atomic();
10717aa5470cSEric Dumazet 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
10727aa5470cSEric Dumazet 
10736f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
107446d3ceabSEric Dumazet 		sk_free(sk);
107546d3ceabSEric Dumazet 	}
107646d3ceabSEric Dumazet }
107746d3ceabSEric Dumazet 
107840fc3423SEric Dumazet #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
107940fc3423SEric Dumazet 			  TCPF_WRITE_TIMER_DEFERRED |	\
108040fc3423SEric Dumazet 			  TCPF_DELACK_TIMER_DEFERRED |	\
1081133c4c0dSEric Dumazet 			  TCPF_MTU_REDUCED_DEFERRED |	\
1082133c4c0dSEric Dumazet 			  TCPF_ACK_DEFERRED)
108346d3ceabSEric Dumazet /**
108446d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
108546d3ceabSEric Dumazet  * @sk: socket
108646d3ceabSEric Dumazet  *
108746d3ceabSEric Dumazet  * called from release_sock() to perform protocol dependent
108846d3ceabSEric Dumazet  * actions before socket release.
108946d3ceabSEric Dumazet  */
109046d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
109146d3ceabSEric Dumazet {
1092fac30731SEric Dumazet 	unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1093fac30731SEric Dumazet 	unsigned long nflags;
109446d3ceabSEric Dumazet 
10956f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
10966f458dfbSEric Dumazet 	do {
10976f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
10986f458dfbSEric Dumazet 			return;
10996f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
1100fac30731SEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
11016f458dfbSEric Dumazet 
110273a6bab5SEric Dumazet 	if (flags & TCPF_TSQ_DEFERRED) {
110373a6bab5SEric Dumazet 		tcp_tsq_write(sk);
110473a6bab5SEric Dumazet 		__sock_put(sk);
110573a6bab5SEric Dumazet 	}
1106c3f9b018SEric Dumazet 
110740fc3423SEric Dumazet 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
11086f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
1109144d56e9SEric Dumazet 		__sock_put(sk);
1110144d56e9SEric Dumazet 	}
111140fc3423SEric Dumazet 	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
11126f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
1113144d56e9SEric Dumazet 		__sock_put(sk);
1114144d56e9SEric Dumazet 	}
111540fc3423SEric Dumazet 	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
11164fab9071SNeal Cardwell 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1117144d56e9SEric Dumazet 		__sock_put(sk);
1118144d56e9SEric Dumazet 	}
1119133c4c0dSEric Dumazet 	if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
1120133c4c0dSEric Dumazet 		tcp_send_ack(sk);
112146d3ceabSEric Dumazet }
112246d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
112346d3ceabSEric Dumazet 
112446d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
112546d3ceabSEric Dumazet {
112646d3ceabSEric Dumazet 	int i;
112746d3ceabSEric Dumazet 
112846d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
112946d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
113046d3ceabSEric Dumazet 
113146d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
1132c6533ca8SAllen Pais 		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
113346d3ceabSEric Dumazet 	}
113446d3ceabSEric Dumazet }
113546d3ceabSEric Dumazet 
113646d3ceabSEric Dumazet /*
113746d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
11388e3bff96Sstephen hemminger  * We can't xmit new skbs from this context, as we might already
113946d3ceabSEric Dumazet  * hold qdisc lock.
114046d3ceabSEric Dumazet  */
1141d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb)
114246d3ceabSEric Dumazet {
114346d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
114446d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1145408f0a6cSEric Dumazet 	unsigned long flags, nval, oval;
1146b548b17aSEric Dumazet 	struct tsq_tasklet *tsq;
1147b548b17aSEric Dumazet 	bool empty;
11489b462d02SEric Dumazet 
11499b462d02SEric Dumazet 	/* Keep one reference on sk_wmem_alloc.
11509b462d02SEric Dumazet 	 * Will be released by sk_free() from here or tcp_tasklet_func()
11519b462d02SEric Dumazet 	 */
115214afee4bSReshetova, Elena 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
11539b462d02SEric Dumazet 
11549b462d02SEric Dumazet 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
11559b462d02SEric Dumazet 	 * Wait until our queues (qdisc + devices) are drained.
11569b462d02SEric Dumazet 	 * This gives :
11579b462d02SEric Dumazet 	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
11589b462d02SEric Dumazet 	 * - chance for incoming ACK (processed by another cpu maybe)
11599b462d02SEric Dumazet 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
11609b462d02SEric Dumazet 	 */
116114afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
11629b462d02SEric Dumazet 		goto out;
116346d3ceabSEric Dumazet 
1164b548b17aSEric Dumazet 	oval = smp_load_acquire(&sk->sk_tsq_flags);
1165b548b17aSEric Dumazet 	do {
1166408f0a6cSEric Dumazet 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1167408f0a6cSEric Dumazet 			goto out;
1168408f0a6cSEric Dumazet 
116973a6bab5SEric Dumazet 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1170b548b17aSEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1171408f0a6cSEric Dumazet 
117246d3ceabSEric Dumazet 	/* queue this socket to tasklet queue */
117346d3ceabSEric Dumazet 	local_irq_save(flags);
1174903ceff7SChristoph Lameter 	tsq = this_cpu_ptr(&tsq_tasklet);
1175a9b204d1SEric Dumazet 	empty = list_empty(&tsq->head);
117646d3ceabSEric Dumazet 	list_add(&tp->tsq_node, &tsq->head);
1177a9b204d1SEric Dumazet 	if (empty)
117846d3ceabSEric Dumazet 		tasklet_schedule(&tsq->tasklet);
117946d3ceabSEric Dumazet 	local_irq_restore(flags);
11809b462d02SEric Dumazet 	return;
11819b462d02SEric Dumazet out:
11829b462d02SEric Dumazet 	sk_free(sk);
118346d3ceabSEric Dumazet }
118446d3ceabSEric Dumazet 
118573a6bab5SEric Dumazet /* Note: Called under soft irq.
118673a6bab5SEric Dumazet  * We can call TCP stack right away, unless socket is owned by user.
1187218af599SEric Dumazet  */
1188218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1189218af599SEric Dumazet {
1190218af599SEric Dumazet 	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1191218af599SEric Dumazet 	struct sock *sk = (struct sock *)tp;
1192218af599SEric Dumazet 
119373a6bab5SEric Dumazet 	tcp_tsq_handler(sk);
119473a6bab5SEric Dumazet 	sock_put(sk);
1195218af599SEric Dumazet 
1196218af599SEric Dumazet 	return HRTIMER_NORESTART;
1197218af599SEric Dumazet }
1198218af599SEric Dumazet 
1199a7a25630SEric Dumazet static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1200a7a25630SEric Dumazet 				      u64 prior_wstamp)
1201e2080072SEric Dumazet {
1202ab408b6dSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1203ab408b6dSEric Dumazet 
1204ab408b6dSEric Dumazet 	if (sk->sk_pacing_status != SK_PACING_NONE) {
120528b24f90SEric Dumazet 		unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
1206ab408b6dSEric Dumazet 
1207ab408b6dSEric Dumazet 		/* Original sch_fq does not pace first 10 MSS
1208ab408b6dSEric Dumazet 		 * Note that tp->data_segs_out overflows after 2^32 packets,
1209ab408b6dSEric Dumazet 		 * this is a minor annoyance.
1210ab408b6dSEric Dumazet 		 */
121176a9ebe8SEric Dumazet 		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1212a7a25630SEric Dumazet 			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1213a7a25630SEric Dumazet 			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1214a7a25630SEric Dumazet 
1215a7a25630SEric Dumazet 			/* take into account OS jitter */
1216a7a25630SEric Dumazet 			len_ns -= min_t(u64, len_ns / 2, credit);
1217a7a25630SEric Dumazet 			tp->tcp_wstamp_ns += len_ns;
1218ab408b6dSEric Dumazet 		}
1219ab408b6dSEric Dumazet 	}
1220e2080072SEric Dumazet 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1221e2080072SEric Dumazet }
1222e2080072SEric Dumazet 
122305e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
122405e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1225dd2e0b86SEric Dumazet INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
122605e22e83SEric Dumazet 
12271da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
12281da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
12291da177e4SLinus Torvalds  * transmission and possible later retransmissions.
12301da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
12311da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
12321da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
12331da177e4SLinus Torvalds  * device.
12341da177e4SLinus Torvalds  *
12351da177e4SLinus Torvalds  * We are working here with either a clone of the original
12361da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
12371da177e4SLinus Torvalds  */
12382987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
12392987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
12401da177e4SLinus Torvalds {
12416687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1242dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1243dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1244dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
124533ad798cSAdam Langley 	struct tcp_out_options opts;
124695c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
12478c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1248cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
12491da177e4SLinus Torvalds 	struct tcphdr *th;
1250a7a25630SEric Dumazet 	u64 prior_wstamp;
12511da177e4SLinus Torvalds 	int err;
12521da177e4SLinus Torvalds 
1253dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
12546f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
12557f12422cSYuchung Cheng 	prior_wstamp = tp->tcp_wstamp_ns;
12567f12422cSYuchung Cheng 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1257a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
1258ccdbb6e9SEric Dumazet 	if (clone_it) {
12598c72c65bSEric Dumazet 		oskb = skb;
1260e2080072SEric Dumazet 
1261e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1262e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1263e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1264dfb4b9dcSDavid S. Miller 			else
1265e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1266e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1267e2080072SEric Dumazet 
1268dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1269dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1270b738a185SEric Dumazet 		/* retransmit skbs might have a non zero value in skb->dev
1271b738a185SEric Dumazet 		 * because skb->dev is aliased with skb->rbnode.rb_left
1272b738a185SEric Dumazet 		 */
1273b738a185SEric Dumazet 		skb->dev = NULL;
1274dfb4b9dcSDavid S. Miller 	}
12755f6188a8SEric Dumazet 
1276dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1277dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
127833ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
12791da177e4SLinus Torvalds 
1280051ba674SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
128133ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
1282051ba674SEric Dumazet 	} else {
128333ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
128433ad798cSAdam Langley 							   &md5);
1285051ba674SEric Dumazet 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1286051ba674SEric Dumazet 		 * at receiver : This slightly improve GRO performance.
1287051ba674SEric Dumazet 		 * Note that we do not force the PSH flag for non GSO packets,
1288051ba674SEric Dumazet 		 * because they might be sent under high congestion events,
1289051ba674SEric Dumazet 		 * and in this case it is better to delay the delivery of 1-MSS
1290051ba674SEric Dumazet 		 * packets and thus the corresponding ACK packet that would
1291051ba674SEric Dumazet 		 * release the following packet.
1292051ba674SEric Dumazet 		 */
1293051ba674SEric Dumazet 		if (tcp_skb_pcount(skb) > 1)
1294051ba674SEric Dumazet 			tcb->tcp_flags |= TCPHDR_PSH;
1295051ba674SEric Dumazet 	}
129633ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
12971da177e4SLinus Torvalds 
1298726e9e8bSEric Dumazet 	/* We set skb->ooo_okay to one if this packet can select
1299726e9e8bSEric Dumazet 	 * a different TX queue than prior packets of this flow,
1300726e9e8bSEric Dumazet 	 * to avoid self inflicted reorders.
1301726e9e8bSEric Dumazet 	 * The 'other' queue decision is based on current cpu number
1302726e9e8bSEric Dumazet 	 * if XPS is enabled, or sk->sk_txhash otherwise.
1303726e9e8bSEric Dumazet 	 * We can switch to another (and better) queue if:
1304726e9e8bSEric Dumazet 	 * 1) No packet with payload is in qdisc/device queues.
1305726e9e8bSEric Dumazet 	 *    Delays in TX completion can defeat the test
1306726e9e8bSEric Dumazet 	 *    even if packets were already sent.
1307726e9e8bSEric Dumazet 	 * 2) Or rtx queue is empty.
1308726e9e8bSEric Dumazet 	 *    This mitigates above case if ACK packets for
1309726e9e8bSEric Dumazet 	 *    all prior packets were already processed.
1310547669d4SEric Dumazet 	 */
1311726e9e8bSEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) ||
1312726e9e8bSEric Dumazet 			tcp_rtx_queue_empty(sk);
13131da177e4SLinus Torvalds 
131438ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
131538ab52e8SEric Dumazet 	 * this might cause drops if packet is looped back :
131638ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
131738ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
131838ab52e8SEric Dumazet 	 */
131938ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
132038ab52e8SEric Dumazet 
1321aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1322aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
132346d3ceabSEric Dumazet 
132446d3ceabSEric Dumazet 	skb_orphan(skb);
132546d3ceabSEric Dumazet 	skb->sk = sk;
13261d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
132714afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
13281da177e4SLinus Torvalds 
1329eb44ad4eSEric Dumazet 	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
1330c3a2e837SJulian Anastasov 
13311da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1332ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1333c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1334c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
13351da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
13362987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1337df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
13384de075e0SEric Dumazet 					tcb->tcp_flags);
1339dfb4b9dcSDavid S. Miller 
13401da177e4SLinus Torvalds 	th->check		= 0;
13411da177e4SLinus Torvalds 	th->urg_ptr		= 0;
13421da177e4SLinus Torvalds 
134333f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below snd_una win probe */
13447691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
13457691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
13461da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
13471da177e4SLinus Torvalds 			th->urg = 1;
13487691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
13490eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
13507691367dSHerbert Xu 			th->urg = 1;
13517691367dSHerbert Xu 		}
13521da177e4SLinus Torvalds 	}
13531da177e4SLinus Torvalds 
135451466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1355ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1356ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1357ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1358ea1627c2SEric Dumazet 	} else {
1359ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1360ea1627c2SEric Dumazet 		 * is never scaled.
1361ea1627c2SEric Dumazet 		 */
1362ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1363ea1627c2SEric Dumazet 	}
1364fa3fe2b1SFlorian Westphal 
1365ea66758cSPaolo Abeni 	tcp_options_write(th, tp, &opts);
1366fa3fe2b1SFlorian Westphal 
1367cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1368cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1369cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1370aba54656SEric Dumazet 		sk_gso_disable(sk);
1371bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
137239f8e58eSEric Dumazet 					       md5, sk, skb);
1373cfb6eeb4SYOSHIFUJI Hideaki 	}
1374cfb6eeb4SYOSHIFUJI Hideaki #endif
1375cfb6eeb4SYOSHIFUJI Hideaki 
1376331fca43SMartin KaFai Lau 	/* BPF prog is the last one writing header option */
1377331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1378331fca43SMartin KaFai Lau 
1379dd2e0b86SEric Dumazet 	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1380dd2e0b86SEric Dumazet 			   tcp_v6_send_check, tcp_v4_send_check,
1381dd2e0b86SEric Dumazet 			   sk, skb);
13821da177e4SLinus Torvalds 
13834de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1384059217c1SNeal Cardwell 		tcp_event_ack_sent(sk, rcv_nxt);
13851da177e4SLinus Torvalds 
1386a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1387cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1388a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1389ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1390a44d6eacSMartin KaFai Lau 	}
13911da177e4SLinus Torvalds 
1392bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1393aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1394aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
13951da177e4SLinus Torvalds 
13962efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
13970ae5b43dSYuchung Cheng 	skb_set_hash_from_sk(skb, sk);
1398f69ad292SEric Dumazet 	/* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */
1399cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1400f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1401cd7d8498SEric Dumazet 
1402d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1403971f10ecSEric Dumazet 
1404971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1405971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1406971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1407971f10ecSEric Dumazet 
1408a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1409a842fe14SEric Dumazet 
141005e22e83SEric Dumazet 	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
141105e22e83SEric Dumazet 				 inet6_csk_xmit, ip_queue_xmit,
141205e22e83SEric Dumazet 				 sk, skb, &inet->cork.fl);
14137faee5c0SEric Dumazet 
14148c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
14155ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
14168c72c65bSEric Dumazet 		err = net_xmit_eval(err);
14178c72c65bSEric Dumazet 	}
1418fc225799SEric Dumazet 	if (!err && oskb) {
1419a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1420fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1421fc225799SEric Dumazet 	}
14228c72c65bSEric Dumazet 	return err;
14231da177e4SLinus Torvalds }
14241da177e4SLinus Torvalds 
14252987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
14262987babbSYuchung Cheng 			    gfp_t gfp_mask)
14272987babbSYuchung Cheng {
14282987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
14292987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
14302987babbSYuchung Cheng }
14312987babbSYuchung Cheng 
143267edfef7SAndi Kleen /* This routine just queues the buffer for sending.
14331da177e4SLinus Torvalds  *
14341da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
14351da177e4SLinus Torvalds  * otherwise socket can stall.
14361da177e4SLinus Torvalds  */
14371da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
14381da177e4SLinus Torvalds {
14391da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
14401da177e4SLinus Torvalds 
14411da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
14420f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1443f4a775d1SEric Dumazet 	__skb_header_release(skb);
1444fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
1445ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
14463ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
144967edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
14505bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1451f6302d1dSDavid S. Miller {
14524a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1453f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1454f6302d1dSDavid S. Miller 		 * non-TSO case.
1455f6302d1dSDavid S. Miller 		 */
1456cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1457f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1458f6302d1dSDavid S. Miller 	} else {
1459cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1460f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
14611da177e4SLinus Torvalds 	}
14621da177e4SLinus Torvalds }
14631da177e4SLinus Torvalds 
1464797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various
1465797108d1SIlpo Järvinen  * tweaks to fix counters
1466797108d1SIlpo Järvinen  */
1467cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1468797108d1SIlpo Järvinen {
1469797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1470797108d1SIlpo Järvinen 
1471797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1472797108d1SIlpo Järvinen 
1473797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1474797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1475797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1476797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1477797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1478797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1479797108d1SIlpo Järvinen 
1480797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1481797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1482797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1483797108d1SIlpo Järvinen 
1484797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1485797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1486713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1487797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1488797108d1SIlpo Järvinen 
1489797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1490797108d1SIlpo Järvinen }
1491797108d1SIlpo Järvinen 
14920a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
14930a2cf20cSSoheil Hassas Yeganeh {
14940a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
14950a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
14960a2cf20cSSoheil Hassas Yeganeh }
14970a2cf20cSSoheil Hassas Yeganeh 
1498490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1499490cc7d0SWillem de Bruijn {
1500490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1501490cc7d0SWillem de Bruijn 
15020a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1503490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1504490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1505490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1506490cc7d0SWillem de Bruijn 
1507490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1508490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1509490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1510b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1511b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1512490cc7d0SWillem de Bruijn 	}
1513490cc7d0SWillem de Bruijn }
1514490cc7d0SWillem de Bruijn 
1515a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1516a166140eSMartin KaFai Lau {
1517a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1518a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1519a166140eSMartin KaFai Lau }
1520a166140eSMartin KaFai Lau 
152175c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
152275c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
152375c119afSEric Dumazet 					 struct sk_buff *buff,
152475c119afSEric Dumazet 					 struct sock *sk,
152575c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
152675c119afSEric Dumazet {
152775c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
152875c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
152975c119afSEric Dumazet 	else
153075c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
153175c119afSEric Dumazet }
153275c119afSEric Dumazet 
15331da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
15341da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
15351da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
15361da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
15371da177e4SLinus Torvalds  */
153875c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
153975c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
15406cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
15411da177e4SLinus Torvalds {
15421da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
15431da177e4SLinus Torvalds 	struct sk_buff *buff;
1544b4a24397SEric Dumazet 	int old_factor;
1545b617158dSEric Dumazet 	long limit;
1546b60b49eaSHerbert Xu 	int nlen;
15479ce01461SIlpo Järvinen 	u8 flags;
15481da177e4SLinus Torvalds 
15492fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
15502fceec13SIlpo Järvinen 		return -EINVAL;
15516a438bbeSStephen Hemminger 
1552b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
15531da177e4SLinus Torvalds 
1554b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1555b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1556b617158dSEric Dumazet 	 * SO_SNDBUF values.
1557b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1558b617158dSEric Dumazet 	 */
15597c4e983cSAlexander Duyck 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1560b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1561b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1562b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1563b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1564f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1565f070ef2aSEric Dumazet 		return -ENOMEM;
1566f070ef2aSEric Dumazet 	}
1567f070ef2aSEric Dumazet 
1568c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, gfp))
15691da177e4SLinus Torvalds 		return -ENOMEM;
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
15725882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
157351456b29SIan Morris 	if (!buff)
15741da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
157541477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
15765a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
1577ef5cb973SHerbert Xu 
1578ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
15793ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1580b4a24397SEric Dumazet 	nlen = skb->len - len;
1581b60b49eaSHerbert Xu 	buff->truesize += nlen;
1582b60b49eaSHerbert Xu 	skb->truesize -= nlen;
15831da177e4SLinus Torvalds 
15841da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
15851da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
15861da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
15871da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
15904de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
15914de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
15924de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1593e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1594a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds 	skb_split(skb, buff, len);
15971da177e4SLinus Torvalds 
1598a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(buff, skb->tstamp, true);
1599490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
16001da177e4SLinus Torvalds 
16016475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
16026475be16SDavid S. Miller 
16031da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
16045bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
16055bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
16061da177e4SLinus Torvalds 
1607b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1608b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1609b9f64820SYuchung Cheng 
16106475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
16116475be16SDavid S. Miller 	 * adjust the various packet counters.
16126475be16SDavid S. Miller 	 */
1613cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
16146475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
16156475be16SDavid S. Miller 			tcp_skb_pcount(buff);
16161da177e4SLinus Torvalds 
1617797108d1SIlpo Järvinen 		if (diff)
1618797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
16191da177e4SLinus Torvalds 	}
16201da177e4SLinus Torvalds 
16211da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1622f4a775d1SEric Dumazet 	__skb_header_release(buff);
162375c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1624f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1625e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds 	return 0;
16281da177e4SLinus Torvalds }
16291da177e4SLinus Torvalds 
1630f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1631f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
16321da177e4SLinus Torvalds  */
16337162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
16341da177e4SLinus Torvalds {
16357b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
16361da177e4SLinus Torvalds 	int i, k, eat;
16371da177e4SLinus Torvalds 
1638b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
16391da177e4SLinus Torvalds 	eat = len;
16401da177e4SLinus Torvalds 	k = 0;
16417b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
16427b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
16437b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
16449e903e08SEric Dumazet 
16459e903e08SEric Dumazet 		if (size <= eat) {
1646aff65da0SIan Campbell 			skb_frag_unref(skb, i);
16479e903e08SEric Dumazet 			eat -= size;
16481da177e4SLinus Torvalds 		} else {
16497b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
16501da177e4SLinus Torvalds 			if (eat) {
1651b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[k], eat);
16527b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
16531da177e4SLinus Torvalds 				eat = 0;
16541da177e4SLinus Torvalds 			}
16551da177e4SLinus Torvalds 			k++;
16561da177e4SLinus Torvalds 		}
16571da177e4SLinus Torvalds 	}
16587b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
16591da177e4SLinus Torvalds 
16601da177e4SLinus Torvalds 	skb->data_len -= len;
16611da177e4SLinus Torvalds 	skb->len = skb->data_len;
16627162fb24SEric Dumazet 	return len;
16631da177e4SLinus Torvalds }
16641da177e4SLinus Torvalds 
166567edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
16661da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
16671da177e4SLinus Torvalds {
16687162fb24SEric Dumazet 	u32 delta_truesize;
16697162fb24SEric Dumazet 
1670c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
16711da177e4SLinus Torvalds 		return -ENOMEM;
16721da177e4SLinus Torvalds 
16737162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
16761da177e4SLinus Torvalds 
16777162fb24SEric Dumazet 	skb->truesize	   -= delta_truesize;
1678ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, -delta_truesize);
16799b65b17dSTalal Ahmad 	if (!skb_zcopy_pure(skb))
16807162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
16811da177e4SLinus Torvalds 
16825b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
16831da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
16845bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
16851da177e4SLinus Torvalds 
16861da177e4SLinus Torvalds 	return 0;
16871da177e4SLinus Torvalds }
16881da177e4SLinus Torvalds 
16891b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
16901b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
16915d424d5aSJohn Heffner {
1692cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1693cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
16945d424d5aSJohn Heffner 	int mss_now;
16955d424d5aSJohn Heffner 
16965d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
16975d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
16985d424d5aSJohn Heffner 	 */
16995d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
17005d424d5aSJohn Heffner 
17015d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
17025d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
17035d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
17045d424d5aSJohn Heffner 
17055d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
17065d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
17075d424d5aSJohn Heffner 
17085d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
170978eb166cSKuniyuki Iwashima 	mss_now = max(mss_now,
171078eb166cSKuniyuki Iwashima 		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
17115d424d5aSJohn Heffner 	return mss_now;
17125d424d5aSJohn Heffner }
17135d424d5aSJohn Heffner 
17141b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
17151b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
17161b63edd6SYuchung Cheng {
17171b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
17181b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
17191b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
17201b63edd6SYuchung Cheng }
1721c7bb4b89SEric Dumazet EXPORT_SYMBOL(tcp_mtu_to_mss);
17221b63edd6SYuchung Cheng 
17235d424d5aSJohn Heffner /* Inverse of above */
172467469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
17255d424d5aSJohn Heffner {
1726cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1727cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
17285d424d5aSJohn Heffner 
1729e57a3447SYan Zhai 	return mss +
17305d424d5aSJohn Heffner 	      tp->tcp_header_len +
17315d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
17325d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
17335d424d5aSJohn Heffner }
1734556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
17355d424d5aSJohn Heffner 
173667edfef7SAndi Kleen /* MTU probing init per socket */
17375d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
17385d424d5aSJohn Heffner {
17395d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
17405d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1741b0f9ca53SFan Du 	struct net *net = sock_net(sk);
17425d424d5aSJohn Heffner 
1743f47d00e0SKuniyuki Iwashima 	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
17445d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
17455d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
174688d78bc0SKuniyuki Iwashima 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
17475d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
174805cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1749c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
17505d424d5aSJohn Heffner }
17514bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
17525d424d5aSJohn Heffner 
17531da177e4SLinus Torvalds /* This function synchronize snd mss to current pmtu/exthdr set.
17541da177e4SLinus Torvalds 
17551da177e4SLinus Torvalds    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
17561da177e4SLinus Torvalds    for TCP options, but includes only bare TCP header.
17571da177e4SLinus Torvalds 
17581da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1759caa20d9aSStephen Hemminger    It is minimum of user_mss and mss received with SYN.
17601da177e4SLinus Torvalds    It also does not include TCP options.
17611da177e4SLinus Torvalds 
1762d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
17631da177e4SLinus Torvalds 
17641da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
17651da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
17661da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
17671da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
17701da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
17711da177e4SLinus Torvalds 
1772d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1773d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
17741da177e4SLinus Torvalds  */
17751da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
17761da177e4SLinus Torvalds {
17771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1778d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
17795d424d5aSJohn Heffner 	int mss_now;
17801da177e4SLinus Torvalds 
17815d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
17825d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
17831da177e4SLinus Torvalds 
17845d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1785409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
17861da177e4SLinus Torvalds 
17871da177e4SLinus Torvalds 	/* And store cached results */
1788d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
17895d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
17905d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1791c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
17921da177e4SLinus Torvalds 
17931da177e4SLinus Torvalds 	return mss_now;
17941da177e4SLinus Torvalds }
17954bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
17961da177e4SLinus Torvalds 
17971da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
17981da177e4SLinus Torvalds  * and even PMTU discovery events into account.
17991da177e4SLinus Torvalds  */
18000c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
18011da177e4SLinus Torvalds {
1802cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1803cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1804c1b4a7e6SDavid S. Miller 	u32 mss_now;
180595c96174SEric Dumazet 	unsigned int header_len;
180633ad798cSAdam Langley 	struct tcp_out_options opts;
180733ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
18081da177e4SLinus Torvalds 
1809c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1810c1b4a7e6SDavid S. Miller 
18111da177e4SLinus Torvalds 	if (dst) {
18121da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1813d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
18141da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
18151da177e4SLinus Torvalds 	}
18161da177e4SLinus Torvalds 
181733ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
181833ad798cSAdam Langley 		     sizeof(struct tcphdr);
181933ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
182033ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
182133ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
182233ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
182333ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
182433ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
182533ad798cSAdam Langley 		mss_now -= delta;
182633ad798cSAdam Langley 	}
1827cfb6eeb4SYOSHIFUJI Hideaki 
18281da177e4SLinus Torvalds 	return mss_now;
18291da177e4SLinus Torvalds }
18301da177e4SLinus Torvalds 
183186fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
183286fd14adSWeiping Pan  * As additional protections, we do not touch cwnd in retransmission phases,
183386fd14adSWeiping Pan  * and if application hit its sndbuf limit recently.
183486fd14adSWeiping Pan  */
183586fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1836a762a980SDavid S. Miller {
18379e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1838a762a980SDavid S. Miller 
183986fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
184086fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
184186fd14adSWeiping Pan 		/* Limited by application or receiver window. */
184286fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
184386fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
184440570375SEric Dumazet 		if (win_used < tcp_snd_cwnd(tp)) {
184586fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
184640570375SEric Dumazet 			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
184786fd14adSWeiping Pan 		}
184886fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
184986fd14adSWeiping Pan 	}
1850c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
185186fd14adSWeiping Pan }
185286fd14adSWeiping Pan 
1853ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1854a762a980SDavid S. Miller {
18551b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1856a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1857a762a980SDavid S. Miller 
1858f4ce91ceSNeal Cardwell 	/* Track the strongest available signal of the degree to which the cwnd
1859f4ce91ceSNeal Cardwell 	 * is fully utilized. If cwnd-limited then remember that fact for the
1860f4ce91ceSNeal Cardwell 	 * current window. If not cwnd-limited then track the maximum number of
1861f4ce91ceSNeal Cardwell 	 * outstanding packets in the current window. (If cwnd-limited then we
1862f4ce91ceSNeal Cardwell 	 * chose to not update tp->max_packets_out to avoid an extra else
1863f4ce91ceSNeal Cardwell 	 * clause with no functional impact.)
1864ca8a2263SNeal Cardwell 	 */
1865f4ce91ceSNeal Cardwell 	if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1866f4ce91ceSNeal Cardwell 	    is_cwnd_limited ||
1867f4ce91ceSNeal Cardwell 	    (!tp->is_cwnd_limited &&
1868f4ce91ceSNeal Cardwell 	     tp->packets_out > tp->max_packets_out)) {
1869ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1870f4ce91ceSNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1871f4ce91ceSNeal Cardwell 		tp->cwnd_usage_seq = tp->snd_nxt;
1872ca8a2263SNeal Cardwell 	}
1873e114a710SEric Dumazet 
187424901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1875a762a980SDavid S. Miller 		/* Network is feed fully. */
1876a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1877c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1878a762a980SDavid S. Miller 	} else {
1879a762a980SDavid S. Miller 		/* Network starves. */
1880a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1881a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1882a762a980SDavid S. Miller 
18834845b571SKuniyuki Iwashima 		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
1884c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
18851b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1886a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1887b0f71bd3SFrancis Yan 
1888b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1889b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1890b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1891b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
189275c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1893b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1894b0f71bd3SFrancis Yan 		 */
189575c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1896b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1897b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1898b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1899a762a980SDavid S. Miller 	}
1900a762a980SDavid S. Miller }
1901a762a980SDavid S. Miller 
1902d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1903d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1904d4589926SEric Dumazet {
1905d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1906d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1907d4589926SEric Dumazet }
1908d4589926SEric Dumazet 
1909d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1910d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1911d4589926SEric Dumazet  * The test is really :
1912d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1913d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1914d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1915d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
19160e3a4803SIlpo Järvinen  */
1917d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1918d4589926SEric Dumazet 				const struct sk_buff *skb)
1919d4589926SEric Dumazet {
1920d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1921d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1922d4589926SEric Dumazet }
1923d4589926SEric Dumazet 
1924d4589926SEric Dumazet /* Return false, if packet can be sent now without violation Nagle's rules:
1925d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1926d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1927d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1928d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1929d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1930d4589926SEric Dumazet  */
1931d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1932cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1933d4589926SEric Dumazet {
1934d4589926SEric Dumazet 	return partial &&
1935d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1936d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1937d4589926SEric Dumazet }
1938605ad7f1SEric Dumazet 
1939605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
194065466904SEric Dumazet  * depending on current pacing rate, and how close the peer is.
194165466904SEric Dumazet  *
194265466904SEric Dumazet  * Rationale is:
194365466904SEric Dumazet  * - For close peers, we rather send bigger packets to reduce
194465466904SEric Dumazet  *   cpu costs, because occasional losses will be repaired fast.
194565466904SEric Dumazet  * - For long distance/rtt flows, we would like to get ACK clocking
194665466904SEric Dumazet  *   with 1 ACK per ms.
194765466904SEric Dumazet  *
194865466904SEric Dumazet  * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
194965466904SEric Dumazet  * in bigger TSO bursts. We we cut the RTT-based allowance in half
195065466904SEric Dumazet  * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
195165466904SEric Dumazet  * is below 1500 bytes after 6 * ~500 usec = 3ms.
1952605ad7f1SEric Dumazet  */
1953dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
19541b3878caSNeal Cardwell 			    int min_tso_segs)
1955605ad7f1SEric Dumazet {
195665466904SEric Dumazet 	unsigned long bytes;
195765466904SEric Dumazet 	u32 r;
1958605ad7f1SEric Dumazet 
195928b24f90SEric Dumazet 	bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
1960605ad7f1SEric Dumazet 
19612455e61bSKuniyuki Iwashima 	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
196265466904SEric Dumazet 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
196365466904SEric Dumazet 		bytes += sk->sk_gso_max_size >> r;
1964605ad7f1SEric Dumazet 
196565466904SEric Dumazet 	bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
196665466904SEric Dumazet 
196765466904SEric Dumazet 	return max_t(u32, bytes / mss_now, min_tso_segs);
1968605ad7f1SEric Dumazet }
1969605ad7f1SEric Dumazet 
1970ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1971ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1972ed6e7268SNeal Cardwell  */
1973ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1974ed6e7268SNeal Cardwell {
1975ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1976dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1977ed6e7268SNeal Cardwell 
1978dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1979dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1980e0bb4ab9SKuniyuki Iwashima 			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
1981dcb8c9b4SEric Dumazet 
1982dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1983350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1984ed6e7268SNeal Cardwell }
1985ed6e7268SNeal Cardwell 
1986d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1987d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1988d4589926SEric Dumazet 					const struct sk_buff *skb,
1989d4589926SEric Dumazet 					unsigned int mss_now,
1990d4589926SEric Dumazet 					unsigned int max_segs,
1991d4589926SEric Dumazet 					int nonagle)
1992c1b4a7e6SDavid S. Miller {
1993cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1994d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1995c1b4a7e6SDavid S. Miller 
199690840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
19971485348dSBen Hutchings 	max_len = mss_now * max_segs;
19980e3a4803SIlpo Järvinen 
19991485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
20001485348dSBen Hutchings 		return max_len;
20010e3a4803SIlpo Järvinen 
20025ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
20035ea3a748SIlpo Järvinen 
20041485348dSBen Hutchings 	if (max_len <= needed)
20051485348dSBen Hutchings 		return max_len;
20060e3a4803SIlpo Järvinen 
2007d4589926SEric Dumazet 	partial = needed % mss_now;
2008d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
2009d4589926SEric Dumazet 	 * to include this last segment in this skb.
2010d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
2011d4589926SEric Dumazet 	 */
2012cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
2013d4589926SEric Dumazet 		return needed - partial;
2014d4589926SEric Dumazet 
2015d4589926SEric Dumazet 	return needed;
2016c1b4a7e6SDavid S. Miller }
2017c1b4a7e6SDavid S. Miller 
2018c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
2019c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
2020c1b4a7e6SDavid S. Miller  */
2021cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2022cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
2023c1b4a7e6SDavid S. Miller {
2024d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
2025c1b4a7e6SDavid S. Miller 
2026c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
20274de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
20284de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
2029c1b4a7e6SDavid S. Miller 		return 1;
2030c1b4a7e6SDavid S. Miller 
2031c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
203240570375SEric Dumazet 	cwnd = tcp_snd_cwnd(tp);
2033d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
2034c1b4a7e6SDavid S. Miller 		return 0;
2035d649a7a8SEric Dumazet 
2036d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
2037d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
2038d649a7a8SEric Dumazet 	 */
2039d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
2040d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
2041c1b4a7e6SDavid S. Miller }
2042c1b4a7e6SDavid S. Miller 
2043b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
204467edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
2045c1b4a7e6SDavid S. Miller  * SKB onto the wire.
2046c1b4a7e6SDavid S. Miller  */
20475bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2048c1b4a7e6SDavid S. Miller {
2049c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
2050c1b4a7e6SDavid S. Miller 
2051f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
20525bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
2053c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
2054c1b4a7e6SDavid S. Miller 	}
2055c1b4a7e6SDavid S. Miller 	return tso_segs;
2056c1b4a7e6SDavid S. Miller }
2057c1b4a7e6SDavid S. Miller 
2058c1b4a7e6SDavid S. Miller 
2059a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
2060c1b4a7e6SDavid S. Miller  * sent now.
2061c1b4a7e6SDavid S. Miller  */
2062a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2063c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
2064c1b4a7e6SDavid S. Miller {
2065c1b4a7e6SDavid S. Miller 	/* Nagle rule does not apply to frames, which sit in the middle of the
2066c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chances to get new data).
2067c1b4a7e6SDavid S. Miller 	 *
2068c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
2069c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
2070c1b4a7e6SDavid S. Miller 	 */
2071c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
2072a2a385d6SEric Dumazet 		return true;
2073c1b4a7e6SDavid S. Miller 
20749b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
20759b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2076a2a385d6SEric Dumazet 		return true;
2077c1b4a7e6SDavid S. Miller 
2078cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2079a2a385d6SEric Dumazet 		return true;
2080c1b4a7e6SDavid S. Miller 
2081a2a385d6SEric Dumazet 	return false;
2082c1b4a7e6SDavid S. Miller }
2083c1b4a7e6SDavid S. Miller 
2084c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
2085a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2086a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
2087056834d9SIlpo Järvinen 			     unsigned int cur_mss)
2088c1b4a7e6SDavid S. Miller {
2089c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2090c1b4a7e6SDavid S. Miller 
2091c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
2092c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2093c1b4a7e6SDavid S. Miller 
209490840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
2095c1b4a7e6SDavid S. Miller }
2096c1b4a7e6SDavid S. Miller 
2097c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2098c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
2099c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
2100c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
2101c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
2102c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
2103c1b4a7e6SDavid S. Miller  */
210456483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2105c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
2106c1b4a7e6SDavid S. Miller {
2107c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
210856483341SEric Dumazet 	struct sk_buff *buff;
21099ce01461SIlpo Järvinen 	u8 flags;
2110c1b4a7e6SDavid S. Miller 
2111c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
2112b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len);
2113c1b4a7e6SDavid S. Miller 
21145882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
211551456b29SIan Morris 	if (unlikely(!buff))
2116c1b4a7e6SDavid S. Miller 		return -ENOMEM;
211741477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
21185a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
2119c1b4a7e6SDavid S. Miller 
2120ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
21213ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
2122b60b49eaSHerbert Xu 	buff->truesize += nlen;
2123c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
2124c1b4a7e6SDavid S. Miller 
2125c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
2126c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2127c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2128c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2129c1b4a7e6SDavid S. Miller 
2130c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
21314de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
21324de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
21334de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
2134c1b4a7e6SDavid S. Miller 
2135a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
2136a166140eSMartin KaFai Lau 
2137c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
2138490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
2139c1b4a7e6SDavid S. Miller 
2140c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
21415bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
21425bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
2143c1b4a7e6SDavid S. Miller 
2144c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
2145f4a775d1SEric Dumazet 	__skb_header_release(buff);
214656483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2147c1b4a7e6SDavid S. Miller 
2148c1b4a7e6SDavid S. Miller 	return 0;
2149c1b4a7e6SDavid S. Miller }
2150c1b4a7e6SDavid S. Miller 
2151c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
2152c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2153c1b4a7e6SDavid S. Miller  *
2154c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
2155c1b4a7e6SDavid S. Miller  */
2156ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2157f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
2158f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
2159f9bfe4e6SEric Dumazet 				 u32 max_segs)
2160c1b4a7e6SDavid S. Miller {
21616687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2162f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
216350c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
216450c8339eSEric Dumazet 	struct sk_buff *head;
2165ad9f4f50SEric Dumazet 	int win_divisor;
2166f1c6ea38SEric Dumazet 	s64 delta;
2167c1b4a7e6SDavid S. Miller 
216899d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2169ae8064acSJohn Heffner 		goto send_now;
2170ae8064acSJohn Heffner 
21715f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
2172a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
2173a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2174a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
21755f852eb5SEric Dumazet 	 */
2176a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2177a682850aSEric Dumazet 	if (delta > 0)
2178ae8064acSJohn Heffner 		goto send_now;
2179908a75c1SDavid S. Miller 
2180c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
2181c1b4a7e6SDavid S. Miller 
2182c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
218340570375SEric Dumazet 	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2184c1b4a7e6SDavid S. Miller 
218590840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2186c1b4a7e6SDavid S. Miller 
2187c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
218840570375SEric Dumazet 	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2189c1b4a7e6SDavid S. Miller 
2190c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
2191c1b4a7e6SDavid S. Miller 
2192ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
2193605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
2194ae8064acSJohn Heffner 		goto send_now;
2195ba244fe9SDavid S. Miller 
219662ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
219762ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
219862ad2761SIlpo Järvinen 		goto send_now;
219962ad2761SIlpo Järvinen 
22005bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2201ad9f4f50SEric Dumazet 	if (win_divisor) {
220240570375SEric Dumazet 		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2203c1b4a7e6SDavid S. Miller 
2204c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
2205c1b4a7e6SDavid S. Miller 		 * just use it.
2206c1b4a7e6SDavid S. Miller 		 */
2207ad9f4f50SEric Dumazet 		chunk /= win_divisor;
2208c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
2209ae8064acSJohn Heffner 			goto send_now;
2210c1b4a7e6SDavid S. Miller 	} else {
2211c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
2212c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
2213c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
2214c1b4a7e6SDavid S. Miller 		 * then send now.
2215c1b4a7e6SDavid S. Miller 		 */
22166b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2217ae8064acSJohn Heffner 			goto send_now;
2218c1b4a7e6SDavid S. Miller 	}
2219c1b4a7e6SDavid S. Miller 
222075c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
222175c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
222275c119afSEric Dumazet 	if (!head)
222375c119afSEric Dumazet 		goto send_now;
2224f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
222550c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
2226f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
222750c8339eSEric Dumazet 		goto send_now;
222850c8339eSEric Dumazet 
2229f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
2230f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
2231f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
2232f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
2233f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
2234f9bfe4e6SEric Dumazet 	 */
2235f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
2236f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
2237ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2238f9bfe4e6SEric Dumazet 			return true;
2239f9bfe4e6SEric Dumazet 		}
2240f9bfe4e6SEric Dumazet 	} else {
2241f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2242f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2243f9bfe4e6SEric Dumazet 			return true;
2244f9bfe4e6SEric Dumazet 		}
2245f9bfe4e6SEric Dumazet 	}
2246f9bfe4e6SEric Dumazet 
2247f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2248d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2249d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2250f9bfe4e6SEric Dumazet 		goto send_now;
2251ca8a2263SNeal Cardwell 
2252a2a385d6SEric Dumazet 	return true;
2253ae8064acSJohn Heffner 
2254ae8064acSJohn Heffner send_now:
2255a2a385d6SEric Dumazet 	return false;
2256c1b4a7e6SDavid S. Miller }
2257c1b4a7e6SDavid S. Miller 
225805cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
225905cbc0dbSFan Du {
226005cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
226105cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
226205cbc0dbSFan Du 	struct net *net = sock_net(sk);
226305cbc0dbSFan Du 	u32 interval;
226405cbc0dbSFan Du 	s32 delta;
226505cbc0dbSFan Du 
22662a85388fSKuniyuki Iwashima 	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2267c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
226805cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
226905cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
227005cbc0dbSFan Du 
227105cbc0dbSFan Du 		/* Update current search range */
227205cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
227305cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
227405cbc0dbSFan Du 			sizeof(struct tcphdr) +
227505cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
227605cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
227705cbc0dbSFan Du 
227805cbc0dbSFan Du 		/* Update probe time stamp */
2279c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
228005cbc0dbSFan Du 	}
228105cbc0dbSFan Du }
228205cbc0dbSFan Du 
2283808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2284808cf9e3SIlya Lesokhin {
2285808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2286808cf9e3SIlya Lesokhin 
2287808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2288808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2289808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2290808cf9e3SIlya Lesokhin 			break;
2291808cf9e3SIlya Lesokhin 
22929b65b17dSTalal Ahmad 		if (unlikely(TCP_SKB_CB(skb)->eor) ||
22939b65b17dSTalal Ahmad 		    tcp_has_tx_tstamp(skb) ||
22949b65b17dSTalal Ahmad 		    !skb_pure_zcopy_same(skb, next))
2295808cf9e3SIlya Lesokhin 			return false;
2296808cf9e3SIlya Lesokhin 
2297808cf9e3SIlya Lesokhin 		len -= skb->len;
2298808cf9e3SIlya Lesokhin 	}
2299808cf9e3SIlya Lesokhin 
2300808cf9e3SIlya Lesokhin 	return true;
2301808cf9e3SIlya Lesokhin }
2302808cf9e3SIlya Lesokhin 
230373601329SEric Dumazet static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
230473601329SEric Dumazet 			     int probe_size)
230573601329SEric Dumazet {
230673601329SEric Dumazet 	skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
230773601329SEric Dumazet 	int i, todo, len = 0, nr_frags = 0;
230873601329SEric Dumazet 	const struct sk_buff *skb;
230973601329SEric Dumazet 
231073601329SEric Dumazet 	if (!sk_wmem_schedule(sk, to->truesize + probe_size))
231173601329SEric Dumazet 		return -ENOMEM;
231273601329SEric Dumazet 
231373601329SEric Dumazet 	skb_queue_walk(&sk->sk_write_queue, skb) {
231473601329SEric Dumazet 		const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
231573601329SEric Dumazet 
231673601329SEric Dumazet 		if (skb_headlen(skb))
231773601329SEric Dumazet 			return -EINVAL;
231873601329SEric Dumazet 
231973601329SEric Dumazet 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
232073601329SEric Dumazet 			if (len >= probe_size)
232173601329SEric Dumazet 				goto commit;
232273601329SEric Dumazet 			todo = min_t(int, skb_frag_size(fragfrom),
232373601329SEric Dumazet 				     probe_size - len);
232473601329SEric Dumazet 			len += todo;
232573601329SEric Dumazet 			if (lastfrag &&
232673601329SEric Dumazet 			    skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
232773601329SEric Dumazet 			    skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
232873601329SEric Dumazet 						      skb_frag_size(lastfrag)) {
232973601329SEric Dumazet 				skb_frag_size_add(lastfrag, todo);
233073601329SEric Dumazet 				continue;
233173601329SEric Dumazet 			}
233273601329SEric Dumazet 			if (unlikely(nr_frags == MAX_SKB_FRAGS))
233373601329SEric Dumazet 				return -E2BIG;
233473601329SEric Dumazet 			skb_frag_page_copy(fragto, fragfrom);
233573601329SEric Dumazet 			skb_frag_off_copy(fragto, fragfrom);
233673601329SEric Dumazet 			skb_frag_size_set(fragto, todo);
233773601329SEric Dumazet 			nr_frags++;
233873601329SEric Dumazet 			lastfrag = fragto++;
233973601329SEric Dumazet 		}
234073601329SEric Dumazet 	}
234173601329SEric Dumazet commit:
234273601329SEric Dumazet 	WARN_ON_ONCE(len != probe_size);
234373601329SEric Dumazet 	for (i = 0; i < nr_frags; i++)
234473601329SEric Dumazet 		skb_frag_ref(to, i);
234573601329SEric Dumazet 
234673601329SEric Dumazet 	skb_shinfo(to)->nr_frags = nr_frags;
234773601329SEric Dumazet 	to->truesize += probe_size;
234873601329SEric Dumazet 	to->len += probe_size;
234973601329SEric Dumazet 	to->data_len += probe_size;
235073601329SEric Dumazet 	__skb_header_release(to);
235173601329SEric Dumazet 	return 0;
235273601329SEric Dumazet }
235373601329SEric Dumazet 
23545d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
235567edfef7SAndi Kleen  * MTU probe is regularly attempting to increase the path MTU by
235667edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
235767edfef7SAndi Kleen  * changes resulting in larger path MTUs.
235867edfef7SAndi Kleen  *
23595d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
23605d424d5aSJohn Heffner  *         1 if a probe was sent,
2361056834d9SIlpo Järvinen  *         -1 otherwise
2362056834d9SIlpo Järvinen  */
23635d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
23645d424d5aSJohn Heffner {
23655d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
236612a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
23675d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
23686b58e0a5SFan Du 	struct net *net = sock_net(sk);
23695d424d5aSJohn Heffner 	int probe_size;
237091cc17c0SIlpo Järvinen 	int size_needed;
237112a59abcSEric Dumazet 	int copy, len;
23725d424d5aSJohn Heffner 	int mss_now;
23736b58e0a5SFan Du 	int interval;
23745d424d5aSJohn Heffner 
23755d424d5aSJohn Heffner 	/* Not currently probing/verifying,
23765d424d5aSJohn Heffner 	 * not in recovery,
23775d424d5aSJohn Heffner 	 * have enough cwnd, and
237812a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
237912a59abcSEric Dumazet 	 */
238012a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
23815d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
23825d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
238340570375SEric Dumazet 		   tcp_snd_cwnd(tp) < 11 ||
238412a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
23855d424d5aSJohn Heffner 		return -1;
23865d424d5aSJohn Heffner 
23876b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base,
23886b58e0a5SFan Du 	 * and current mss_clamp. if (search_high - search_low)
23896b58e0a5SFan Du 	 * smaller than a threshold, backoff from probing.
23906b58e0a5SFan Du 	 */
23910c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
23926b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
23936b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
239491cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
23956b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
239605cbc0dbSFan Du 	/* When misfortune happens, we are reprobing actively,
239705cbc0dbSFan Du 	 * and then reprobe timer has expired. We stick with current
239805cbc0dbSFan Du 	 * probing process by not resetting search range to its orignal.
239905cbc0dbSFan Du 	 */
24006b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
240192c0aa41SKuniyuki Iwashima 	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
240205cbc0dbSFan Du 		/* Check whether enough time has elaplased for
240305cbc0dbSFan Du 		 * another round of probing.
240405cbc0dbSFan Du 		 */
240505cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
24065d424d5aSJohn Heffner 		return -1;
24075d424d5aSJohn Heffner 	}
24085d424d5aSJohn Heffner 
24095d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
24107f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
24115d424d5aSJohn Heffner 		return -1;
24125d424d5aSJohn Heffner 
241391cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
24145d424d5aSJohn Heffner 		return -1;
241590840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
24165d424d5aSJohn Heffner 		return 0;
24175d424d5aSJohn Heffner 
2418d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
241940570375SEric Dumazet 	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2420d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
24215d424d5aSJohn Heffner 			return -1;
24225d424d5aSJohn Heffner 		else
24235d424d5aSJohn Heffner 			return 0;
24245d424d5aSJohn Heffner 	}
24255d424d5aSJohn Heffner 
2426808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2427808cf9e3SIlya Lesokhin 		return -1;
2428808cf9e3SIlya Lesokhin 
24295d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
24305882efffSEric Dumazet 	nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
243151456b29SIan Morris 	if (!nskb)
24325d424d5aSJohn Heffner 		return -1;
243373601329SEric Dumazet 
243473601329SEric Dumazet 	/* build the payload, and be prepared to abort if this fails. */
243573601329SEric Dumazet 	if (tcp_clone_payload(sk, nskb, probe_size)) {
243671c299c7SJakub Kicinski 		tcp_skb_tsorted_anchor_cleanup(nskb);
243773601329SEric Dumazet 		consume_skb(nskb);
243873601329SEric Dumazet 		return -1;
243973601329SEric Dumazet 	}
2440ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, nskb->truesize);
24413ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
24425d424d5aSJohn Heffner 
2443fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
244441477662SJakub Kicinski 	skb_copy_decrypted(nskb, skb);
24455a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(nskb, skb);
24465d424d5aSJohn Heffner 
24475d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
24485d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
24494de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
24505d424d5aSJohn Heffner 
245150c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
24522b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
245350c4817eSIlpo Järvinen 
24545d424d5aSJohn Heffner 	len = 0;
2455234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
24565d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
24575d424d5aSJohn Heffner 
24585d424d5aSJohn Heffner 		if (skb->len <= copy) {
24595d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
24605d424d5aSJohn Heffner 			 * Throw it away. */
24614de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2462808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2463808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2464808cf9e3SIlya Lesokhin 			 */
2465808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2466888a5c53SWillem de Bruijn 			tcp_skb_collapse_tstamp(nskb, skb);
2467fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
246803271f3aSTalal Ahmad 			tcp_wmem_free_skb(sk, skb);
24695d424d5aSJohn Heffner 		} else {
24704de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2471a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
24725d424d5aSJohn Heffner 			__pskb_trim_head(skb, copy);
24735bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss_now);
24745d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
24755d424d5aSJohn Heffner 		}
24765d424d5aSJohn Heffner 
24775d424d5aSJohn Heffner 		len += copy;
2478234b6860SIlpo Järvinen 
2479234b6860SIlpo Järvinen 		if (len >= probe_size)
2480234b6860SIlpo Järvinen 			break;
24815d424d5aSJohn Heffner 	}
24825bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
24835d424d5aSJohn Heffner 
24845d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
24857faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
24867faee5c0SEric Dumazet 	 */
24875d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
24885d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
24895d424d5aSJohn Heffner 		 * effectively two packets. */
249040570375SEric Dumazet 		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
249166f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
24925d424d5aSJohn Heffner 
24935d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
24940e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
24950e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
24965d424d5aSJohn Heffner 
24975d424d5aSJohn Heffner 		return 1;
24985d424d5aSJohn Heffner 	}
24995d424d5aSJohn Heffner 
25005d424d5aSJohn Heffner 	return -1;
25015d424d5aSJohn Heffner }
25025d424d5aSJohn Heffner 
2503864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2504218af599SEric Dumazet {
2505864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2506864e5c09SEric Dumazet 
2507864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2508864e5c09SEric Dumazet 		return false;
2509864e5c09SEric Dumazet 
2510864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2511864e5c09SEric Dumazet 		return false;
2512864e5c09SEric Dumazet 
2513864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2514864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2515864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2516864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2517864e5c09SEric Dumazet 		sock_hold(sk);
2518864e5c09SEric Dumazet 	}
2519864e5c09SEric Dumazet 	return true;
2520218af599SEric Dumazet }
2521218af599SEric Dumazet 
2522f921a4a5SEric Dumazet static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
2523f921a4a5SEric Dumazet {
2524f921a4a5SEric Dumazet 	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
2525f921a4a5SEric Dumazet 
2526f921a4a5SEric Dumazet 	/* No skb in the rtx queue. */
2527f921a4a5SEric Dumazet 	if (!node)
2528f921a4a5SEric Dumazet 		return true;
2529f921a4a5SEric Dumazet 
2530f921a4a5SEric Dumazet 	/* Only one skb in rtx queue. */
2531f921a4a5SEric Dumazet 	return !node->rb_left && !node->rb_right;
2532f921a4a5SEric Dumazet }
2533f921a4a5SEric Dumazet 
2534f9616c35SEric Dumazet /* TCP Small Queues :
2535f9616c35SEric Dumazet  * Control number of packets in qdisc/devices to two packets / or ~1 ms.
2536f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2537f9616c35SEric Dumazet  * This allows for :
2538f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2539f9616c35SEric Dumazet  *  - faster recovery
2540f9616c35SEric Dumazet  *  - high rates
2541f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2542f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2543f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2544f9616c35SEric Dumazet  */
2545f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2546f9616c35SEric Dumazet 				  unsigned int factor)
2547f9616c35SEric Dumazet {
254876a9ebe8SEric Dumazet 	unsigned long limit;
2549f9616c35SEric Dumazet 
255076a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
255176a9ebe8SEric Dumazet 		      2 * skb->truesize,
255228b24f90SEric Dumazet 		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
2553c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
255476a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
25559fb90193SKuniyuki Iwashima 			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2556f9616c35SEric Dumazet 	limit <<= factor;
2557f9616c35SEric Dumazet 
2558a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2559a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
256028b24f90SEric Dumazet 		u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
256128b24f90SEric Dumazet 				  tcp_sk(sk)->tcp_tx_delay;
2562a842fe14SEric Dumazet 
2563a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2564a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2565a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2566a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2567a842fe14SEric Dumazet 		 */
2568a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2569a842fe14SEric Dumazet 		limit += extra_bytes;
2570a842fe14SEric Dumazet 	}
257114afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2572f921a4a5SEric Dumazet 		/* Always send skb if rtx queue is empty or has one skb.
257375eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
257475eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
257575eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
257675eefc6cSEric Dumazet 		 */
2577f921a4a5SEric Dumazet 		if (tcp_rtx_queue_empty_or_single_skb(sk))
257875eefc6cSEric Dumazet 			return false;
257975eefc6cSEric Dumazet 
25807aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2581f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2582f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2583f9616c35SEric Dumazet 		 * test again the condition.
2584f9616c35SEric Dumazet 		 */
2585f9616c35SEric Dumazet 		smp_mb__after_atomic();
2586ce8299b6SEric Dumazet 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2587f9616c35SEric Dumazet 			return true;
2588f9616c35SEric Dumazet 	}
2589f9616c35SEric Dumazet 	return false;
2590f9616c35SEric Dumazet }
2591f9616c35SEric Dumazet 
259205b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
259305b055e8SFrancis Yan {
2594628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2595efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
259605b055e8SFrancis Yan 
2597efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2598efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
259905b055e8SFrancis Yan 	tp->chrono_start = now;
260005b055e8SFrancis Yan 	tp->chrono_type = new;
260105b055e8SFrancis Yan }
260205b055e8SFrancis Yan 
260305b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
260405b055e8SFrancis Yan {
260505b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
260605b055e8SFrancis Yan 
260705b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
26080f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
26090f87230dSFrancis Yan 	 * over the other conditions. So that if something "more interesting"
261005b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
261105b055e8SFrancis Yan 	 */
261205b055e8SFrancis Yan 	if (type > tp->chrono_type)
261305b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
261405b055e8SFrancis Yan }
261505b055e8SFrancis Yan 
261605b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
261705b055e8SFrancis Yan {
261805b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
261905b055e8SFrancis Yan 
26200f87230dSFrancis Yan 
26210f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
26220f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
26230f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
26240f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
26250f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
26260f87230dSFrancis Yan 	 * tracking and starts busy chrono if we have pending data.
26270f87230dSFrancis Yan 	 */
262875c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
262905b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
26300f87230dSFrancis Yan 	else if (type == tp->chrono_type)
26310f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
263205b055e8SFrancis Yan }
263305b055e8SFrancis Yan 
26341da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
26351da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
26361da177e4SLinus Torvalds  * window for us.
26371da177e4SLinus Torvalds  *
2638f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2639f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2640f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2641f8269a49SIlpo Järvinen  *
26426ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
26436ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
26446ba8a3b1SNandita Dukkipati 
2645a2a385d6SEric Dumazet  * Returns true, if no segments are in flight and we have queued segments,
2646a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
26471da177e4SLinus Torvalds  */
2648a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2649d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
26501da177e4SLinus Torvalds {
26511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
265292df7b51SDavid S. Miller 	struct sk_buff *skb;
2653c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2654c1b4a7e6SDavid S. Miller 	int cwnd_quota;
26555d424d5aSJohn Heffner 	int result;
26565615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2657605ad7f1SEric Dumazet 	u32 max_segs;
26581da177e4SLinus Torvalds 
2659c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
26605d424d5aSJohn Heffner 
2661ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2662d5dd9175SIlpo Järvinen 	if (!push_one) {
26635d424d5aSJohn Heffner 		/* Do MTU probing. */
2664d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2665d5dd9175SIlpo Järvinen 		if (!result) {
2666a2a385d6SEric Dumazet 			return false;
26675d424d5aSJohn Heffner 		} else if (result > 0) {
26685d424d5aSJohn Heffner 			sent_pkts = 1;
26695d424d5aSJohn Heffner 		}
2670d5dd9175SIlpo Järvinen 	}
26715d424d5aSJohn Heffner 
2672ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2673fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2674c8ac3774SHerbert Xu 		unsigned int limit;
2675c8ac3774SHerbert Xu 
267679861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
267779861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2678a1ac9c8aSMartin KaFai Lau 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2679a1ac9c8aSMartin KaFai Lau 			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
268079861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2681bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
268279861919SEric Dumazet 			goto repair; /* Skip network transmission */
268379861919SEric Dumazet 		}
268479861919SEric Dumazet 
2685218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2686218af599SEric Dumazet 			break;
2687218af599SEric Dumazet 
26885bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2689c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2690c1b4a7e6SDavid S. Miller 
2691b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
26926ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
26936ba8a3b1SNandita Dukkipati 			if (push_one == 2)
26946ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
26956ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
26966ba8a3b1SNandita Dukkipati 			else
2697b68e9f85SHerbert Xu 				break;
26986ba8a3b1SNandita Dukkipati 		}
2699b68e9f85SHerbert Xu 
27005615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
27015615f886SFrancis Yan 			is_rwnd_limited = true;
2702b68e9f85SHerbert Xu 			break;
27035615f886SFrancis Yan 		}
2704b68e9f85SHerbert Xu 
2705d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2706aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2707aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2708aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2709aa93466bSDavid S. Miller 				break;
2710c1b4a7e6SDavid S. Miller 		} else {
2711ca8a2263SNeal Cardwell 			if (!push_one &&
2712605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2713f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2714aa93466bSDavid S. Miller 				break;
2715c1b4a7e6SDavid S. Miller 		}
2716aa93466bSDavid S. Miller 
2717605ad7f1SEric Dumazet 		limit = mss_now;
2718d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2719605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2720605ad7f1SEric Dumazet 						    min_t(unsigned int,
2721605ad7f1SEric Dumazet 							  cwnd_quota,
2722605ad7f1SEric Dumazet 							  max_segs),
2723605ad7f1SEric Dumazet 						    nonagle);
2724605ad7f1SEric Dumazet 
2725605ad7f1SEric Dumazet 		if (skb->len > limit &&
272656483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2727605ad7f1SEric Dumazet 			break;
2728605ad7f1SEric Dumazet 
2729f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
273046d3ceabSEric Dumazet 			break;
2731c9eeec26SEric Dumazet 
27321f85e626SEric Dumazet 		/* Argh, we hit an empty skb(), presumably a thread
27331f85e626SEric Dumazet 		 * is sleeping in sendmsg()/sk_stream_wait_memory().
27341f85e626SEric Dumazet 		 * We do not want to send a pure-ack packet and have
27351f85e626SEric Dumazet 		 * a strange looking rtx queue with empty packet(s).
27361f85e626SEric Dumazet 		 */
27371f85e626SEric Dumazet 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
27381f85e626SEric Dumazet 			break;
27391f85e626SEric Dumazet 
2740d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
27411da177e4SLinus Torvalds 			break;
27421da177e4SLinus Torvalds 
2743ec342325SAndrew Vagin repair:
27441da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
27451da177e4SLinus Torvalds 		 * This call will increment packets_out.
27461da177e4SLinus Torvalds 		 */
274766f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
27481da177e4SLinus Torvalds 
27491da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2750a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2751d5dd9175SIlpo Järvinen 
2752d5dd9175SIlpo Järvinen 		if (push_one)
2753d5dd9175SIlpo Järvinen 			break;
27541da177e4SLinus Torvalds 	}
27551da177e4SLinus Torvalds 
27565615f886SFrancis Yan 	if (is_rwnd_limited)
27575615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
27585615f886SFrancis Yan 	else
27595615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
27605615f886SFrancis Yan 
276140570375SEric Dumazet 	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
2762299bcb55SNeal Cardwell 	if (likely(sent_pkts || is_cwnd_limited))
2763299bcb55SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2764299bcb55SNeal Cardwell 
2765aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2766684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2767684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
27686ba8a3b1SNandita Dukkipati 
27696ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
27706ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2771ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2772a2a385d6SEric Dumazet 		return false;
27731da177e4SLinus Torvalds 	}
277475c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
27756ba8a3b1SNandita Dukkipati }
27766ba8a3b1SNandita Dukkipati 
2777ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
27786ba8a3b1SNandita Dukkipati {
27796ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
27806ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
27811c2709cfSNeal Cardwell 	u32 timeout, timeout_us, rto_delta_us;
27822ae21cf5SEric Dumazet 	int early_retrans;
27836ba8a3b1SNandita Dukkipati 
27846ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
27856ba8a3b1SNandita Dukkipati 	 * finishes.
27866ba8a3b1SNandita Dukkipati 	 */
2787d983ea6fSEric Dumazet 	if (rcu_access_pointer(tp->fastopen_rsk))
27886ba8a3b1SNandita Dukkipati 		return false;
27896ba8a3b1SNandita Dukkipati 
279052e65865SKuniyuki Iwashima 	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
27916ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2792b4f70c3dSNeal Cardwell 	 * not in loss recovery, that are either limited by cwnd or application.
27936ba8a3b1SNandita Dukkipati 	 */
27942ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2795bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2796b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2797b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
27986ba8a3b1SNandita Dukkipati 		return false;
27996ba8a3b1SNandita Dukkipati 
2800bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2801f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2802f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
28036ba8a3b1SNandita Dukkipati 	 */
2804bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
28051c2709cfSNeal Cardwell 		timeout_us = tp->srtt_us >> 2;
28066ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
28071c2709cfSNeal Cardwell 			timeout_us += tcp_rto_min_us(sk);
2808bb4d991aSYuchung Cheng 		else
28091c2709cfSNeal Cardwell 			timeout_us += TCP_TIMEOUT_MIN_US;
28101c2709cfSNeal Cardwell 		timeout = usecs_to_jiffies(timeout_us);
2811bb4d991aSYuchung Cheng 	} else {
2812bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2813bb4d991aSYuchung Cheng 	}
28146ba8a3b1SNandita Dukkipati 
2815a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2816ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2817ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2818ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2819a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2820a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
28216ba8a3b1SNandita Dukkipati 
28228dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
28236ba8a3b1SNandita Dukkipati 	return true;
28246ba8a3b1SNandita Dukkipati }
28256ba8a3b1SNandita Dukkipati 
28261f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
28271f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
28281f3279aeSEric Dumazet  * In this case, there is very little point doing a retransmit !
28291f3279aeSEric Dumazet  */
2830f4dae54eSEric Dumazet static bool skb_still_in_host_queue(struct sock *sk,
28311f3279aeSEric Dumazet 				    const struct sk_buff *skb)
28321f3279aeSEric Dumazet {
283339bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2834f4dae54eSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2835f4dae54eSEric Dumazet 		smp_mb__after_atomic();
2836f4dae54eSEric Dumazet 		if (skb_fclone_busy(sk, skb)) {
2837c10d9310SEric Dumazet 			NET_INC_STATS(sock_net(sk),
28381f3279aeSEric Dumazet 				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
28391f3279aeSEric Dumazet 			return true;
28401f3279aeSEric Dumazet 		}
2841f4dae54eSEric Dumazet 	}
28421f3279aeSEric Dumazet 	return false;
28431f3279aeSEric Dumazet }
28441f3279aeSEric Dumazet 
2845b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try send a new segment if possible, else
28466ba8a3b1SNandita Dukkipati  * retransmit the last segment.
28476ba8a3b1SNandita Dukkipati  */
28486ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
28496ba8a3b1SNandita Dukkipati {
28509b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
28516ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
28526ba8a3b1SNandita Dukkipati 	int pcount;
28536ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
28546ba8a3b1SNandita Dukkipati 
285576be93fcSYuchung Cheng 	/* At most one outstanding TLP */
285676be93fcSYuchung Cheng 	if (tp->tlp_high_seq)
285776be93fcSYuchung Cheng 		goto rearm_timer;
285876be93fcSYuchung Cheng 
285976be93fcSYuchung Cheng 	tp->tlp_retrans = 0;
2860b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
286175c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2862b340b264SYuchung Cheng 		pcount = tp->packets_out;
2863b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2864b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2865b340b264SYuchung Cheng 			goto probe_sent;
28666ba8a3b1SNandita Dukkipati 		goto rearm_timer;
28676ba8a3b1SNandita Dukkipati 	}
286875c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2869b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2870b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2871b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
287240570375SEric Dumazet 			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
2873b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2874b2b7af86SYuchung Cheng 		return;
2875b2b7af86SYuchung Cheng 	}
28766ba8a3b1SNandita Dukkipati 
28771f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28781f3279aeSEric Dumazet 		goto rearm_timer;
28791f3279aeSEric Dumazet 
28806ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
28816ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
28826ba8a3b1SNandita Dukkipati 		goto rearm_timer;
28836ba8a3b1SNandita Dukkipati 
28846ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
288575c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
288675c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
28876cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
28886ba8a3b1SNandita Dukkipati 			goto rearm_timer;
288975c119afSEric Dumazet 		skb = skb_rb_next(skb);
28906ba8a3b1SNandita Dukkipati 	}
28916ba8a3b1SNandita Dukkipati 
28926ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
28936ba8a3b1SNandita Dukkipati 		goto rearm_timer;
28946ba8a3b1SNandita Dukkipati 
289510d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2896b340b264SYuchung Cheng 		goto rearm_timer;
28976ba8a3b1SNandita Dukkipati 
289876be93fcSYuchung Cheng 	tp->tlp_retrans = 1;
289976be93fcSYuchung Cheng 
290076be93fcSYuchung Cheng probe_sent:
29019b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
29029b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
29039b717a8dSNandita Dukkipati 
2904c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2905fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2906fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2907b340b264SYuchung Cheng rearm_timer:
2908fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
29091da177e4SLinus Torvalds }
29101da177e4SLinus Torvalds 
2911a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2912a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2913a762a980SDavid S. Miller  * The socket must be locked by the caller.
2914a762a980SDavid S. Miller  */
29159e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
29169e412ba7SIlpo Järvinen 			       int nonagle)
2917a762a980SDavid S. Miller {
2918726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2919726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2920726e07a8SIlpo Järvinen 	 * all will be happy.
2921726e07a8SIlpo Järvinen 	 */
2922726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2923726e07a8SIlpo Järvinen 		return;
2924726e07a8SIlpo Järvinen 
292599a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
29267450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
29279e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2928a762a980SDavid S. Miller }
2929a762a980SDavid S. Miller 
2930c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2931c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2932c1b4a7e6SDavid S. Miller  */
2933c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2934c1b4a7e6SDavid S. Miller {
2935fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2936c1b4a7e6SDavid S. Miller 
2937c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2938c1b4a7e6SDavid S. Miller 
2939d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2940c1b4a7e6SDavid S. Miller }
2941c1b4a7e6SDavid S. Miller 
29421da177e4SLinus Torvalds /* This function returns the amount that we can raise the
29431da177e4SLinus Torvalds  * usable window based on the following constraints
29441da177e4SLinus Torvalds  *
29451da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
29461da177e4SLinus Torvalds  * 2. We limit memory per socket
29471da177e4SLinus Torvalds  *
29481da177e4SLinus Torvalds  * RFC 1122:
29491da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
29501da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
29511da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
29521da177e4SLinus Torvalds  *
29531da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
29541da177e4SLinus Torvalds  * it at least MSS bytes.
29551da177e4SLinus Torvalds  *
29561da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
29571da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
29581da177e4SLinus Torvalds  *
29591da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
29601da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
29611da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
29621da177e4SLinus Torvalds  * window to always advance by a single byte.
29631da177e4SLinus Torvalds  *
29641da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
29651da177e4SLinus Torvalds  * then this will not be a problem.
29661da177e4SLinus Torvalds  *
29671da177e4SLinus Torvalds  * BSD seems to make the following compromise:
29681da177e4SLinus Torvalds  *
29691da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
29701da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
29711da177e4SLinus Torvalds  *	then set the window to 0.
29721da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
29731da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
29741da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
29751da177e4SLinus Torvalds  *
29761da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
29771da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
29781da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
29791da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
29801da177e4SLinus Torvalds  * because the pipeline is full.
29811da177e4SLinus Torvalds  *
29821da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
29831da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
29841da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
29851da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
29861da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
29871da177e4SLinus Torvalds  *
29881da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
29891da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
29901da177e4SLinus Torvalds  *
29911da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
29921da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
29931da177e4SLinus Torvalds  */
29941da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
29951da177e4SLinus Torvalds {
2996463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
29971da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2998b650d953Smfreemon@cloudflare.com 	struct net *net = sock_net(sk);
2999caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
30001da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
30011da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
30021da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
30031da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
30041da177e4SLinus Torvalds 	 */
3005463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
30061da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
300786c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
3008071c8ed6SFlorian Westphal 	int full_space, window;
3009071c8ed6SFlorian Westphal 
3010071c8ed6SFlorian Westphal 	if (sk_is_mptcp(sk))
3011071c8ed6SFlorian Westphal 		mptcp_space(sk, &free_space, &allowed_space);
3012071c8ed6SFlorian Westphal 
3013071c8ed6SFlorian Westphal 	full_space = min_t(int, tp->window_clamp, allowed_space);
30141da177e4SLinus Torvalds 
301506425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
30161da177e4SLinus Torvalds 		mss = full_space;
301706425c30SEric Dumazet 		if (mss <= 0)
301806425c30SEric Dumazet 			return 0;
301906425c30SEric Dumazet 	}
3020b650d953Smfreemon@cloudflare.com 
3021b650d953Smfreemon@cloudflare.com 	/* Only allow window shrink if the sysctl is enabled and we have
3022b650d953Smfreemon@cloudflare.com 	 * a non-zero scaling factor in effect.
3023b650d953Smfreemon@cloudflare.com 	 */
3024b650d953Smfreemon@cloudflare.com 	if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3025b650d953Smfreemon@cloudflare.com 		goto shrink_window_allowed;
3026b650d953Smfreemon@cloudflare.com 
3027b650d953Smfreemon@cloudflare.com 	/* do not allow window to shrink */
3028b650d953Smfreemon@cloudflare.com 
3029b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
3030463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
30311da177e4SLinus Torvalds 
3032b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
3033053f3684SWei Wang 			tcp_adjust_rcv_ssthresh(sk);
30341da177e4SLinus Torvalds 
303586c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
303686c1a045SFlorian Westphal 		 * increase it due to wscale.
303786c1a045SFlorian Westphal 		 */
303886c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
303986c1a045SFlorian Westphal 
304086c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
304186c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
304286c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
304386c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
304486c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
304586c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
304686c1a045SFlorian Westphal 		 */
304786c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
30481da177e4SLinus Torvalds 			return 0;
30491da177e4SLinus Torvalds 	}
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
30521da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
30531da177e4SLinus Torvalds 
30541da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
30551da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
30561da177e4SLinus Torvalds 	 */
30571da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
30581da177e4SLinus Torvalds 		window = free_space;
30591da177e4SLinus Torvalds 
30601da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
30611da177e4SLinus Torvalds 		 * Import case: prevent zero window announcement if
30621da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
30631da177e4SLinus Torvalds 		 */
30641935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
30651da177e4SLinus Torvalds 	} else {
30661935299dSGao Feng 		window = tp->rcv_wnd;
30671da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
30681da177e4SLinus Torvalds 		 * Window clamp already applied above.
30691da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
30701da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
30711da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
30721da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
30731da177e4SLinus Torvalds 		 * is too small.
30741da177e4SLinus Torvalds 		 */
30751da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
30761935299dSGao Feng 			window = rounddown(free_space, mss);
307784565070SJohn Heffner 		else if (mss == full_space &&
3078b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
307984565070SJohn Heffner 			window = free_space;
30801da177e4SLinus Torvalds 	}
30811da177e4SLinus Torvalds 
30821da177e4SLinus Torvalds 	return window;
3083b650d953Smfreemon@cloudflare.com 
3084b650d953Smfreemon@cloudflare.com shrink_window_allowed:
3085b650d953Smfreemon@cloudflare.com 	/* new window should always be an exact multiple of scaling factor */
3086b650d953Smfreemon@cloudflare.com 	free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3087b650d953Smfreemon@cloudflare.com 
3088b650d953Smfreemon@cloudflare.com 	if (free_space < (full_space >> 1)) {
3089b650d953Smfreemon@cloudflare.com 		icsk->icsk_ack.quick = 0;
3090b650d953Smfreemon@cloudflare.com 
3091b650d953Smfreemon@cloudflare.com 		if (tcp_under_memory_pressure(sk))
3092b650d953Smfreemon@cloudflare.com 			tcp_adjust_rcv_ssthresh(sk);
3093b650d953Smfreemon@cloudflare.com 
3094b650d953Smfreemon@cloudflare.com 		/* if free space is too low, return a zero window */
3095b650d953Smfreemon@cloudflare.com 		if (free_space < (allowed_space >> 4) || free_space < mss ||
3096b650d953Smfreemon@cloudflare.com 			free_space < (1 << tp->rx_opt.rcv_wscale))
3097b650d953Smfreemon@cloudflare.com 			return 0;
3098b650d953Smfreemon@cloudflare.com 	}
3099b650d953Smfreemon@cloudflare.com 
3100b650d953Smfreemon@cloudflare.com 	if (free_space > tp->rcv_ssthresh) {
3101b650d953Smfreemon@cloudflare.com 		free_space = tp->rcv_ssthresh;
3102b650d953Smfreemon@cloudflare.com 		/* new window should always be an exact multiple of scaling factor
3103b650d953Smfreemon@cloudflare.com 		 *
3104b650d953Smfreemon@cloudflare.com 		 * For this case, we ALIGN "up" (increase free_space) because
3105b650d953Smfreemon@cloudflare.com 		 * we know free_space is not zero here, it has been reduced from
3106b650d953Smfreemon@cloudflare.com 		 * the memory-based limit, and rcv_ssthresh is not a hard limit
3107b650d953Smfreemon@cloudflare.com 		 * (unlike sk_rcvbuf).
3108b650d953Smfreemon@cloudflare.com 		 */
3109b650d953Smfreemon@cloudflare.com 		free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
3110b650d953Smfreemon@cloudflare.com 	}
3111b650d953Smfreemon@cloudflare.com 
3112b650d953Smfreemon@cloudflare.com 	return free_space;
31131da177e4SLinus Torvalds }
31141da177e4SLinus Torvalds 
3115cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3116082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
3117082ac2d5SMartin KaFai Lau {
31180a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
31190a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
31200a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
3121082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
3122082ac2d5SMartin KaFai Lau 
31230a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3124082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
31252de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
31262de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
3127082ac2d5SMartin KaFai Lau 	}
3128082ac2d5SMartin KaFai Lau }
3129082ac2d5SMartin KaFai Lau 
31304a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
3131f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
31321da177e4SLinus Torvalds {
31331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
313475c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
313513dde04fSWei Yongjun 	int next_skb_size;
31361da177e4SLinus Torvalds 
3137058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
31381da177e4SLinus Torvalds 
3139058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
31401da177e4SLinus Torvalds 
3141bd446314SEric Dumazet 	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3142f8071cdeSEric Dumazet 		return false;
3143bd446314SEric Dumazet 
31442b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
3145a6963a6bSIlpo Järvinen 
31461da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
31471da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
31481da177e4SLinus Torvalds 
3149e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
31504de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
31511da177e4SLinus Torvalds 
31521da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
31531da177e4SLinus Torvalds 	 * packet counting does not break.
31541da177e4SLinus Torvalds 	 */
31554828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3156a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3157b7689205SIlpo Järvinen 
3158b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
3159ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
3160ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
3161ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
3162b7689205SIlpo Järvinen 
3163797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3164797108d1SIlpo Järvinen 
3165082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
3166082ac2d5SMartin KaFai Lau 
316775c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3168f8071cdeSEric Dumazet 	return true;
31691da177e4SLinus Torvalds }
31701da177e4SLinus Torvalds 
317167edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
3172a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
31734a17fc3aSIlpo Järvinen {
31744a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
3175a2a385d6SEric Dumazet 		return false;
31764a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
3177a2a385d6SEric Dumazet 		return false;
31782331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
31794a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3180a2a385d6SEric Dumazet 		return false;
31814a17fc3aSIlpo Järvinen 
3182a2a385d6SEric Dumazet 	return true;
31834a17fc3aSIlpo Järvinen }
31844a17fc3aSIlpo Järvinen 
318567edfef7SAndi Kleen /* Collapse packets in the retransmit queue to make to create
318667edfef7SAndi Kleen  * less packets on the wire. This is only done on retransmission.
318767edfef7SAndi Kleen  */
31884a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
31894a17fc3aSIlpo Järvinen 				     int space)
31904a17fc3aSIlpo Järvinen {
31914a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
31924a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
3193a2a385d6SEric Dumazet 	bool first = true;
31944a17fc3aSIlpo Järvinen 
31951a63cb91SKuniyuki Iwashima 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
31964a17fc3aSIlpo Järvinen 		return;
31974de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
31984a17fc3aSIlpo Järvinen 		return;
31994a17fc3aSIlpo Järvinen 
320075c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
32014a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
32024a17fc3aSIlpo Järvinen 			break;
32034a17fc3aSIlpo Järvinen 
320485712484SMat Martineau 		if (!tcp_skb_can_collapse(to, skb))
3205a643b5d4SMartin KaFai Lau 			break;
3206a643b5d4SMartin KaFai Lau 
32074a17fc3aSIlpo Järvinen 		space -= skb->len;
32084a17fc3aSIlpo Järvinen 
32094a17fc3aSIlpo Järvinen 		if (first) {
3210a2a385d6SEric Dumazet 			first = false;
32114a17fc3aSIlpo Järvinen 			continue;
32124a17fc3aSIlpo Järvinen 		}
32134a17fc3aSIlpo Järvinen 
32144a17fc3aSIlpo Järvinen 		if (space < 0)
32154a17fc3aSIlpo Järvinen 			break;
32164a17fc3aSIlpo Järvinen 
32174a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
32184a17fc3aSIlpo Järvinen 			break;
32194a17fc3aSIlpo Järvinen 
3220f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
3221f8071cdeSEric Dumazet 			break;
32224a17fc3aSIlpo Järvinen 	}
32234a17fc3aSIlpo Järvinen }
32244a17fc3aSIlpo Järvinen 
32251da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
32261da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
32271da177e4SLinus Torvalds  * error occurred which prevented the send.
32281da177e4SLinus Torvalds  */
322910d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
32301da177e4SLinus Torvalds {
32315d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
323210d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
32337d227cd2SSridhar Samudrala 	unsigned int cur_mss;
323410d3be56SEric Dumazet 	int diff, len, err;
3235536a6c8eSYonglong Li 	int avail_wnd;
323610d3be56SEric Dumazet 
323710d3be56SEric Dumazet 	/* Inconclusive MTU probe */
323810d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
32395d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
32405d424d5aSJohn Heffner 
32411f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
32421f3279aeSEric Dumazet 		return -EBUSY;
32431f3279aeSEric Dumazet 
32441da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
32457f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
32467f582b24SEric Dumazet 			WARN_ON_ONCE(1);
32477f582b24SEric Dumazet 			return -EINVAL;
32487f582b24SEric Dumazet 		}
32491da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
32501da177e4SLinus Torvalds 			return -ENOMEM;
32511da177e4SLinus Torvalds 	}
32521da177e4SLinus Torvalds 
32537d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
32547d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
32557d227cd2SSridhar Samudrala 
32560c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
3257536a6c8eSYonglong Li 	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
32587d227cd2SSridhar Samudrala 
32591da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
32601da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
32611da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
3262536a6c8eSYonglong Li 	 * our retransmit of one segment serves as a zero window probe.
32631da177e4SLinus Torvalds 	 */
3264536a6c8eSYonglong Li 	if (avail_wnd <= 0) {
3265536a6c8eSYonglong Li 		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
32661da177e4SLinus Torvalds 			return -EAGAIN;
3267536a6c8eSYonglong Li 		avail_wnd = cur_mss;
3268536a6c8eSYonglong Li 	}
32691da177e4SLinus Torvalds 
327010d3be56SEric Dumazet 	len = cur_mss * segs;
3271536a6c8eSYonglong Li 	if (len > avail_wnd) {
3272536a6c8eSYonglong Li 		len = rounddown(avail_wnd, cur_mss);
3273536a6c8eSYonglong Li 		if (!len)
3274536a6c8eSYonglong Li 			len = avail_wnd;
3275536a6c8eSYonglong Li 	}
327610d3be56SEric Dumazet 	if (skb->len > len) {
327775c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
327875c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
32791da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
328002276f3cSIlpo Järvinen 	} else {
3281c4777efaSEric Dumazet 		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
3282c52e2421SEric Dumazet 			return -ENOMEM;
328310d3be56SEric Dumazet 
328410d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
328510d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
328610d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
328710d3be56SEric Dumazet 		if (diff)
328810d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
3289536a6c8eSYonglong Li 		avail_wnd = min_t(int, avail_wnd, cur_mss);
3290536a6c8eSYonglong Li 		if (skb->len < avail_wnd)
3291536a6c8eSYonglong Li 			tcp_retrans_try_collapse(sk, skb, avail_wnd);
32921da177e4SLinus Torvalds 	}
32931da177e4SLinus Torvalds 
329449213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
329549213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
329649213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
329749213555SDaniel Borkmann 
3298678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
3299678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
3300678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3301678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3302678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3303678550c6SYuchung Cheng 	tp->total_retrans += segs;
3304fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
3305678550c6SYuchung Cheng 
330650bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
330750bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
330850bceae9SThomas Graf 	 * beyond what csum_start can cover.
330950bceae9SThomas Graf 	 */
331050bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
331150bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
331210a81980SEric Dumazet 		struct sk_buff *nskb;
331310a81980SEric Dumazet 
3314e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
331510a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
331607f8e4d0SFlorian Westphal 			if (nskb) {
331707f8e4d0SFlorian Westphal 				nskb->dev = NULL;
331807f8e4d0SFlorian Westphal 				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
331907f8e4d0SFlorian Westphal 			} else {
332007f8e4d0SFlorian Westphal 				err = -ENOBUFS;
332107f8e4d0SFlorian Westphal 			}
3322e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
3323e2080072SEric Dumazet 
33245889e2c0SYousuk Seung 		if (!err) {
3325a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
33265889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
33275889e2c0SYousuk Seung 		}
3328117632e6SEric Dumazet 	} else {
3329c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3330117632e6SEric Dumazet 	}
3331c84a5711SYuchung Cheng 
33327f12422cSYuchung Cheng 	/* To avoid taking spuriously low RTT samples based on a timestamp
33337f12422cSYuchung Cheng 	 * for a transmit that never happened, always mark EVER_RETRANS
33347f12422cSYuchung Cheng 	 */
33357f12422cSYuchung Cheng 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
33367f12422cSYuchung Cheng 
3337a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3338a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3339a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
3340a31ad29eSLawrence Brakmo 
3341fc9f3501SEric Dumazet 	if (likely(!err)) {
3342e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
3343678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
3344ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3345fc9f3501SEric Dumazet 	}
3346c84a5711SYuchung Cheng 	return err;
334793b174adSYuchung Cheng }
334893b174adSYuchung Cheng 
334910d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
335093b174adSYuchung Cheng {
335193b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
335210d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
33531da177e4SLinus Torvalds 
33541da177e4SLinus Torvalds 	if (err == 0) {
33551da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
33561da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3357e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
33581da177e4SLinus Torvalds 		}
33591da177e4SLinus Torvalds #endif
33601da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
33611da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
33627ae18975SYuchung Cheng 	}
33631da177e4SLinus Torvalds 
33647ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
33651da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
3366614e8316SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
33671da177e4SLinus Torvalds 
33686e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
33696e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
33706e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
33711da177e4SLinus Torvalds 	return err;
33721da177e4SLinus Torvalds }
33731da177e4SLinus Torvalds 
33741da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
33751da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
33761da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
33771da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
33781da177e4SLinus Torvalds  */
33791da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
33801da177e4SLinus Torvalds {
33816687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3382b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
33831da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3384916e6d1aSEric Dumazet 	bool rearm_timer = false;
3385840a3cbeSYuchung Cheng 	u32 max_segs;
338661eb55f4SIlpo Järvinen 	int mib_idx;
33876a438bbeSStephen Hemminger 
338845e77d31SIlpo Järvinen 	if (!tp->packets_out)
338945e77d31SIlpo Järvinen 		return;
339045e77d31SIlpo Järvinen 
339175c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3392b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3393ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
339475c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3395dca0aaf8SEric Dumazet 		__u8 sacked;
339610d3be56SEric Dumazet 		int segs;
33971da177e4SLinus Torvalds 
3398218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3399218af599SEric Dumazet 			break;
3400218af599SEric Dumazet 
34016a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
340251456b29SIan Morris 		if (!hole)
34036a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
34046a438bbeSStephen Hemminger 
340540570375SEric Dumazet 		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
340610d3be56SEric Dumazet 		if (segs <= 0)
3407916e6d1aSEric Dumazet 			break;
3408dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3409a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() have aggregated large skbs,
3410a3d2e9f8SEric Dumazet 		 * we need to make sure not sending too bigs TSO packets
3411a3d2e9f8SEric Dumazet 		 */
3412a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
34130e1c54c2SIlpo Järvinen 
3414840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3415006f582cSIlpo Järvinen 			break;
34160e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
341751456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
34180e1c54c2SIlpo Järvinen 				hole = skb;
341961eb55f4SIlpo Järvinen 			continue;
34201da177e4SLinus Torvalds 
34210e1c54c2SIlpo Järvinen 		} else {
34220e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
34230e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
34240e1c54c2SIlpo Järvinen 			else
34250e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
34260e1c54c2SIlpo Järvinen 		}
34270e1c54c2SIlpo Järvinen 
34280e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
342961eb55f4SIlpo Järvinen 			continue;
343040b215e5SPavel Emelyanov 
3431f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3432916e6d1aSEric Dumazet 			break;
3433f9616c35SEric Dumazet 
343410d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
3435916e6d1aSEric Dumazet 			break;
343624ab6becSYuchung Cheng 
3437de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
34381da177e4SLinus Torvalds 
3439684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3440a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3441a262f0cdSNandita Dukkipati 
344275c119afSEric Dumazet 		if (skb == rtx_head &&
344357dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3444916e6d1aSEric Dumazet 			rearm_timer = true;
3445916e6d1aSEric Dumazet 
3446916e6d1aSEric Dumazet 	}
3447916e6d1aSEric Dumazet 	if (rearm_timer)
34483f80e08fSEric Dumazet 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
34493f421baaSArnaldo Carvalho de Melo 				     inet_csk(sk)->icsk_rto,
34508dc242adSEric Dumazet 				     TCP_RTO_MAX);
34511da177e4SLinus Torvalds }
34521da177e4SLinus Torvalds 
3453d83769a5SEric Dumazet /* We allow to exceed memory limits for FIN packets to expedite
3454d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3455845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
3456845704a5SEric Dumazet  * or even be forced to close flow without any FIN.
3457a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3458a6c5ea4cSEric Dumazet  * with edge trigger epoll()
3459d83769a5SEric Dumazet  */
3460a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3461d83769a5SEric Dumazet {
3462c4ee1185SEric Dumazet 	int delta, amt;
3463d83769a5SEric Dumazet 
3464c4ee1185SEric Dumazet 	delta = size - sk->sk_forward_alloc;
3465c4ee1185SEric Dumazet 	if (delta <= 0)
3466d83769a5SEric Dumazet 		return;
3467c4ee1185SEric Dumazet 	amt = sk_mem_pages(delta);
34685e6300e7SEric Dumazet 	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3469e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3470e805605cSJohannes Weiner 
3471baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
34724b1327beSWei Wang 		mem_cgroup_charge_skmem(sk->sk_memcg, amt,
34734b1327beSWei Wang 					gfp_memcg_charge() | __GFP_NOFAIL);
3474d83769a5SEric Dumazet }
3475d83769a5SEric Dumazet 
3476845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3477845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
34781da177e4SLinus Torvalds  */
34791da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
34801da177e4SLinus Torvalds {
3481ee2aabd3SEric Dumazet 	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
34821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34831da177e4SLinus Torvalds 
3484845704a5SEric Dumazet 	/* Optimization, tack on the FIN if we have one skb in write queue and
3485845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
3486845704a5SEric Dumazet 	 * Note: in the latter case, FIN packet will be sent after a timeout,
3487845704a5SEric Dumazet 	 * as TCP stack thinks it has already been transmitted.
34881da177e4SLinus Torvalds 	 */
3489ee2aabd3SEric Dumazet 	tskb = tail;
349075c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
349175c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
349275c119afSEric Dumazet 
349375c119afSEric Dumazet 	if (tskb) {
3494845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3495845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
34961da177e4SLinus Torvalds 		tp->write_seq++;
3497ee2aabd3SEric Dumazet 		if (!tail) {
3498845704a5SEric Dumazet 			/* This means tskb was already sent.
3499845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3500845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3501845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3502845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3503845704a5SEric Dumazet 			 */
3504e0d694d6SEric Dumazet 			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3505845704a5SEric Dumazet 			return;
3506845704a5SEric Dumazet 		}
35071da177e4SLinus Torvalds 	} else {
3508845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3509d1edc085SColin Ian King 		if (unlikely(!skb))
3510845704a5SEric Dumazet 			return;
3511d1edc085SColin Ian King 
3512e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3513d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3514a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
35151da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3516e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3517a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
35181da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
35191da177e4SLinus Torvalds 	}
3520845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
35211da177e4SLinus Torvalds }
35221da177e4SLinus Torvalds 
35231da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
35241da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
35251da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
352665bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
35271da177e4SLinus Torvalds  */
3528dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
35291da177e4SLinus Torvalds {
35301da177e4SLinus Torvalds 	struct sk_buff *skb;
35311da177e4SLinus Torvalds 
35327cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
35337cc2b043SGao Feng 
35341da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
35351da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
35361da177e4SLinus Torvalds 	if (!skb) {
35374e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
35381da177e4SLinus Torvalds 		return;
35391da177e4SLinus Torvalds 	}
35401da177e4SLinus Torvalds 
35411da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
35421da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3543e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3544a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
35459a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
35461da177e4SLinus Torvalds 	/* Send it off. */
3547dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
35484e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3549c24b14c4SSong Liu 
3550c24b14c4SSong Liu 	/* skb of trace_tcp_send_reset() keeps the skb that caused RST,
3551c24b14c4SSong Liu 	 * skb here is different to the troublesome skb, so use NULL
3552c24b14c4SSong Liu 	 */
3553c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
35541da177e4SLinus Torvalds }
35551da177e4SLinus Torvalds 
355667edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
355767edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
35581da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
35591da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
35601da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
35611da177e4SLinus Torvalds  */
35621da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
35631da177e4SLinus Torvalds {
35641da177e4SLinus Torvalds 	struct sk_buff *skb;
35651da177e4SLinus Torvalds 
356675c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
356751456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
356875c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
35691da177e4SLinus Torvalds 		return -EFAULT;
35701da177e4SLinus Torvalds 	}
35714de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
35721da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3573e2080072SEric Dumazet 			struct sk_buff *nskb;
3574e2080072SEric Dumazet 
3575e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3576e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3577e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
357851456b29SIan Morris 			if (!nskb)
35791da177e4SLinus Torvalds 				return -ENOMEM;
3580e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
35812bec445fSEric Dumazet 			tcp_highest_sack_replace(sk, skb, nskb);
358275c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3583f4a775d1SEric Dumazet 			__skb_header_release(nskb);
358475c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3585ab4e846aSEric Dumazet 			sk_wmem_queued_add(sk, nskb->truesize);
35863ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
35871da177e4SLinus Torvalds 			skb = nskb;
35881da177e4SLinus Torvalds 		}
35891da177e4SLinus Torvalds 
35904de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3591735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
35921da177e4SLinus Torvalds 	}
3593dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
35941da177e4SLinus Torvalds }
35951da177e4SLinus Torvalds 
35964aea39c1SEric Dumazet /**
3597331fca43SMartin KaFai Lau  * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3598331fca43SMartin KaFai Lau  * @sk: listener socket
3599331fca43SMartin KaFai Lau  * @dst: dst entry attached to the SYNACK. It is consumed and caller
3600331fca43SMartin KaFai Lau  *       should not use it again.
3601331fca43SMartin KaFai Lau  * @req: request_sock pointer
3602331fca43SMartin KaFai Lau  * @foc: cookie for tcp fast open
3603331fca43SMartin KaFai Lau  * @synack_type: Type of synack to prepare
3604331fca43SMartin KaFai Lau  * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
36054aea39c1SEric Dumazet  */
36065d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3607e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3608ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3609331fca43SMartin KaFai Lau 				enum tcp_synack_type synack_type,
3610331fca43SMartin KaFai Lau 				struct sk_buff *syn_skb)
36111da177e4SLinus Torvalds {
36122e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
36135d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
361480f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
36155d062de7SEric Dumazet 	struct tcp_out_options opts;
36165d062de7SEric Dumazet 	struct sk_buff *skb;
3617bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
36185d062de7SEric Dumazet 	struct tcphdr *th;
3619f5fff5dcSTom Quetchenbach 	int mss;
3620a842fe14SEric Dumazet 	u64 now;
36211da177e4SLinus Torvalds 
3622ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
36234aea39c1SEric Dumazet 	if (unlikely(!skb)) {
36244aea39c1SEric Dumazet 		dst_release(dst);
36251da177e4SLinus Torvalds 		return NULL;
36264aea39c1SEric Dumazet 	}
36271da177e4SLinus Torvalds 	/* Reserve space for headers. */
36281da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36291da177e4SLinus Torvalds 
3630b3d05147SEric Dumazet 	switch (synack_type) {
3631b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
36329e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3633b3d05147SEric Dumazet 		break;
3634b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3635b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3636b3d05147SEric Dumazet 		 * to avoid false sharing.
3637b3d05147SEric Dumazet 		 */
3638b3d05147SEric Dumazet 		break;
3639b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3640ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express multiple
3641ca6fb065SEric Dumazet 		 * cpu might call us concurrently.
3642ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc in an atomic, we can promote to rw.
3643ca6fb065SEric Dumazet 		 */
3644ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3645b3d05147SEric Dumazet 		break;
3646ca6fb065SEric Dumazet 	}
36474aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
36481da177e4SLinus Torvalds 
36493541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3650f5fff5dcSTom Quetchenbach 
365133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3652614e8316SEric Dumazet 	if (tcp_rsk(req)->req_usec_ts < 0)
3653614e8316SEric Dumazet 		tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
3654a842fe14SEric Dumazet 	now = tcp_clock_ns();
36558b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
3656f8ace8d9SFlorian Westphal 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3657a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3658a1ac9c8aSMartin KaFai Lau 				      true);
36598b5f12d0SFlorian Westphal 	else
36608b5f12d0SFlorian Westphal #endif
36619e450c1eSYuchung Cheng 	{
3662a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, now, true);
36639e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
36649e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
36659e450c1eSYuchung Cheng 	}
366680f03e27SEric Dumazet 
366780f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
366880f03e27SEric Dumazet 	rcu_read_lock();
3669fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
367080f03e27SEric Dumazet #endif
36715e526552SEric Dumazet 	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
3672331fca43SMartin KaFai Lau 	/* bpf program will be interested in the tcp_flags */
3673331fca43SMartin KaFai Lau 	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
367460e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
3675331fca43SMartin KaFai Lau 					     foc, synack_type,
3676331fca43SMartin KaFai Lau 					     syn_skb) + sizeof(*th);
367733ad798cSAdam Langley 
3678aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3679aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
36801da177e4SLinus Torvalds 
3681ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
36821da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
36831da177e4SLinus Torvalds 	th->syn = 1;
36841da177e4SLinus Torvalds 	th->ack = 1;
36856ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3686b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3687634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3688e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
36893b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
36903b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
36918336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
36928336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
36931da177e4SLinus Torvalds 
36941da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3695ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3696ea66758cSPaolo Abeni 	tcp_options_write(th, NULL, &opts);
36971da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
3698bced3f7dSBreno Leitao 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3699cfb6eeb4SYOSHIFUJI Hideaki 
3700cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3701cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
370280f03e27SEric Dumazet 	if (md5)
3703bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
370439f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
370580f03e27SEric Dumazet 	rcu_read_unlock();
3706cfb6eeb4SYOSHIFUJI Hideaki #endif
3707cfb6eeb4SYOSHIFUJI Hideaki 
3708331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3709331fca43SMartin KaFai Lau 				synack_type, &opts);
3710331fca43SMartin KaFai Lau 
3711a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, now, true);
3712a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3713a842fe14SEric Dumazet 
37141da177e4SLinus Torvalds 	return skb;
37151da177e4SLinus Torvalds }
37164bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
37171da177e4SLinus Torvalds 
371881164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
371981164413SDaniel Borkmann {
372081164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
372181164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
372281164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
372381164413SDaniel Borkmann 
372481164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
372581164413SDaniel Borkmann 		return;
372681164413SDaniel Borkmann 
372781164413SDaniel Borkmann 	rcu_read_lock();
372881164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
37290baf26b0SMartin KaFai Lau 	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
37300baf26b0SMartin KaFai Lau 		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
373181164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
373281164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
373381164413SDaniel Borkmann 	}
373481164413SDaniel Borkmann 	rcu_read_unlock();
373581164413SDaniel Borkmann }
373681164413SDaniel Borkmann 
373767edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3738f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
37391da177e4SLinus Torvalds {
3740cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
37411da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37421da177e4SLinus Torvalds 	__u8 rcv_wscale;
374313d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
37441da177e4SLinus Torvalds 
37451da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
37461da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
37471da177e4SLinus Torvalds 	 */
37485d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
37493666f666SKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
37505d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
37511da177e4SLinus Torvalds 
37521da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
37531da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
37541da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
37551da177e4SLinus Torvalds 	tp->max_window = 0;
37565d424d5aSJohn Heffner 	tcp_mtup_init(sk);
37571da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
37581da177e4SLinus Torvalds 
375981164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
376081164413SDaniel Borkmann 
37611da177e4SLinus Torvalds 	if (!tp->window_clamp)
37621da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
37633541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3764f5fff5dcSTom Quetchenbach 
37651da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
37661da177e4SLinus Torvalds 
3767e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforce a smaller rx buffer */
3768e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3769e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3770e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3771e88c64f0SHagen Paul Pfeifer 
377213d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
377313d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
377413d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
377513d3b1ebSLawrence Brakmo 
3776ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
37771da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
37781da177e4SLinus Torvalds 				  &tp->rcv_wnd,
37791da177e4SLinus Torvalds 				  &tp->window_clamp,
37803666f666SKuniyuki Iwashima 				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
378131d12926Slaurent chavey 				  &rcv_wscale,
378213d3b1ebSLawrence Brakmo 				  rcv_wnd);
37831da177e4SLinus Torvalds 
37841da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
37851da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
37861da177e4SLinus Torvalds 
3787e13ec3daSEric Dumazet 	WRITE_ONCE(sk->sk_err, 0);
37881da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
37891da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3790ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
37917f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
37921da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
37931da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
379433f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3795e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3796ee995283SPavel Emelyanov 
3797ee995283SPavel Emelyanov 	if (likely(!tp->repair))
37981da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3799c7781a6eSAndrew Vagin 	else
380070eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3801ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
38027db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
38031da177e4SLinus Torvalds 
38048550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3805463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
38061da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
38071da177e4SLinus Torvalds }
38081da177e4SLinus Torvalds 
3809783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3810783237e8SYuchung Cheng {
3811783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3812783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3813783237e8SYuchung Cheng 
3814783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3815f4a775d1SEric Dumazet 	__skb_header_release(skb);
3816ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
3817783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
38180f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3819783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3820783237e8SYuchung Cheng }
3821783237e8SYuchung Cheng 
3822783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3823783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3824783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3825783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3826783237e8SYuchung Cheng  * If cookie is not cached or other error occurs, falls back to send a
3827783237e8SYuchung Cheng  * regular SYN with Fast Open cookie request option.
3828783237e8SYuchung Cheng  */
3829783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3830783237e8SYuchung Cheng {
3831ed0c99dcSJakub Kicinski 	struct inet_connection_sock *icsk = inet_csk(sk);
3832783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3833783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3834fbf93406SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
3835355a901eSEric Dumazet 	struct sk_buff *syn_data;
3836fbf93406SEric Dumazet 	int space, err = 0;
3837783237e8SYuchung Cheng 
383867da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3839065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3840783237e8SYuchung Cheng 		goto fallback;
3841783237e8SYuchung Cheng 
3842783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3843783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3844783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3845783237e8SYuchung Cheng 	 */
38463541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3847ed0c99dcSJakub Kicinski 	/* Sync mss_cache after updating the mss_clamp */
3848ed0c99dcSJakub Kicinski 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
38493541f9e8SEric Dumazet 
3850ed0c99dcSJakub Kicinski 	space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
3851783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3852783237e8SYuchung Cheng 
3853f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3854f5ddcbbbSEric Dumazet 
3855fbf93406SEric Dumazet 	if (space &&
3856fbf93406SEric Dumazet 	    !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
3857fbf93406SEric Dumazet 				  pfrag, sk->sk_allocation))
3858fbf93406SEric Dumazet 		goto fallback;
38595882efffSEric Dumazet 	syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false);
3860355a901eSEric Dumazet 	if (!syn_data)
3861783237e8SYuchung Cheng 		goto fallback;
3862355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
386307e100f9SEric Dumazet 	if (space) {
3864fbf93406SEric Dumazet 		space = min_t(size_t, space, pfrag->size - pfrag->offset);
3865fbf93406SEric Dumazet 		space = tcp_wmem_schedule(sk, space);
3866fbf93406SEric Dumazet 	}
3867fbf93406SEric Dumazet 	if (space) {
3868fbf93406SEric Dumazet 		space = copy_page_from_iter(pfrag->page, pfrag->offset,
3869fbf93406SEric Dumazet 					    space, &fo->data->msg_iter);
3870fbf93406SEric Dumazet 		if (unlikely(!space)) {
3871ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3872355a901eSEric Dumazet 			kfree_skb(syn_data);
3873783237e8SYuchung Cheng 			goto fallback;
3874783237e8SYuchung Cheng 		}
3875fbf93406SEric Dumazet 		skb_fill_page_desc(syn_data, 0, pfrag->page,
3876fbf93406SEric Dumazet 				   pfrag->offset, space);
3877fbf93406SEric Dumazet 		page_ref_inc(pfrag->page);
3878fbf93406SEric Dumazet 		pfrag->offset += space;
3879fbf93406SEric Dumazet 		skb_len_add(syn_data, space);
3880f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
388107e100f9SEric Dumazet 	}
3882355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3883355a901eSEric Dumazet 	if (space == fo->size)
3884355a901eSEric Dumazet 		fo->data = NULL;
3885355a901eSEric Dumazet 	fo->copied = space;
3886783237e8SYuchung Cheng 
3887355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
38880f87230dSFrancis Yan 	if (syn_data->len)
38890f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3890355a901eSEric Dumazet 
3891355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3892355a901eSEric Dumazet 
3893a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
3894355a901eSEric Dumazet 
3895355a901eSEric Dumazet 	/* Now full SYN+DATA was cloned and sent (or not),
3896355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3897355a901eSEric Dumazet 	 * we keep in write queue in case of a retransmit, as we
3898355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3899431a9124SEric Dumazet 	 */
3900355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3901355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3902355a901eSEric Dumazet 	if (!err) {
390367da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
390475c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3905f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3906783237e8SYuchung Cheng 		goto done;
3907783237e8SYuchung Cheng 	}
3908783237e8SYuchung Cheng 
390975c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
391075c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3911b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3912b5b7db8dSEric Dumazet 
3913783237e8SYuchung Cheng fallback:
3914783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3915783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3916783237e8SYuchung Cheng 		fo->cookie.len = 0;
3917783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3918783237e8SYuchung Cheng 	if (err)
3919783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3920783237e8SYuchung Cheng done:
3921783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3922783237e8SYuchung Cheng 	return err;
3923783237e8SYuchung Cheng }
3924783237e8SYuchung Cheng 
392567edfef7SAndi Kleen /* Build a SYN and send it off. */
39261da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
39271da177e4SLinus Torvalds {
39281da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
39291da177e4SLinus Torvalds 	struct sk_buff *buff;
3930ee586811SEric Paris 	int err;
39311da177e4SLinus Torvalds 
3932de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
39338ba60924SEric Dumazet 
3934*0aadc739SDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
3935*0aadc739SDmitry Safonov 	/* Has to be checked late, after setting daddr/saddr/ops.
3936*0aadc739SDmitry Safonov 	 * Return error if the peer has both a md5 and a tcp-ao key
3937*0aadc739SDmitry Safonov 	 * configured as this is ambiguous.
3938*0aadc739SDmitry Safonov 	 */
3939*0aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->md5sig_info,
3940*0aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
3941*0aadc739SDmitry Safonov 		bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1);
3942*0aadc739SDmitry Safonov 		bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk);
3943*0aadc739SDmitry Safonov 		struct tcp_ao_info *ao_info;
3944*0aadc739SDmitry Safonov 
3945*0aadc739SDmitry Safonov 		ao_info = rcu_dereference_check(tp->ao_info,
3946*0aadc739SDmitry Safonov 						lockdep_sock_is_held(sk));
3947*0aadc739SDmitry Safonov 		if (ao_info) {
3948*0aadc739SDmitry Safonov 			/* This is an extra check: tcp_ao_required() in
3949*0aadc739SDmitry Safonov 			 * tcp_v{4,6}_parse_md5_keys() should prevent adding
3950*0aadc739SDmitry Safonov 			 * md5 keys on ao_required socket.
3951*0aadc739SDmitry Safonov 			 */
3952*0aadc739SDmitry Safonov 			needs_ao |= ao_info->ao_required;
3953*0aadc739SDmitry Safonov 			WARN_ON_ONCE(ao_info->ao_required && needs_md5);
3954*0aadc739SDmitry Safonov 		}
3955*0aadc739SDmitry Safonov 		if (needs_md5 && needs_ao)
3956*0aadc739SDmitry Safonov 			return -EKEYREJECTED;
3957*0aadc739SDmitry Safonov 
3958*0aadc739SDmitry Safonov 		/* If we have a matching md5 key and no matching tcp-ao key
3959*0aadc739SDmitry Safonov 		 * then free up ao_info if allocated.
3960*0aadc739SDmitry Safonov 		 */
3961*0aadc739SDmitry Safonov 		if (needs_md5) {
3962*0aadc739SDmitry Safonov 			tcp_ao_destroy_sock(sk);
3963*0aadc739SDmitry Safonov 		} else if (needs_ao) {
3964*0aadc739SDmitry Safonov 			tcp_clear_md5_list(sk);
3965*0aadc739SDmitry Safonov 			kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
3966*0aadc739SDmitry Safonov 						  lockdep_sock_is_held(sk)));
3967*0aadc739SDmitry Safonov 		}
3968*0aadc739SDmitry Safonov 	}
3969*0aadc739SDmitry Safonov #endif
3970*0aadc739SDmitry Safonov #ifdef CONFIG_TCP_AO
3971*0aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->ao_info,
3972*0aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
3973*0aadc739SDmitry Safonov 		/* Don't allow connecting if ao is configured but no
3974*0aadc739SDmitry Safonov 		 * matching key is found.
3975*0aadc739SDmitry Safonov 		 */
3976*0aadc739SDmitry Safonov 		if (!tp->af_specific->ao_lookup(sk, sk, -1, -1))
3977*0aadc739SDmitry Safonov 			return -EKEYREJECTED;
3978*0aadc739SDmitry Safonov 	}
3979*0aadc739SDmitry Safonov #endif
3980*0aadc739SDmitry Safonov 
39818ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
39828ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
39838ba60924SEric Dumazet 
39841da177e4SLinus Torvalds 	tcp_connect_init(sk);
39851da177e4SLinus Torvalds 
39862b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
39872b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
39882b916477SAndrey Vagin 		return 0;
39892b916477SAndrey Vagin 	}
39902b916477SAndrey Vagin 
39915882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
3992355a901eSEric Dumazet 	if (unlikely(!buff))
39931da177e4SLinus Torvalds 		return -ENOBUFS;
39941da177e4SLinus Torvalds 
3995a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
39969a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
39979d0c00f5SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp_ts(tp);
3998783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3999735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
400075c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
40011da177e4SLinus Torvalds 
4002783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
4003783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
4004783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
4005ee586811SEric Paris 	if (err == -ECONNREFUSED)
4006ee586811SEric Paris 		return err;
4007bd37a088SWei Yongjun 
4008bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
4009bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
4010bd37a088SWei Yongjun 	 */
4011e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4012bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
4013b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
4014b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
4015e0d694d6SEric Dumazet 		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
4016b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
4017b5b7db8dSEric Dumazet 	}
401881cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
40191da177e4SLinus Torvalds 
40201da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
40213f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
40223f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
40231da177e4SLinus Torvalds 	return 0;
40241da177e4SLinus Torvalds }
40254bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
40261da177e4SLinus Torvalds 
4027bbf80d71SEric Dumazet u32 tcp_delack_max(const struct sock *sk)
4028bbf80d71SEric Dumazet {
4029bbf80d71SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
4030bbf80d71SEric Dumazet 	u32 delack_max = inet_csk(sk)->icsk_delack_max;
4031bbf80d71SEric Dumazet 
4032bbf80d71SEric Dumazet 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
4033bbf80d71SEric Dumazet 		u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
4034bbf80d71SEric Dumazet 		u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
4035bbf80d71SEric Dumazet 
4036bbf80d71SEric Dumazet 		delack_max = min_t(u32, delack_max, delack_from_rto_min);
4037bbf80d71SEric Dumazet 	}
4038bbf80d71SEric Dumazet 	return delack_max;
4039bbf80d71SEric Dumazet }
4040bbf80d71SEric Dumazet 
40411da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
40421da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
40431da177e4SLinus Torvalds  * for details.
40441da177e4SLinus Torvalds  */
40451da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
40461da177e4SLinus Torvalds {
4047463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
4048463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
40491da177e4SLinus Torvalds 	unsigned long timeout;
40501da177e4SLinus Torvalds 
40511da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
4052463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
40531da177e4SLinus Torvalds 		int max_ato = HZ / 2;
40541da177e4SLinus Torvalds 
405531954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
4056056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
40571da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
40581da177e4SLinus Torvalds 
40591da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
40601da177e4SLinus Torvalds 
40611da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
4062463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
40631da177e4SLinus Torvalds 		 * directly.
40641da177e4SLinus Torvalds 		 */
4065740b0f18SEric Dumazet 		if (tp->srtt_us) {
4066740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
4067740b0f18SEric Dumazet 					TCP_DELACK_MIN);
40681da177e4SLinus Torvalds 
40691da177e4SLinus Torvalds 			if (rtt < max_ato)
40701da177e4SLinus Torvalds 				max_ato = rtt;
40711da177e4SLinus Torvalds 		}
40721da177e4SLinus Torvalds 
40731da177e4SLinus Torvalds 		ato = min(ato, max_ato);
40741da177e4SLinus Torvalds 	}
40751da177e4SLinus Torvalds 
4076bbf80d71SEric Dumazet 	ato = min_t(u32, ato, tcp_delack_max(sk));
40772b8ee4f0SMartin KaFai Lau 
40781da177e4SLinus Torvalds 	/* Stay within the limit we were given */
40791da177e4SLinus Torvalds 	timeout = jiffies + ato;
40801da177e4SLinus Torvalds 
40811da177e4SLinus Torvalds 	/* Use new timeout only if there wasn't a older one earlier. */
4082463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
4083b6b6d653SEric Dumazet 		/* If delack timer is about to expire, send ACK now. */
4084b6b6d653SEric Dumazet 		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
40851da177e4SLinus Torvalds 			tcp_send_ack(sk);
40861da177e4SLinus Torvalds 			return;
40871da177e4SLinus Torvalds 		}
40881da177e4SLinus Torvalds 
4089463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
4090463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
40911da177e4SLinus Torvalds 	}
4092463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
4093463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
4094463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
40951da177e4SLinus Torvalds }
40961da177e4SLinus Torvalds 
40971da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
40982987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
40991da177e4SLinus Torvalds {
41001da177e4SLinus Torvalds 	struct sk_buff *buff;
41011da177e4SLinus Torvalds 
4102058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
4103058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4104058dc334SIlpo Järvinen 		return;
4105058dc334SIlpo Järvinen 
41061da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
41071da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
41081da177e4SLinus Torvalds 	 * sock.
41091da177e4SLinus Torvalds 	 */
41107450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
41117450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
41127450aaf6SEric Dumazet 	if (unlikely(!buff)) {
4113a37c2134SEric Dumazet 		struct inet_connection_sock *icsk = inet_csk(sk);
4114a37c2134SEric Dumazet 		unsigned long delay;
4115a37c2134SEric Dumazet 
4116a37c2134SEric Dumazet 		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4117a37c2134SEric Dumazet 		if (delay < TCP_RTO_MAX)
4118a37c2134SEric Dumazet 			icsk->icsk_ack.retry++;
4119463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
4120a37c2134SEric Dumazet 		icsk->icsk_ack.ato = TCP_ATO_MIN;
4121a37c2134SEric Dumazet 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
41221da177e4SLinus Torvalds 		return;
41231da177e4SLinus Torvalds 	}
41241da177e4SLinus Torvalds 
41251da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
41261da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
4127a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
41281da177e4SLinus Torvalds 
412998781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
413098781965SEric Dumazet 	 * too much.
413198781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
413298781965SEric Dumazet 	 */
413398781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
413498781965SEric Dumazet 
41351da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
41362987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
41371da177e4SLinus Torvalds }
413827cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
41392987babbSYuchung Cheng 
41402987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
41412987babbSYuchung Cheng {
41422987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
41431da177e4SLinus Torvalds }
41441da177e4SLinus Torvalds 
41451da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
41461da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
41471da177e4SLinus Torvalds  *
41481da177e4SLinus Torvalds  * Question: what should we make while urgent mode?
41491da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
41501da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
41511da177e4SLinus Torvalds  *
41521da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
41531da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
41541da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
41551da177e4SLinus Torvalds  */
4156e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
41571da177e4SLinus Torvalds {
41581da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
41591da177e4SLinus Torvalds 	struct sk_buff *skb;
41601da177e4SLinus Torvalds 
41611da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
41627450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
41637450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
416451456b29SIan Morris 	if (!skb)
41651da177e4SLinus Torvalds 		return -1;
41661da177e4SLinus Torvalds 
41671da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
41681da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
41691da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
41701da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
41711da177e4SLinus Torvalds 	 * send it.
41721da177e4SLinus Torvalds 	 */
4173a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4174e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
41757450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
41761da177e4SLinus Torvalds }
41771da177e4SLinus Torvalds 
4178385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
4179ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
4180ee995283SPavel Emelyanov {
4181ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
4182ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
41839a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
4184e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4185ee995283SPavel Emelyanov 	}
4186ee995283SPavel Emelyanov }
4187ee995283SPavel Emelyanov 
418867edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
4189e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
41901da177e4SLinus Torvalds {
41911da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
41921da177e4SLinus Torvalds 	struct sk_buff *skb;
41931da177e4SLinus Torvalds 
4194058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4195058dc334SIlpo Järvinen 		return -1;
4196058dc334SIlpo Järvinen 
419700db4124SIan Morris 	skb = tcp_send_head(sk);
419800db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
41991da177e4SLinus Torvalds 		int err;
42000c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
420190840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
42021da177e4SLinus Torvalds 
42031da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
42041da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
42051da177e4SLinus Torvalds 
42061da177e4SLinus Torvalds 		/* We are probing the opening of a window
42071da177e4SLinus Torvalds 		 * but the window size is != 0
42081da177e4SLinus Torvalds 		 * must have been a result SWS avoidance ( sender )
42091da177e4SLinus Torvalds 		 */
42101da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
42111da177e4SLinus Torvalds 		    skb->len > mss) {
42121da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
42134de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
421475c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
421575c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
42161da177e4SLinus Torvalds 				return -1;
42171da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
42185bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
42191da177e4SLinus Torvalds 
42204de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4221dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
422266f5fe62SIlpo Järvinen 		if (!err)
422366f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
42241da177e4SLinus Torvalds 		return err;
42251da177e4SLinus Torvalds 	} else {
422633f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4227e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
4228e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
42291da177e4SLinus Torvalds 	}
42301da177e4SLinus Torvalds }
42311da177e4SLinus Torvalds 
42321da177e4SLinus Torvalds /* A window probe timeout has occurred.  If window is not closed send
42331da177e4SLinus Torvalds  * a partial packet else a zero probe.
42341da177e4SLinus Torvalds  */
42351da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
42361da177e4SLinus Torvalds {
4237463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
42381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4239c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
4240c1d5674fSYuchung Cheng 	unsigned long timeout;
42411da177e4SLinus Torvalds 	int err;
42421da177e4SLinus Torvalds 
4243e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
42441da177e4SLinus Torvalds 
424575c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
42461da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
42476687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
4248463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
42499d9b1ee0SEnke Chen 		icsk->icsk_probes_tstamp = 0;
42501da177e4SLinus Torvalds 		return;
42511da177e4SLinus Torvalds 	}
42521da177e4SLinus Torvalds 
4253c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
42541da177e4SLinus Torvalds 	if (err <= 0) {
425539e24435SKuniyuki Iwashima 		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
4256463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
4257c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
42581da177e4SLinus Torvalds 	} else {
42591da177e4SLinus Torvalds 		/* If packet was not sent due to local congestion,
4260c1d5674fSYuchung Cheng 		 * Let senders fight for local resources conservatively.
42611da177e4SLinus Torvalds 		 */
4262c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
42631da177e4SLinus Torvalds 	}
4264344db93aSEnke Chen 
4265344db93aSEnke Chen 	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
42668dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
42671da177e4SLinus Torvalds }
42685db92c99SOctavian Purdila 
4269ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
42705db92c99SOctavian Purdila {
42715db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
42725db92c99SOctavian Purdila 	struct flowi fl;
42735db92c99SOctavian Purdila 	int res;
42745db92c99SOctavian Purdila 
4275cb6cd2ceSAkhmat Karakotov 	/* Paired with WRITE_ONCE() in sock_setsockopt() */
4276cb6cd2ceSAkhmat Karakotov 	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
42775e526552SEric Dumazet 		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
4278331fca43SMartin KaFai Lau 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4279331fca43SMartin KaFai Lau 				  NULL);
42805db92c99SOctavian Purdila 	if (!res) {
42810a375c82SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
42820a375c82SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4283e9d9da91SEric Dumazet 		if (unlikely(tcp_passive_fastopen(sk))) {
4284e9d9da91SEric Dumazet 			/* sk has const attribute because listeners are lockless.
4285e9d9da91SEric Dumazet 			 * However in this case, we are dealing with a passive fastopen
4286e9d9da91SEric Dumazet 			 * socket thus we can change total_retrans value.
4287e9d9da91SEric Dumazet 			 */
4288e9d9da91SEric Dumazet 			tcp_sk_rw(sk)->total_retrans++;
4289e9d9da91SEric Dumazet 		}
4290cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
42915db92c99SOctavian Purdila 	}
42925db92c99SOctavian Purdila 	return res;
42935db92c99SOctavian Purdila }
42945db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
4295