// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>
#include <net/mptcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	if (tp->highest_sack == NULL)
		tp->highest_sack = skb;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
	tcp_check_space(sk);
}

/* Return SND.NXT if the window was not shrunk, or if the amount of
 * shrinkage is less than one window scaling factor (a loss-of-precision
 * artifact).
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND can also
 * already be invalid. OK, let's settle on this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
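/* Worked example for tcp_acceptable_seq() (editorial illustration, not part
 * of the original source): with rx_opt.rcv_wscale == 7, one window scaling
 * factor is 1 << 7 == 128 bytes.  If the peer shrank the window so that
 * snd_nxt sits up to 127 bytes beyond tcp_wnd_end(tp), the shrinkage is
 * below the scaling granularity and we still return snd_nxt.
 */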

/* Calculate the MSS to advertise in a SYN segment.
 * RFC1122, RFC1063, and draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
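/* Worked example for tcp_advertise_mss() (editorial illustration): for IPv4
 * over a standard Ethernet path, a route metric derived from a 1500 byte
 * first-hop MTU typically yields an advertised MSS of 1460 (1500 minus
 * 20 bytes of IP header and 20 bytes of TCP header, options not counted).
 */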

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tcp_snd_cwnd(tp);

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
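/* Worked example for tcp_cwnd_restart() (editorial illustration): with
 * cwnd == 40, restart_cwnd == 10 and delta == 3 * icsk_rto, the loop
 * halves cwnd once per elapsed RTO: 40 -> 20 -> 10, then stops at the
 * restart window, so the connection restarts from cwnd == 10.
 */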

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If this is a reply within ATO of the last received
	 * packet, increase the pingpong count.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack);
		tp->compressed_ack = 0;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
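/* Worked example for tcp_select_initial_window() (editorial illustration):
 * if the scaled receive space works out to 4 MB (2^22 bytes), then
 * ilog2(space) == 22 and *rcv_wscale == clamp(22 - 15, 0, TCP_MAX_WSCALE)
 * == 7.  The 16-bit window field shifted by 7 can then represent up to
 * 65535 << 7, roughly 8 MB, which comfortably covers the 4 MB of space.
 */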

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win, new_win;

	/* Make the window 0 if we failed to queue the data because we
	 * are out of memory. The window is temporary, so we don't store
	 * it on the socket.
	 */
	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
		return 0;

	cur_win = tcp_receive_window(tp);
	new_win = __tcp_select_window(sk);
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
			/* Never shrink the offered window */
			if (new_win == 0)
				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
		}
	}

	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
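/* Worked example for tcp_select_window() (editorial illustration): with
 * rcv_wscale == 7 and a selected window of 262144 bytes, the value stuffed
 * into th->window is 262144 >> 7 == 2048; the receiver reconstructs the
 * byte count by shifting left by the same factor.
 */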

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
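/* Worked example for tcp_init_nondata_skb() (editorial illustration): a bare
 * SYN built with seq == 100 gets end_seq == 101 because SYN, like FIN,
 * consumes one unit of sequence space; a pure ACK with neither flag keeps
 * end_seq == seq.
 */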

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	BIT(0)
#define OPTION_TS		BIT(1)
#define OPTION_MD5		BIT(2)
#define OPTION_WSCALE		BIT(3)
#define OPTION_FAST_OPEN_COOKIE	BIT(8)
#define OPTION_SMC		BIT(9)
#define OPTION_MPTCP		BIT(10)
#define OPTION_AO		BIT(11)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u8 bpf_opt_len;		/* length of BPF hdr option */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
	struct mptcp_out_options mptcp;
};

static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
				struct tcp_sock *tp,
				struct tcp_out_options *opts)
{
#if IS_ENABLED(CONFIG_MPTCP)
	if (unlikely(OPTION_MPTCP & opts->options))
		mptcp_write_options(th, ptr, tp, &opts->mptcp);
#endif
}

#ifdef CONFIG_CGROUP_BPF
static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
					enum tcp_synack_type synack_type)
{
	if (unlikely(!skb))
		return BPF_WRITE_HDR_TCP_CURRENT_MSS;

	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;

	return 0;
}

/* req, syn_skb and synack_type are used when writing a synack */
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
	    !*remaining)
		return;

	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */

	/* init sock_ops */
	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;

	if (req) {
		/* The listen "sk" cannot be passed here because
		 * it is not locked.  It would not make much
		 * sense to do bpf_setsockopt(listen_sk) based
		 * on an individual connection request either.
		 *
		 * Thus, "req" is passed here and the cgroup-bpf-progs
		 * of the listen "sk" will be run.
		 *
		 * "req" is also used here for fastopen, even though the
		 * "sk" here is a fullsock "child" sk.  It is to keep the
		 * behavior consistent between fastopen and non-fastopen on
		 * the bpf programming side.
		 */
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = *remaining;
	/* tcp_current_mss() does not pass a skb */
	if (skb)
		bpf_skops_init_skb(&sock_ops, skb, 0);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err || sock_ops.remaining_opt_len == *remaining)
		return;

	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
	/* round up to 4 bytes */
	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;

	*remaining -= opts->bpf_opt_len;
}

static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!max_opt_len))
		return;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;

	if (req) {
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = max_opt_len;
	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err)
		nr_written = 0;
	else
		nr_written = max_opt_len - sock_ops.remaining_opt_len;

	if (nr_written < max_opt_len)
		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
		       max_opt_len - nr_written);
}
#else
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
}

static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
}
#endif

static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
				      const struct tcp_request_sock *tcprsk,
				      struct tcp_out_options *opts,
				      struct tcp_key *key, __be32 *ptr)
{
#ifdef CONFIG_TCP_AO
	u8 maclen = tcp_ao_maclen(key->ao_key);

	if (tcprsk) {
		u8 aolen = maclen + sizeof(struct tcp_ao_hdr);

		*ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
			       (tcprsk->ao_keyid << 8) |
			       (tcprsk->ao_rcv_next));
	} else {
		struct tcp_ao_key *rnext_key;
		struct tcp_ao_info *ao_info;

		ao_info = rcu_dereference_check(tp->ao_info,
			lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
		rnext_key = READ_ONCE(ao_info->rnext_key);
		if (WARN_ON_ONCE(!rnext_key))
			return ptr;
		*ptr++ = htonl((TCPOPT_AO << 24) |
			       (tcp_ao_len(key->ao_key) << 16) |
			       (key->ao_key->sndid << 8) |
			       (rnext_key->rcvid));
	}
	opts->hash_location = (__u8 *)ptr;
	ptr += maclen / sizeof(*ptr);
	if (unlikely(maclen % sizeof(*ptr))) {
		memset(ptr, TCPOPT_NOP, sizeof(*ptr));
		ptr++;
	}
#endif
	return ptr;
}

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
			      const struct tcp_request_sock *tcprsk,
			      struct tcp_out_options *opts,
			      struct tcp_key *key)
{
	__be32 *ptr = (__be32 *)(th + 1);
	u16 options = opts->options;	/* mungable copy */

	if (tcp_key_is_md5(key)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	} else if (tcp_key_is_ao(key)) {
		ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
	}
	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);

	mptcp_options_write(th, ptr, tp, opts);
}
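/* Worked example for tcp_options_write() (editorial illustration): when both
 * timestamps and SACK_PERM are pending, the first 32-bit word folds them
 * together as the bytes 0x04 0x02 0x08 0x0a (SACK_PERM kind/len, then
 * TIMESTAMP kind/len), followed by the 4-byte tsval and 4-byte tsecr: 12
 * bytes total, instead of 4 + 12 when SACK_PERM is NOP-padded separately.
 */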

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void mptcp_set_option_cond(const struct request_sock *req,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	if (rsk_is_mptcp(req)) {
		unsigned int size;

		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
			if (*remaining >= size) {
				opts->options |= OPTION_MPTCP;
				*remaining -= size;
			}
		}
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_key *key)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
	bool timestamps;

	/* Better than switch (key.type) as it has static branches */
	if (tcp_key_is_md5(key)) {
		timestamps = false;
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	} else {
		timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
		if (tcp_key_is_ao(key)) {
			opts->options |= OPTION_AO;
			remaining -= tcp_ao_len_aligned(key->ao_key);
		}
	}

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be included in
	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(timestamps)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	if (sk_is_mptcp(sk)) {
		unsigned int size;

		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
			opts->options |= OPTION_MPTCP;
			remaining -= size;
		}
	}

	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
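/* Worked example for tcp_syn_options() (editorial illustration): a common
 * SYN with timestamps, window scaling and SACK enabled consumes
 * TCPOLEN_MSS_ALIGNED (4) + TCPOLEN_TSTAMP_ALIGNED (12) +
 * TCPOLEN_WSCALE_ALIGNED (4) == 20 of the 40 option bytes; SACK_PERM rides
 * inside the timestamp block, so it costs nothing extra here.
 */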

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_key *key,
				       struct tcp_fastopen_cookie *foc,
				       enum tcp_synack_type synack_type,
				       struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

	if (tcp_key_is_md5(key)) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		if (synack_type != TCP_SYNACK_COOKIE)
			ireq->tstamp_ok &= !ireq->sack_ok;
	} else if (tcp_key_is_ao(key)) {
		opts->options |= OPTION_AO;
		remaining -= tcp_ao_len_aligned(key->ao_key);
		ireq->tstamp_ok &= !ireq->sack_ok;
	}

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
			      tcp_rsk(req)->ts_off;
		opts->tsecr = READ_ONCE(req->ts_recent);
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	mptcp_set_option_cond(req, opts, &remaining);

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
			      synack_type, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
96533ad798cSAdam Langley 
96667edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
96767edfef7SAndi Kleen  * final wire format yet.
96867edfef7SAndi Kleen  */
96995c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
97033ad798cSAdam Langley 					struct tcp_out_options *opts,
9711e03d32bSDmitry Safonov 					struct tcp_key *key)
972cf533ea5SEric Dumazet {
97333ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
97495c96174SEric Dumazet 	unsigned int size = 0;
975cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
97633ad798cSAdam Langley 
9775843ef42SAndi Kleen 	opts->options = 0;
9785843ef42SAndi Kleen 
9791e03d32bSDmitry Safonov 	/* Better than switch (key.type) as it has static branches */
9801e03d32bSDmitry Safonov 	if (tcp_key_is_md5(key)) {
98133ad798cSAdam Langley 		opts->options |= OPTION_MD5;
98233ad798cSAdam Langley 		size += TCPOLEN_MD5SIG_ALIGNED;
9831e03d32bSDmitry Safonov 	} else if (tcp_key_is_ao(key)) {
9841e03d32bSDmitry Safonov 		opts->options |= OPTION_AO;
985da7dfaa6SDmitry Safonov 		size += tcp_ao_len_aligned(key->ao_key);
98633ad798cSAdam Langley 	}
98733ad798cSAdam Langley 
98833ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
98933ad798cSAdam Langley 		opts->options |= OPTION_TS;
990614e8316SEric Dumazet 		opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
991614e8316SEric Dumazet 				tp->tsoffset : 0;
99233ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
99333ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
99433ad798cSAdam Langley 	}
99533ad798cSAdam Langley 
996cec37a6eSPeter Krystad 	/* MPTCP options have precedence over SACK for the limited TCP
997cec37a6eSPeter Krystad 	 * option space because a MPTCP connection would be forced to
998cec37a6eSPeter Krystad 	 * fall back to regular TCP if a required multipath option is
999cec37a6eSPeter Krystad 	 * missing. SACK still gets a chance to use whatever space is
1000cec37a6eSPeter Krystad 	 * left.
1001cec37a6eSPeter Krystad 	 */
1002cec37a6eSPeter Krystad 	if (sk_is_mptcp(sk)) {
1003cec37a6eSPeter Krystad 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1004cec37a6eSPeter Krystad 		unsigned int opt_size = 0;
1005cec37a6eSPeter Krystad 
1006cec37a6eSPeter Krystad 		if (mptcp_established_options(sk, skb, &opt_size, remaining,
1007cec37a6eSPeter Krystad 					      &opts->mptcp)) {
1008cec37a6eSPeter Krystad 			opts->options |= OPTION_MPTCP;
1009cec37a6eSPeter Krystad 			size += opt_size;
1010cec37a6eSPeter Krystad 		}
1011cec37a6eSPeter Krystad 	}
1012cec37a6eSPeter Krystad 
1013cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
1014cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
101595c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
10169cfcca23SMat Martineau 		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
10179cfcca23SMat Martineau 					 TCPOLEN_SACK_PERBLOCK))
10189cfcca23SMat Martineau 			return size;
10199cfcca23SMat Martineau 
102033ad798cSAdam Langley 		opts->num_sack_blocks =
102195c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
102233ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
102333ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
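		/* Worked example (illustrative, assuming the usual option
		 * sizes): with timestamps consuming 12 of the 40-byte option
		 * space, remaining = 28, and (28 - 4) / 8 = 3, so at most
		 * three SACK blocks fit alongside timestamps.
		 */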
10249cfcca23SMat Martineau 
102533ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
102633ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
102733ad798cSAdam Langley 	}
102833ad798cSAdam Langley 
1029331fca43SMartin KaFai Lau 	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
1030331fca43SMartin KaFai Lau 					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
1031331fca43SMartin KaFai Lau 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1032331fca43SMartin KaFai Lau 
1033331fca43SMartin KaFai Lau 		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
1034331fca43SMartin KaFai Lau 
1035331fca43SMartin KaFai Lau 		size = MAX_TCP_OPTION_SPACE - remaining;
1036331fca43SMartin KaFai Lau 	}
1037331fca43SMartin KaFai Lau 
103833ad798cSAdam Langley 	return size;
103940efc6faSStephen Hemminger }
10401da177e4SLinus Torvalds 
104146d3ceabSEric Dumazet 
104246d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
104346d3ceabSEric Dumazet  *
104446d3ceabSEric Dumazet  * The TSQ goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
104546d3ceabSEric Dumazet  * to reduce RTT and bufferbloat.
104646d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
104746d3ceabSEric Dumazet  *
104846d3ceabSEric Dumazet  * It's important that tcp_wfree() can be replaced by sock_wfree() in the
104946d3ceabSEric Dumazet  * event the skb needs to be reallocated in a driver.
10508e3bff96Sstephen hemminger  * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
105146d3ceabSEric Dumazet  *
105246d3ceabSEric Dumazet  * Since transmit from skb destructor is forbidden, we use a tasklet
105346d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
105446d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
105546d3ceabSEric Dumazet  */
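/* A rough sketch of the deferral flow implemented below (illustrative,
 * mirroring the code rather than adding to it): when tcp_wfree() sees a
 * throttled socket, it sets TSQF_QUEUED, puts the tcp_sock on this cpu's
 * tsq->head and schedules the tasklet; tcp_tasklet_func() later splices
 * that list and calls tcp_tsq_handler() for each queued socket.
 */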
105646d3ceabSEric Dumazet struct tsq_tasklet {
105746d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
105846d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
105946d3ceabSEric Dumazet };
106046d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
106146d3ceabSEric Dumazet 
106273a6bab5SEric Dumazet static void tcp_tsq_write(struct sock *sk)
10636f458dfbSEric Dumazet {
10646f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
10656f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1066f9616c35SEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1067f9616c35SEric Dumazet 		struct tcp_sock *tp = tcp_sk(sk);
1068f9616c35SEric Dumazet 
1069f9616c35SEric Dumazet 		if (tp->lost_out > tp->retrans_out &&
107040570375SEric Dumazet 		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
10713a91d29fSKoichiro Den 			tcp_mstamp_refresh(tp);
1072f9616c35SEric Dumazet 			tcp_xmit_retransmit_queue(sk);
10733a91d29fSKoichiro Den 		}
1074f9616c35SEric Dumazet 
1075f9616c35SEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1076bf06200eSJohn Ogness 			       0, GFP_ATOMIC);
10776f458dfbSEric Dumazet 	}
1078f9616c35SEric Dumazet }
107973a6bab5SEric Dumazet 
108073a6bab5SEric Dumazet static void tcp_tsq_handler(struct sock *sk)
108173a6bab5SEric Dumazet {
108273a6bab5SEric Dumazet 	bh_lock_sock(sk);
108373a6bab5SEric Dumazet 	if (!sock_owned_by_user(sk))
108473a6bab5SEric Dumazet 		tcp_tsq_write(sk);
108573a6bab5SEric Dumazet 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
108673a6bab5SEric Dumazet 		sock_hold(sk);
108773a6bab5SEric Dumazet 	bh_unlock_sock(sk);
108873a6bab5SEric Dumazet }
108946d3ceabSEric Dumazet /*
10908e3bff96Sstephen hemminger  * One tasklet per cpu tries to send more skbs.
109146d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
10928e3bff96Sstephen hemminger  * transferring tsq->head because tcp_wfree() might
109346d3ceabSEric Dumazet  * interrupt us (non-NAPI drivers).
109446d3ceabSEric Dumazet  */
1095c6533ca8SAllen Pais static void tcp_tasklet_func(struct tasklet_struct *t)
109646d3ceabSEric Dumazet {
1097c6533ca8SAllen Pais 	struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
109846d3ceabSEric Dumazet 	LIST_HEAD(list);
109946d3ceabSEric Dumazet 	unsigned long flags;
110046d3ceabSEric Dumazet 	struct list_head *q, *n;
110146d3ceabSEric Dumazet 	struct tcp_sock *tp;
110246d3ceabSEric Dumazet 	struct sock *sk;
110346d3ceabSEric Dumazet 
110446d3ceabSEric Dumazet 	local_irq_save(flags);
110546d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
110646d3ceabSEric Dumazet 	local_irq_restore(flags);
110746d3ceabSEric Dumazet 
110846d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
110946d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
111046d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
111146d3ceabSEric Dumazet 
111246d3ceabSEric Dumazet 		sk = (struct sock *)tp;
11130a9648f1SEric Dumazet 		smp_mb__before_atomic();
11147aa5470cSEric Dumazet 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
11157aa5470cSEric Dumazet 
11166f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
111746d3ceabSEric Dumazet 		sk_free(sk);
111846d3ceabSEric Dumazet 	}
111946d3ceabSEric Dumazet }
112046d3ceabSEric Dumazet 
112140fc3423SEric Dumazet #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
112240fc3423SEric Dumazet 			  TCPF_WRITE_TIMER_DEFERRED |	\
112340fc3423SEric Dumazet 			  TCPF_DELACK_TIMER_DEFERRED |	\
1124133c4c0dSEric Dumazet 			  TCPF_MTU_REDUCED_DEFERRED |	\
1125133c4c0dSEric Dumazet 			  TCPF_ACK_DEFERRED)
112646d3ceabSEric Dumazet /**
112746d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
112846d3ceabSEric Dumazet  * @sk: socket
112946d3ceabSEric Dumazet  *
113046d3ceabSEric Dumazet  * called from release_sock() to perform protocol dependent
113146d3ceabSEric Dumazet  * actions before socket release.
113246d3ceabSEric Dumazet  */
113346d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
113446d3ceabSEric Dumazet {
1135fac30731SEric Dumazet 	unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1136fac30731SEric Dumazet 	unsigned long nflags;
113746d3ceabSEric Dumazet 
11386f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
11396f458dfbSEric Dumazet 	do {
11406f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
11416f458dfbSEric Dumazet 			return;
11426f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
1143fac30731SEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
11446f458dfbSEric Dumazet 
114573a6bab5SEric Dumazet 	if (flags & TCPF_TSQ_DEFERRED) {
114673a6bab5SEric Dumazet 		tcp_tsq_write(sk);
114773a6bab5SEric Dumazet 		__sock_put(sk);
114873a6bab5SEric Dumazet 	}
1149c3f9b018SEric Dumazet 
115040fc3423SEric Dumazet 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
11516f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
1152144d56e9SEric Dumazet 		__sock_put(sk);
1153144d56e9SEric Dumazet 	}
115440fc3423SEric Dumazet 	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
11556f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
1156144d56e9SEric Dumazet 		__sock_put(sk);
1157144d56e9SEric Dumazet 	}
115840fc3423SEric Dumazet 	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
11594fab9071SNeal Cardwell 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1160144d56e9SEric Dumazet 		__sock_put(sk);
1161144d56e9SEric Dumazet 	}
1162133c4c0dSEric Dumazet 	if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
1163133c4c0dSEric Dumazet 		tcp_send_ack(sk);
116446d3ceabSEric Dumazet }
116546d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
116646d3ceabSEric Dumazet 
116746d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
116846d3ceabSEric Dumazet {
116946d3ceabSEric Dumazet 	int i;
117046d3ceabSEric Dumazet 
117146d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
117246d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
117346d3ceabSEric Dumazet 
117446d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
1175c6533ca8SAllen Pais 		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
117646d3ceabSEric Dumazet 	}
117746d3ceabSEric Dumazet }
117846d3ceabSEric Dumazet 
117946d3ceabSEric Dumazet /*
118046d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
11818e3bff96Sstephen hemminger  * We can't xmit new skbs from this context, as we might already
118246d3ceabSEric Dumazet  * hold qdisc lock.
118346d3ceabSEric Dumazet  */
1184d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb)
118546d3ceabSEric Dumazet {
118646d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
118746d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1188408f0a6cSEric Dumazet 	unsigned long flags, nval, oval;
1189b548b17aSEric Dumazet 	struct tsq_tasklet *tsq;
1190b548b17aSEric Dumazet 	bool empty;
11919b462d02SEric Dumazet 
11929b462d02SEric Dumazet 	/* Keep one reference on sk_wmem_alloc.
11939b462d02SEric Dumazet 	 * Will be released by sk_free() from here or tcp_tasklet_func()
11949b462d02SEric Dumazet 	 */
119514afee4bSReshetova, Elena 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
11969b462d02SEric Dumazet 
11979b462d02SEric Dumazet 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
11989b462d02SEric Dumazet 	 * Wait until our queues (qdisc + devices) are drained.
11999b462d02SEric Dumazet 	 * This gives:
12009b462d02SEric Dumazet 	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
12019b462d02SEric Dumazet 	 * - chance for incoming ACK (processed by another cpu maybe)
12029b462d02SEric Dumazet 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
12039b462d02SEric Dumazet 	 */
120414afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
12059b462d02SEric Dumazet 		goto out;
120646d3ceabSEric Dumazet 
1207b548b17aSEric Dumazet 	oval = smp_load_acquire(&sk->sk_tsq_flags);
1208b548b17aSEric Dumazet 	do {
1209408f0a6cSEric Dumazet 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1210408f0a6cSEric Dumazet 			goto out;
1211408f0a6cSEric Dumazet 
121273a6bab5SEric Dumazet 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1213b548b17aSEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1214408f0a6cSEric Dumazet 
121546d3ceabSEric Dumazet 	/* queue this socket to tasklet queue */
121646d3ceabSEric Dumazet 	local_irq_save(flags);
1217903ceff7SChristoph Lameter 	tsq = this_cpu_ptr(&tsq_tasklet);
1218a9b204d1SEric Dumazet 	empty = list_empty(&tsq->head);
121946d3ceabSEric Dumazet 	list_add(&tp->tsq_node, &tsq->head);
1220a9b204d1SEric Dumazet 	if (empty)
122146d3ceabSEric Dumazet 		tasklet_schedule(&tsq->tasklet);
122246d3ceabSEric Dumazet 	local_irq_restore(flags);
12239b462d02SEric Dumazet 	return;
12249b462d02SEric Dumazet out:
12259b462d02SEric Dumazet 	sk_free(sk);
122646d3ceabSEric Dumazet }
122746d3ceabSEric Dumazet 
122873a6bab5SEric Dumazet /* Note: Called under soft irq.
122973a6bab5SEric Dumazet  * We can call the TCP stack right away, unless the socket is owned by the user.
1230218af599SEric Dumazet  */
1231218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1232218af599SEric Dumazet {
1233218af599SEric Dumazet 	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1234218af599SEric Dumazet 	struct sock *sk = (struct sock *)tp;
1235218af599SEric Dumazet 
123673a6bab5SEric Dumazet 	tcp_tsq_handler(sk);
123773a6bab5SEric Dumazet 	sock_put(sk);
1238218af599SEric Dumazet 
1239218af599SEric Dumazet 	return HRTIMER_NORESTART;
1240218af599SEric Dumazet }
1241218af599SEric Dumazet 
1242a7a25630SEric Dumazet static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1243a7a25630SEric Dumazet 				      u64 prior_wstamp)
1244e2080072SEric Dumazet {
1245ab408b6dSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1246ab408b6dSEric Dumazet 
1247ab408b6dSEric Dumazet 	if (sk->sk_pacing_status != SK_PACING_NONE) {
124828b24f90SEric Dumazet 		unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
1249ab408b6dSEric Dumazet 
1250ab408b6dSEric Dumazet 		/* Original sch_fq does not pace first 10 MSS
1251ab408b6dSEric Dumazet 		 * Note that tp->data_segs_out overflows after 2^32 packets,
1252ab408b6dSEric Dumazet 		 * this is a minor annoyance.
1253ab408b6dSEric Dumazet 		 */
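		/* Worked example (illustrative numbers): a 1500-byte skb at
		 * rate = 1,250,000 B/s (10 Mbit/s) gives
		 * len_ns = 1500 * NSEC_PER_SEC / rate = 1,200,000 ns, so the
		 * next departure advances by up to 1.2 ms, minus the OS
		 * jitter credit capped at len_ns / 2 below.
		 */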
125476a9ebe8SEric Dumazet 		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1255a7a25630SEric Dumazet 			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1256a7a25630SEric Dumazet 			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1257a7a25630SEric Dumazet 
1258a7a25630SEric Dumazet 			/* take into account OS jitter */
1259a7a25630SEric Dumazet 			len_ns -= min_t(u64, len_ns / 2, credit);
1260a7a25630SEric Dumazet 			tp->tcp_wstamp_ns += len_ns;
1261ab408b6dSEric Dumazet 		}
1262ab408b6dSEric Dumazet 	}
1263e2080072SEric Dumazet 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1264e2080072SEric Dumazet }
1265e2080072SEric Dumazet 
126605e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
126705e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1268dd2e0b86SEric Dumazet INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
126905e22e83SEric Dumazet 
12701da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued by
12711da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
12721da177e4SLinus Torvalds  * transmission and possible later retransmissions.
12731da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
12741da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
12751da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
12761da177e4SLinus Torvalds  * device.
12771da177e4SLinus Torvalds  *
12781da177e4SLinus Torvalds  * We are working here with either a clone of the original
12791da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
12801da177e4SLinus Torvalds  */
12812987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
12822987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
12831da177e4SLinus Torvalds {
12846687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1285dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1286dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1287dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
128833ad798cSAdam Langley 	struct tcp_out_options opts;
128995c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
12908c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
12911e03d32bSDmitry Safonov 	struct tcp_key key;
12921da177e4SLinus Torvalds 	struct tcphdr *th;
1293a7a25630SEric Dumazet 	u64 prior_wstamp;
12941da177e4SLinus Torvalds 	int err;
12951da177e4SLinus Torvalds 
1296dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
12976f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
12987f12422cSYuchung Cheng 	prior_wstamp = tp->tcp_wstamp_ns;
12997f12422cSYuchung Cheng 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1300a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
1301ccdbb6e9SEric Dumazet 	if (clone_it) {
13028c72c65bSEric Dumazet 		oskb = skb;
1303e2080072SEric Dumazet 
1304e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1305e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1306e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1307dfb4b9dcSDavid S. Miller 			else
1308e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1309e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1310e2080072SEric Dumazet 
1311dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1312dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1313b738a185SEric Dumazet 		/* retransmit skbs might have a non-zero value in skb->dev
1314b738a185SEric Dumazet 		 * because skb->dev is aliased with skb->rbnode.rb_left
1315b738a185SEric Dumazet 		 */
1316b738a185SEric Dumazet 		skb->dev = NULL;
1317dfb4b9dcSDavid S. Miller 	}
13185f6188a8SEric Dumazet 
1319dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1320dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
132133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
13221da177e4SLinus Torvalds 
13231e03d32bSDmitry Safonov 	tcp_get_current_key(sk, &key);
1324051ba674SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
13251e03d32bSDmitry Safonov 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &key);
1326051ba674SEric Dumazet 	} else {
13271e03d32bSDmitry Safonov 		tcp_options_size = tcp_established_options(sk, skb, &opts, &key);
1328051ba674SEric Dumazet 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1329051ba674SEric Dumazet 		 * at the receiver: this slightly improves GRO performance.
1330051ba674SEric Dumazet 		 * Note that we do not force the PSH flag for non GSO packets,
1331051ba674SEric Dumazet 		 * because they might be sent under high congestion events,
1332051ba674SEric Dumazet 		 * and in this case it is better to delay the delivery of 1-MSS
1333051ba674SEric Dumazet 		 * packets and thus the corresponding ACK packet that would
1334051ba674SEric Dumazet 		 * release the following packet.
1335051ba674SEric Dumazet 		 */
1336051ba674SEric Dumazet 		if (tcp_skb_pcount(skb) > 1)
1337051ba674SEric Dumazet 			tcb->tcp_flags |= TCPHDR_PSH;
1338051ba674SEric Dumazet 	}
133933ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
13401da177e4SLinus Torvalds 
1341726e9e8bSEric Dumazet 	/* We set skb->ooo_okay to one if this packet can select
1342726e9e8bSEric Dumazet 	 * a different TX queue than prior packets of this flow,
1343726e9e8bSEric Dumazet 	 * to avoid self-inflicted reorders.
1344726e9e8bSEric Dumazet 	 * The 'other' queue decision is based on current cpu number
1345726e9e8bSEric Dumazet 	 * if XPS is enabled, or sk->sk_txhash otherwise.
1346726e9e8bSEric Dumazet 	 * We can switch to another (and better) queue if:
1347726e9e8bSEric Dumazet 	 * 1) No packet with payload is in qdisc/device queues.
1348726e9e8bSEric Dumazet 	 *    Delays in TX completion can defeat the test
1349726e9e8bSEric Dumazet 	 *    even if packets were already sent.
1350726e9e8bSEric Dumazet 	 * 2) Or rtx queue is empty.
1351726e9e8bSEric Dumazet 	 *    This mitigates above case if ACK packets for
1352726e9e8bSEric Dumazet 	 *    all prior packets were already processed.
1353547669d4SEric Dumazet 	 */
1354726e9e8bSEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) ||
1355726e9e8bSEric Dumazet 			tcp_rtx_queue_empty(sk);
13561da177e4SLinus Torvalds 
135738ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
135838ab52e8SEric Dumazet 	 * this might cause drops if the packet is looped back:
135938ab52e8SEric Dumazet 	 * the other socket might not have SOCK_MEMALLOC.
136038ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
136138ab52e8SEric Dumazet 	 */
136238ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
136338ab52e8SEric Dumazet 
1364aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1365aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
136646d3ceabSEric Dumazet 
136746d3ceabSEric Dumazet 	skb_orphan(skb);
136846d3ceabSEric Dumazet 	skb->sk = sk;
13691d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
137014afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
13711da177e4SLinus Torvalds 
1372eb44ad4eSEric Dumazet 	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
1373c3a2e837SJulian Anastasov 
13741da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1375ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1376c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1377c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
13781da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
13792987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1380df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
13814de075e0SEric Dumazet 					tcb->tcp_flags);
1382dfb4b9dcSDavid S. Miller 
13831da177e4SLinus Torvalds 	th->check		= 0;
13841da177e4SLinus Torvalds 	th->urg_ptr		= 0;
13851da177e4SLinus Torvalds 
138633f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below snd_una win probe */
13877691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
13887691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
13891da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
13901da177e4SLinus Torvalds 			th->urg = 1;
13917691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
13920eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
13937691367dSHerbert Xu 			th->urg = 1;
13947691367dSHerbert Xu 		}
13951da177e4SLinus Torvalds 	}
13961da177e4SLinus Torvalds 
139751466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1398ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1399ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1400ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1401ea1627c2SEric Dumazet 	} else {
1402ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1403ea1627c2SEric Dumazet 		 * is never scaled.
1404ea1627c2SEric Dumazet 		 */
1405ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1406ea1627c2SEric Dumazet 	}
1407fa3fe2b1SFlorian Westphal 
140806b22ef2SDmitry Safonov 	tcp_options_write(th, tp, NULL, &opts, &key);
1409fa3fe2b1SFlorian Westphal 
14101e03d32bSDmitry Safonov 	if (tcp_key_is_md5(&key)) {
1411cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1412cfb6eeb4SYOSHIFUJI Hideaki 		/* Calculate the MD5 hash, as we have all we need now */
1413aba54656SEric Dumazet 		sk_gso_disable(sk);
1414bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
14151e03d32bSDmitry Safonov 					       key.md5_key, sk, skb);
1416cfb6eeb4SYOSHIFUJI Hideaki #endif
14171e03d32bSDmitry Safonov 	} else if (tcp_key_is_ao(&key)) {
14181e03d32bSDmitry Safonov 		int err;
14191e03d32bSDmitry Safonov 
14201e03d32bSDmitry Safonov 		err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th,
14211e03d32bSDmitry Safonov 					  opts.hash_location);
14221e03d32bSDmitry Safonov 		if (err) {
14231e03d32bSDmitry Safonov 			kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
14241e03d32bSDmitry Safonov 			return -ENOMEM;
14251e03d32bSDmitry Safonov 		}
14261e03d32bSDmitry Safonov 	}
1427cfb6eeb4SYOSHIFUJI Hideaki 
1428331fca43SMartin KaFai Lau 	/* BPF prog is the last one writing header option */
1429331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1430331fca43SMartin KaFai Lau 
1431dd2e0b86SEric Dumazet 	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1432dd2e0b86SEric Dumazet 			   tcp_v6_send_check, tcp_v4_send_check,
1433dd2e0b86SEric Dumazet 			   sk, skb);
14341da177e4SLinus Torvalds 
14354de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1436059217c1SNeal Cardwell 		tcp_event_ack_sent(sk, rcv_nxt);
14371da177e4SLinus Torvalds 
1438a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1439cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1440a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1441ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1442a44d6eacSMartin KaFai Lau 	}
14431da177e4SLinus Torvalds 
1444bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1445aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1446aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
14471da177e4SLinus Torvalds 
14482efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
14490ae5b43dSYuchung Cheng 	skb_set_hash_from_sk(skb, sk);
1450f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1451cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1452f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1453cd7d8498SEric Dumazet 
1454d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1455971f10ecSEric Dumazet 
1456971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1457971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1458971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1459971f10ecSEric Dumazet 
1460a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1461a842fe14SEric Dumazet 
146205e22e83SEric Dumazet 	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
146305e22e83SEric Dumazet 				 inet6_csk_xmit, ip_queue_xmit,
146405e22e83SEric Dumazet 				 sk, skb, &inet->cork.fl);
14657faee5c0SEric Dumazet 
14668c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
14675ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
14688c72c65bSEric Dumazet 		err = net_xmit_eval(err);
14698c72c65bSEric Dumazet 	}
1470fc225799SEric Dumazet 	if (!err && oskb) {
1471a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1472fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1473fc225799SEric Dumazet 	}
14748c72c65bSEric Dumazet 	return err;
14751da177e4SLinus Torvalds }
14761da177e4SLinus Torvalds 
14772987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
14782987babbSYuchung Cheng 			    gfp_t gfp_mask)
14792987babbSYuchung Cheng {
14802987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
14812987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
14822987babbSYuchung Cheng }
14832987babbSYuchung Cheng 
148467edfef7SAndi Kleen /* This routine just queues the buffer for sending.
14851da177e4SLinus Torvalds  *
14861da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
14871da177e4SLinus Torvalds  * otherwise socket can stall.
14881da177e4SLinus Torvalds  */
14891da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
14901da177e4SLinus Torvalds {
14911da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
14921da177e4SLinus Torvalds 
14931da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
14940f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1495f4a775d1SEric Dumazet 	__skb_header_release(skb);
1496fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
1497ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
14983ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
14991da177e4SLinus Torvalds }
15001da177e4SLinus Torvalds 
150167edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
15025bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1503f6302d1dSDavid S. Miller {
15044a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1505f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1506f6302d1dSDavid S. Miller 		 * non-TSO case.
1507f6302d1dSDavid S. Miller 		 */
1508cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1509f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1510f6302d1dSDavid S. Miller 	} else {
1511cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1512f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
15131da177e4SLinus Torvalds 	}
15141da177e4SLinus Torvalds }
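/* Illustrative numbers for the helper above: with mss_now = 1460 and
 * skb->len = 7300, tcp_skb_pcount() becomes DIV_ROUND_UP(7300, 1460) = 5
 * and tcp_gso_size is set to 1460; a 1000-byte skb keeps pcount 1 and
 * gso_size 0.
 */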
15151da177e4SLinus Torvalds 
1516797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed; we need to do various
1517797108d1SIlpo Järvinen  * tweaks to fix the counters.
1518797108d1SIlpo Järvinen  */
1519cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1520797108d1SIlpo Järvinen {
1521797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1522797108d1SIlpo Järvinen 
1523797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1524797108d1SIlpo Järvinen 
1525797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1526797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1527797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1528797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1529797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1530797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1531797108d1SIlpo Järvinen 
1532797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1533797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1534797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1535797108d1SIlpo Järvinen 
1536797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1537797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1538713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1539797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1540797108d1SIlpo Järvinen 
1541797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1542797108d1SIlpo Järvinen }
1543797108d1SIlpo Järvinen 
15440a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
15450a2cf20cSSoheil Hassas Yeganeh {
15460a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
15470a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
15480a2cf20cSSoheil Hassas Yeganeh }
15490a2cf20cSSoheil Hassas Yeganeh 
1550490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1551490cc7d0SWillem de Bruijn {
1552490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1553490cc7d0SWillem de Bruijn 
15540a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1555490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1556490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1557490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1558490cc7d0SWillem de Bruijn 
1559490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1560490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1561490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1562b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1563b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1564490cc7d0SWillem de Bruijn 	}
1565490cc7d0SWillem de Bruijn }
1566490cc7d0SWillem de Bruijn 
1567a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1568a166140eSMartin KaFai Lau {
1569a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1570a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1571a166140eSMartin KaFai Lau }
1572a166140eSMartin KaFai Lau 
157375c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
157475c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
157575c119afSEric Dumazet 					 struct sk_buff *buff,
157675c119afSEric Dumazet 					 struct sock *sk,
157775c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
157875c119afSEric Dumazet {
157975c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
158075c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
158175c119afSEric Dumazet 	else
158275c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
158375c119afSEric Dumazet }
158475c119afSEric Dumazet 
15851da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
15861da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
15871da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
15881da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
15891da177e4SLinus Torvalds  */
159075c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
159175c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
15926cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
15931da177e4SLinus Torvalds {
15941da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
15951da177e4SLinus Torvalds 	struct sk_buff *buff;
1596b4a24397SEric Dumazet 	int old_factor;
1597b617158dSEric Dumazet 	long limit;
1598b60b49eaSHerbert Xu 	int nlen;
15999ce01461SIlpo Järvinen 	u8 flags;
16001da177e4SLinus Torvalds 
16012fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
16022fceec13SIlpo Järvinen 		return -EINVAL;
16036a438bbeSStephen Hemminger 
1604b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
16051da177e4SLinus Torvalds 
1606b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1607b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1608b617158dSEric Dumazet 	 * SO_SNDBUF values.
1609b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1610b617158dSEric Dumazet 	 */
16117c4e983cSAlexander Duyck 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1612b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1613b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1614b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1615b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1616f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1617f070ef2aSEric Dumazet 		return -ENOMEM;
1618f070ef2aSEric Dumazet 	}
1619f070ef2aSEric Dumazet 
1620c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, gfp))
16211da177e4SLinus Torvalds 		return -ENOMEM;
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
16245882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
162551456b29SIan Morris 	if (!buff)
16261da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
162741477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
16285a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
1629ef5cb973SHerbert Xu 
1630ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
16313ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1632b4a24397SEric Dumazet 	nlen = skb->len - len;
1633b60b49eaSHerbert Xu 	buff->truesize += nlen;
1634b60b49eaSHerbert Xu 	skb->truesize -= nlen;
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
16371da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
16381da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
16391da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
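	/* Illustrative split (assumed numbers): if skb covered [1000, 4000)
	 * and len = 1500, buff now covers [2500, 4000) and skb is trimmed
	 * to [1000, 2500).
	 */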
16401da177e4SLinus Torvalds 
16411da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
16424de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
16434de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
16444de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1645e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1646a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	skb_split(skb, buff, len);
16491da177e4SLinus Torvalds 
1650a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(buff, skb->tstamp, true);
1651490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
16521da177e4SLinus Torvalds 
16536475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
16546475be16SDavid S. Miller 
16551da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
16565bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
16575bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
16581da177e4SLinus Torvalds 
1659b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1660b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1661b9f64820SYuchung Cheng 
16626475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
16636475be16SDavid S. Miller 	 * adjust the various packet counters.
16646475be16SDavid S. Miller 	 */
1665cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
16666475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
16676475be16SDavid S. Miller 			tcp_skb_pcount(buff);
16681da177e4SLinus Torvalds 
1669797108d1SIlpo Järvinen 		if (diff)
1670797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
16711da177e4SLinus Torvalds 	}
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1674f4a775d1SEric Dumazet 	__skb_header_release(buff);
167575c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1676f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1677e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	return 0;
16801da177e4SLinus Torvalds }
16811da177e4SLinus Torvalds 
1682f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1683f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
16841da177e4SLinus Torvalds  */
16857162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
16861da177e4SLinus Torvalds {
16877b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
16881da177e4SLinus Torvalds 	int i, k, eat;
16891da177e4SLinus Torvalds 
1690b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
16911da177e4SLinus Torvalds 	eat = len;
16921da177e4SLinus Torvalds 	k = 0;
16937b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
16947b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
16957b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
16969e903e08SEric Dumazet 
16979e903e08SEric Dumazet 		if (size <= eat) {
1698aff65da0SIan Campbell 			skb_frag_unref(skb, i);
16999e903e08SEric Dumazet 			eat -= size;
17001da177e4SLinus Torvalds 		} else {
17017b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
17021da177e4SLinus Torvalds 			if (eat) {
1703b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[k], eat);
17047b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
17051da177e4SLinus Torvalds 				eat = 0;
17061da177e4SLinus Torvalds 			}
17071da177e4SLinus Torvalds 			k++;
17081da177e4SLinus Torvalds 		}
17091da177e4SLinus Torvalds 	}
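	/* Illustrative pass (assumed frag sizes): trimming len = 5000 from two
	 * 4096-byte frags drops the first frag entirely (eat becomes 904) and
	 * advances the second frag's offset by 904, shrinking it to 3192.
	 */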
17107b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
17111da177e4SLinus Torvalds 
17121da177e4SLinus Torvalds 	skb->data_len -= len;
17131da177e4SLinus Torvalds 	skb->len = skb->data_len;
17147162fb24SEric Dumazet 	return len;
17151da177e4SLinus Torvalds }
17161da177e4SLinus Torvalds 
171767edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
17181da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
17191da177e4SLinus Torvalds {
17207162fb24SEric Dumazet 	u32 delta_truesize;
17217162fb24SEric Dumazet 
1722c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
17231da177e4SLinus Torvalds 		return -ENOMEM;
17241da177e4SLinus Torvalds 
17257162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
17261da177e4SLinus Torvalds 
17271da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
17281da177e4SLinus Torvalds 
17297162fb24SEric Dumazet 	skb->truesize	   -= delta_truesize;
1730ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, -delta_truesize);
17319b65b17dSTalal Ahmad 	if (!skb_zcopy_pure(skb))
17327162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
17331da177e4SLinus Torvalds 
17345b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
17351da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
17365bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds 	return 0;
17391da177e4SLinus Torvalds }
17401da177e4SLinus Torvalds 
17411b63edd6SYuchung Cheng /* Calculate MSS, not accounting for any TCP options.  */
17421b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
17435d424d5aSJohn Heffner {
1744cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1745cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
17465d424d5aSJohn Heffner 	int mss_now;
17475d424d5aSJohn Heffner 
17485d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
17495d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
17505d424d5aSJohn Heffner 	 */
17515d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
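	/* For illustration (assumed plain IPv4, no extension headers):
	 * pmtu 1500 - 20 (net_header_len) - 20 (tcphdr) gives mss_now 1460,
	 * before the clamping and icsk_ext_hdr_len adjustments below.
	 */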
17525d424d5aSJohn Heffner 
17535d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
17545d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
17555d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
17565d424d5aSJohn Heffner 
17575d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
17585d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
17595d424d5aSJohn Heffner 
17605d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
176178eb166cSKuniyuki Iwashima 	mss_now = max(mss_now,
176278eb166cSKuniyuki Iwashima 		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
17635d424d5aSJohn Heffner 	return mss_now;
17645d424d5aSJohn Heffner }
17655d424d5aSJohn Heffner 
17661b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
17671b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
17681b63edd6SYuchung Cheng {
17691b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
17701b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
17711b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
17721b63edd6SYuchung Cheng }
1773c7bb4b89SEric Dumazet EXPORT_SYMBOL(tcp_mtu_to_mss);
17741b63edd6SYuchung Cheng 
17755d424d5aSJohn Heffner /* Inverse of above */
177667469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
17775d424d5aSJohn Heffner {
1778cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1779cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
17805d424d5aSJohn Heffner 
1781e57a3447SYan Zhai 	return mss +
17825d424d5aSJohn Heffner 	      tp->tcp_header_len +
17835d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
17845d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
17855d424d5aSJohn Heffner }
1786556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
17875d424d5aSJohn Heffner 
178867edfef7SAndi Kleen /* MTU probing init per socket */
17895d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
17905d424d5aSJohn Heffner {
17915d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
17925d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1793b0f9ca53SFan Du 	struct net *net = sock_net(sk);
17945d424d5aSJohn Heffner 
1795f47d00e0SKuniyuki Iwashima 	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
17965d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
17975d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
179888d78bc0SKuniyuki Iwashima 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
17995d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
180005cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1801c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
18025d424d5aSJohn Heffner }
18034bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
18045d424d5aSJohn Heffner 
18051da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
18061da177e4SLinus Torvalds 
18071da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
18081da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
18091da177e4SLinus Torvalds 
18101da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1811caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
18121da177e4SLinus Torvalds    It also does not include TCP options.
18131da177e4SLinus Torvalds 
1814d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
18151da177e4SLinus Torvalds 
18161da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
18171da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
18181da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
18191da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
18201da177e4SLinus Torvalds 
18211da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
18221da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
18231da177e4SLinus Torvalds 
1824d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1825d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
18261da177e4SLinus Torvalds  */
18271da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
18281da177e4SLinus Torvalds {
18291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1830d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
18315d424d5aSJohn Heffner 	int mss_now;
18321da177e4SLinus Torvalds 
18335d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
18345d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
18351da177e4SLinus Torvalds 
18365d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1837409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
18381da177e4SLinus Torvalds 
18391da177e4SLinus Torvalds 	/* And store cached results */
1840d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
18415d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
18425d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1843c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
18441da177e4SLinus Torvalds 
18451da177e4SLinus Torvalds 	return mss_now;
18461da177e4SLinus Torvalds }
18474bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
18481da177e4SLinus Torvalds 
18491da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
18501da177e4SLinus Torvalds  * and even PMTU discovery events into account.
18511da177e4SLinus Torvalds  */
18520c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
18531da177e4SLinus Torvalds {
1854cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1855cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1856c1b4a7e6SDavid S. Miller 	u32 mss_now;
185795c96174SEric Dumazet 	unsigned int header_len;
185833ad798cSAdam Langley 	struct tcp_out_options opts;
18591e03d32bSDmitry Safonov 	struct tcp_key key;
18601da177e4SLinus Torvalds 
1861c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1862c1b4a7e6SDavid S. Miller 
18631da177e4SLinus Torvalds 	if (dst) {
18641da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1865d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
18661da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
18671da177e4SLinus Torvalds 	}
18681e03d32bSDmitry Safonov 	tcp_get_current_key(sk, &key);
18691e03d32bSDmitry Safonov 	header_len = tcp_established_options(sk, NULL, &opts, &key) +
187033ad798cSAdam Langley 		     sizeof(struct tcphdr);
187133ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
187233ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
187333ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
187433ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
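	/* Illustrative delta (assuming the usual option sizes): one pending
	 * SACK block not covered by tp->tcp_header_len adds 4 (aligned base)
	 * + 8 (per block) = 12 bytes to header_len, so mss_now shrinks by 12
	 * below.
	 */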
187533ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
187633ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
187733ad798cSAdam Langley 		mss_now -= delta;
187833ad798cSAdam Langley 	}
1879cfb6eeb4SYOSHIFUJI Hideaki 
18801da177e4SLinus Torvalds 	return mss_now;
18811da177e4SLinus Torvalds }
18821da177e4SLinus Torvalds 
188386fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
188486fd14adSWeiping Pan  * As additional protections, we do not touch cwnd in retransmission phases,
188586fd14adSWeiping Pan  * and if the application hit its sndbuf limit recently.
188686fd14adSWeiping Pan  */
188786fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1888a762a980SDavid S. Miller {
18899e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1890a762a980SDavid S. Miller 
189186fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
189286fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
189386fd14adSWeiping Pan 		/* Limited by application or receiver window. */
189486fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
189586fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
189640570375SEric Dumazet 		if (win_used < tcp_snd_cwnd(tp)) {
189786fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
189840570375SEric Dumazet 			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
189986fd14adSWeiping Pan 		}
190086fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
190186fd14adSWeiping Pan 	}
1902c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
190386fd14adSWeiping Pan }
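/* Illustrative effect of the averaging above: with tcp_snd_cwnd() == 100
 * and win_used == 20, cwnd is set to (100 + 20) >> 1 = 60, gradually
 * decaying an under-used window instead of resetting it.
 */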
190486fd14adSWeiping Pan 
1905ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1906a762a980SDavid S. Miller {
19071b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1908a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1909a762a980SDavid S. Miller 
1910f4ce91ceSNeal Cardwell 	/* Track the strongest available signal of the degree to which the cwnd
1911f4ce91ceSNeal Cardwell 	 * is fully utilized. If cwnd-limited then remember that fact for the
1912f4ce91ceSNeal Cardwell 	 * current window. If not cwnd-limited then track the maximum number of
1913f4ce91ceSNeal Cardwell 	 * outstanding packets in the current window. (If cwnd-limited then we
1914f4ce91ceSNeal Cardwell 	 * chose to not update tp->max_packets_out to avoid an extra else
1915f4ce91ceSNeal Cardwell 	 * clause with no functional impact.)
1916ca8a2263SNeal Cardwell 	 */
1917f4ce91ceSNeal Cardwell 	if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1918f4ce91ceSNeal Cardwell 	    is_cwnd_limited ||
1919f4ce91ceSNeal Cardwell 	    (!tp->is_cwnd_limited &&
1920f4ce91ceSNeal Cardwell 	     tp->packets_out > tp->max_packets_out)) {
1921ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1922f4ce91ceSNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1923f4ce91ceSNeal Cardwell 		tp->cwnd_usage_seq = tp->snd_nxt;
1924ca8a2263SNeal Cardwell 	}
1925e114a710SEric Dumazet 
192624901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1927a762a980SDavid S. Miller 		/* Network is fed fully. */
1928a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1929c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1930a762a980SDavid S. Miller 	} else {
1931a762a980SDavid S. Miller 		/* Network starves. */
1932a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1933a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1934a762a980SDavid S. Miller 
19354845b571SKuniyuki Iwashima 		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
1936c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
19371b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1938a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1939b0f71bd3SFrancis Yan 
1940b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1941b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1942b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1943b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
194475c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1945b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1946b0f71bd3SFrancis Yan 		 */
194775c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1948b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1949b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1950b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1951a762a980SDavid S. Miller 	}
1952a762a980SDavid S. Miller }
1953a762a980SDavid S. Miller 
1954d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1955d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1956d4589926SEric Dumazet {
1957d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1958d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1959d4589926SEric Dumazet }
1960d4589926SEric Dumazet 
1961d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1962d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1963d4589926SEric Dumazet  * The test is really :
1964d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1965d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1966d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1967d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
19680e3a4803SIlpo Järvinen  */
1969d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1970d4589926SEric Dumazet 				const struct sk_buff *skb)
1971d4589926SEric Dumazet {
1972d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1973d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1974d4589926SEric Dumazet }
1975d4589926SEric Dumazet 
1976d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1977d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1978d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1979d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1980d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1981d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1982d4589926SEric Dumazet  */
1983d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1984cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1985d4589926SEric Dumazet {
1986d4589926SEric Dumazet 	return partial &&
1987d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1988d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1989d4589926SEric Dumazet }
1990605ad7f1SEric Dumazet 
1991605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
199265466904SEric Dumazet  * depending on current pacing rate, and how close the peer is.
199365466904SEric Dumazet  *
199465466904SEric Dumazet  * Rationale is:
199565466904SEric Dumazet  * - For close peers, we rather send bigger packets to reduce
199665466904SEric Dumazet  * - For close peers, we'd rather send bigger packets to reduce
199765466904SEric Dumazet  * - For long distance/rtt flows, we would like to get ACK clocking
199865466904SEric Dumazet  *   with 1 ACK per ms.
199965466904SEric Dumazet  *
200065466904SEric Dumazet  * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
200165466904SEric Dumazet  * in bigger TSO bursts. We cut the RTT-based allowance in half
200265466904SEric Dumazet  * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
200365466904SEric Dumazet  * is below 1500 bytes after 6 * ~500 usec = 3ms.
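 *
 * E.g. assuming the default tcp_tso_rtt_log of 9 and a 64KB sk_gso_max_size:
 * a local peer with min_rtt = 100 us gets the full 64KB added (r = 0),
 * while min_rtt = 3.2 ms gives r = 6, adding only 65536 >> 6 = 1024 bytes.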
2004605ad7f1SEric Dumazet  */
2005dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
20061b3878caSNeal Cardwell 			    int min_tso_segs)
2007605ad7f1SEric Dumazet {
200865466904SEric Dumazet 	unsigned long bytes;
200965466904SEric Dumazet 	u32 r;
2010605ad7f1SEric Dumazet 
201128b24f90SEric Dumazet 	bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
2012605ad7f1SEric Dumazet 
20132455e61bSKuniyuki Iwashima 	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
201465466904SEric Dumazet 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
201565466904SEric Dumazet 		bytes += sk->sk_gso_max_size >> r;
2016605ad7f1SEric Dumazet 
201765466904SEric Dumazet 	bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
201865466904SEric Dumazet 
201965466904SEric Dumazet 	return max_t(u32, bytes / mss_now, min_tso_segs);
2020605ad7f1SEric Dumazet }
2021605ad7f1SEric Dumazet 
2022ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
2023ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
2024ed6e7268SNeal Cardwell  */
2025ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
2026ed6e7268SNeal Cardwell {
2027ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2028dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
2029ed6e7268SNeal Cardwell 
2030dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
2031dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
2032e0bb4ab9SKuniyuki Iwashima 			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
2033dcb8c9b4SEric Dumazet 
2034dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
2035350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
2036ed6e7268SNeal Cardwell }
2037ed6e7268SNeal Cardwell 
2038d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
2039d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
2040d4589926SEric Dumazet 					const struct sk_buff *skb,
2041d4589926SEric Dumazet 					unsigned int mss_now,
2042d4589926SEric Dumazet 					unsigned int max_segs,
2043d4589926SEric Dumazet 					int nonagle)
2044c1b4a7e6SDavid S. Miller {
2045cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2046d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
2047c1b4a7e6SDavid S. Miller 
204890840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
20491485348dSBen Hutchings 	max_len = mss_now * max_segs;
20500e3a4803SIlpo Järvinen 
20511485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
20521485348dSBen Hutchings 		return max_len;
20530e3a4803SIlpo Järvinen 
20545ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
20555ea3a748SIlpo Järvinen 
20561485348dSBen Hutchings 	if (max_len <= needed)
20571485348dSBen Hutchings 		return max_len;
20580e3a4803SIlpo Järvinen 
2059d4589926SEric Dumazet 	partial = needed % mss_now;
2060d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
2061d4589926SEric Dumazet 	 * to include this last segment in this skb.
2062d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
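	 * E.g. with mss_now = 1000 and needed = 3500: partial = 500, and if
	 * Nagle defers the sub-mss tail we return 3000 instead of 3500.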
2063d4589926SEric Dumazet 	 */
2064cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
2065d4589926SEric Dumazet 		return needed - partial;
2066d4589926SEric Dumazet 
2067d4589926SEric Dumazet 	return needed;
2068c1b4a7e6SDavid S. Miller }
2069c1b4a7e6SDavid S. Miller 
2070c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
2071c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
2072c1b4a7e6SDavid S. Miller  */
2073cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2074cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
2075c1b4a7e6SDavid S. Miller {
2076d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
2077c1b4a7e6SDavid S. Miller 
2078c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
20794de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
20804de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
2081c1b4a7e6SDavid S. Miller 		return 1;
2082c1b4a7e6SDavid S. Miller 
2083c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
208440570375SEric Dumazet 	cwnd = tcp_snd_cwnd(tp);
2085d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
2086c1b4a7e6SDavid S. Miller 		return 0;
2087d649a7a8SEric Dumazet 
2088d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
2089d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
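	 * E.g. cwnd = 10, in_flight = 7: halfcwnd = 5, so we may send
	 * min(5, 10 - 7) = 3 more segments now.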
2090d649a7a8SEric Dumazet 	 */
2091d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
2092d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
2093c1b4a7e6SDavid S. Miller }
2094c1b4a7e6SDavid S. Miller 
2095b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
209667edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
2097c1b4a7e6SDavid S. Miller  * SKB onto the wire.
2098c1b4a7e6SDavid S. Miller  */
20995bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2100c1b4a7e6SDavid S. Miller {
2101c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
2102c1b4a7e6SDavid S. Miller 
2103f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
21045bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
2105c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
2106c1b4a7e6SDavid S. Miller 	}
2107c1b4a7e6SDavid S. Miller 	return tso_segs;
2108c1b4a7e6SDavid S. Miller }
2109c1b4a7e6SDavid S. Miller 
2110c1b4a7e6SDavid S. Miller 
2111a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
2112c1b4a7e6SDavid S. Miller  * sent now.
2113c1b4a7e6SDavid S. Miller  */
2114a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2115c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
2116c1b4a7e6SDavid S. Miller {
2117c1b4a7e6SDavid S. Miller 	/* Nagle rule does not apply to frames which sit in the middle of the
2118c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
2119c1b4a7e6SDavid S. Miller 	 *
2120c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
2121c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
2122c1b4a7e6SDavid S. Miller 	 */
2123c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
2124a2a385d6SEric Dumazet 		return true;
2125c1b4a7e6SDavid S. Miller 
21269b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
21279b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2128a2a385d6SEric Dumazet 		return true;
2129c1b4a7e6SDavid S. Miller 
2130cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2131a2a385d6SEric Dumazet 		return true;
2132c1b4a7e6SDavid S. Miller 
2133a2a385d6SEric Dumazet 	return false;
2134c1b4a7e6SDavid S. Miller }
2135c1b4a7e6SDavid S. Miller 
2136c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
2137a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2138a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
2139056834d9SIlpo Järvinen 			     unsigned int cur_mss)
2140c1b4a7e6SDavid S. Miller {
2141c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2142c1b4a7e6SDavid S. Miller 
2143c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
2144c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2145c1b4a7e6SDavid S. Miller 
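	/* E.g. a 5000-byte skb at seq 1000 with cur_mss = 1000 fits as soon
	 * as tcp_wnd_end(tp) reaches 2000, enough for its first segment.
	 */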
214690840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
2147c1b4a7e6SDavid S. Miller }
2148c1b4a7e6SDavid S. Miller 
2149c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2150c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
2151c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
2152c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
2153c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
2154c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
2155c1b4a7e6SDavid S. Miller  */
215656483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2157c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
2158c1b4a7e6SDavid S. Miller {
2159c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
216056483341SEric Dumazet 	struct sk_buff *buff;
21619ce01461SIlpo Järvinen 	u8 flags;
2162c1b4a7e6SDavid S. Miller 
2163c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
2164b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len);
2165c1b4a7e6SDavid S. Miller 
21665882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
216751456b29SIan Morris 	if (unlikely(!buff))
2168c1b4a7e6SDavid S. Miller 		return -ENOMEM;
216941477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
21705a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
2171c1b4a7e6SDavid S. Miller 
2172ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
21733ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
2174b60b49eaSHerbert Xu 	buff->truesize += nlen;
2175c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
2176c1b4a7e6SDavid S. Miller 
2177c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
2178c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2179c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2180c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2181c1b4a7e6SDavid S. Miller 
2182c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
21834de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
21844de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
21854de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
2186c1b4a7e6SDavid S. Miller 
2187a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
2188a166140eSMartin KaFai Lau 
2189c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
2190490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
2191c1b4a7e6SDavid S. Miller 
2192c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
21935bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
21945bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
2195c1b4a7e6SDavid S. Miller 
2196c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
2197f4a775d1SEric Dumazet 	__skb_header_release(buff);
219856483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2199c1b4a7e6SDavid S. Miller 
2200c1b4a7e6SDavid S. Miller 	return 0;
2201c1b4a7e6SDavid S. Miller }
2202c1b4a7e6SDavid S. Miller 
2203c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
2204c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2205c1b4a7e6SDavid S. Miller  *
2206c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
2207c1b4a7e6SDavid S. Miller  */
2208ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2209f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
2210f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
2211f9bfe4e6SEric Dumazet 				 u32 max_segs)
2212c1b4a7e6SDavid S. Miller {
22136687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2214f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
221550c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
221650c8339eSEric Dumazet 	struct sk_buff *head;
2217ad9f4f50SEric Dumazet 	int win_divisor;
2218f1c6ea38SEric Dumazet 	s64 delta;
2219c1b4a7e6SDavid S. Miller 
222099d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2221ae8064acSJohn Heffner 		goto send_now;
2222ae8064acSJohn Heffner 
22235f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
2224a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
2225a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2226a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
22275f852eb5SEric Dumazet 	 */
2228a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2229a682850aSEric Dumazet 	if (delta > 0)
2230ae8064acSJohn Heffner 		goto send_now;
2231908a75c1SDavid S. Miller 
2232c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
2233c1b4a7e6SDavid S. Miller 
2234c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
223540570375SEric Dumazet 	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2236c1b4a7e6SDavid S. Miller 
223790840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2238c1b4a7e6SDavid S. Miller 
2239c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
224040570375SEric Dumazet 	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2241c1b4a7e6SDavid S. Miller 
2242c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
2243c1b4a7e6SDavid S. Miller 
2244ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
2245605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
2246ae8064acSJohn Heffner 		goto send_now;
2247ba244fe9SDavid S. Miller 
224862ad2761SIlpo Järvinen 	/* An skb in the middle of the queue won't get more data; fully sendable already? */
224962ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
225062ad2761SIlpo Järvinen 		goto send_now;
225162ad2761SIlpo Järvinen 
22525bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2253ad9f4f50SEric Dumazet 	if (win_divisor) {
225440570375SEric Dumazet 		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2255c1b4a7e6SDavid S. Miller 
2256c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
2257c1b4a7e6SDavid S. Miller 		 * just use it.
2258c1b4a7e6SDavid S. Miller 		 */
2259ad9f4f50SEric Dumazet 		chunk /= win_divisor;
2260c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
2261ae8064acSJohn Heffner 			goto send_now;
2262c1b4a7e6SDavid S. Miller 	} else {
2263c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
2264c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
2265c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
2266c1b4a7e6SDavid S. Miller 		 * then send now.
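		 * E.g. with mss_cache = 1448, a limit above
		 * 3 * 1448 = 4344 bytes sends immediately.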
2267c1b4a7e6SDavid S. Miller 		 */
22686b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2269ae8064acSJohn Heffner 			goto send_now;
2270c1b4a7e6SDavid S. Miller 	}
2271c1b4a7e6SDavid S. Miller 
227275c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
227375c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
227475c119afSEric Dumazet 	if (!head)
227575c119afSEric Dumazet 		goto send_now;
2276f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
227750c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
2278f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
227950c8339eSEric Dumazet 		goto send_now;
228050c8339eSEric Dumazet 
2281f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
2282f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
2283f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
2284f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
2285f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
2286f9bfe4e6SEric Dumazet 	 */
2287f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
2288f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
2289ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2290f9bfe4e6SEric Dumazet 			return true;
2291f9bfe4e6SEric Dumazet 		}
2292f9bfe4e6SEric Dumazet 	} else {
2293f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2294f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2295f9bfe4e6SEric Dumazet 			return true;
2296f9bfe4e6SEric Dumazet 		}
2297f9bfe4e6SEric Dumazet 	}
2298f9bfe4e6SEric Dumazet 
2299f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2300d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2301d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2302f9bfe4e6SEric Dumazet 		goto send_now;
2303ca8a2263SNeal Cardwell 
2304a2a385d6SEric Dumazet 	return true;
2305ae8064acSJohn Heffner 
2306ae8064acSJohn Heffner send_now:
2307a2a385d6SEric Dumazet 	return false;
2308c1b4a7e6SDavid S. Miller }
2309c1b4a7e6SDavid S. Miller 
231005cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
231105cbc0dbSFan Du {
231205cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
231305cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
231405cbc0dbSFan Du 	struct net *net = sock_net(sk);
231505cbc0dbSFan Du 	u32 interval;
231605cbc0dbSFan Du 	s32 delta;
231705cbc0dbSFan Du 
23182a85388fSKuniyuki Iwashima 	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2319c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
232005cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
232105cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
232205cbc0dbSFan Du 
232305cbc0dbSFan Du 		/* Update current search range */
232405cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
232505cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
232605cbc0dbSFan Du 			sizeof(struct tcphdr) +
232705cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
232805cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
232905cbc0dbSFan Du 
233005cbc0dbSFan Du 		/* Update probe time stamp */
2331c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
233205cbc0dbSFan Du 	}
233305cbc0dbSFan Du }
233405cbc0dbSFan Du 
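/* Return true when the first @len bytes of the write queue can be merged
 * into a single MTU probe skb: no EOR boundary, no tx timestamp request,
 * and a consistent zerocopy status across the skbs involved.
 */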
2335808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2336808cf9e3SIlya Lesokhin {
2337808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2338808cf9e3SIlya Lesokhin 
2339808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2340808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2341808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2342808cf9e3SIlya Lesokhin 			break;
2343808cf9e3SIlya Lesokhin 
23449b65b17dSTalal Ahmad 		if (unlikely(TCP_SKB_CB(skb)->eor) ||
23459b65b17dSTalal Ahmad 		    tcp_has_tx_tstamp(skb) ||
23469b65b17dSTalal Ahmad 		    !skb_pure_zcopy_same(skb, next))
2347808cf9e3SIlya Lesokhin 			return false;
2348808cf9e3SIlya Lesokhin 
2349808cf9e3SIlya Lesokhin 		len -= skb->len;
2350808cf9e3SIlya Lesokhin 	}
2351808cf9e3SIlya Lesokhin 
2352808cf9e3SIlya Lesokhin 	return true;
2353808cf9e3SIlya Lesokhin }
2354808cf9e3SIlya Lesokhin 
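/* Build the probe payload without copying data: reference up to
 * @probe_size bytes of page frags from the skbs at the head of the write
 * queue into @to, merging physically contiguous frags along the way.
 */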
235573601329SEric Dumazet static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
235673601329SEric Dumazet 			     int probe_size)
235773601329SEric Dumazet {
235873601329SEric Dumazet 	skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
235973601329SEric Dumazet 	int i, todo, len = 0, nr_frags = 0;
236073601329SEric Dumazet 	const struct sk_buff *skb;
236173601329SEric Dumazet 
236273601329SEric Dumazet 	if (!sk_wmem_schedule(sk, to->truesize + probe_size))
236373601329SEric Dumazet 		return -ENOMEM;
236473601329SEric Dumazet 
236573601329SEric Dumazet 	skb_queue_walk(&sk->sk_write_queue, skb) {
236673601329SEric Dumazet 		const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
236773601329SEric Dumazet 
236873601329SEric Dumazet 		if (skb_headlen(skb))
236973601329SEric Dumazet 			return -EINVAL;
237073601329SEric Dumazet 
237173601329SEric Dumazet 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
237273601329SEric Dumazet 			if (len >= probe_size)
237373601329SEric Dumazet 				goto commit;
237473601329SEric Dumazet 			todo = min_t(int, skb_frag_size(fragfrom),
237573601329SEric Dumazet 				     probe_size - len);
237673601329SEric Dumazet 			len += todo;
237773601329SEric Dumazet 			if (lastfrag &&
237873601329SEric Dumazet 			    skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
237973601329SEric Dumazet 			    skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
238073601329SEric Dumazet 						      skb_frag_size(lastfrag)) {
238173601329SEric Dumazet 				skb_frag_size_add(lastfrag, todo);
238273601329SEric Dumazet 				continue;
238373601329SEric Dumazet 			}
238473601329SEric Dumazet 			if (unlikely(nr_frags == MAX_SKB_FRAGS))
238573601329SEric Dumazet 				return -E2BIG;
238673601329SEric Dumazet 			skb_frag_page_copy(fragto, fragfrom);
238773601329SEric Dumazet 			skb_frag_off_copy(fragto, fragfrom);
238873601329SEric Dumazet 			skb_frag_size_set(fragto, todo);
238973601329SEric Dumazet 			nr_frags++;
239073601329SEric Dumazet 			lastfrag = fragto++;
239173601329SEric Dumazet 		}
239273601329SEric Dumazet 	}
239373601329SEric Dumazet commit:
239473601329SEric Dumazet 	WARN_ON_ONCE(len != probe_size);
239573601329SEric Dumazet 	for (i = 0; i < nr_frags; i++)
239673601329SEric Dumazet 		skb_frag_ref(to, i);
239773601329SEric Dumazet 
239873601329SEric Dumazet 	skb_shinfo(to)->nr_frags = nr_frags;
239973601329SEric Dumazet 	to->truesize += probe_size;
240073601329SEric Dumazet 	to->len += probe_size;
240173601329SEric Dumazet 	to->data_len += probe_size;
240273601329SEric Dumazet 	__skb_header_release(to);
240373601329SEric Dumazet 	return 0;
240473601329SEric Dumazet }
240573601329SEric Dumazet 
24065d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
240767edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
240867edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
240967edfef7SAndi Kleen  * changes resulting in larger path MTUs.
241067edfef7SAndi Kleen  *
24115d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
24125d424d5aSJohn Heffner  *         1 if a probe was sent,
2413056834d9SIlpo Järvinen  *         -1 otherwise
2414056834d9SIlpo Järvinen  */
24155d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
24165d424d5aSJohn Heffner {
24175d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
241812a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
24195d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
24206b58e0a5SFan Du 	struct net *net = sock_net(sk);
24215d424d5aSJohn Heffner 	int probe_size;
242291cc17c0SIlpo Järvinen 	int size_needed;
242312a59abcSEric Dumazet 	int copy, len;
24245d424d5aSJohn Heffner 	int mss_now;
24256b58e0a5SFan Du 	int interval;
24265d424d5aSJohn Heffner 
24275d424d5aSJohn Heffner 	/* Not currently probing/verifying,
24285d424d5aSJohn Heffner 	 * not in recovery,
24295d424d5aSJohn Heffner 	 * have enough cwnd, and
243012a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
243112a59abcSEric Dumazet 	 */
243212a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
24335d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
24345d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
243540570375SEric Dumazet 		   tcp_snd_cwnd(tp) < 11 ||
243612a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
24375d424d5aSJohn Heffner 		return -1;
24385d424d5aSJohn Heffner 
24396b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_base_mss
24406b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
24416b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
24426b58e0a5SFan Du 	 */
24430c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
24446b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
24456b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
244691cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
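	/* E.g. search_low = 1024 and search_high = 1500 probe the midpoint
	 * MTU of 1262; with tp->reordering = 3, size_needed reserves four
	 * extra full-sized segments beyond the probe itself.
	 */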
24476b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
244805cbc0dbSFan Du 	/* When misfortune happens, we are reprobing actively,
244905cbc0dbSFan Du 	 * and the reprobe timer has expired. We stick with the current
245005cbc0dbSFan Du 	 * probing process by not resetting the search range to its original value.
245105cbc0dbSFan Du 	 */
24526b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
245392c0aa41SKuniyuki Iwashima 	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
245405cbc0dbSFan Du 		/* Check whether enough time has elaplased for
245505cbc0dbSFan Du 		 * another round of probing.
245605cbc0dbSFan Du 		 */
245705cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
24585d424d5aSJohn Heffner 		return -1;
24595d424d5aSJohn Heffner 	}
24605d424d5aSJohn Heffner 
24615d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
24627f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
24635d424d5aSJohn Heffner 		return -1;
24645d424d5aSJohn Heffner 
246591cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
24665d424d5aSJohn Heffner 		return -1;
246790840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
24685d424d5aSJohn Heffner 		return 0;
24695d424d5aSJohn Heffner 
2470d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
247140570375SEric Dumazet 	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2472d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
24735d424d5aSJohn Heffner 			return -1;
24745d424d5aSJohn Heffner 		else
24755d424d5aSJohn Heffner 			return 0;
24765d424d5aSJohn Heffner 	}
24775d424d5aSJohn Heffner 
2478808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2479808cf9e3SIlya Lesokhin 		return -1;
2480808cf9e3SIlya Lesokhin 
24815d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
24825882efffSEric Dumazet 	nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
248351456b29SIan Morris 	if (!nskb)
24845d424d5aSJohn Heffner 		return -1;
248573601329SEric Dumazet 
248673601329SEric Dumazet 	/* build the payload, and be prepared to abort if this fails. */
248773601329SEric Dumazet 	if (tcp_clone_payload(sk, nskb, probe_size)) {
248871c299c7SJakub Kicinski 		tcp_skb_tsorted_anchor_cleanup(nskb);
248973601329SEric Dumazet 		consume_skb(nskb);
249073601329SEric Dumazet 		return -1;
249173601329SEric Dumazet 	}
2492ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, nskb->truesize);
24933ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
24945d424d5aSJohn Heffner 
2495fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
249641477662SJakub Kicinski 	skb_copy_decrypted(nskb, skb);
24975a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(nskb, skb);
24985d424d5aSJohn Heffner 
24995d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
25005d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
25014de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
25025d424d5aSJohn Heffner 
250350c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
25042b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
250550c4817eSIlpo Järvinen 
25065d424d5aSJohn Heffner 	len = 0;
2507234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
25085d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
25095d424d5aSJohn Heffner 
25105d424d5aSJohn Heffner 		if (skb->len <= copy) {
25115d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
25125d424d5aSJohn Heffner 			 * Throw it away. */
25134de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2514808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2515808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2516808cf9e3SIlya Lesokhin 			 */
2517808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2518888a5c53SWillem de Bruijn 			tcp_skb_collapse_tstamp(nskb, skb);
2519fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
252003271f3aSTalal Ahmad 			tcp_wmem_free_skb(sk, skb);
25215d424d5aSJohn Heffner 		} else {
25224de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2523a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
25245d424d5aSJohn Heffner 			__pskb_trim_head(skb, copy);
25255bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss_now);
25265d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
25275d424d5aSJohn Heffner 		}
25285d424d5aSJohn Heffner 
25295d424d5aSJohn Heffner 		len += copy;
2530234b6860SIlpo Järvinen 
2531234b6860SIlpo Järvinen 		if (len >= probe_size)
2532234b6860SIlpo Järvinen 			break;
25335d424d5aSJohn Heffner 	}
25345bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
25355d424d5aSJohn Heffner 
25365d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
25377faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
25387faee5c0SEric Dumazet 	 */
25395d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
25405d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
25415d424d5aSJohn Heffner 		 * effectively two packets. */
254240570375SEric Dumazet 		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
254366f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
25445d424d5aSJohn Heffner 
25455d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
25460e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
25470e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
25485d424d5aSJohn Heffner 
25495d424d5aSJohn Heffner 		return 1;
25505d424d5aSJohn Heffner 	}
25515d424d5aSJohn Heffner 
25525d424d5aSJohn Heffner 	return -1;
25535d424d5aSJohn Heffner }
25545d424d5aSJohn Heffner 
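/* Under EDT pacing, tp->tcp_wstamp_ns holds the earliest departure time
 * of the next packet. If it is still in the future relative to
 * tcp_clock_cache (e.g. ~2 ms ahead), arm the pacing hrtimer for that
 * absolute time and defer transmission.
 */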
2555864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2556218af599SEric Dumazet {
2557864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2558864e5c09SEric Dumazet 
2559864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2560864e5c09SEric Dumazet 		return false;
2561864e5c09SEric Dumazet 
2562864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2563864e5c09SEric Dumazet 		return false;
2564864e5c09SEric Dumazet 
2565864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2566864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2567864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2568864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2569864e5c09SEric Dumazet 		sock_hold(sk);
2570864e5c09SEric Dumazet 	}
2571864e5c09SEric Dumazet 	return true;
2572218af599SEric Dumazet }
2573218af599SEric Dumazet 
2574f921a4a5SEric Dumazet static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
2575f921a4a5SEric Dumazet {
2576f921a4a5SEric Dumazet 	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
2577f921a4a5SEric Dumazet 
2578f921a4a5SEric Dumazet 	/* No skb in the rtx queue. */
2579f921a4a5SEric Dumazet 	if (!node)
2580f921a4a5SEric Dumazet 		return true;
2581f921a4a5SEric Dumazet 
2582f921a4a5SEric Dumazet 	/* Only one skb in rtx queue. */
2583f921a4a5SEric Dumazet 	return !node->rb_left && !node->rb_right;
2584f921a4a5SEric Dumazet }
2585f921a4a5SEric Dumazet 
2586f9616c35SEric Dumazet /* TCP Small Queues :
2587f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets, or ~1 ms worth of data.
2588f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2589f9616c35SEric Dumazet  * This allows for :
2590f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2591f9616c35SEric Dumazet  *  - faster recovery
2592f9616c35SEric Dumazet  *  - high rates
2593f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2594f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2595f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
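 *
 * E.g. at a pacing rate of 125 MB/s (1 Gbit/s) with the default
 * sk_pacing_shift of 10, the base limit is ~122 KB (about 1 ms of data);
 * retransmit callers pass factor = 1, which doubles it.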
2596f9616c35SEric Dumazet  */
2597f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2598f9616c35SEric Dumazet 				  unsigned int factor)
2599f9616c35SEric Dumazet {
260076a9ebe8SEric Dumazet 	unsigned long limit;
2601f9616c35SEric Dumazet 
260276a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
260376a9ebe8SEric Dumazet 		      2 * skb->truesize,
260428b24f90SEric Dumazet 		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
2605c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
260676a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
26079fb90193SKuniyuki Iwashima 			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2608f9616c35SEric Dumazet 	limit <<= factor;
2609f9616c35SEric Dumazet 
2610a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2611a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
261228b24f90SEric Dumazet 		u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
261328b24f90SEric Dumazet 				  tcp_sk(sk)->tcp_tx_delay;
2614a842fe14SEric Dumazet 
2615a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2616a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2617a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2618a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2619a842fe14SEric Dumazet 		 */
2620a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2621a842fe14SEric Dumazet 		limit += extra_bytes;
2622a842fe14SEric Dumazet 	}
262314afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2624f921a4a5SEric Dumazet 		/* Always send skb if rtx queue is empty or has one skb.
262575eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
262675eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
262775eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
262875eefc6cSEric Dumazet 		 */
2629f921a4a5SEric Dumazet 		if (tcp_rtx_queue_empty_or_single_skb(sk))
263075eefc6cSEric Dumazet 			return false;
263175eefc6cSEric Dumazet 
26327aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2633f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2634f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2635f9616c35SEric Dumazet 		 * test again the condition.
2636f9616c35SEric Dumazet 		 * test the condition again.
2637f9616c35SEric Dumazet 		smp_mb__after_atomic();
2638ce8299b6SEric Dumazet 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2639f9616c35SEric Dumazet 			return true;
2640f9616c35SEric Dumazet 	}
2641f9616c35SEric Dumazet 	return false;
2642f9616c35SEric Dumazet }
2643f9616c35SEric Dumazet 
264405b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
264505b055e8SFrancis Yan {
2646628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2647efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
264805b055e8SFrancis Yan 
2649efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2650efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
265105b055e8SFrancis Yan 	tp->chrono_start = now;
265205b055e8SFrancis Yan 	tp->chrono_type = new;
265305b055e8SFrancis Yan }
265405b055e8SFrancis Yan 
265505b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
265605b055e8SFrancis Yan {
265705b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
265805b055e8SFrancis Yan 
265905b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
26600f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
26610f87230dSFrancis Yan 	 * over the other conditions, so that if something "more interesting"
266205b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
266305b055e8SFrancis Yan 	 */
266405b055e8SFrancis Yan 	if (type > tp->chrono_type)
266505b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
266605b055e8SFrancis Yan }
266705b055e8SFrancis Yan 
266805b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
266905b055e8SFrancis Yan {
267005b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
267105b055e8SFrancis Yan 
26730f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
26740f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
26750f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
26760f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
26770f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
26780f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
26790f87230dSFrancis Yan 	 */
268075c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
268105b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
26820f87230dSFrancis Yan 	else if (type == tp->chrono_type)
26830f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
268405b055e8SFrancis Yan }
268505b055e8SFrancis Yan 
26861da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
26871da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
26881da177e4SLinus Torvalds  * window for us.
26891da177e4SLinus Torvalds  *
2690f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2691f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2692f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2693f8269a49SIlpo Järvinen  *
26946ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
26956ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
26966ba8a3b1SNandita Dukkipati  *
2697a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2698a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
26991da177e4SLinus Torvalds  */
2700a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2701d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
27021da177e4SLinus Torvalds {
27031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
270492df7b51SDavid S. Miller 	struct sk_buff *skb;
2705c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2706c1b4a7e6SDavid S. Miller 	int cwnd_quota;
27075d424d5aSJohn Heffner 	int result;
27085615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2709605ad7f1SEric Dumazet 	u32 max_segs;
27101da177e4SLinus Torvalds 
2711c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
27125d424d5aSJohn Heffner 
2713ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2714d5dd9175SIlpo Järvinen 	if (!push_one) {
27155d424d5aSJohn Heffner 		/* Do MTU probing. */
2716d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2717d5dd9175SIlpo Järvinen 		if (!result) {
2718a2a385d6SEric Dumazet 			return false;
27195d424d5aSJohn Heffner 		} else if (result > 0) {
27205d424d5aSJohn Heffner 			sent_pkts = 1;
27215d424d5aSJohn Heffner 		}
2722d5dd9175SIlpo Järvinen 	}
27235d424d5aSJohn Heffner 
2724ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2725fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2726c8ac3774SHerbert Xu 		unsigned int limit;
2727c8ac3774SHerbert Xu 
272879861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
272979861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2730a1ac9c8aSMartin KaFai Lau 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2731a1ac9c8aSMartin KaFai Lau 			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
273279861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2733bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
273479861919SEric Dumazet 			goto repair; /* Skip network transmission */
273579861919SEric Dumazet 		}
273679861919SEric Dumazet 
2737218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2738218af599SEric Dumazet 			break;
2739218af599SEric Dumazet 
27405bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2741c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2742c1b4a7e6SDavid S. Miller 
2743b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
27446ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
27456ba8a3b1SNandita Dukkipati 			if (push_one == 2)
27466ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
27476ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
27486ba8a3b1SNandita Dukkipati 			else
2749b68e9f85SHerbert Xu 				break;
27506ba8a3b1SNandita Dukkipati 		}
2751b68e9f85SHerbert Xu 
27525615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
27535615f886SFrancis Yan 			is_rwnd_limited = true;
2754b68e9f85SHerbert Xu 			break;
27555615f886SFrancis Yan 		}
2756b68e9f85SHerbert Xu 
2757d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2758aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2759aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2760aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2761aa93466bSDavid S. Miller 				break;
2762c1b4a7e6SDavid S. Miller 		} else {
2763ca8a2263SNeal Cardwell 			if (!push_one &&
2764605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2765f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2766aa93466bSDavid S. Miller 				break;
2767c1b4a7e6SDavid S. Miller 		}
2768aa93466bSDavid S. Miller 
2769605ad7f1SEric Dumazet 		limit = mss_now;
2770d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2771605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2772605ad7f1SEric Dumazet 						    min_t(unsigned int,
2773605ad7f1SEric Dumazet 							  cwnd_quota,
2774605ad7f1SEric Dumazet 							  max_segs),
2775605ad7f1SEric Dumazet 						    nonagle);
2776605ad7f1SEric Dumazet 
2777605ad7f1SEric Dumazet 		if (skb->len > limit &&
277856483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2779605ad7f1SEric Dumazet 			break;
2780605ad7f1SEric Dumazet 
2781f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
278246d3ceabSEric Dumazet 			break;
2783c9eeec26SEric Dumazet 
27841f85e626SEric Dumazet 		/* Argh, we hit an empty skb(), presumably a thread
27851f85e626SEric Dumazet 		 * is sleeping in sendmsg()/sk_stream_wait_memory().
27861f85e626SEric Dumazet 		 * We do not want to send a pure-ack packet and have
27871f85e626SEric Dumazet 		 * a strange looking rtx queue with empty packet(s).
27881f85e626SEric Dumazet 		 */
27891f85e626SEric Dumazet 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
27901f85e626SEric Dumazet 			break;
27911f85e626SEric Dumazet 
2792d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
27931da177e4SLinus Torvalds 			break;
27941da177e4SLinus Torvalds 
2795ec342325SAndrew Vagin repair:
27961da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
27971da177e4SLinus Torvalds 		 * This call will increment packets_out.
27981da177e4SLinus Torvalds 		 */
279966f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
28001da177e4SLinus Torvalds 
28011da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2802a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2803d5dd9175SIlpo Järvinen 
2804d5dd9175SIlpo Järvinen 		if (push_one)
2805d5dd9175SIlpo Järvinen 			break;
28061da177e4SLinus Torvalds 	}
28071da177e4SLinus Torvalds 
28085615f886SFrancis Yan 	if (is_rwnd_limited)
28095615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
28105615f886SFrancis Yan 	else
28115615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
28125615f886SFrancis Yan 
281340570375SEric Dumazet 	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
2814299bcb55SNeal Cardwell 	if (likely(sent_pkts || is_cwnd_limited))
2815299bcb55SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2816299bcb55SNeal Cardwell 
2817aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2818684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2819684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
28206ba8a3b1SNandita Dukkipati 
28216ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
28226ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2823ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2824a2a385d6SEric Dumazet 		return false;
28251da177e4SLinus Torvalds 	}
282675c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
28276ba8a3b1SNandita Dukkipati }
28286ba8a3b1SNandita Dukkipati 
2829ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
28306ba8a3b1SNandita Dukkipati {
28316ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
28326ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
28331c2709cfSNeal Cardwell 	u32 timeout, timeout_us, rto_delta_us;
28342ae21cf5SEric Dumazet 	int early_retrans;
28356ba8a3b1SNandita Dukkipati 
28366ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
28376ba8a3b1SNandita Dukkipati 	 * finishes.
28386ba8a3b1SNandita Dukkipati 	 */
2839d983ea6fSEric Dumazet 	if (rcu_access_pointer(tp->fastopen_rsk))
28406ba8a3b1SNandita Dukkipati 		return false;
28416ba8a3b1SNandita Dukkipati 
284252e65865SKuniyuki Iwashima 	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
28436ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2844b4f70c3dSNeal Cardwell 	 * not in loss recovery that are limited by either cwnd or the application.
28456ba8a3b1SNandita Dukkipati 	 */
28462ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2847bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2848b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2849b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
28506ba8a3b1SNandita Dukkipati 		return false;
28516ba8a3b1SNandita Dukkipati 
2852bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2853f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2854f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
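	 * E.g. srtt = 50 ms (tp->srtt_us = 400000): base PTO is
	 * 400000 >> 2 = 100 ms; with exactly one packet in flight the
	 * default 200 ms min RTO raises the timeout to 300 ms.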
28556ba8a3b1SNandita Dukkipati 	 */
2856bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
28571c2709cfSNeal Cardwell 		timeout_us = tp->srtt_us >> 2;
28586ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
28591c2709cfSNeal Cardwell 			timeout_us += tcp_rto_min_us(sk);
2860bb4d991aSYuchung Cheng 		else
28611c2709cfSNeal Cardwell 			timeout_us += TCP_TIMEOUT_MIN_US;
28621c2709cfSNeal Cardwell 		timeout = usecs_to_jiffies(timeout_us);
2863bb4d991aSYuchung Cheng 	} else {
2864bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2865bb4d991aSYuchung Cheng 	}
28666ba8a3b1SNandita Dukkipati 
2867a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2868ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2869ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2870ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2871a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2872a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
28736ba8a3b1SNandita Dukkipati 
28748dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
28756ba8a3b1SNandita Dukkipati 	return true;
28766ba8a3b1SNandita Dukkipati }
28776ba8a3b1SNandita Dukkipati 
28781f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
28791f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
28801f3279aeSEric Dumazet  * In this case, there is very little point doing a retransmit!
28811f3279aeSEric Dumazet  */
2882f4dae54eSEric Dumazet static bool skb_still_in_host_queue(struct sock *sk,
28831f3279aeSEric Dumazet 				    const struct sk_buff *skb)
28841f3279aeSEric Dumazet {
288539bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2886f4dae54eSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2887f4dae54eSEric Dumazet 		smp_mb__after_atomic();
2888f4dae54eSEric Dumazet 		if (skb_fclone_busy(sk, skb)) {
2889c10d9310SEric Dumazet 			NET_INC_STATS(sock_net(sk),
28901f3279aeSEric Dumazet 				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
28911f3279aeSEric Dumazet 			return true;
28921f3279aeSEric Dumazet 		}
2893f4dae54eSEric Dumazet 	}
28941f3279aeSEric Dumazet 	return false;
28951f3279aeSEric Dumazet }
28961f3279aeSEric Dumazet 
2897b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
28986ba8a3b1SNandita Dukkipati  * retransmit the last segment.
28996ba8a3b1SNandita Dukkipati  */
29006ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
29016ba8a3b1SNandita Dukkipati {
29029b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
29036ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
29046ba8a3b1SNandita Dukkipati 	int pcount;
29056ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
29066ba8a3b1SNandita Dukkipati 
290776be93fcSYuchung Cheng 	/* At most one outstanding TLP */
290876be93fcSYuchung Cheng 	if (tp->tlp_high_seq)
290976be93fcSYuchung Cheng 		goto rearm_timer;
291076be93fcSYuchung Cheng 
291176be93fcSYuchung Cheng 	tp->tlp_retrans = 0;
2912b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
291375c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2914b340b264SYuchung Cheng 		pcount = tp->packets_out;
2915b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2916b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2917b340b264SYuchung Cheng 			goto probe_sent;
29186ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29196ba8a3b1SNandita Dukkipati 	}
292075c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2921b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2922b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2923b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
292440570375SEric Dumazet 			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
2925b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2926b2b7af86SYuchung Cheng 		return;
2927b2b7af86SYuchung Cheng 	}
29286ba8a3b1SNandita Dukkipati 
29291f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
29301f3279aeSEric Dumazet 		goto rearm_timer;
29311f3279aeSEric Dumazet 
29326ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
29336ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
29346ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29356ba8a3b1SNandita Dukkipati 
29366ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
293775c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
293875c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
29396cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
29406ba8a3b1SNandita Dukkipati 			goto rearm_timer;
294175c119afSEric Dumazet 		skb = skb_rb_next(skb);
29426ba8a3b1SNandita Dukkipati 	}
29436ba8a3b1SNandita Dukkipati 
29446ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
29456ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29466ba8a3b1SNandita Dukkipati 
294710d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2948b340b264SYuchung Cheng 		goto rearm_timer;
29496ba8a3b1SNandita Dukkipati 
295076be93fcSYuchung Cheng 	tp->tlp_retrans = 1;
295176be93fcSYuchung Cheng 
295276be93fcSYuchung Cheng probe_sent:
29539b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
29549b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
29559b717a8dSNandita Dukkipati 
2956c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2957fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2958fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2959b340b264SYuchung Cheng rearm_timer:
2960fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
29611da177e4SLinus Torvalds }
29621da177e4SLinus Torvalds 
2963a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2964a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2965a762a980SDavid S. Miller  * The socket must be locked by the caller.
2966a762a980SDavid S. Miller  */
29679e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
29689e412ba7SIlpo Järvinen 			       int nonagle)
2969a762a980SDavid S. Miller {
2970726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2971726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2972726e07a8SIlpo Järvinen 	 * all will be happy.
2973726e07a8SIlpo Järvinen 	 */
2974726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2975726e07a8SIlpo Järvinen 		return;
2976726e07a8SIlpo Järvinen 
297799a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
29787450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
29799e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2980a762a980SDavid S. Miller }
2981a762a980SDavid S. Miller 
2982c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2983c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2984c1b4a7e6SDavid S. Miller  */
2985c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2986c1b4a7e6SDavid S. Miller {
2987fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2988c1b4a7e6SDavid S. Miller 
2989c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2990c1b4a7e6SDavid S. Miller 
2991d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2992c1b4a7e6SDavid S. Miller }
2993c1b4a7e6SDavid S. Miller 
29941da177e4SLinus Torvalds /* This function returns the amount that we can raise the
29951da177e4SLinus Torvalds  * usable window based on the following constraints
29961da177e4SLinus Torvalds  *
29971da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
29981da177e4SLinus Torvalds  * 2. We limit memory per socket
29991da177e4SLinus Torvalds  *
30001da177e4SLinus Torvalds  * RFC 1122:
30011da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
30021da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
30031da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
30041da177e4SLinus Torvalds  *
30051da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
30061da177e4SLinus Torvalds  * it at least MSS bytes.
30071da177e4SLinus Torvalds  *
30081da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
30091da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
30101da177e4SLinus Torvalds  *
30111da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
30121da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
30131da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
30141da177e4SLinus Torvalds  * window to always advance by a single byte.
30151da177e4SLinus Torvalds  *
30161da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
30171da177e4SLinus Torvalds  * then this will not be a problem.
30181da177e4SLinus Torvalds  *
30191da177e4SLinus Torvalds  * BSD seems to make the following compromise:
30201da177e4SLinus Torvalds  *
30211da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
30221da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
30231da177e4SLinus Torvalds  *	then set the window to 0.
30241da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
30251da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
30261da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
30271da177e4SLinus Torvalds  *
30281da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
30291da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
30301da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
30311da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
30321da177e4SLinus Torvalds  * because the pipeline is full.
30331da177e4SLinus Torvalds  *
30341da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
30351da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
30361da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
30371da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
30381da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
30391da177e4SLinus Torvalds  *
30401da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
30411da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
30421da177e4SLinus Torvalds  *
30431da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
30441da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
30451da177e4SLinus Torvalds  */
30461da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
30471da177e4SLinus Torvalds {
3048463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
30491da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3050b650d953Smfreemon@cloudflare.com 	struct net *net = sock_net(sk);
3051caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
30521da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
30531da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
30541da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
30551da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
30561da177e4SLinus Torvalds 	 */
3057463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
30581da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
305986c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
3060071c8ed6SFlorian Westphal 	int full_space, window;
3061071c8ed6SFlorian Westphal 
3062071c8ed6SFlorian Westphal 	if (sk_is_mptcp(sk))
3063071c8ed6SFlorian Westphal 		mptcp_space(sk, &free_space, &allowed_space);
3064071c8ed6SFlorian Westphal 
3065071c8ed6SFlorian Westphal 	full_space = min_t(int, tp->window_clamp, allowed_space);
30661da177e4SLinus Torvalds 
306706425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
30681da177e4SLinus Torvalds 		mss = full_space;
306906425c30SEric Dumazet 		if (mss <= 0)
307006425c30SEric Dumazet 			return 0;
307106425c30SEric Dumazet 	}
3072b650d953Smfreemon@cloudflare.com 
3073b650d953Smfreemon@cloudflare.com 	/* Only allow window shrink if the sysctl is enabled and we have
3074b650d953Smfreemon@cloudflare.com 	 * a non-zero scaling factor in effect.
3075b650d953Smfreemon@cloudflare.com 	 */
3076b650d953Smfreemon@cloudflare.com 	if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3077b650d953Smfreemon@cloudflare.com 		goto shrink_window_allowed;
3078b650d953Smfreemon@cloudflare.com 
3079b650d953Smfreemon@cloudflare.com 	/* do not allow window to shrink */
3080b650d953Smfreemon@cloudflare.com 
3081b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
3082463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
30831da177e4SLinus Torvalds 
3084b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
3085053f3684SWei Wang 			tcp_adjust_rcv_ssthresh(sk);
30861da177e4SLinus Torvalds 
308786c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
308886c1a045SFlorian Westphal 		 * increase it due to wscale.
308986c1a045SFlorian Westphal 		 */
309086c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
309186c1a045SFlorian Westphal 
309286c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
309386c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
309486c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
309586c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
309686c1a045SFlorian Westphal 		 * With a large window, the mss test triggers far too late to
309786c1a045SFlorian Westphal 		 * announce a zero window in time, before the rmem limit kicks in.
309886c1a045SFlorian Westphal 		 */
309986c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
31001da177e4SLinus Torvalds 			return 0;
31011da177e4SLinus Torvalds 	}
31021da177e4SLinus Torvalds 
31031da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
31041da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
31051da177e4SLinus Torvalds 
31061da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
31071da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
31081da177e4SLinus Torvalds 	 */
31091da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
31101da177e4SLinus Torvalds 		window = free_space;
31111da177e4SLinus Torvalds 
31121da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
31131da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
31141da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
31151da177e4SLinus Torvalds 		 */
31161935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
31171da177e4SLinus Torvalds 	} else {
31181935299dSGao Feng 		window = tp->rcv_wnd;
31191da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
31201da177e4SLinus Torvalds 		 * Window clamp already applied above.
31211da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
31221da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
31231da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
31241da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
31251da177e4SLinus Torvalds 		 * is too small.
31261da177e4SLinus Torvalds 		 */
31271da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
31281935299dSGao Feng 			window = rounddown(free_space, mss);
312984565070SJohn Heffner 		else if (mss == full_space &&
3130b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
313184565070SJohn Heffner 			window = free_space;
31321da177e4SLinus Torvalds 	}
31331da177e4SLinus Torvalds 
31341da177e4SLinus Torvalds 	return window;
3135b650d953Smfreemon@cloudflare.com 
3136b650d953Smfreemon@cloudflare.com shrink_window_allowed:
3137b650d953Smfreemon@cloudflare.com 	/* new window should always be an exact multiple of scaling factor */
3138b650d953Smfreemon@cloudflare.com 	free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3139b650d953Smfreemon@cloudflare.com 
3140b650d953Smfreemon@cloudflare.com 	if (free_space < (full_space >> 1)) {
3141b650d953Smfreemon@cloudflare.com 		icsk->icsk_ack.quick = 0;
3142b650d953Smfreemon@cloudflare.com 
3143b650d953Smfreemon@cloudflare.com 		if (tcp_under_memory_pressure(sk))
3144b650d953Smfreemon@cloudflare.com 			tcp_adjust_rcv_ssthresh(sk);
3145b650d953Smfreemon@cloudflare.com 
3146b650d953Smfreemon@cloudflare.com 		/* if free space is too low, return a zero window */
3147b650d953Smfreemon@cloudflare.com 		if (free_space < (allowed_space >> 4) || free_space < mss ||
3148b650d953Smfreemon@cloudflare.com 			free_space < (1 << tp->rx_opt.rcv_wscale))
3149b650d953Smfreemon@cloudflare.com 			return 0;
3150b650d953Smfreemon@cloudflare.com 	}
3151b650d953Smfreemon@cloudflare.com 
3152b650d953Smfreemon@cloudflare.com 	if (free_space > tp->rcv_ssthresh) {
3153b650d953Smfreemon@cloudflare.com 		free_space = tp->rcv_ssthresh;
3154b650d953Smfreemon@cloudflare.com 		/* new window should always be an exact multiple of scaling factor
3155b650d953Smfreemon@cloudflare.com 		 *
3156b650d953Smfreemon@cloudflare.com 		 * For this case, we ALIGN "up" (increase free_space) because
3157b650d953Smfreemon@cloudflare.com 		 * we know free_space is not zero here, it has been reduced from
3158b650d953Smfreemon@cloudflare.com 		 * the memory-based limit, and rcv_ssthresh is not a hard limit
3159b650d953Smfreemon@cloudflare.com 		 * (unlike sk_rcvbuf).
3160b650d953Smfreemon@cloudflare.com 		 */
3161b650d953Smfreemon@cloudflare.com 		free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
3162b650d953Smfreemon@cloudflare.com 	}
3163b650d953Smfreemon@cloudflare.com 
3164b650d953Smfreemon@cloudflare.com 	return free_space;
31651da177e4SLinus Torvalds }
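/* Worked example of the rounding above (illustrative numbers only): with
 * rcv_wscale = 7 the advertised window moves in units of 1 << 7 = 128
 * bytes, so free_space = 10000 is aligned up, ALIGN(10000, 128) = 10112,
 * to keep a small window from being scaled away to zero. Without window
 * scaling and mss = 1460, free_space = 10000 is instead rounded down to
 * a nice multiple of mss: rounddown(10000, 1460) = 8760, i.e. six full
 * segments on the wire.
 */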
31661da177e4SLinus Torvalds 
3167cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3168082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
3169082ac2d5SMartin KaFai Lau {
31700a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
31710a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
31720a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
3173082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
3174082ac2d5SMartin KaFai Lau 
31750a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3176082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
31772de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
31782de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
3179082ac2d5SMartin KaFai Lau 	}
3180082ac2d5SMartin KaFai Lau }
3181082ac2d5SMartin KaFai Lau 
31824a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
3183f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
31841da177e4SLinus Torvalds {
31851da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
318675c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
318713dde04fSWei Yongjun 	int next_skb_size;
31881da177e4SLinus Torvalds 
3189058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
31901da177e4SLinus Torvalds 
3191058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
31921da177e4SLinus Torvalds 
3193bd446314SEric Dumazet 	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3194f8071cdeSEric Dumazet 		return false;
3195bd446314SEric Dumazet 
31962b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
3197a6963a6bSIlpo Järvinen 
31981da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
31991da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
32001da177e4SLinus Torvalds 
3201e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
32024de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
32031da177e4SLinus Torvalds 
32041da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
32051da177e4SLinus Torvalds 	 * packet counting does not break.
32061da177e4SLinus Torvalds 	 */
32074828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3208a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3209b7689205SIlpo Järvinen 
3210b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
3211ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
3212ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
3213ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
3214b7689205SIlpo Järvinen 
3215797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3216797108d1SIlpo Järvinen 
3217082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
3218082ac2d5SMartin KaFai Lau 
321975c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3220f8071cdeSEric Dumazet 	return true;
32211da177e4SLinus Torvalds }
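/* Minimal sketch of a successful collapse (hypothetical sequence numbers):
 * given skb covering [1000, 1400) and next_skb covering [1400, 1700),
 * tcp_skb_shift() moves the 300 payload bytes into skb, skb->end_seq
 * becomes 1700, the PSH/FIN flags and the EVER_RETRANS/eor bits are
 * merged over, and next_skb is unlinked and freed, leaving a single
 * retransmittable segment [1000, 1700).
 */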
32221da177e4SLinus Torvalds 
322367edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
3224a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
32254a17fc3aSIlpo Järvinen {
32264a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
3227a2a385d6SEric Dumazet 		return false;
32284a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
3229a2a385d6SEric Dumazet 		return false;
32302331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
32314a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3232a2a385d6SEric Dumazet 		return false;
32334a17fc3aSIlpo Järvinen 
3234a2a385d6SEric Dumazet 	return true;
32354a17fc3aSIlpo Järvinen }
32364a17fc3aSIlpo Järvinen 
323767edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create fewer
323867edfef7SAndi Kleen  * packets on the wire. This is only done on retransmission.
323967edfef7SAndi Kleen  */
32404a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
32414a17fc3aSIlpo Järvinen 				     int space)
32424a17fc3aSIlpo Järvinen {
32434a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
32444a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
3245a2a385d6SEric Dumazet 	bool first = true;
32464a17fc3aSIlpo Järvinen 
32471a63cb91SKuniyuki Iwashima 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
32484a17fc3aSIlpo Järvinen 		return;
32494de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
32504a17fc3aSIlpo Järvinen 		return;
32514a17fc3aSIlpo Järvinen 
325275c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
32534a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
32544a17fc3aSIlpo Järvinen 			break;
32554a17fc3aSIlpo Järvinen 
325685712484SMat Martineau 		if (!tcp_skb_can_collapse(to, skb))
3257a643b5d4SMartin KaFai Lau 			break;
3258a643b5d4SMartin KaFai Lau 
32594a17fc3aSIlpo Järvinen 		space -= skb->len;
32604a17fc3aSIlpo Järvinen 
32614a17fc3aSIlpo Järvinen 		if (first) {
3262a2a385d6SEric Dumazet 			first = false;
32634a17fc3aSIlpo Järvinen 			continue;
32644a17fc3aSIlpo Järvinen 		}
32654a17fc3aSIlpo Järvinen 
32664a17fc3aSIlpo Järvinen 		if (space < 0)
32674a17fc3aSIlpo Järvinen 			break;
32684a17fc3aSIlpo Järvinen 
32694a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
32704a17fc3aSIlpo Järvinen 			break;
32714a17fc3aSIlpo Järvinen 
3272f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
3273f8071cdeSEric Dumazet 			break;
32744a17fc3aSIlpo Järvinen 	}
32754a17fc3aSIlpo Järvinen }
32764a17fc3aSIlpo Järvinen 
32771da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
32781da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
32791da177e4SLinus Torvalds  * error occurred which prevented the send.
32801da177e4SLinus Torvalds  */
328110d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
32821da177e4SLinus Torvalds {
32835d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
328410d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
32857d227cd2SSridhar Samudrala 	unsigned int cur_mss;
328610d3be56SEric Dumazet 	int diff, len, err;
3287536a6c8eSYonglong Li 	int avail_wnd;
328810d3be56SEric Dumazet 
328910d3be56SEric Dumazet 	/* Inconclusive MTU probe */
329010d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
32915d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
32925d424d5aSJohn Heffner 
32931f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
32941f3279aeSEric Dumazet 		return -EBUSY;
32951f3279aeSEric Dumazet 
3296f99cd562SDong Chenchen start:
32971da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3298f99cd562SDong Chenchen 		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3299f99cd562SDong Chenchen 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
3300f99cd562SDong Chenchen 			TCP_SKB_CB(skb)->seq++;
3301f99cd562SDong Chenchen 			goto start;
3302f99cd562SDong Chenchen 		}
33037f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
33047f582b24SEric Dumazet 			WARN_ON_ONCE(1);
33057f582b24SEric Dumazet 			return -EINVAL;
33067f582b24SEric Dumazet 		}
33071da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
33081da177e4SLinus Torvalds 			return -ENOMEM;
33091da177e4SLinus Torvalds 	}
33101da177e4SLinus Torvalds 
33117d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
33127d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
33137d227cd2SSridhar Samudrala 
33140c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
3315536a6c8eSYonglong Li 	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
33167d227cd2SSridhar Samudrala 
33171da177e4SLinus Torvalds 	/* If the receiver has shrunk its window and skb is out of the
33181da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
33191da177e4SLinus Torvalds 	 * case when the window is shrunk to zero; in that case
3320536a6c8eSYonglong Li 	 * our retransmit of one segment serves as a zero window probe.
33211da177e4SLinus Torvalds 	 */
3322536a6c8eSYonglong Li 	if (avail_wnd <= 0) {
3323536a6c8eSYonglong Li 		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
33241da177e4SLinus Torvalds 			return -EAGAIN;
3325536a6c8eSYonglong Li 		avail_wnd = cur_mss;
3326536a6c8eSYonglong Li 	}
33271da177e4SLinus Torvalds 
332810d3be56SEric Dumazet 	len = cur_mss * segs;
3329536a6c8eSYonglong Li 	if (len > avail_wnd) {
3330536a6c8eSYonglong Li 		len = rounddown(avail_wnd, cur_mss);
3331536a6c8eSYonglong Li 		if (!len)
3332536a6c8eSYonglong Li 			len = avail_wnd;
3333536a6c8eSYonglong Li 	}
333410d3be56SEric Dumazet 	if (skb->len > len) {
333575c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
333675c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
33371da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
333802276f3cSIlpo Järvinen 	} else {
3339c4777efaSEric Dumazet 		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
3340c52e2421SEric Dumazet 			return -ENOMEM;
334110d3be56SEric Dumazet 
334210d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
334310d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
334410d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
334510d3be56SEric Dumazet 		if (diff)
334610d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
3347536a6c8eSYonglong Li 		avail_wnd = min_t(int, avail_wnd, cur_mss);
3348536a6c8eSYonglong Li 		if (skb->len < avail_wnd)
3349536a6c8eSYonglong Li 			tcp_retrans_try_collapse(sk, skb, avail_wnd);
33501da177e4SLinus Torvalds 	}
33511da177e4SLinus Torvalds 
335249213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
335349213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
335449213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
335549213555SDaniel Borkmann 
3356678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
3357678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
3358678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3359678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3360678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3361678550c6SYuchung Cheng 	tp->total_retrans += segs;
3362fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
3363678550c6SYuchung Cheng 
336450bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
336550bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
336650bceae9SThomas Graf 	 * beyond what csum_start can cover.
336750bceae9SThomas Graf 	 */
336850bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
336950bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
337010a81980SEric Dumazet 		struct sk_buff *nskb;
337110a81980SEric Dumazet 
3372e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
337310a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
337407f8e4d0SFlorian Westphal 			if (nskb) {
337507f8e4d0SFlorian Westphal 				nskb->dev = NULL;
337607f8e4d0SFlorian Westphal 				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
337707f8e4d0SFlorian Westphal 			} else {
337807f8e4d0SFlorian Westphal 				err = -ENOBUFS;
337907f8e4d0SFlorian Westphal 			}
3380e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
3381e2080072SEric Dumazet 
33825889e2c0SYousuk Seung 		if (!err) {
3383a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
33845889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
33855889e2c0SYousuk Seung 		}
3386117632e6SEric Dumazet 	} else {
3387c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3388117632e6SEric Dumazet 	}
3389c84a5711SYuchung Cheng 
33907f12422cSYuchung Cheng 	/* To avoid taking spuriously low RTT samples based on a timestamp
33917f12422cSYuchung Cheng 	 * for a transmit that never happened, always mark EVER_RETRANS
33927f12422cSYuchung Cheng 	 */
33937f12422cSYuchung Cheng 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
33947f12422cSYuchung Cheng 
3395a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3396a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3397a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
3398a31ad29eSLawrence Brakmo 
3399fc9f3501SEric Dumazet 	if (likely(!err)) {
3400e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
3401678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
3402ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3403fc9f3501SEric Dumazet 	}
3404c84a5711SYuchung Cheng 	return err;
340593b174adSYuchung Cheng }
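/* Worked example of the length computation above (illustrative values):
 * cur_mss = 1000 and segs = 3 give len = 3000. If only avail_wnd = 2500
 * bytes remain in the receive window, len is rounded down to
 * rounddown(2500, 1000) = 2000 and the skb is fragmented so the
 * retransmit stays inside the announced window; with avail_wnd = 500,
 * i.e. less than one mss, len falls back to avail_wnd itself.
 */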
340693b174adSYuchung Cheng 
340710d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
340893b174adSYuchung Cheng {
340993b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
341010d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
34111da177e4SLinus Torvalds 
34121da177e4SLinus Torvalds 	if (err == 0) {
34131da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
34141da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3415e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
34161da177e4SLinus Torvalds 		}
34171da177e4SLinus Torvalds #endif
34181da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
34191da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
34207ae18975SYuchung Cheng 	}
34211da177e4SLinus Torvalds 
34227ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
34231da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
3424614e8316SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
34251da177e4SLinus Torvalds 
34266e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
34276e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
34286e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
34291da177e4SLinus Torvalds 	return err;
34301da177e4SLinus Torvalds }
34311da177e4SLinus Torvalds 
34321da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
34331da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
34341da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
34351da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
34361da177e4SLinus Torvalds  */
34371da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
34381da177e4SLinus Torvalds {
34396687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3440b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
34411da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3442916e6d1aSEric Dumazet 	bool rearm_timer = false;
3443840a3cbeSYuchung Cheng 	u32 max_segs;
344461eb55f4SIlpo Järvinen 	int mib_idx;
34456a438bbeSStephen Hemminger 
344645e77d31SIlpo Järvinen 	if (!tp->packets_out)
344745e77d31SIlpo Järvinen 		return;
344845e77d31SIlpo Järvinen 
344975c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3450b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3451ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
345275c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3453dca0aaf8SEric Dumazet 		__u8 sacked;
345410d3be56SEric Dumazet 		int segs;
34551da177e4SLinus Torvalds 
3456218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3457218af599SEric Dumazet 			break;
3458218af599SEric Dumazet 
34596a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
346051456b29SIan Morris 		if (!hole)
34616a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
34626a438bbeSStephen Hemminger 
346340570375SEric Dumazet 		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
346410d3be56SEric Dumazet 		if (segs <= 0)
3465916e6d1aSEric Dumazet 			break;
3466dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3467a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3468a3d2e9f8SEric Dumazet 		 * we need to make sure we do not send overly big TSO packets
3469a3d2e9f8SEric Dumazet 		 */
3470a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
34710e1c54c2SIlpo Järvinen 
3472840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3473006f582cSIlpo Järvinen 			break;
34740e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
347551456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
34760e1c54c2SIlpo Järvinen 				hole = skb;
347761eb55f4SIlpo Järvinen 			continue;
34781da177e4SLinus Torvalds 
34790e1c54c2SIlpo Järvinen 		} else {
34800e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
34810e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
34820e1c54c2SIlpo Järvinen 			else
34830e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
34840e1c54c2SIlpo Järvinen 		}
34850e1c54c2SIlpo Järvinen 
34860e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
348761eb55f4SIlpo Järvinen 			continue;
348840b215e5SPavel Emelyanov 
3489f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3490916e6d1aSEric Dumazet 			break;
3491f9616c35SEric Dumazet 
349210d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
3493916e6d1aSEric Dumazet 			break;
349424ab6becSYuchung Cheng 
3495de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
34961da177e4SLinus Torvalds 
3497684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3498a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3499a262f0cdSNandita Dukkipati 
350075c119afSEric Dumazet 		if (skb == rtx_head &&
350157dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3502916e6d1aSEric Dumazet 			rearm_timer = true;
3503916e6d1aSEric Dumazet 
3504916e6d1aSEric Dumazet 	}
3505916e6d1aSEric Dumazet 	if (rearm_timer)
35063f80e08fSEric Dumazet 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35073f421baaSArnaldo Carvalho de Melo 				     inet_csk(sk)->icsk_rto,
35088dc242adSEric Dumazet 				     TCP_RTO_MAX);
35091da177e4SLinus Torvalds }
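/* Sketch of the walk above over a hypothetical rtx queue marked
 * L(ost), S(acked), R(etransmitted): for [L, L, S, L, R], the two
 * leading lost segments are resent, the SACKed one is skipped, the
 * fourth (lost) one is resent, and the already-retransmitted one is
 * skipped; the walk stops early if pacing, cwnd (segs <= 0) or the
 * small-queue check says so, and "hole" remembers the first skb that
 * is not marked lost, SACKed or retransmitted as the next restart hint.
 */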
35101da177e4SLinus Torvalds 
3511d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3512d83769a5SEric Dumazet  * connection tear-down and (memory) recovery.
3513845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay the FIN
3514845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3515a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3516a6c5ea4cSEric Dumazet  * with edge-triggered epoll()
3517d83769a5SEric Dumazet  */
3518a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3519d83769a5SEric Dumazet {
3520c4ee1185SEric Dumazet 	int delta, amt;
3521d83769a5SEric Dumazet 
3522c4ee1185SEric Dumazet 	delta = size - sk->sk_forward_alloc;
3523c4ee1185SEric Dumazet 	if (delta <= 0)
3524d83769a5SEric Dumazet 		return;
3525c4ee1185SEric Dumazet 	amt = sk_mem_pages(delta);
35265e6300e7SEric Dumazet 	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3527e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3528e805605cSJohannes Weiner 
3529baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
35304b1327beSWei Wang 		mem_cgroup_charge_skmem(sk->sk_memcg, amt,
35314b1327beSWei Wang 					gfp_memcg_charge() | __GFP_NOFAIL);
3532d83769a5SEric Dumazet }
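/* Numerical sketch (hypothetical values, assuming 4 KiB pages): with
 * size = 6000 and sk_forward_alloc = 1000, delta = 5000 and
 * sk_mem_pages(5000) = 2, so sk_forward_alloc grows by 2 << PAGE_SHIFT
 * = 8192 bytes while the global (and memcg) counters are charged
 * unconditionally, __GFP_NOFAIL style, even past the normal limits.
 */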
3533d83769a5SEric Dumazet 
3534845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3535845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
35361da177e4SLinus Torvalds  */
35371da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
35381da177e4SLinus Torvalds {
3539ee2aabd3SEric Dumazet 	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
35401da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35411da177e4SLinus Torvalds 
3542845704a5SEric Dumazet 	/* Optimization: tack on the FIN if we have one skb in the write queue
3543845704a5SEric Dumazet 	 * and that skb has not yet been sent, or we are under memory pressure.
3544845704a5SEric Dumazet 	 * Note: in the latter case, FIN packet will be sent after a timeout,
3545845704a5SEric Dumazet 	 * as TCP stack thinks it has already been transmitted.
35461da177e4SLinus Torvalds 	 */
3547ee2aabd3SEric Dumazet 	tskb = tail;
354875c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
354975c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
355075c119afSEric Dumazet 
355175c119afSEric Dumazet 	if (tskb) {
3552845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3553845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
35541da177e4SLinus Torvalds 		tp->write_seq++;
3555ee2aabd3SEric Dumazet 		if (!tail) {
3556845704a5SEric Dumazet 			/* This means tskb was already sent.
3557845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3558845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3559845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3560845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3561845704a5SEric Dumazet 			 */
3562e0d694d6SEric Dumazet 			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3563845704a5SEric Dumazet 			return;
3564845704a5SEric Dumazet 		}
35651da177e4SLinus Torvalds 	} else {
3566*94062790SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER,
3567*94062790SEric Dumazet 				       sk_gfp_mask(sk, GFP_ATOMIC |
3568*94062790SEric Dumazet 						       __GFP_NOWARN));
3569d1edc085SColin Ian King 		if (unlikely(!skb))
3570845704a5SEric Dumazet 			return;
3571d1edc085SColin Ian King 
3572e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3573d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3574a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
35751da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3576e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3577a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
35781da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
35791da177e4SLinus Torvalds 	}
3580845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
35811da177e4SLinus Torvalds }
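/* Sketch of the two paths above (hypothetical state): if the tail skb
 * covers [5000, 5800) and has not been sent yet, the FIN is folded in
 * by setting TCPHDR_FIN and bumping end_seq and write_seq to 5801; no
 * allocation is needed. Only when no suitable skb exists does the slow
 * path allocate a fresh header-only skb carrying ACK|FIN and queue it,
 * with sk_forced_mem_schedule() overriding memory limits if required.
 */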
35821da177e4SLinus Torvalds 
35831da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
35841da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
35851da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
358665bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
35871da177e4SLinus Torvalds  */
3588dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
35891da177e4SLinus Torvalds {
35901da177e4SLinus Torvalds 	struct sk_buff *skb;
35911da177e4SLinus Torvalds 
35927cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
35937cc2b043SGao Feng 
35941da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
35951da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
35961da177e4SLinus Torvalds 	if (!skb) {
35974e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
35981da177e4SLinus Torvalds 		return;
35991da177e4SLinus Torvalds 	}
36001da177e4SLinus Torvalds 
36011da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36021da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3603e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3604a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
36059a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
36061da177e4SLinus Torvalds 	/* Send it off. */
3607dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
36084e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3609c24b14c4SSong Liu 
3610c24b14c4SSong Liu 	/* The skb argument of trace_tcp_send_reset() keeps the skb that caused
3611c24b14c4SSong Liu 	 * the RST; the skb here is different from the troublesome one, so pass NULL
3612c24b14c4SSong Liu 	 */
3613c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
36141da177e4SLinus Torvalds }
36151da177e4SLinus Torvalds 
361667edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
361767edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
36181da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
36191da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
36201da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
36211da177e4SLinus Torvalds  */
36221da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
36231da177e4SLinus Torvalds {
36241da177e4SLinus Torvalds 	struct sk_buff *skb;
36251da177e4SLinus Torvalds 
362675c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
362751456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
362875c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
36291da177e4SLinus Torvalds 		return -EFAULT;
36301da177e4SLinus Torvalds 	}
36314de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
36321da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3633e2080072SEric Dumazet 			struct sk_buff *nskb;
3634e2080072SEric Dumazet 
3635e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3636e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3637e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
363851456b29SIan Morris 			if (!nskb)
36391da177e4SLinus Torvalds 				return -ENOMEM;
3640e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
36412bec445fSEric Dumazet 			tcp_highest_sack_replace(sk, skb, nskb);
364275c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3643f4a775d1SEric Dumazet 			__skb_header_release(nskb);
364475c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3645ab4e846aSEric Dumazet 			sk_wmem_queued_add(sk, nskb->truesize);
36463ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
36471da177e4SLinus Torvalds 			skb = nskb;
36481da177e4SLinus Torvalds 		}
36491da177e4SLinus Torvalds 
36504de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3651735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
36521da177e4SLinus Torvalds 	}
3653dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
36541da177e4SLinus Torvalds }
36551da177e4SLinus Torvalds 
36564aea39c1SEric Dumazet /**
3657331fca43SMartin KaFai Lau  * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3658331fca43SMartin KaFai Lau  * @sk: listener socket
3659331fca43SMartin KaFai Lau  * @dst: dst entry attached to the SYNACK. It is consumed and caller
3660331fca43SMartin KaFai Lau  *       should not use it again.
3661331fca43SMartin KaFai Lau  * @req: request_sock pointer
3662331fca43SMartin KaFai Lau  * @foc: cookie for tcp fast open
3663331fca43SMartin KaFai Lau  * @synack_type: Type of synack to prepare
3664331fca43SMartin KaFai Lau  * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
36654aea39c1SEric Dumazet  */
36665d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3667e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3668ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3669331fca43SMartin KaFai Lau 				enum tcp_synack_type synack_type,
3670331fca43SMartin KaFai Lau 				struct sk_buff *syn_skb)
36711da177e4SLinus Torvalds {
36722e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
36735d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
36745d062de7SEric Dumazet 	struct tcp_out_options opts;
36751e03d32bSDmitry Safonov 	struct tcp_key key = {};
36765d062de7SEric Dumazet 	struct sk_buff *skb;
3677bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
36785d062de7SEric Dumazet 	struct tcphdr *th;
3679f5fff5dcSTom Quetchenbach 	int mss;
3680a842fe14SEric Dumazet 	u64 now;
36811da177e4SLinus Torvalds 
3682ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
36834aea39c1SEric Dumazet 	if (unlikely(!skb)) {
36844aea39c1SEric Dumazet 		dst_release(dst);
36851da177e4SLinus Torvalds 		return NULL;
36864aea39c1SEric Dumazet 	}
36871da177e4SLinus Torvalds 	/* Reserve space for headers. */
36881da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36891da177e4SLinus Torvalds 
3690b3d05147SEric Dumazet 	switch (synack_type) {
3691b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
36929e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3693b3d05147SEric Dumazet 		break;
3694b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3695b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3696b3d05147SEric Dumazet 		 * to avoid false sharing.
3697b3d05147SEric Dumazet 		 */
3698b3d05147SEric Dumazet 		break;
3699b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3700ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that multiple
3701ca6fb065SEric Dumazet 		 * cpus might call us concurrently.
3702ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3703ca6fb065SEric Dumazet 		 */
3704ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3705b3d05147SEric Dumazet 		break;
3706ca6fb065SEric Dumazet 	}
37074aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
37081da177e4SLinus Torvalds 
37093541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3710f5fff5dcSTom Quetchenbach 
371133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3712a842fe14SEric Dumazet 	now = tcp_clock_ns();
37138b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
3714f8ace8d9SFlorian Westphal 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3715a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3716a1ac9c8aSMartin KaFai Lau 				      true);
37178b5f12d0SFlorian Westphal 	else
37188b5f12d0SFlorian Westphal #endif
37199e450c1eSYuchung Cheng 	{
3720a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, now, true);
37219e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
37229e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
37239e450c1eSYuchung Cheng 	}
372480f03e27SEric Dumazet 
37259427c6aaSDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
372680f03e27SEric Dumazet 	rcu_read_lock();
37279427c6aaSDmitry Safonov #endif
37289427c6aaSDmitry Safonov 	if (tcp_rsk_used_ao(req)) {
37299427c6aaSDmitry Safonov #ifdef CONFIG_TCP_AO
37309427c6aaSDmitry Safonov 		struct tcp_ao_key *ao_key = NULL;
37319427c6aaSDmitry Safonov 		u8 keyid = tcp_rsk(req)->ao_keyid;
37329427c6aaSDmitry Safonov 
37339427c6aaSDmitry Safonov 		ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
37349427c6aaSDmitry Safonov 							    keyid, -1);
37359427c6aaSDmitry Safonov 		/* If there is no matching key - avoid sending anything,
37369427c6aaSDmitry Safonov 		 * especially unsigned segments. It could try harder and look up
37379427c6aaSDmitry Safonov 		 * another peer-matching key, but the peer has requested
37389427c6aaSDmitry Safonov 		 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
37399427c6aaSDmitry Safonov 		 */
37409396c4eeSDmitry Safonov 		if (unlikely(!ao_key)) {
37419427c6aaSDmitry Safonov 			rcu_read_unlock();
37429427c6aaSDmitry Safonov 			kfree_skb(skb);
37439396c4eeSDmitry Safonov 			net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
37449396c4eeSDmitry Safonov 					     keyid);
37459427c6aaSDmitry Safonov 			return NULL;
37469427c6aaSDmitry Safonov 		}
37479427c6aaSDmitry Safonov 		key.ao_key = ao_key;
37489427c6aaSDmitry Safonov 		key.type = TCP_KEY_AO;
37499427c6aaSDmitry Safonov #endif
37509427c6aaSDmitry Safonov 	} else {
37519427c6aaSDmitry Safonov #ifdef CONFIG_TCP_MD5SIG
37529427c6aaSDmitry Safonov 		key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
37539427c6aaSDmitry Safonov 					req_to_sk(req));
37549427c6aaSDmitry Safonov 		if (key.md5_key)
37551e03d32bSDmitry Safonov 			key.type = TCP_KEY_MD5;
375680f03e27SEric Dumazet #endif
37579427c6aaSDmitry Safonov 	}
37585e526552SEric Dumazet 	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
3759331fca43SMartin KaFai Lau 	/* bpf program will be interested in the tcp_flags */
3760331fca43SMartin KaFai Lau 	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
37619427c6aaSDmitry Safonov 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts,
37629427c6aaSDmitry Safonov 					     &key, foc, synack_type, syn_skb)
37639427c6aaSDmitry Safonov 					+ sizeof(*th);
376433ad798cSAdam Langley 
3765aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3766aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
37671da177e4SLinus Torvalds 
3768ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
37691da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
37701da177e4SLinus Torvalds 	th->syn = 1;
37711da177e4SLinus Torvalds 	th->ack = 1;
37726ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3773b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3774634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3775e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
37763b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
37773b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
37788336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
37798336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
37801da177e4SLinus Torvalds 
37811da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3782ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
37839427c6aaSDmitry Safonov 	tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key);
37841da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
3785bced3f7dSBreno Leitao 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3786cfb6eeb4SYOSHIFUJI Hideaki 
3787cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
37889427c6aaSDmitry Safonov 	if (tcp_key_is_md5(&key)) {
37899427c6aaSDmitry Safonov #ifdef CONFIG_TCP_MD5SIG
3790bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
37919427c6aaSDmitry Safonov 					key.md5_key, req_to_sk(req), skb);
37929427c6aaSDmitry Safonov #endif
37939427c6aaSDmitry Safonov 	} else if (tcp_key_is_ao(&key)) {
37949427c6aaSDmitry Safonov #ifdef CONFIG_TCP_AO
37959427c6aaSDmitry Safonov 		tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location,
37969427c6aaSDmitry Safonov 					key.ao_key, req, skb,
37979427c6aaSDmitry Safonov 					opts.hash_location - (u8 *)th, 0);
37989427c6aaSDmitry Safonov #endif
37999427c6aaSDmitry Safonov 	}
38009427c6aaSDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
380180f03e27SEric Dumazet 	rcu_read_unlock();
3802cfb6eeb4SYOSHIFUJI Hideaki #endif
3803cfb6eeb4SYOSHIFUJI Hideaki 
3804331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3805331fca43SMartin KaFai Lau 				synack_type, &opts);
3806331fca43SMartin KaFai Lau 
3807a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, now, true);
3808a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3809a842fe14SEric Dumazet 
38101da177e4SLinus Torvalds 	return skb;
38111da177e4SLinus Torvalds }
38124bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
38131da177e4SLinus Torvalds 
381481164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
381581164413SDaniel Borkmann {
381681164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
381781164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
381881164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
381981164413SDaniel Borkmann 
382081164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
382181164413SDaniel Borkmann 		return;
382281164413SDaniel Borkmann 
382381164413SDaniel Borkmann 	rcu_read_lock();
382481164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
38250baf26b0SMartin KaFai Lau 	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
38260baf26b0SMartin KaFai Lau 		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
382781164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
382881164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
382981164413SDaniel Borkmann 	}
383081164413SDaniel Borkmann 	rcu_read_unlock();
383181164413SDaniel Borkmann }
383281164413SDaniel Borkmann 
383367edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3834f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
38351da177e4SLinus Torvalds {
3836cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
38371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
38381da177e4SLinus Torvalds 	__u8 rcv_wscale;
383913d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
38401da177e4SLinus Torvalds 
38411da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
38421da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
38431da177e4SLinus Torvalds 	 */
38445d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
38453666f666SKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
38465d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
38471da177e4SLinus Torvalds 
38487c2ffaf2SDmitry Safonov 	tcp_ao_connect_init(sk);
38497c2ffaf2SDmitry Safonov 
38501da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it as the clamp */
38511da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
38521da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
38531da177e4SLinus Torvalds 	tp->max_window = 0;
38545d424d5aSJohn Heffner 	tcp_mtup_init(sk);
38551da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
38561da177e4SLinus Torvalds 
385781164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
385881164413SDaniel Borkmann 
38591da177e4SLinus Torvalds 	if (!tp->window_clamp)
38601da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
38613541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3862f5fff5dcSTom Quetchenbach 
38631da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
38641da177e4SLinus Torvalds 
3865e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3866e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3867e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3868e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3869e88c64f0SHagen Paul Pfeifer 
387013d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
387113d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
387213d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
387313d3b1ebSLawrence Brakmo 
3874ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
38751da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
38761da177e4SLinus Torvalds 				  &tp->rcv_wnd,
38771da177e4SLinus Torvalds 				  &tp->window_clamp,
38783666f666SKuniyuki Iwashima 				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
387931d12926Slaurent chavey 				  &rcv_wscale,
388013d3b1ebSLawrence Brakmo 				  rcv_wnd);
38811da177e4SLinus Torvalds 
38821da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
38831da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
38841da177e4SLinus Torvalds 
3885e13ec3daSEric Dumazet 	WRITE_ONCE(sk->sk_err, 0);
38861da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
38871da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3888ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
38897f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
38901da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
38911da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
389233f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3893e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3894ee995283SPavel Emelyanov 
3895ee995283SPavel Emelyanov 	if (likely(!tp->repair))
38961da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3897c7781a6eSAndrew Vagin 	else
389870eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3899ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
39007db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
39011da177e4SLinus Torvalds 
39028550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3903463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
39041da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
39051da177e4SLinus Torvalds }
39061da177e4SLinus Torvalds 
3907783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3908783237e8SYuchung Cheng {
3909783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3910783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3911783237e8SYuchung Cheng 
3912783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3913f4a775d1SEric Dumazet 	__skb_header_release(skb);
3914ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
3915783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
39160f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3917783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3918783237e8SYuchung Cheng }
3919783237e8SYuchung Cheng 
3920783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3921783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3922783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3923783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3924783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending a
3925783237e8SYuchung Cheng  * regular SYN with the Fast Open cookie request option.
3926783237e8SYuchung Cheng  */
3927783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3928783237e8SYuchung Cheng {
3929ed0c99dcSJakub Kicinski 	struct inet_connection_sock *icsk = inet_csk(sk);
3930783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3931783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3932fbf93406SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
3933355a901eSEric Dumazet 	struct sk_buff *syn_data;
3934fbf93406SEric Dumazet 	int space, err = 0;
3935783237e8SYuchung Cheng 
393667da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3937065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3938783237e8SYuchung Cheng 		goto fallback;
3939783237e8SYuchung Cheng 
3940783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3941783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3942783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3943783237e8SYuchung Cheng 	 */
39443541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3945ed0c99dcSJakub Kicinski 	/* Sync mss_cache after updating the mss_clamp */
3946ed0c99dcSJakub Kicinski 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
39473541f9e8SEric Dumazet 
3948ed0c99dcSJakub Kicinski 	space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
3949783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3950783237e8SYuchung Cheng 
3951f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3952f5ddcbbbSEric Dumazet 
3953fbf93406SEric Dumazet 	if (space &&
3954fbf93406SEric Dumazet 	    !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
3955fbf93406SEric Dumazet 				  pfrag, sk->sk_allocation))
3956fbf93406SEric Dumazet 		goto fallback;
39575882efffSEric Dumazet 	syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false);
3958355a901eSEric Dumazet 	if (!syn_data)
3959783237e8SYuchung Cheng 		goto fallback;
3960355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
396107e100f9SEric Dumazet 	if (space) {
3962fbf93406SEric Dumazet 		space = min_t(size_t, space, pfrag->size - pfrag->offset);
3963fbf93406SEric Dumazet 		space = tcp_wmem_schedule(sk, space);
3964fbf93406SEric Dumazet 	}
3965fbf93406SEric Dumazet 	if (space) {
3966fbf93406SEric Dumazet 		space = copy_page_from_iter(pfrag->page, pfrag->offset,
3967fbf93406SEric Dumazet 					    space, &fo->data->msg_iter);
3968fbf93406SEric Dumazet 		if (unlikely(!space)) {
3969ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3970355a901eSEric Dumazet 			kfree_skb(syn_data);
3971783237e8SYuchung Cheng 			goto fallback;
3972783237e8SYuchung Cheng 		}
3973fbf93406SEric Dumazet 		skb_fill_page_desc(syn_data, 0, pfrag->page,
3974fbf93406SEric Dumazet 				   pfrag->offset, space);
3975fbf93406SEric Dumazet 		page_ref_inc(pfrag->page);
3976fbf93406SEric Dumazet 		pfrag->offset += space;
3977fbf93406SEric Dumazet 		skb_len_add(syn_data, space);
3978f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
397907e100f9SEric Dumazet 	}
3980355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3981355a901eSEric Dumazet 	if (space == fo->size)
3982355a901eSEric Dumazet 		fo->data = NULL;
3983355a901eSEric Dumazet 	fo->copied = space;
3984783237e8SYuchung Cheng 
3985355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
39860f87230dSFrancis Yan 	if (syn_data->len)
39870f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3988355a901eSEric Dumazet 
3989355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3990355a901eSEric Dumazet 
3991a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
3992355a901eSEric Dumazet 
3993355a901eSEric Dumazet 	/* Now full SYN+DATA was cloned and sent (or not),
3994355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3995355a901eSEric Dumazet 	 * we keep in write queue in case of a retransmit, as we
3996355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3997431a9124SEric Dumazet 	 */
3998355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3999355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
4000355a901eSEric Dumazet 	if (!err) {
400167da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
400275c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
4003f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
4004783237e8SYuchung Cheng 		goto done;
4005783237e8SYuchung Cheng 	}
4006783237e8SYuchung Cheng 
400775c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
400875c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
4009b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
4010b5b7db8dSEric Dumazet 
4011783237e8SYuchung Cheng fallback:
4012783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
4013783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
4014783237e8SYuchung Cheng 		fo->cookie.len = 0;
4015783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
4016783237e8SYuchung Cheng 	if (err)
4017783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
4018783237e8SYuchung Cheng done:
4019783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
4020783237e8SYuchung Cheng 	return err;
4021783237e8SYuchung Cheng }
4022783237e8SYuchung Cheng 
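/* Illustrative userspace counterpart (not part of this file): the
 * SYN+data path above is normally exercised via TCP Fast Open, e.g.:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	// Assumes the client bit of net.ipv4.tcp_fastopen is set and a
 *	// cookie was cached from an earlier connection to this server.
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * Without a valid cached cookie, a plain SYN carrying a Fast Open
 * cookie request is sent instead, as in the fallback path above.
 */
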
402367edfef7SAndi Kleen /* Build a SYN and send it off. */
40241da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
40251da177e4SLinus Torvalds {
40261da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
40271da177e4SLinus Torvalds 	struct sk_buff *buff;
4028ee586811SEric Paris 	int err;
40291da177e4SLinus Torvalds 
4030de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
40318ba60924SEric Dumazet 
40320aadc739SDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
40330aadc739SDmitry Safonov 	/* Has to be checked late, after setting daddr/saddr/ops.
40340aadc739SDmitry Safonov 	 * Return an error if the peer has both an MD5 and a TCP-AO key
40350aadc739SDmitry Safonov 	 * configured, as this is ambiguous.
40360aadc739SDmitry Safonov 	 */
40370aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->md5sig_info,
40380aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
40390aadc739SDmitry Safonov 		bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1);
40400aadc739SDmitry Safonov 		bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk);
40410aadc739SDmitry Safonov 		struct tcp_ao_info *ao_info;
40420aadc739SDmitry Safonov 
40430aadc739SDmitry Safonov 		ao_info = rcu_dereference_check(tp->ao_info,
40440aadc739SDmitry Safonov 						lockdep_sock_is_held(sk));
40450aadc739SDmitry Safonov 		if (ao_info) {
40460aadc739SDmitry Safonov 			/* This is an extra check: tcp_ao_required() in
40470aadc739SDmitry Safonov 			 * tcp_v{4,6}_parse_md5_keys() should prevent adding
40480aadc739SDmitry Safonov 			 * MD5 keys on an ao_required socket.
40490aadc739SDmitry Safonov 			 */
40500aadc739SDmitry Safonov 			needs_ao |= ao_info->ao_required;
40510aadc739SDmitry Safonov 			WARN_ON_ONCE(ao_info->ao_required && needs_md5);
40520aadc739SDmitry Safonov 		}
40530aadc739SDmitry Safonov 		if (needs_md5 && needs_ao)
40540aadc739SDmitry Safonov 			return -EKEYREJECTED;
40550aadc739SDmitry Safonov 
40560aadc739SDmitry Safonov 		/* If we have a matching md5 key and no matching tcp-ao key
40570aadc739SDmitry Safonov 		 * then free up ao_info if allocated.
40580aadc739SDmitry Safonov 		 */
40590aadc739SDmitry Safonov 		if (needs_md5) {
4060decde258SDmitry Safonov 			tcp_ao_destroy_sock(sk, false);
40610aadc739SDmitry Safonov 		} else if (needs_ao) {
40620aadc739SDmitry Safonov 			tcp_clear_md5_list(sk);
40630aadc739SDmitry Safonov 			kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
40640aadc739SDmitry Safonov 						  lockdep_sock_is_held(sk)));
40650aadc739SDmitry Safonov 		}
40660aadc739SDmitry Safonov 	}
40670aadc739SDmitry Safonov #endif
40680aadc739SDmitry Safonov #ifdef CONFIG_TCP_AO
40690aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->ao_info,
40700aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
40710aadc739SDmitry Safonov 		/* Don't allow connecting if ao is configured but no
40720aadc739SDmitry Safonov 		 * matching key is found.
40730aadc739SDmitry Safonov 		 */
40740aadc739SDmitry Safonov 		if (!tp->af_specific->ao_lookup(sk, sk, -1, -1))
40750aadc739SDmitry Safonov 			return -EKEYREJECTED;
40760aadc739SDmitry Safonov 	}
40770aadc739SDmitry Safonov #endif
40780aadc739SDmitry Safonov 
40798ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
40808ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
40818ba60924SEric Dumazet 
40821da177e4SLinus Torvalds 	tcp_connect_init(sk);
40831da177e4SLinus Torvalds 
40842b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
40852b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
40862b916477SAndrey Vagin 		return 0;
40872b916477SAndrey Vagin 	}
40882b916477SAndrey Vagin 
40895882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
4090355a901eSEric Dumazet 	if (unlikely(!buff))
40911da177e4SLinus Torvalds 		return -ENOBUFS;
40921da177e4SLinus Torvalds 
4093a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
40949a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
40959d0c00f5SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp_ts(tp);
4096783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
4097735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
409875c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
40991da177e4SLinus Torvalds 
4100783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
4101783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
4102783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
4103ee586811SEric Paris 	if (err == -ECONNREFUSED)
4104ee586811SEric Paris 		return err;
4105bd37a088SWei Yongjun 
4106bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
4107bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
4108bd37a088SWei Yongjun 	 */
4109e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4110bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
4111b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
4112b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
4113e0d694d6SEric Dumazet 		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
4114b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
4115b5b7db8dSEric Dumazet 	}
411681cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
41171da177e4SLinus Torvalds 
41181da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer arrives. */
41193f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
41203f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
41211da177e4SLinus Torvalds 	return 0;
41221da177e4SLinus Torvalds }
41234bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
41241da177e4SLinus Torvalds 
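/* Note: tcp_connect() is reached via sk->sk_prot->connect(), i.e.
 * tcp_v4_connect() or tcp_v6_connect(), once the route and the
 * source/destination addresses have been set up.
 */
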
4125bbf80d71SEric Dumazet u32 tcp_delack_max(const struct sock *sk)
4126bbf80d71SEric Dumazet {
4127bbf80d71SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
4128bbf80d71SEric Dumazet 	u32 delack_max = inet_csk(sk)->icsk_delack_max;
4129bbf80d71SEric Dumazet 
4130bbf80d71SEric Dumazet 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
4131bbf80d71SEric Dumazet 		u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
4132bbf80d71SEric Dumazet 		u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
4133bbf80d71SEric Dumazet 
4134bbf80d71SEric Dumazet 		delack_max = min_t(u32, delack_max, delack_from_rto_min);
4135bbf80d71SEric Dumazet 	}
4136bbf80d71SEric Dumazet 	return delack_max;
4137bbf80d71SEric Dumazet }
4138bbf80d71SEric Dumazet 
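/* Worked example for tcp_delack_max() (hypothetical numbers, HZ=1000):
 * with a route-locked RTAX_RTO_MIN of 40 ms, delack_from_rto_min is
 * max(1, 40 - 1) = 39 jiffies, keeping the delayed-ACK timeout just
 * below the peer's minimum RTO so that delayed ACKs do not trigger
 * spurious retransmits.
 */
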
41391da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
41401da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
41411da177e4SLinus Torvalds  * for details.
41421da177e4SLinus Torvalds  */
41431da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
41441da177e4SLinus Torvalds {
4145463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
4146463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
41471da177e4SLinus Torvalds 	unsigned long timeout;
41481da177e4SLinus Torvalds 
41491da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
4150463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
41511da177e4SLinus Torvalds 		int max_ato = HZ / 2;
41521da177e4SLinus Torvalds 
415331954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
4154056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
41551da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
41561da177e4SLinus Torvalds 
41571da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
41581da177e4SLinus Torvalds 
41591da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
4160463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of rtt measurements
41611da177e4SLinus Torvalds 		 * directly.
41621da177e4SLinus Torvalds 		 */
4163740b0f18SEric Dumazet 		if (tp->srtt_us) {
4164740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
4165740b0f18SEric Dumazet 					TCP_DELACK_MIN);
41661da177e4SLinus Torvalds 
41671da177e4SLinus Torvalds 			if (rtt < max_ato)
41681da177e4SLinus Torvalds 				max_ato = rtt;
41691da177e4SLinus Torvalds 		}
41701da177e4SLinus Torvalds 
41711da177e4SLinus Torvalds 		ato = min(ato, max_ato);
41721da177e4SLinus Torvalds 	}
41731da177e4SLinus Torvalds 
4174bbf80d71SEric Dumazet 	ato = min_t(u32, ato, tcp_delack_max(sk));
41752b8ee4f0SMartin KaFai Lau 
41761da177e4SLinus Torvalds 	/* Stay within the limit we were given */
41771da177e4SLinus Torvalds 	timeout = jiffies + ato;
41781da177e4SLinus Torvalds 
41791da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already set. */
4180463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
4181b6b6d653SEric Dumazet 		/* If delack timer is about to expire, send ACK now. */
4182b6b6d653SEric Dumazet 		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
41831da177e4SLinus Torvalds 			tcp_send_ack(sk);
41841da177e4SLinus Torvalds 			return;
41851da177e4SLinus Torvalds 		}
41861da177e4SLinus Torvalds 
4187463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
4188463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
41891da177e4SLinus Torvalds 	}
4190463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
4191463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
4192463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
41931da177e4SLinus Torvalds }
41941da177e4SLinus Torvalds 
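/* Worked example for the ato clamping in tcp_send_delayed_ack() above
 * (hypothetical numbers, HZ=1000): in pingpong mode max_ato starts at
 * TCP_DELACK_MAX (200 ms); with a smoothed RTT of 40 ms
 * (tp->srtt_us >> 3 == 40000), the bound drops to
 * max(40 ms, TCP_DELACK_MIN) = 40 ms, so the delayed ACK goes out
 * within roughly one RTT.
 */
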
41951da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
41962987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
41971da177e4SLinus Torvalds {
41981da177e4SLinus Torvalds 	struct sk_buff *buff;
41991da177e4SLinus Torvalds 
4200058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
4201058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4202058dc334SIlpo Järvinen 		return;
4203058dc334SIlpo Järvinen 
42041da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
42051da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
42061da177e4SLinus Torvalds 	 * sock.
42071da177e4SLinus Torvalds 	 */
42087450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
42097450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
42107450aaf6SEric Dumazet 	if (unlikely(!buff)) {
4211a37c2134SEric Dumazet 		struct inet_connection_sock *icsk = inet_csk(sk);
4212a37c2134SEric Dumazet 		unsigned long delay;
4213a37c2134SEric Dumazet 
4214a37c2134SEric Dumazet 		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4215a37c2134SEric Dumazet 		if (delay < TCP_RTO_MAX)
4216a37c2134SEric Dumazet 			icsk->icsk_ack.retry++;
4217463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
4218a37c2134SEric Dumazet 		icsk->icsk_ack.ato = TCP_ATO_MIN;
4219a37c2134SEric Dumazet 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
42201da177e4SLinus Torvalds 		return;
42211da177e4SLinus Torvalds 	}
42221da177e4SLinus Torvalds 
42231da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
42241da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
4225a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
42261da177e4SLinus Torvalds 
422798781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
422898781965SEric Dumazet 	 * too much.
422998781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
423098781965SEric Dumazet 	 */
423198781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
423298781965SEric Dumazet 
42331da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
42342987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
42351da177e4SLinus Torvalds }
423627cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
42372987babbSYuchung Cheng 
42382987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
42392987babbSYuchung Cheng {
42402987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
42411da177e4SLinus Torvalds }
42421da177e4SLinus Torvalds 
42431da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
42441da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
42451da177e4SLinus Torvalds  *
42461da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
42471da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
42481da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
42491da177e4SLinus Torvalds  *
42501da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
42511da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
42521da177e4SLinus Torvalds  * out-of-date with SND.UNA-1, to probe the window.
42531da177e4SLinus Torvalds  */
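/* Example with SND.UNA = 1000: the probe pair consists of zero-length
 * segments with SEG.SEQ = 1000 (delivering the urgent pointer) and
 * SEG.SEQ = 999; the latter is already acknowledged, so it forces the
 * peer to reply with an ACK advertising its current window.
 */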
4254e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
42551da177e4SLinus Torvalds {
42561da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
42571da177e4SLinus Torvalds 	struct sk_buff *skb;
42581da177e4SLinus Torvalds 
42591da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
42607450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
42617450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
426251456b29SIan Morris 	if (!skb)
42631da177e4SLinus Torvalds 		return -1;
42641da177e4SLinus Torvalds 
42651da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
42661da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
42671da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
42681da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
42691da177e4SLinus Torvalds 	 * send it.
42701da177e4SLinus Torvalds 	 */
4271a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4272e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
42737450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
42741da177e4SLinus Torvalds }
42751da177e4SLinus Torvalds 
4276385e2070SEric Dumazet /* Called from setsockopt(... TCP_REPAIR) */
4277ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
4278ee995283SPavel Emelyanov {
4279ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
4280ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
42819a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
4282e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4283ee995283SPavel Emelyanov 	}
4284ee995283SPavel Emelyanov }
4285ee995283SPavel Emelyanov 
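/* Illustrative userspace trigger (not part of this file): a
 * checkpoint/restore tool leaving repair mode makes the kernel emit a
 * window probe so the peer can resynchronize its view of the window:
 *
 *	int off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
 */
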
428667edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
4287e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
42881da177e4SLinus Torvalds {
42891da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
42901da177e4SLinus Torvalds 	struct sk_buff *skb;
42911da177e4SLinus Torvalds 
4292058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4293058dc334SIlpo Järvinen 		return -1;
4294058dc334SIlpo Järvinen 
429500db4124SIan Morris 	skb = tcp_send_head(sk);
429600db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
42971da177e4SLinus Torvalds 		int err;
42980c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
429990840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
43001da177e4SLinus Torvalds 
43011da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
43021da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
43031da177e4SLinus Torvalds 
43041da177e4SLinus Torvalds 		/* We are probing the opening of a window
43051da177e4SLinus Torvalds 		 * but the window size is != 0, which
43061da177e4SLinus Torvalds 		 * must have been a result of SWS avoidance (sender).
43071da177e4SLinus Torvalds 		 */
43081da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
43091da177e4SLinus Torvalds 		    skb->len > mss) {
43101da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
43114de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
431275c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
431375c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
43141da177e4SLinus Torvalds 				return -1;
43151da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
43165bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
43171da177e4SLinus Torvalds 
43184de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4319dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
432066f5fe62SIlpo Järvinen 		if (!err)
432166f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
43221da177e4SLinus Torvalds 		return err;
43231da177e4SLinus Torvalds 	} else {
432433f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4325e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
4326e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
43271da177e4SLinus Torvalds 	}
43281da177e4SLinus Torvalds }
43291da177e4SLinus Torvalds 
43301da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
43311da177e4SLinus Torvalds  * a partial packet; otherwise send a zero-window probe.
43321da177e4SLinus Torvalds  */
43331da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
43341da177e4SLinus Torvalds {
4335463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
43361da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4337c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
4338c1d5674fSYuchung Cheng 	unsigned long timeout;
43391da177e4SLinus Torvalds 	int err;
43401da177e4SLinus Torvalds 
4341e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
43421da177e4SLinus Torvalds 
434375c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
43441da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
43456687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
4346463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
43479d9b1ee0SEnke Chen 		icsk->icsk_probes_tstamp = 0;
43481da177e4SLinus Torvalds 		return;
43491da177e4SLinus Torvalds 	}
43501da177e4SLinus Torvalds 
4351c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
43521da177e4SLinus Torvalds 	if (err <= 0) {
435339e24435SKuniyuki Iwashima 		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
4354463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
4355c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
43561da177e4SLinus Torvalds 	} else {
43571da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
4358c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
43591da177e4SLinus Torvalds 		 */
4360c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
43611da177e4SLinus Torvalds 	}
4362344db93aSEnke Chen 
4363344db93aSEnke Chen 	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
43648dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
43651da177e4SLinus Torvalds }
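
/* Illustrative backoff for tcp_send_probe0() (hypothetical numbers):
 * with an RTO of 200 ms, unanswered zero-window probes are spaced
 * roughly 200 ms, 400 ms, 800 ms, ... as icsk_backoff grows, clamped
 * to TCP_RTO_MAX and further bounded by any user timeout via
 * tcp_clamp_probe0_to_user_timeout().
 */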
43665db92c99SOctavian Purdila 
4367ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
43685db92c99SOctavian Purdila {
43695db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
43705db92c99SOctavian Purdila 	struct flowi fl;
43715db92c99SOctavian Purdila 	int res;
43725db92c99SOctavian Purdila 
4373cb6cd2ceSAkhmat Karakotov 	/* Paired with WRITE_ONCE() in sock_setsockopt() */
4374cb6cd2ceSAkhmat Karakotov 	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
43755e526552SEric Dumazet 		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
4376331fca43SMartin KaFai Lau 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4377331fca43SMartin KaFai Lau 				  NULL);
43785db92c99SOctavian Purdila 	if (!res) {
43790a375c82SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
43800a375c82SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4381e9d9da91SEric Dumazet 		if (unlikely(tcp_passive_fastopen(sk))) {
4382e9d9da91SEric Dumazet 			/* sk has const attribute because listeners are lockless.
4383e9d9da91SEric Dumazet 		 * However, in this case we are dealing with a passive fastopen
4384e9d9da91SEric Dumazet 		 * socket, so we can change the total_retrans value.
4385e9d9da91SEric Dumazet 			 */
4386e9d9da91SEric Dumazet 			tcp_sk_rw(sk)->total_retrans++;
4387e9d9da91SEric Dumazet 		}
4388cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
43895db92c99SOctavian Purdila 	}
43905db92c99SOctavian Purdila 	return res;
43915db92c99SOctavian Purdila }
43925db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
4393