xref: /linux/net/ipv4/tcp_output.c (revision 594ce0b8a998aa4d05827cd7c0d0dcec9a1e3ae2)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
121da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
131da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
141da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
151da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
161da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
171da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
181da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
191da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds /*
231da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
241da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
251da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
261da177e4SLinus Torvalds  *				:	AF independence
271da177e4SLinus Torvalds  *
281da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
291da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
301da177e4SLinus Torvalds  *					during syn/ack processing.
311da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
321da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
331da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
341da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
351da177e4SLinus Torvalds  *
361da177e4SLinus Torvalds  */
371da177e4SLinus Torvalds 
3891df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3991df42beSJoe Perches 
401da177e4SLinus Torvalds #include <net/tcp.h>
41eda7acddSPeter Krystad #include <net/mptcp.h>
42f3d93817SEric Dumazet #include <net/proto_memory.h>
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds #include <linux/compiler.h>
455a0e3ad6STejun Heo #include <linux/gfp.h>
461da177e4SLinus Torvalds #include <linux/module.h>
4760e2a778SUrsula Braun #include <linux/static_key.h>
48f6d827b1SMina Almasry #include <linux/skbuff_ref.h>
491da177e4SLinus Torvalds 
50e086101bSCong Wang #include <trace/events/tcp.h>
5135089bb2SDavid S. Miller 
529799ccb0SEric Dumazet /* Refresh clocks of a TCP socket,
539799ccb0SEric Dumazet  * ensuring monotonically increasing values.
549799ccb0SEric Dumazet  */
559799ccb0SEric Dumazet void tcp_mstamp_refresh(struct tcp_sock *tp)
569799ccb0SEric Dumazet {
579799ccb0SEric Dumazet 	u64 val = tcp_clock_ns();
589799ccb0SEric Dumazet 
595f6188a8SEric Dumazet 	tp->tcp_clock_cache = val;
60e6d14070SEric Dumazet 	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
619799ccb0SEric Dumazet }
629799ccb0SEric Dumazet 
6346d3ceabSEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6446d3ceabSEric Dumazet 			   int push_one, gfp_t gfp);
65519855c5SWilliam Allen Simpson 
6667edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
6775c119afSEric Dumazet static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
686ff03ac3SIlpo Järvinen {
696ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
706ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
7166f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
729e412ba7SIlpo Järvinen 
73e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
748512430eSIlpo Järvinen 
7575c119afSEric Dumazet 	__skb_unlink(skb, &sk->sk_write_queue);
7675c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
7775c119afSEric Dumazet 
7885369750SCambda Zhu 	if (tp->highest_sack == NULL)
7985369750SCambda Zhu 		tp->highest_sack = skb;
8085369750SCambda Zhu 
8166f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
82bec41a11SYuchung Cheng 	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
83750ea2baSYuchung Cheng 		tcp_rearm_rto(sk);
84f19c29e3SYuchung Cheng 
85f7324acdSDavid S. Miller 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
86f19c29e3SYuchung Cheng 		      tcp_skb_pcount(skb));
874bfe744fSEric Dumazet 	tcp_check_space(sk);
886a5dc9e5SEric Dumazet }
891da177e4SLinus Torvalds 
90a4ecb15aSCui, Cheng /* Return SND.NXT if the window was not shrunk, or if the amount of shrinkage
91a4ecb15aSCui, Cheng  * was less than one window scaling factor (a loss of precision).
921da177e4SLinus Torvalds  * If the window has been shrunk, what should we return? It is not clear at all.
931da177e4SLinus Torvalds  * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
941da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND may also already be
951da177e4SLinus Torvalds  * invalid. OK, let's settle on this for now:
961da177e4SLinus Torvalds  */
97cf533ea5SEric Dumazet static inline __u32 tcp_acceptable_seq(const struct sock *sk)
981da177e4SLinus Torvalds {
99cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1009e412ba7SIlpo Järvinen 
101a4ecb15aSCui, Cheng 	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
102a4ecb15aSCui, Cheng 	    (tp->rx_opt.wscale_ok &&
103a4ecb15aSCui, Cheng 	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
1041da177e4SLinus Torvalds 		return tp->snd_nxt;
1051da177e4SLinus Torvalds 	else
10690840defSIlpo Järvinen 		return tcp_wnd_end(tp);
1071da177e4SLinus Torvalds }
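
/* Illustrative example (numbers are hypothetical): with rcv_wscale = 2,
 * tcp_wnd_end(tp) = 1000 and tp->snd_nxt = 1003, the window appears shrunk
 * by 3 sequence numbers, which is less than 1 << 2 = 4 and therefore within
 * the precision lost to window scaling, so SND.NXT is still returned.
 */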
1081da177e4SLinus Torvalds 
1091da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1101da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1111da177e4SLinus Torvalds  *
1121da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1131da177e4SLinus Torvalds  * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
1141da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1151da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1161da177e4SLinus Torvalds  *    large MSS.
1171da177e4SLinus Torvalds  * 4. We do not do 3; we advertise an MSS calculated from the first
1181da177e4SLinus Torvalds  *    hop device MTU, but allow it to be raised to ip_rt_min_advmss.
1191da177e4SLinus Torvalds  *    This may be overridden via information stored in the routing table.
1201da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1211da177e4SLinus Torvalds  *    probably even Jumbo".
1221da177e4SLinus Torvalds  */
1231da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1241da177e4SLinus Torvalds {
1251da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
126cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1271da177e4SLinus Torvalds 	int mss = tp->advmss;
1281da177e4SLinus Torvalds 
1290dbaee3bSDavid S. Miller 	if (dst) {
1300dbaee3bSDavid S. Miller 		unsigned int metric = dst_metric_advmss(dst);
1310dbaee3bSDavid S. Miller 
1320dbaee3bSDavid S. Miller 		if (metric < mss) {
1330dbaee3bSDavid S. Miller 			mss = metric;
1341da177e4SLinus Torvalds 			tp->advmss = mss;
1351da177e4SLinus Torvalds 		}
1360dbaee3bSDavid S. Miller 	}
1371da177e4SLinus Torvalds 
1381da177e4SLinus Torvalds 	return (__u16)mss;
1391da177e4SLinus Torvalds }
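
/* Illustrative example (typical values, not taken from this file): for an
 * IPv4 route over 1500-byte-MTU Ethernet, dst_metric_advmss() commonly
 * yields 1500 - 40 = 1460, so an initial tp->advmss of 65535 - 40 = 65495
 * would be lowered to 1460; a metric larger than the current advmss is
 * ignored.
 */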
1401da177e4SLinus Torvalds 
1411da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
1426f021c62SEric Dumazet  * This is the first part of cwnd validation mechanism.
1436f021c62SEric Dumazet  */
1446f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta)
1451da177e4SLinus Torvalds {
146463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1476f021c62SEric Dumazet 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
14840570375SEric Dumazet 	u32 cwnd = tcp_snd_cwnd(tp);
1491da177e4SLinus Torvalds 
1506687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1511da177e4SLinus Torvalds 
1526687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1531da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1541da177e4SLinus Torvalds 
155463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1561da177e4SLinus Torvalds 		cwnd >>= 1;
15740570375SEric Dumazet 	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
158c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
1591da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1601da177e4SLinus Torvalds }
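
/* Worked example: with icsk_rto = 200 ms, an idle time of delta = 650 ms
 * (in the same units), tcp_snd_cwnd(tp) = 40 and a restart window of 10:
 * the loop halves cwnd once per elapsed RTO while it stays above
 * restart_cwnd, 40 -> 20 -> 10, and cwnd restarts at max(10, 10) = 10.
 */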
1611da177e4SLinus Torvalds 
16267edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
16340efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
164cf533ea5SEric Dumazet 				struct sock *sk)
1651da177e4SLinus Torvalds {
166463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
167d635fbe2SEric Dumazet 	const u32 now = tcp_jiffies32;
1681da177e4SLinus Torvalds 
16905c5a46dSNeal Cardwell 	if (tcp_packets_in_flight(tp) == 0)
17005c5a46dSNeal Cardwell 		tcp_ca_event(sk, CA_EVENT_TX_START);
17105c5a46dSNeal Cardwell 
1724a41f453SWei Wang 	tp->lsndtime = now;
1734d8f24eeSWei Wang 
1744d8f24eeSWei Wang 	/* If this is a reply within ato after the last received
175562b1fdfSHaiyang Zhang 	 * packet, increase the pingpong count.
1764d8f24eeSWei Wang 	 */
1774d8f24eeSWei Wang 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
178562b1fdfSHaiyang Zhang 		inet_csk_inc_pingpong_cnt(sk);
1791da177e4SLinus Torvalds }
1801da177e4SLinus Torvalds 
18167edfef7SAndi Kleen /* Account for an ACK we sent. */
182059217c1SNeal Cardwell static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
1831da177e4SLinus Torvalds {
1845d9f4262SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1855d9f4262SEric Dumazet 
1862b195850SEric Dumazet 	if (unlikely(tp->compressed_ack)) {
187200d95f4SEric Dumazet 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
1882b195850SEric Dumazet 			      tp->compressed_ack);
1892b195850SEric Dumazet 		tp->compressed_ack = 0;
1905d9f4262SEric Dumazet 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
1915d9f4262SEric Dumazet 			__sock_put(sk);
1925d9f4262SEric Dumazet 	}
19327cde44aSYuchung Cheng 
19427cde44aSYuchung Cheng 	if (unlikely(rcv_nxt != tp->rcv_nxt))
19527cde44aSYuchung Cheng 		return;  /* Special ACK sent by DCTCP to reflect ECN */
196059217c1SNeal Cardwell 	tcp_dec_quickack_mode(sk);
197463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1981da177e4SLinus Torvalds }
1991da177e4SLinus Torvalds 
2001da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
2011da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
2021da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
2031da177e4SLinus Torvalds  * NOTE: for smooth operation the initial space offering should
2041da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
2051da177e4SLinus Torvalds  * This MUST be enforced by all callers.
2061da177e4SLinus Torvalds  */
207ceef9ab6SEric Dumazet void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
208f410cbeaSEric Dumazet 			       __u32 *rcv_wnd, __u32 *__window_clamp,
20931d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
21031d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2111da177e4SLinus Torvalds {
2121da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
213f410cbeaSEric Dumazet 	u32 window_clamp = READ_ONCE(*__window_clamp);
2141da177e4SLinus Torvalds 
2151da177e4SLinus Torvalds 	/* If no clamp is set, set the clamp to the max possible scaled window */
216f410cbeaSEric Dumazet 	if (window_clamp == 0)
217f410cbeaSEric Dumazet 		window_clamp = (U16_MAX << TCP_MAX_WSCALE);
218f410cbeaSEric Dumazet 	space = min(window_clamp, space);
2191da177e4SLinus Torvalds 
2201da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2211da177e4SLinus Torvalds 	if (space > mss)
222589c49cbSGao Feng 		space = rounddown(space, mss);
2231da177e4SLinus Torvalds 
2241da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
22515d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
22615d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack,
22715d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
22815d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
22915d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
23015d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2311da177e4SLinus Torvalds 	 */
2320f1e4d06SKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
2331da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
23415d99e02SRick Jones 	else
235*378979e9SJason Xing 		(*rcv_wnd) = space;
236a337531bSYuchung Cheng 
237a337531bSYuchung Cheng 	if (init_rcv_wnd)
238a337531bSYuchung Cheng 		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
23915d99e02SRick Jones 
24019bf6261SEric Dumazet 	*rcv_wscale = 0;
2411da177e4SLinus Torvalds 	if (wscale_ok) {
242589c49cbSGao Feng 		/* Set window scaling on max possible window */
24302739545SKuniyuki Iwashima 		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
2441227c177SKuniyuki Iwashima 		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
245f410cbeaSEric Dumazet 		space = min_t(u32, space, window_clamp);
24619bf6261SEric Dumazet 		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
24719bf6261SEric Dumazet 				      0, TCP_MAX_WSCALE);
2481da177e4SLinus Torvalds 	}
2491da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
250f410cbeaSEric Dumazet 	WRITE_ONCE(*__window_clamp,
251f410cbeaSEric Dumazet 		   min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
2521da177e4SLinus Torvalds }
2534bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
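
/* Worked example: with wscale_ok and an effective space of 1 MiB after the
 * max/min adjustments above, ilog2(1048576) = 20, so
 * *rcv_wscale = clamp(20 - 15, 0, TCP_MAX_WSCALE) = 5.  A shift of 5 lets
 * the 16-bit window field advertise up to 65535 << 5 = 2097120 bytes,
 * covering the offered space.
 */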
2541da177e4SLinus Torvalds 
2551da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2561da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2571da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2581da177e4SLinus Torvalds  * frame.
2591da177e4SLinus Torvalds  */
26040efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2611da177e4SLinus Torvalds {
2621da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
263b650d953Smfreemon@cloudflare.com 	struct net *net = sock_net(sk);
264e2142825SMenglong Dong 	u32 old_win = tp->rcv_wnd;
265e2142825SMenglong Dong 	u32 cur_win, new_win;
2661da177e4SLinus Torvalds 
267e2142825SMenglong Dong 	/* Make the window 0 if we failed to queue the data because we
268e2142825SMenglong Dong 	 * are out of memory. The window is temporary, so we don't store
269e2142825SMenglong Dong 	 * it on the socket.
270e2142825SMenglong Dong 	 */
271e2142825SMenglong Dong 	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
272e2142825SMenglong Dong 		return 0;
273e2142825SMenglong Dong 
274e2142825SMenglong Dong 	cur_win = tcp_receive_window(tp);
275e2142825SMenglong Dong 	new_win = __tcp_select_window(sk);
2761da177e4SLinus Torvalds 	if (new_win < cur_win) {
2771da177e4SLinus Torvalds 		/* Danger Will Robinson!
2781da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2791da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2801da177e4SLinus Torvalds 		 * window in time.  --DaveM
2811da177e4SLinus Torvalds 		 *
2821da177e4SLinus Torvalds 		 * Relax Will Robinson.
2831da177e4SLinus Torvalds 		 */
284b650d953Smfreemon@cloudflare.com 		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
285b650d953Smfreemon@cloudflare.com 			/* Never shrink the offered window */
2868e165e20SFlorian Westphal 			if (new_win == 0)
287b650d953Smfreemon@cloudflare.com 				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
288607bfbf2SPatrick McHardy 			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2891da177e4SLinus Torvalds 		}
290b650d953Smfreemon@cloudflare.com 	}
291b650d953Smfreemon@cloudflare.com 
2921da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2931da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2941da177e4SLinus Torvalds 
2951da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2961da177e4SLinus Torvalds 	 * scaled window.
2971da177e4SLinus Torvalds 	 */
298ceef9ab6SEric Dumazet 	if (!tp->rx_opt.rcv_wscale &&
299b650d953Smfreemon@cloudflare.com 	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
3001da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
3011da177e4SLinus Torvalds 	else
3021da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
3031da177e4SLinus Torvalds 
3041da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
3051da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
3061da177e4SLinus Torvalds 
30731770e34SFlorian Westphal 	/* If we advertise zero window, disable fast path. */
3088e165e20SFlorian Westphal 	if (new_win == 0) {
30931770e34SFlorian Westphal 		tp->pred_flags = 0;
3108e165e20SFlorian Westphal 		if (old_win)
311b650d953Smfreemon@cloudflare.com 			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
3128e165e20SFlorian Westphal 	} else if (old_win == 0) {
313b650d953Smfreemon@cloudflare.com 		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
3148e165e20SFlorian Westphal 	}
3151da177e4SLinus Torvalds 
3161da177e4SLinus Torvalds 	return new_win;
3171da177e4SLinus Torvalds }
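
/* Worked example: a 524288-byte window with rcv_wscale = 7 is advertised
 * as 524288 >> 7 = 4096 in th->window; the peer left-shifts it by the
 * scale negotiated at handshake to recover the full value.
 */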
3181da177e4SLinus Torvalds 
31967edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
320735d3831SFlorian Westphal static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
321bdf1ee5dSIlpo Järvinen {
32230e502a3SDaniel Borkmann 	const struct tcp_sock *tp = tcp_sk(sk);
32330e502a3SDaniel Borkmann 
3244de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
325bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
3264de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
32791b5b21cSLawrence Brakmo 	else if (tcp_ca_needs_ecn(sk) ||
32891b5b21cSLawrence Brakmo 		 tcp_bpf_ca_needs_ecn(sk))
32930e502a3SDaniel Borkmann 		INET_ECN_xmit(sk);
330bdf1ee5dSIlpo Järvinen }
331bdf1ee5dSIlpo Järvinen 
33267edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
333735d3831SFlorian Westphal static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
334bdf1ee5dSIlpo Järvinen {
335bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
33691b5b21cSLawrence Brakmo 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
3374785a667SKuniyuki Iwashima 	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
33891b5b21cSLawrence Brakmo 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
339f7b3bec6SFlorian Westphal 
340f7b3bec6SFlorian Westphal 	if (!use_ecn) {
341f7b3bec6SFlorian Westphal 		const struct dst_entry *dst = __sk_dst_get(sk);
342f7b3bec6SFlorian Westphal 
343f7b3bec6SFlorian Westphal 		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
344f7b3bec6SFlorian Westphal 			use_ecn = true;
345f7b3bec6SFlorian Westphal 	}
346bdf1ee5dSIlpo Järvinen 
347bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
348f7b3bec6SFlorian Westphal 
349f7b3bec6SFlorian Westphal 	if (use_ecn) {
3504de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
351bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
35291b5b21cSLawrence Brakmo 		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
35330e502a3SDaniel Borkmann 			INET_ECN_xmit(sk);
354bdf1ee5dSIlpo Järvinen 	}
355bdf1ee5dSIlpo Järvinen }
356bdf1ee5dSIlpo Järvinen 
35749213555SDaniel Borkmann static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
35849213555SDaniel Borkmann {
35912b8d9caSKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
36049213555SDaniel Borkmann 		/* tp->ecn_flags are cleared at a later point in time when
36149213555SDaniel Borkmann 		 * SYN-ACK is ultimately received.
36249213555SDaniel Borkmann 		 */
36349213555SDaniel Borkmann 		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
36449213555SDaniel Borkmann }
36549213555SDaniel Borkmann 
366735d3831SFlorian Westphal static void
3676ac705b1SEric Dumazet tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
368bdf1ee5dSIlpo Järvinen {
3696ac705b1SEric Dumazet 	if (inet_rsk(req)->ecn_ok)
370bdf1ee5dSIlpo Järvinen 		th->ece = 1;
371bdf1ee5dSIlpo Järvinen }
372bdf1ee5dSIlpo Järvinen 
37367edfef7SAndi Kleen /* Set up ECN state for a packet on an ESTABLISHED socket that is about to
37467edfef7SAndi Kleen  * be sent.
37567edfef7SAndi Kleen  */
376735d3831SFlorian Westphal static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
377ea1627c2SEric Dumazet 			 struct tcphdr *th, int tcp_header_len)
378bdf1ee5dSIlpo Järvinen {
379bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
380bdf1ee5dSIlpo Järvinen 
381bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
382bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
383bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
384bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
385bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
386bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
387bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
388ea1627c2SEric Dumazet 				th->cwr = 1;
389bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
390bdf1ee5dSIlpo Järvinen 			}
39130e502a3SDaniel Borkmann 		} else if (!tcp_ca_needs_ecn(sk)) {
392bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
393bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
394bdf1ee5dSIlpo Järvinen 		}
395bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
396ea1627c2SEric Dumazet 			th->ece = 1;
397bdf1ee5dSIlpo Järvinen 	}
398bdf1ee5dSIlpo Järvinen }
399bdf1ee5dSIlpo Järvinen 
400e870a8efSIlpo Järvinen /* Constructs common control bits of a non-data skb. If SYN/FIN is present,
401e870a8efSIlpo Järvinen  * auto-increment the end seqno.
402e870a8efSIlpo Järvinen  */
403e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
404e870a8efSIlpo Järvinen {
4052e8e18efSDavid S. Miller 	skb->ip_summed = CHECKSUM_PARTIAL;
406e870a8efSIlpo Järvinen 
4074de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags;
408e870a8efSIlpo Järvinen 
409cd7d8498SEric Dumazet 	tcp_skb_pcount_set(skb, 1);
410e870a8efSIlpo Järvinen 
411e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
412a3433f35SChangli Gao 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
413e870a8efSIlpo Järvinen 		seq++;
414e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
415e870a8efSIlpo Järvinen }
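
/* Example: tcp_init_nondata_skb(skb, 100, TCPHDR_FIN | TCPHDR_ACK) yields
 * seq = 100 and end_seq = 101; the FIN consumes one sequence number, just
 * as a SYN does, which is why both flags trigger the auto increment above.
 */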
416e870a8efSIlpo Järvinen 
417a2a385d6SEric Dumazet static inline bool tcp_urg_mode(const struct tcp_sock *tp)
41833f5f57eSIlpo Järvinen {
41933f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
42033f5f57eSIlpo Järvinen }
42133f5f57eSIlpo Järvinen 
4223b65abb8SLeonard Crestez #define OPTION_SACK_ADVERTISE	BIT(0)
4233b65abb8SLeonard Crestez #define OPTION_TS		BIT(1)
4243b65abb8SLeonard Crestez #define OPTION_MD5		BIT(2)
4253b65abb8SLeonard Crestez #define OPTION_WSCALE		BIT(3)
4263b65abb8SLeonard Crestez #define OPTION_FAST_OPEN_COOKIE	BIT(8)
4273b65abb8SLeonard Crestez #define OPTION_SMC		BIT(9)
4283b65abb8SLeonard Crestez #define OPTION_MPTCP		BIT(10)
4291e03d32bSDmitry Safonov #define OPTION_AO		BIT(11)
43060e2a778SUrsula Braun 
43160e2a778SUrsula Braun static void smc_options_write(__be32 *ptr, u16 *options)
43260e2a778SUrsula Braun {
43360e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
43460e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
43560e2a778SUrsula Braun 		if (unlikely(OPTION_SMC & *options)) {
43660e2a778SUrsula Braun 			*ptr++ = htonl((TCPOPT_NOP  << 24) |
43760e2a778SUrsula Braun 				       (TCPOPT_NOP  << 16) |
43860e2a778SUrsula Braun 				       (TCPOPT_EXP <<  8) |
43960e2a778SUrsula Braun 				       (TCPOLEN_EXP_SMC_BASE));
44060e2a778SUrsula Braun 			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
44160e2a778SUrsula Braun 		}
44260e2a778SUrsula Braun 	}
44360e2a778SUrsula Braun #endif
44460e2a778SUrsula Braun }
44533ad798cSAdam Langley 
44633ad798cSAdam Langley struct tcp_out_options {
4472100c8d2SYuchung Cheng 	u16 options;		/* bit field of OPTION_* */
4482100c8d2SYuchung Cheng 	u16 mss;		/* 0 to disable */
44933ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
45033ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
451bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
452331fca43SMartin KaFai Lau 	u8 bpf_opt_len;		/* length of BPF hdr option */
453bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
4542100c8d2SYuchung Cheng 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
4552100c8d2SYuchung Cheng 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
456eda7acddSPeter Krystad 	struct mptcp_out_options mptcp;
45733ad798cSAdam Langley };
45833ad798cSAdam Langley 
459ea66758cSPaolo Abeni static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
460ea66758cSPaolo Abeni 				struct tcp_sock *tp,
461fa3fe2b1SFlorian Westphal 				struct tcp_out_options *opts)
462eda7acddSPeter Krystad {
463eda7acddSPeter Krystad #if IS_ENABLED(CONFIG_MPTCP)
464eda7acddSPeter Krystad 	if (unlikely(OPTION_MPTCP & opts->options))
465ea66758cSPaolo Abeni 		mptcp_write_options(th, ptr, tp, &opts->mptcp);
466eda7acddSPeter Krystad #endif
467eda7acddSPeter Krystad }
468eda7acddSPeter Krystad 
469331fca43SMartin KaFai Lau #ifdef CONFIG_CGROUP_BPF
4700813a841SMartin KaFai Lau static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
4710813a841SMartin KaFai Lau 					enum tcp_synack_type synack_type)
4720813a841SMartin KaFai Lau {
4730813a841SMartin KaFai Lau 	if (unlikely(!skb))
4740813a841SMartin KaFai Lau 		return BPF_WRITE_HDR_TCP_CURRENT_MSS;
4750813a841SMartin KaFai Lau 
4760813a841SMartin KaFai Lau 	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
4770813a841SMartin KaFai Lau 		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
4780813a841SMartin KaFai Lau 
4790813a841SMartin KaFai Lau 	return 0;
4800813a841SMartin KaFai Lau }
4810813a841SMartin KaFai Lau 
482331fca43SMartin KaFai Lau /* req, syn_skb and synack_type are used when writing synack */
483331fca43SMartin KaFai Lau static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
484331fca43SMartin KaFai Lau 				  struct request_sock *req,
485331fca43SMartin KaFai Lau 				  struct sk_buff *syn_skb,
486331fca43SMartin KaFai Lau 				  enum tcp_synack_type synack_type,
487331fca43SMartin KaFai Lau 				  struct tcp_out_options *opts,
488331fca43SMartin KaFai Lau 				  unsigned int *remaining)
489331fca43SMartin KaFai Lau {
4900813a841SMartin KaFai Lau 	struct bpf_sock_ops_kern sock_ops;
4910813a841SMartin KaFai Lau 	int err;
4920813a841SMartin KaFai Lau 
493331fca43SMartin KaFai Lau 	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
494331fca43SMartin KaFai Lau 					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
495331fca43SMartin KaFai Lau 	    !*remaining)
496331fca43SMartin KaFai Lau 		return;
497331fca43SMartin KaFai Lau 
4980813a841SMartin KaFai Lau 	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
4990813a841SMartin KaFai Lau 
5000813a841SMartin KaFai Lau 	/* init sock_ops */
5010813a841SMartin KaFai Lau 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
5020813a841SMartin KaFai Lau 
5030813a841SMartin KaFai Lau 	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
5040813a841SMartin KaFai Lau 
5050813a841SMartin KaFai Lau 	if (req) {
5060813a841SMartin KaFai Lau 		/* The listen "sk" cannot be passed here because
5070813a841SMartin KaFai Lau 		 * it is not locked.  It would also not make much
5080813a841SMartin KaFai Lau 		 * sense to do bpf_setsockopt(listen_sk) based
5090813a841SMartin KaFai Lau 		 * on an individual connection request.
5100813a841SMartin KaFai Lau 		 *
5110813a841SMartin KaFai Lau 		 * Thus, "req" is passed here and the cgroup-bpf-progs
5120813a841SMartin KaFai Lau 		 * of the listen "sk" will be run.
5130813a841SMartin KaFai Lau 		 *
5140813a841SMartin KaFai Lau 		 * "req" is also used here for fastopen even though the "sk" here
5150813a841SMartin KaFai Lau 		 * is a fullsock "child" sk.  This is to keep the behavior
5160813a841SMartin KaFai Lau 		 * consistent between fastopen and non-fastopen on
5170813a841SMartin KaFai Lau 		 * the bpf programming side.
518331fca43SMartin KaFai Lau 		 */
5190813a841SMartin KaFai Lau 		sock_ops.sk = (struct sock *)req;
5200813a841SMartin KaFai Lau 		sock_ops.syn_skb = syn_skb;
5210813a841SMartin KaFai Lau 	} else {
5220813a841SMartin KaFai Lau 		sock_owned_by_me(sk);
5230813a841SMartin KaFai Lau 
5240813a841SMartin KaFai Lau 		sock_ops.is_fullsock = 1;
5250813a841SMartin KaFai Lau 		sock_ops.sk = sk;
5260813a841SMartin KaFai Lau 	}
5270813a841SMartin KaFai Lau 
5280813a841SMartin KaFai Lau 	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
5290813a841SMartin KaFai Lau 	sock_ops.remaining_opt_len = *remaining;
5300813a841SMartin KaFai Lau 	/* tcp_current_mss() does not pass a skb */
5310813a841SMartin KaFai Lau 	if (skb)
5320813a841SMartin KaFai Lau 		bpf_skops_init_skb(&sock_ops, skb, 0);
5330813a841SMartin KaFai Lau 
5340813a841SMartin KaFai Lau 	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
5350813a841SMartin KaFai Lau 
5360813a841SMartin KaFai Lau 	if (err || sock_ops.remaining_opt_len == *remaining)
5370813a841SMartin KaFai Lau 		return;
5380813a841SMartin KaFai Lau 
5390813a841SMartin KaFai Lau 	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
5400813a841SMartin KaFai Lau 	/* round up to 4 bytes */
5410813a841SMartin KaFai Lau 	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
5420813a841SMartin KaFai Lau 
5430813a841SMartin KaFai Lau 	*remaining -= opts->bpf_opt_len;
544331fca43SMartin KaFai Lau }
545331fca43SMartin KaFai Lau 
546331fca43SMartin KaFai Lau static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
547331fca43SMartin KaFai Lau 				    struct request_sock *req,
548331fca43SMartin KaFai Lau 				    struct sk_buff *syn_skb,
549331fca43SMartin KaFai Lau 				    enum tcp_synack_type synack_type,
550331fca43SMartin KaFai Lau 				    struct tcp_out_options *opts)
551331fca43SMartin KaFai Lau {
5520813a841SMartin KaFai Lau 	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
5530813a841SMartin KaFai Lau 	struct bpf_sock_ops_kern sock_ops;
5540813a841SMartin KaFai Lau 	int err;
5550813a841SMartin KaFai Lau 
5560813a841SMartin KaFai Lau 	if (likely(!max_opt_len))
557331fca43SMartin KaFai Lau 		return;
558331fca43SMartin KaFai Lau 
5590813a841SMartin KaFai Lau 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
5600813a841SMartin KaFai Lau 
5610813a841SMartin KaFai Lau 	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
5620813a841SMartin KaFai Lau 
5630813a841SMartin KaFai Lau 	if (req) {
5640813a841SMartin KaFai Lau 		sock_ops.sk = (struct sock *)req;
5650813a841SMartin KaFai Lau 		sock_ops.syn_skb = syn_skb;
5660813a841SMartin KaFai Lau 	} else {
5670813a841SMartin KaFai Lau 		sock_owned_by_me(sk);
5680813a841SMartin KaFai Lau 
5690813a841SMartin KaFai Lau 		sock_ops.is_fullsock = 1;
5700813a841SMartin KaFai Lau 		sock_ops.sk = sk;
5710813a841SMartin KaFai Lau 	}
5720813a841SMartin KaFai Lau 
5730813a841SMartin KaFai Lau 	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
5740813a841SMartin KaFai Lau 	sock_ops.remaining_opt_len = max_opt_len;
5750813a841SMartin KaFai Lau 	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
5760813a841SMartin KaFai Lau 	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
5770813a841SMartin KaFai Lau 
5780813a841SMartin KaFai Lau 	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
5790813a841SMartin KaFai Lau 
5800813a841SMartin KaFai Lau 	if (err)
5810813a841SMartin KaFai Lau 		nr_written = 0;
5820813a841SMartin KaFai Lau 	else
5830813a841SMartin KaFai Lau 		nr_written = max_opt_len - sock_ops.remaining_opt_len;
5840813a841SMartin KaFai Lau 
5850813a841SMartin KaFai Lau 	if (nr_written < max_opt_len)
5860813a841SMartin KaFai Lau 		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
5870813a841SMartin KaFai Lau 		       max_opt_len - nr_written);
588331fca43SMartin KaFai Lau }
589331fca43SMartin KaFai Lau #else
590331fca43SMartin KaFai Lau static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
591331fca43SMartin KaFai Lau 				  struct request_sock *req,
592331fca43SMartin KaFai Lau 				  struct sk_buff *syn_skb,
593331fca43SMartin KaFai Lau 				  enum tcp_synack_type synack_type,
594331fca43SMartin KaFai Lau 				  struct tcp_out_options *opts,
595331fca43SMartin KaFai Lau 				  unsigned int *remaining)
596331fca43SMartin KaFai Lau {
597331fca43SMartin KaFai Lau }
598331fca43SMartin KaFai Lau 
599331fca43SMartin KaFai Lau static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
600331fca43SMartin KaFai Lau 				    struct request_sock *req,
601331fca43SMartin KaFai Lau 				    struct sk_buff *syn_skb,
602331fca43SMartin KaFai Lau 				    enum tcp_synack_type synack_type,
603331fca43SMartin KaFai Lau 				    struct tcp_out_options *opts)
604331fca43SMartin KaFai Lau {
605331fca43SMartin KaFai Lau }
606331fca43SMartin KaFai Lau #endif
607331fca43SMartin KaFai Lau 
6087425627bSNathan Chancellor static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
6097425627bSNathan Chancellor 				      const struct tcp_request_sock *tcprsk,
6107425627bSNathan Chancellor 				      struct tcp_out_options *opts,
6117425627bSNathan Chancellor 				      struct tcp_key *key, __be32 *ptr)
6127425627bSNathan Chancellor {
6137425627bSNathan Chancellor #ifdef CONFIG_TCP_AO
6147425627bSNathan Chancellor 	u8 maclen = tcp_ao_maclen(key->ao_key);
6157425627bSNathan Chancellor 
6167425627bSNathan Chancellor 	if (tcprsk) {
6177425627bSNathan Chancellor 		u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
6187425627bSNathan Chancellor 
6197425627bSNathan Chancellor 		*ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
6207425627bSNathan Chancellor 			       (tcprsk->ao_keyid << 8) |
6217425627bSNathan Chancellor 			       (tcprsk->ao_rcv_next));
6227425627bSNathan Chancellor 	} else {
6237425627bSNathan Chancellor 		struct tcp_ao_key *rnext_key;
6247425627bSNathan Chancellor 		struct tcp_ao_info *ao_info;
6257425627bSNathan Chancellor 
6267425627bSNathan Chancellor 		ao_info = rcu_dereference_check(tp->ao_info,
6277425627bSNathan Chancellor 			lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
6287425627bSNathan Chancellor 		rnext_key = READ_ONCE(ao_info->rnext_key);
6297425627bSNathan Chancellor 		if (WARN_ON_ONCE(!rnext_key))
6307425627bSNathan Chancellor 			return ptr;
6317425627bSNathan Chancellor 		*ptr++ = htonl((TCPOPT_AO << 24) |
6327425627bSNathan Chancellor 			       (tcp_ao_len(key->ao_key) << 16) |
6337425627bSNathan Chancellor 			       (key->ao_key->sndid << 8) |
6347425627bSNathan Chancellor 			       (rnext_key->rcvid));
6357425627bSNathan Chancellor 	}
6367425627bSNathan Chancellor 	opts->hash_location = (__u8 *)ptr;
6377425627bSNathan Chancellor 	ptr += maclen / sizeof(*ptr);
6387425627bSNathan Chancellor 	if (unlikely(maclen % sizeof(*ptr))) {
6397425627bSNathan Chancellor 		memset(ptr, TCPOPT_NOP, sizeof(*ptr));
6407425627bSNathan Chancellor 		ptr++;
6417425627bSNathan Chancellor 	}
6427425627bSNathan Chancellor #endif
6437425627bSNathan Chancellor 	return ptr;
6447425627bSNathan Chancellor }
6457425627bSNathan Chancellor 
64667edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
64767edfef7SAndi Kleen  *
64867edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
649fd6149d3SIlpo Järvinen  * TCP options; we learned this the hard way, so be careful here.
650fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance, but from
6518e3bff96Sstephen hemminger  * an inter-operability perspective it seems that we're somewhat stuck with
652fd6149d3SIlpo Järvinen  * the ordering we have been using if we want to keep working with
653fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody, as there isn't
654fd6149d3SIlpo Järvinen  * a particular reason why the ordering would need to be changed).
655fd6149d3SIlpo Järvinen  *
656fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
657fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
658fd6149d3SIlpo Järvinen  */
659ea66758cSPaolo Abeni static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
66006b22ef2SDmitry Safonov 			      const struct tcp_request_sock *tcprsk,
6611e03d32bSDmitry Safonov 			      struct tcp_out_options *opts,
6621e03d32bSDmitry Safonov 			      struct tcp_key *key)
663bd0388aeSWilliam Allen Simpson {
664ea66758cSPaolo Abeni 	__be32 *ptr = (__be32 *)(th + 1);
6652100c8d2SYuchung Cheng 	u16 options = opts->options;	/* mungable copy */
666bd0388aeSWilliam Allen Simpson 
6671e03d32bSDmitry Safonov 	if (tcp_key_is_md5(key)) {
6681a2c6181SChristoph Paasch 		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
6691a2c6181SChristoph Paasch 			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
670bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
671bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
67233ad798cSAdam Langley 		ptr += 4;
6731e03d32bSDmitry Safonov 	} else if (tcp_key_is_ao(key)) {
6747425627bSNathan Chancellor 		ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
6751e03d32bSDmitry Safonov 	}
676fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
677fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
678fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
679fd6149d3SIlpo Järvinen 			       opts->mss);
680fd6149d3SIlpo Järvinen 	}
681fd6149d3SIlpo Järvinen 
682bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
683bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
68433ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
68533ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
68633ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
68733ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
688bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
68933ad798cSAdam Langley 		} else {
690496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
69140efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
69240efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
69340efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
69440efc6faSStephen Hemminger 		}
69533ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
69633ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
69733ad798cSAdam Langley 	}
69833ad798cSAdam Langley 
699bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
70033ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
70133ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
70233ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
70333ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
70433ad798cSAdam Langley 	}
70533ad798cSAdam Langley 
706bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
70733ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
70833ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
70933ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
71033ad798cSAdam Langley 			       opts->ws);
71133ad798cSAdam Langley 	}
71233ad798cSAdam Langley 
71333ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
71433ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
71533ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
71640efc6faSStephen Hemminger 		int this_sack;
71740efc6faSStephen Hemminger 
71840efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
71940efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
72040efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
72133ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
72240efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
7232de979bdSStephen Hemminger 
72433ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
72533ad798cSAdam Langley 		     ++this_sack) {
72640efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
72740efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
72840efc6faSStephen Hemminger 		}
7292de979bdSStephen Hemminger 
73040efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
73140efc6faSStephen Hemminger 	}
7322100c8d2SYuchung Cheng 
7332100c8d2SYuchung Cheng 	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
7342100c8d2SYuchung Cheng 		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
7357f9b838bSDaniel Lee 		u8 *p = (u8 *)ptr;
7367f9b838bSDaniel Lee 		u32 len; /* Fast Open option length */
7372100c8d2SYuchung Cheng 
7387f9b838bSDaniel Lee 		if (foc->exp) {
7397f9b838bSDaniel Lee 			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
7407f9b838bSDaniel Lee 			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
7412100c8d2SYuchung Cheng 				     TCPOPT_FASTOPEN_MAGIC);
7427f9b838bSDaniel Lee 			p += TCPOLEN_EXP_FASTOPEN_BASE;
7437f9b838bSDaniel Lee 		} else {
7447f9b838bSDaniel Lee 			len = TCPOLEN_FASTOPEN_BASE + foc->len;
7457f9b838bSDaniel Lee 			*p++ = TCPOPT_FASTOPEN;
7467f9b838bSDaniel Lee 			*p++ = len;
7472100c8d2SYuchung Cheng 		}
7487f9b838bSDaniel Lee 
7497f9b838bSDaniel Lee 		memcpy(p, foc->val, foc->len);
7507f9b838bSDaniel Lee 		if ((len & 3) == 2) {
7517f9b838bSDaniel Lee 			p[foc->len] = TCPOPT_NOP;
7527f9b838bSDaniel Lee 			p[foc->len + 1] = TCPOPT_NOP;
7537f9b838bSDaniel Lee 		}
7547f9b838bSDaniel Lee 		ptr += (len + 3) >> 2;
7552100c8d2SYuchung Cheng 	}
75660e2a778SUrsula Braun 
75760e2a778SUrsula Braun 	smc_options_write(ptr, &options);
758eda7acddSPeter Krystad 
759ea66758cSPaolo Abeni 	mptcp_options_write(th, ptr, tp, opts);
76060e2a778SUrsula Braun }
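
/* Wire-format example: for an established flow with timestamps only, the
 * OPTION_TS branch above emits the classical RFC 7323 layout
 *	01 01 08 0a <4-byte TSval> <4-byte TSecr>
 * i.e. two NOPs (kind 1), then kind 8 (TIMESTAMP) with length 10, padding
 * the 10-byte option out to the 12-byte aligned size accounted for by the
 * option-sizing helpers below.
 */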
76160e2a778SUrsula Braun 
76260e2a778SUrsula Braun static void smc_set_option(const struct tcp_sock *tp,
76360e2a778SUrsula Braun 			   struct tcp_out_options *opts,
76460e2a778SUrsula Braun 			   unsigned int *remaining)
76560e2a778SUrsula Braun {
76660e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
76760e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
76860e2a778SUrsula Braun 		if (tp->syn_smc) {
76960e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
77060e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
77160e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
77260e2a778SUrsula Braun 			}
77360e2a778SUrsula Braun 		}
77460e2a778SUrsula Braun 	}
77560e2a778SUrsula Braun #endif
77660e2a778SUrsula Braun }
77760e2a778SUrsula Braun 
77860e2a778SUrsula Braun static void smc_set_option_cond(const struct tcp_sock *tp,
77960e2a778SUrsula Braun 				const struct inet_request_sock *ireq,
78060e2a778SUrsula Braun 				struct tcp_out_options *opts,
78160e2a778SUrsula Braun 				unsigned int *remaining)
78260e2a778SUrsula Braun {
78360e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
78460e2a778SUrsula Braun 	if (static_branch_unlikely(&tcp_have_smc)) {
78560e2a778SUrsula Braun 		if (tp->syn_smc && ireq->smc_ok) {
78660e2a778SUrsula Braun 			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
78760e2a778SUrsula Braun 				opts->options |= OPTION_SMC;
78860e2a778SUrsula Braun 				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
78960e2a778SUrsula Braun 			}
79060e2a778SUrsula Braun 		}
79160e2a778SUrsula Braun 	}
79260e2a778SUrsula Braun #endif
79340efc6faSStephen Hemminger }
79440efc6faSStephen Hemminger 
795cec37a6eSPeter Krystad static void mptcp_set_option_cond(const struct request_sock *req,
796cec37a6eSPeter Krystad 				  struct tcp_out_options *opts,
797cec37a6eSPeter Krystad 				  unsigned int *remaining)
798cec37a6eSPeter Krystad {
799cec37a6eSPeter Krystad 	if (rsk_is_mptcp(req)) {
800cec37a6eSPeter Krystad 		unsigned int size;
801cec37a6eSPeter Krystad 
802cec37a6eSPeter Krystad 		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
803cec37a6eSPeter Krystad 			if (*remaining >= size) {
804cec37a6eSPeter Krystad 				opts->options |= OPTION_MPTCP;
805cec37a6eSPeter Krystad 				*remaining -= size;
806cec37a6eSPeter Krystad 			}
807cec37a6eSPeter Krystad 		}
808cec37a6eSPeter Krystad 	}
809cec37a6eSPeter Krystad }
810cec37a6eSPeter Krystad 
81167edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
81267edfef7SAndi Kleen  * network wire format yet.
81367edfef7SAndi Kleen  */
81495c96174SEric Dumazet static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
81533ad798cSAdam Langley 				struct tcp_out_options *opts,
8161e03d32bSDmitry Safonov 				struct tcp_key *key)
817cf533ea5SEric Dumazet {
81833ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
81995c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
820783237e8SYuchung Cheng 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
8211e03d32bSDmitry Safonov 	bool timestamps;
82233ad798cSAdam Langley 
8231e03d32bSDmitry Safonov 	/* Better than switch (key.type) as it has static branches */
8241e03d32bSDmitry Safonov 	if (tcp_key_is_md5(key)) {
8251e03d32bSDmitry Safonov 		timestamps = false;
82633ad798cSAdam Langley 		opts->options |= OPTION_MD5;
827bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
8281e03d32bSDmitry Safonov 	} else {
8291e03d32bSDmitry Safonov 		timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
8301e03d32bSDmitry Safonov 		if (tcp_key_is_ao(key)) {
8311e03d32bSDmitry Safonov 			opts->options |= OPTION_AO;
832da7dfaa6SDmitry Safonov 			remaining -= tcp_ao_len_aligned(key->ao_key);
833cfb6eeb4SYOSHIFUJI Hideaki 		}
8348c2320e8SEric Dumazet 	}
83533ad798cSAdam Langley 
83633ad798cSAdam Langley 	/* We always get an MSS option.  The option bytes which will be seen in
83733ad798cSAdam Langley 	 * normal data packets, should timestamps be used, must be counted in the MSS
83833ad798cSAdam Langley 	 * advertised.  But we subtract them from tp->mss_cache so that
83933ad798cSAdam Langley 	 * calculations in tcp_sendmsg are simpler etc.  So account for this
84033ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
84133ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
84233ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
84333ad798cSAdam Langley 	 * SACKs don't matter; we never delay an ACK when we have any of those
84433ad798cSAdam Langley 	 * going out.  */
84533ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
846bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
84733ad798cSAdam Langley 
8481e03d32bSDmitry Safonov 	if (likely(timestamps)) {
84933ad798cSAdam Langley 		opts->options |= OPTION_TS;
850614e8316SEric Dumazet 		opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
85133ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
852bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
85333ad798cSAdam Langley 	}
8543666f666SKuniyuki Iwashima 	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
85533ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
85689e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
857bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
85833ad798cSAdam Langley 	}
8593666f666SKuniyuki Iwashima 	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
86033ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
861b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
862bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
86333ad798cSAdam Langley 	}
86433ad798cSAdam Langley 
865783237e8SYuchung Cheng 	if (fastopen && fastopen->cookie.len >= 0) {
8662646c831SDaniel Lee 		u32 need = fastopen->cookie.len;
8672646c831SDaniel Lee 
8682646c831SDaniel Lee 		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
8692646c831SDaniel Lee 					       TCPOLEN_FASTOPEN_BASE;
870783237e8SYuchung Cheng 		need = (need + 3) & ~3U;  /* Align to 32 bits */
871783237e8SYuchung Cheng 		if (remaining >= need) {
872783237e8SYuchung Cheng 			opts->options |= OPTION_FAST_OPEN_COOKIE;
873783237e8SYuchung Cheng 			opts->fastopen_cookie = &fastopen->cookie;
874783237e8SYuchung Cheng 			remaining -= need;
875783237e8SYuchung Cheng 			tp->syn_fastopen = 1;
8762646c831SDaniel Lee 			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
877783237e8SYuchung Cheng 		}
878783237e8SYuchung Cheng 	}
879bd0388aeSWilliam Allen Simpson 
88060e2a778SUrsula Braun 	smc_set_option(tp, opts, &remaining);
88160e2a778SUrsula Braun 
882cec37a6eSPeter Krystad 	if (sk_is_mptcp(sk)) {
883cec37a6eSPeter Krystad 		unsigned int size;
884cec37a6eSPeter Krystad 
885cc7972eaSChristoph Paasch 		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
886cec37a6eSPeter Krystad 			opts->options |= OPTION_MPTCP;
887cec37a6eSPeter Krystad 			remaining -= size;
888cec37a6eSPeter Krystad 		}
889cec37a6eSPeter Krystad 	}
890cec37a6eSPeter Krystad 
891331fca43SMartin KaFai Lau 	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
892331fca43SMartin KaFai Lau 
893bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
89433ad798cSAdam Langley }
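
/* Accounting example: a common SYN carrying MSS, timestamps, window scaling
 * and SACK-permitted consumes TCPOLEN_MSS_ALIGNED (4) +
 * TCPOLEN_TSTAMP_ALIGNED (12) + TCPOLEN_WSCALE_ALIGNED (4) = 20 of the
 * 40-byte MAX_TCP_OPTION_SPACE; SACK_PERM rides in the timestamp word (see
 * tcp_options_write()), leaving 20 bytes for MPTCP, SMC or BPF-written
 * options.
 */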
89533ad798cSAdam Langley 
89667edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
89760e2a778SUrsula Braun static unsigned int tcp_synack_options(const struct sock *sk,
89860e2a778SUrsula Braun 				       struct request_sock *req,
89995c96174SEric Dumazet 				       unsigned int mss, struct sk_buff *skb,
90033ad798cSAdam Langley 				       struct tcp_out_options *opts,
9019427c6aaSDmitry Safonov 				       const struct tcp_key *key,
902e114e1e8SEric Dumazet 				       struct tcp_fastopen_cookie *foc,
903331fca43SMartin KaFai Lau 				       enum tcp_synack_type synack_type,
904331fca43SMartin KaFai Lau 				       struct sk_buff *syn_skb)
9054957faadSWilliam Allen Simpson {
90633ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
90795c96174SEric Dumazet 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
90833ad798cSAdam Langley 
9099427c6aaSDmitry Safonov 	if (tcp_key_is_md5(key)) {
91033ad798cSAdam Langley 		opts->options |= OPTION_MD5;
9114957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
9124957faadSWilliam Allen Simpson 
9134957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
9144957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
9154957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
9164957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
9174957faadSWilliam Allen Simpson 		 */
918e114e1e8SEric Dumazet 		if (synack_type != TCP_SYNACK_COOKIE)
919de213e5eSEric Dumazet 			ireq->tstamp_ok &= !ireq->sack_ok;
9209427c6aaSDmitry Safonov 	} else if (tcp_key_is_ao(key)) {
9219427c6aaSDmitry Safonov 		opts->options |= OPTION_AO;
922da7dfaa6SDmitry Safonov 		remaining -= tcp_ao_len_aligned(key->ao_key);
9239427c6aaSDmitry Safonov 		ireq->tstamp_ok &= !ireq->sack_ok;
92433ad798cSAdam Langley 	}
92533ad798cSAdam Langley 
9264957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
92733ad798cSAdam Langley 	opts->mss = mss;
9284957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
92933ad798cSAdam Langley 
93033ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
93133ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
93289e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
9334957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
93433ad798cSAdam Langley 	}
935de213e5eSEric Dumazet 	if (likely(ireq->tstamp_ok)) {
93633ad798cSAdam Langley 		opts->options |= OPTION_TS;
937614e8316SEric Dumazet 		opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
938614e8316SEric Dumazet 			      tcp_rsk(req)->ts_off;
939eba20811SEric Dumazet 		opts->tsecr = READ_ONCE(req->ts_recent);
9404957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
94133ad798cSAdam Langley 	}
94233ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
94333ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
944de213e5eSEric Dumazet 		if (unlikely(!ireq->tstamp_ok))
9454957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
94633ad798cSAdam Langley 	}
9477f9b838bSDaniel Lee 	if (foc != NULL && foc->len >= 0) {
9487f9b838bSDaniel Lee 		u32 need = foc->len;
9497f9b838bSDaniel Lee 
9507f9b838bSDaniel Lee 		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
9517f9b838bSDaniel Lee 				   TCPOLEN_FASTOPEN_BASE;
9528336886fSJerry Chu 		need = (need + 3) & ~3U;  /* Align to 32 bits */
9538336886fSJerry Chu 		if (remaining >= need) {
9548336886fSJerry Chu 			opts->options |= OPTION_FAST_OPEN_COOKIE;
9558336886fSJerry Chu 			opts->fastopen_cookie = foc;
9568336886fSJerry Chu 			remaining -= need;
9578336886fSJerry Chu 		}
9588336886fSJerry Chu 	}
9594957faadSWilliam Allen Simpson 
960cec37a6eSPeter Krystad 	mptcp_set_option_cond(req, opts, &remaining);
961cec37a6eSPeter Krystad 
96260e2a778SUrsula Braun 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
96360e2a778SUrsula Braun 
964331fca43SMartin KaFai Lau 	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
965331fca43SMartin KaFai Lau 			      synack_type, opts, &remaining);
966331fca43SMartin KaFai Lau 
9674957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
96833ad798cSAdam Langley }
96933ad798cSAdam Langley 
97067edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
97167edfef7SAndi Kleen  * final wire format yet.
97267edfef7SAndi Kleen  */
97395c96174SEric Dumazet static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
97433ad798cSAdam Langley 					struct tcp_out_options *opts,
9751e03d32bSDmitry Safonov 					struct tcp_key *key)
976cf533ea5SEric Dumazet {
97733ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
97895c96174SEric Dumazet 	unsigned int size = 0;
979cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
98033ad798cSAdam Langley 
9815843ef42SAndi Kleen 	opts->options = 0;
9825843ef42SAndi Kleen 
9831e03d32bSDmitry Safonov 	/* Better than switch (key.type) as it has static branches */
9841e03d32bSDmitry Safonov 	if (tcp_key_is_md5(key)) {
98533ad798cSAdam Langley 		opts->options |= OPTION_MD5;
98633ad798cSAdam Langley 		size += TCPOLEN_MD5SIG_ALIGNED;
9871e03d32bSDmitry Safonov 	} else if (tcp_key_is_ao(key)) {
9881e03d32bSDmitry Safonov 		opts->options |= OPTION_AO;
989da7dfaa6SDmitry Safonov 		size += tcp_ao_len_aligned(key->ao_key);
99033ad798cSAdam Langley 	}
99133ad798cSAdam Langley 
99233ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
99333ad798cSAdam Langley 		opts->options |= OPTION_TS;
994614e8316SEric Dumazet 		opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
995614e8316SEric Dumazet 				tp->tsoffset : 0;
99633ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
99733ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
99833ad798cSAdam Langley 	}
99933ad798cSAdam Langley 
1000cec37a6eSPeter Krystad 	/* MPTCP options have precedence over SACK for the limited TCP
1001cec37a6eSPeter Krystad 	 * option space because an MPTCP connection would be forced to
1002cec37a6eSPeter Krystad 	 * fall back to regular TCP if a required multipath option is
1003cec37a6eSPeter Krystad 	 * missing. SACK still gets a chance to use whatever space is
1004cec37a6eSPeter Krystad 	 * left.
1005cec37a6eSPeter Krystad 	 */
1006cec37a6eSPeter Krystad 	if (sk_is_mptcp(sk)) {
1007cec37a6eSPeter Krystad 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1008cec37a6eSPeter Krystad 		unsigned int opt_size = 0;
1009cec37a6eSPeter Krystad 
1010cec37a6eSPeter Krystad 		if (mptcp_established_options(sk, skb, &opt_size, remaining,
1011cec37a6eSPeter Krystad 					      &opts->mptcp)) {
1012cec37a6eSPeter Krystad 			opts->options |= OPTION_MPTCP;
1013cec37a6eSPeter Krystad 			size += opt_size;
1014cec37a6eSPeter Krystad 		}
1015cec37a6eSPeter Krystad 	}
1016cec37a6eSPeter Krystad 
1017cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
1018cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
101995c96174SEric Dumazet 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
10209cfcca23SMat Martineau 		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
10219cfcca23SMat Martineau 					 TCPOLEN_SACK_PERBLOCK))
10229cfcca23SMat Martineau 			return size;
10239cfcca23SMat Martineau 
102433ad798cSAdam Langley 		opts->num_sack_blocks =
102595c96174SEric Dumazet 			min_t(unsigned int, eff_sacks,
102633ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
102733ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
10289cfcca23SMat Martineau 
102933ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
103033ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
103133ad798cSAdam Langley 	}
103233ad798cSAdam Langley 
1033331fca43SMartin KaFai Lau 	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
1034331fca43SMartin KaFai Lau 					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
1035331fca43SMartin KaFai Lau 		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1036331fca43SMartin KaFai Lau 
1037331fca43SMartin KaFai Lau 		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
1038331fca43SMartin KaFai Lau 
1039331fca43SMartin KaFai Lau 		size = MAX_TCP_OPTION_SPACE - remaining;
1040331fca43SMartin KaFai Lau 	}
1041331fca43SMartin KaFai Lau 
104233ad798cSAdam Langley 	return size;
104340efc6faSStephen Hemminger }
10441da177e4SLinus Torvalds 
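/* Worked example of the option arithmetic above, assuming an established
 * socket with timestamps enabled and no MD5/AO/MPTCP options in play
 * (constants as used throughout this file):
 */
#if 0	/* illustrative only, never compiled */
	size = TCPOLEN_TSTAMP_ALIGNED;			/* 12 bytes */
	remaining = MAX_TCP_OPTION_SPACE - size;	/* 40 - 12 = 28 */
	/* SACK: aligned base is 4 bytes, each block is 8 bytes, so
	 * (28 - 4) / 8 = 3 blocks at most can ride along with timestamps,
	 * matching the min_t() clamp on opts->num_sack_blocks above.
	 */
#endif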
104546d3ceabSEric Dumazet 
104646d3ceabSEric Dumazet /* TCP SMALL QUEUES (TSQ)
104746d3ceabSEric Dumazet  *
104846d3ceabSEric Dumazet  * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
104946d3ceabSEric Dumazet  * to reduce RTT and bufferbloat.
105046d3ceabSEric Dumazet  * We do this using a special skb destructor (tcp_wfree).
105146d3ceabSEric Dumazet  *
105246d3ceabSEric Dumazet  * It's important that tcp_wfree() can be replaced by sock_wfree() in the
105346d3ceabSEric Dumazet  * event the skb needs to be reallocated in a driver.
10548e3bff96Sstephen hemminger  * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
105546d3ceabSEric Dumazet  *
105646d3ceabSEric Dumazet  * Since transmit from skb destructor is forbidden, we use a tasklet
105746d3ceabSEric Dumazet  * to process all sockets that eventually need to send more skbs.
105846d3ceabSEric Dumazet  * We use one tasklet per cpu, with its own queue of sockets.
105946d3ceabSEric Dumazet  */
106046d3ceabSEric Dumazet struct tsq_tasklet {
106146d3ceabSEric Dumazet 	struct tasklet_struct	tasklet;
106246d3ceabSEric Dumazet 	struct list_head	head; /* queue of tcp sockets */
106346d3ceabSEric Dumazet };
106446d3ceabSEric Dumazet static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
106546d3ceabSEric Dumazet 
106673a6bab5SEric Dumazet static void tcp_tsq_write(struct sock *sk)
10676f458dfbSEric Dumazet {
10686f458dfbSEric Dumazet 	if ((1 << sk->sk_state) &
10696f458dfbSEric Dumazet 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1070f9616c35SEric Dumazet 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1071f9616c35SEric Dumazet 		struct tcp_sock *tp = tcp_sk(sk);
1072f9616c35SEric Dumazet 
1073f9616c35SEric Dumazet 		if (tp->lost_out > tp->retrans_out &&
107440570375SEric Dumazet 		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
10753a91d29fSKoichiro Den 			tcp_mstamp_refresh(tp);
1076f9616c35SEric Dumazet 			tcp_xmit_retransmit_queue(sk);
10773a91d29fSKoichiro Den 		}
1078f9616c35SEric Dumazet 
1079f9616c35SEric Dumazet 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1080bf06200eSJohn Ogness 			       0, GFP_ATOMIC);
10816f458dfbSEric Dumazet 	}
1082f9616c35SEric Dumazet }
108373a6bab5SEric Dumazet 
108473a6bab5SEric Dumazet static void tcp_tsq_handler(struct sock *sk)
108573a6bab5SEric Dumazet {
108673a6bab5SEric Dumazet 	bh_lock_sock(sk);
108773a6bab5SEric Dumazet 	if (!sock_owned_by_user(sk))
108873a6bab5SEric Dumazet 		tcp_tsq_write(sk);
108973a6bab5SEric Dumazet 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
109073a6bab5SEric Dumazet 		sock_hold(sk);
109173a6bab5SEric Dumazet 	bh_unlock_sock(sk);
109273a6bab5SEric Dumazet }
109346d3ceabSEric Dumazet /*
10948e3bff96Sstephen hemminger  * One tasklet per cpu tries to send more skbs.
109546d3ceabSEric Dumazet  * We run in tasklet context but need to disable irqs when
10968e3bff96Sstephen hemminger  * transferring tsq->head because tcp_wfree() might
109746d3ceabSEric Dumazet  * interrupt us (non-NAPI drivers).
109846d3ceabSEric Dumazet  */
1099c6533ca8SAllen Pais static void tcp_tasklet_func(struct tasklet_struct *t)
110046d3ceabSEric Dumazet {
1101c6533ca8SAllen Pais 	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
110246d3ceabSEric Dumazet 	LIST_HEAD(list);
110346d3ceabSEric Dumazet 	unsigned long flags;
110446d3ceabSEric Dumazet 	struct list_head *q, *n;
110546d3ceabSEric Dumazet 	struct tcp_sock *tp;
110646d3ceabSEric Dumazet 	struct sock *sk;
110746d3ceabSEric Dumazet 
110846d3ceabSEric Dumazet 	local_irq_save(flags);
110946d3ceabSEric Dumazet 	list_splice_init(&tsq->head, &list);
111046d3ceabSEric Dumazet 	local_irq_restore(flags);
111146d3ceabSEric Dumazet 
111246d3ceabSEric Dumazet 	list_for_each_safe(q, n, &list) {
111346d3ceabSEric Dumazet 		tp = list_entry(q, struct tcp_sock, tsq_node);
111446d3ceabSEric Dumazet 		list_del(&tp->tsq_node);
111546d3ceabSEric Dumazet 
111646d3ceabSEric Dumazet 		sk = (struct sock *)tp;
11170a9648f1SEric Dumazet 		smp_mb__before_atomic();
11187aa5470cSEric Dumazet 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
11197aa5470cSEric Dumazet 
11206f458dfbSEric Dumazet 		tcp_tsq_handler(sk);
112146d3ceabSEric Dumazet 		sk_free(sk);
112246d3ceabSEric Dumazet 	}
112346d3ceabSEric Dumazet }
112446d3ceabSEric Dumazet 
112540fc3423SEric Dumazet #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
112640fc3423SEric Dumazet 			  TCPF_WRITE_TIMER_DEFERRED |	\
112740fc3423SEric Dumazet 			  TCPF_DELACK_TIMER_DEFERRED |	\
1128133c4c0dSEric Dumazet 			  TCPF_MTU_REDUCED_DEFERRED |	\
1129133c4c0dSEric Dumazet 			  TCPF_ACK_DEFERRED)
113046d3ceabSEric Dumazet /**
113146d3ceabSEric Dumazet  * tcp_release_cb - tcp release_sock() callback
113246d3ceabSEric Dumazet  * @sk: socket
113346d3ceabSEric Dumazet  *
113446d3ceabSEric Dumazet  * called from release_sock() to perform protocol dependent
113546d3ceabSEric Dumazet  * actions before socket release.
113646d3ceabSEric Dumazet  */
113746d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk)
113846d3ceabSEric Dumazet {
1139fac30731SEric Dumazet 	unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1140fac30731SEric Dumazet 	unsigned long nflags;
114146d3ceabSEric Dumazet 
11426f458dfbSEric Dumazet 	/* perform an atomic operation only if at least one flag is set */
11436f458dfbSEric Dumazet 	do {
11446f458dfbSEric Dumazet 		if (!(flags & TCP_DEFERRED_ALL))
11456f458dfbSEric Dumazet 			return;
11466f458dfbSEric Dumazet 		nflags = flags & ~TCP_DEFERRED_ALL;
1147fac30731SEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
11486f458dfbSEric Dumazet 
114973a6bab5SEric Dumazet 	if (flags & TCPF_TSQ_DEFERRED) {
115073a6bab5SEric Dumazet 		tcp_tsq_write(sk);
115173a6bab5SEric Dumazet 		__sock_put(sk);
115273a6bab5SEric Dumazet 	}
1153c3f9b018SEric Dumazet 
115440fc3423SEric Dumazet 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
11556f458dfbSEric Dumazet 		tcp_write_timer_handler(sk);
1156144d56e9SEric Dumazet 		__sock_put(sk);
1157144d56e9SEric Dumazet 	}
115840fc3423SEric Dumazet 	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
11596f458dfbSEric Dumazet 		tcp_delack_timer_handler(sk);
1160144d56e9SEric Dumazet 		__sock_put(sk);
1161144d56e9SEric Dumazet 	}
116240fc3423SEric Dumazet 	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
11634fab9071SNeal Cardwell 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1164144d56e9SEric Dumazet 		__sock_put(sk);
1165144d56e9SEric Dumazet 	}
1166133c4c0dSEric Dumazet 	if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
1167133c4c0dSEric Dumazet 		tcp_send_ack(sk);
116846d3ceabSEric Dumazet }
116946d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb);
117046d3ceabSEric Dumazet 
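/* Sketch of the deferral pattern that tcp_release_cb() completes: a handler
 * running from timer/softirq context that finds the socket owned by the user
 * sets a TCPF_*_DEFERRED flag and takes a reference; tcp_release_cb() later
 * performs the work and drops that reference via __sock_put().  E.g.:
 */
#if 0	/* illustrative only, never compiled */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);	/* do the work right away */
	} else {
		/* delegate to tcp_release_cb(); paired with __sock_put() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
#endif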
117146d3ceabSEric Dumazet void __init tcp_tasklet_init(void)
117246d3ceabSEric Dumazet {
117346d3ceabSEric Dumazet 	int i;
117446d3ceabSEric Dumazet 
117546d3ceabSEric Dumazet 	for_each_possible_cpu(i) {
117646d3ceabSEric Dumazet 		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
117746d3ceabSEric Dumazet 
117846d3ceabSEric Dumazet 		INIT_LIST_HEAD(&tsq->head);
1179c6533ca8SAllen Pais 		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
118046d3ceabSEric Dumazet 	}
118146d3ceabSEric Dumazet }
118246d3ceabSEric Dumazet 
118346d3ceabSEric Dumazet /*
118446d3ceabSEric Dumazet  * Write buffer destructor automatically called from kfree_skb.
11858e3bff96Sstephen hemminger  * We can't xmit new skbs from this context, as we might already
118646d3ceabSEric Dumazet  * hold qdisc lock.
118746d3ceabSEric Dumazet  */
1188d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb)
118946d3ceabSEric Dumazet {
119046d3ceabSEric Dumazet 	struct sock *sk = skb->sk;
119146d3ceabSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1192408f0a6cSEric Dumazet 	unsigned long flags, nval, oval;
1193b548b17aSEric Dumazet 	struct tsq_tasklet *tsq;
1194b548b17aSEric Dumazet 	bool empty;
11959b462d02SEric Dumazet 
11969b462d02SEric Dumazet 	/* Keep one reference on sk_wmem_alloc.
11979b462d02SEric Dumazet 	 * Will be released by sk_free() from here or tcp_tasklet_func()
11989b462d02SEric Dumazet 	 */
119914afee4bSReshetova, Elena 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
12009b462d02SEric Dumazet 
12019b462d02SEric Dumazet 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
12029b462d02SEric Dumazet 	 * Wait until our queues (qdisc + devices) are drained.
12039b462d02SEric Dumazet 	 * This gives:
12049b462d02SEric Dumazet 	 * - fewer callbacks to tcp_write_xmit(), reducing stress (batches)
12059b462d02SEric Dumazet 	 * - chance for incoming ACK (processed by another cpu maybe)
12069b462d02SEric Dumazet 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
12079b462d02SEric Dumazet 	 */
120814afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
12099b462d02SEric Dumazet 		goto out;
121046d3ceabSEric Dumazet 
1211b548b17aSEric Dumazet 	oval = smp_load_acquire(&sk->sk_tsq_flags);
1212b548b17aSEric Dumazet 	do {
1213408f0a6cSEric Dumazet 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1214408f0a6cSEric Dumazet 			goto out;
1215408f0a6cSEric Dumazet 
121673a6bab5SEric Dumazet 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1217b548b17aSEric Dumazet 	} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1218408f0a6cSEric Dumazet 
121946d3ceabSEric Dumazet 	/* queue this socket to tasklet queue */
122046d3ceabSEric Dumazet 	local_irq_save(flags);
1221903ceff7SChristoph Lameter 	tsq = this_cpu_ptr(&tsq_tasklet);
1222a9b204d1SEric Dumazet 	empty = list_empty(&tsq->head);
122346d3ceabSEric Dumazet 	list_add(&tp->tsq_node, &tsq->head);
1224a9b204d1SEric Dumazet 	if (empty)
122546d3ceabSEric Dumazet 		tasklet_schedule(&tsq->tasklet);
122646d3ceabSEric Dumazet 	local_irq_restore(flags);
12279b462d02SEric Dumazet 	return;
12289b462d02SEric Dumazet out:
12299b462d02SEric Dumazet 	sk_free(sk);
123046d3ceabSEric Dumazet }
123146d3ceabSEric Dumazet 
123273a6bab5SEric Dumazet /* Note: Called under soft irq.
123373a6bab5SEric Dumazet  * We can call TCP stack right away, unless socket is owned by user.
1234218af599SEric Dumazet  */
1235218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1236218af599SEric Dumazet {
1237218af599SEric Dumazet 	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1238218af599SEric Dumazet 	struct sock *sk = (struct sock *)tp;
1239218af599SEric Dumazet 
124073a6bab5SEric Dumazet 	tcp_tsq_handler(sk);
124173a6bab5SEric Dumazet 	sock_put(sk);
1242218af599SEric Dumazet 
1243218af599SEric Dumazet 	return HRTIMER_NORESTART;
1244218af599SEric Dumazet }
1245218af599SEric Dumazet 
1246a7a25630SEric Dumazet static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1247a7a25630SEric Dumazet 				      u64 prior_wstamp)
1248e2080072SEric Dumazet {
1249ab408b6dSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
1250ab408b6dSEric Dumazet 
1251ab408b6dSEric Dumazet 	if (sk->sk_pacing_status != SK_PACING_NONE) {
125228b24f90SEric Dumazet 		unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
1253ab408b6dSEric Dumazet 
1254ab408b6dSEric Dumazet 		/* Original sch_fq does not pace first 10 MSS
1255ab408b6dSEric Dumazet 		 * Note that tp->data_segs_out overflows after 2^32 packets;
1256ab408b6dSEric Dumazet 		 * this is a minor annoyance.
1257ab408b6dSEric Dumazet 		 */
125876a9ebe8SEric Dumazet 		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1259a7a25630SEric Dumazet 			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1260a7a25630SEric Dumazet 			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1261a7a25630SEric Dumazet 
1262a7a25630SEric Dumazet 			/* take into account OS jitter */
1263a7a25630SEric Dumazet 			len_ns -= min_t(u64, len_ns / 2, credit);
1264a7a25630SEric Dumazet 			tp->tcp_wstamp_ns += len_ns;
1265ab408b6dSEric Dumazet 		}
1266ab408b6dSEric Dumazet 	}
1267e2080072SEric Dumazet 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1268e2080072SEric Dumazet }
1269e2080072SEric Dumazet 
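/* Worked example for the pacing budget above: a 64KB skb sent at
 * rate = 125 MB/s (~1 Gbit/s) accounts for
 *	len_ns = 65536 * NSEC_PER_SEC / 125000000 = 524288 ns (~524 us),
 * minus up to half of that if the flow already earned credit because
 * tcp_wstamp_ns advanced past prior_wstamp (OS jitter absorption).
 */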
127005e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
127105e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1272dd2e0b86SEric Dumazet INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
127305e22e83SEric Dumazet 
12741da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
12751da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
12761da177e4SLinus Torvalds  * transmission and possible later retransmissions.
12771da177e4SLinus Torvalds  * All SKBs seen here are completely headerless.  It is our
12781da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
12791da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
12801da177e4SLinus Torvalds  * device.
12811da177e4SLinus Torvalds  *
12821da177e4SLinus Torvalds  * We are working here with either a clone of the original
12831da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
12841da177e4SLinus Torvalds  */
12852987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
12862987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
12871da177e4SLinus Torvalds {
12886687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1289dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1290dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1291dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
129233ad798cSAdam Langley 	struct tcp_out_options opts;
129395c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
12948c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
12951e03d32bSDmitry Safonov 	struct tcp_key key;
12961da177e4SLinus Torvalds 	struct tcphdr *th;
1297a7a25630SEric Dumazet 	u64 prior_wstamp;
12981da177e4SLinus Torvalds 	int err;
12991da177e4SLinus Torvalds 
1300dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
13016f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
13027f12422cSYuchung Cheng 	prior_wstamp = tp->tcp_wstamp_ns;
13037f12422cSYuchung Cheng 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1304a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
1305ccdbb6e9SEric Dumazet 	if (clone_it) {
13068c72c65bSEric Dumazet 		oskb = skb;
1307e2080072SEric Dumazet 
1308e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1309e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1310e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1311dfb4b9dcSDavid S. Miller 			else
1312e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1313e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1314e2080072SEric Dumazet 
1315dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1316dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1317b738a185SEric Dumazet 		/* retransmit skbs might have a non-zero value in skb->dev
1318b738a185SEric Dumazet 		 * because skb->dev is aliased with skb->rbnode.rb_left
1319b738a185SEric Dumazet 		 */
1320b738a185SEric Dumazet 		skb->dev = NULL;
1321dfb4b9dcSDavid S. Miller 	}
13225f6188a8SEric Dumazet 
1323dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1324dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
132533ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
13261da177e4SLinus Torvalds 
13271e03d32bSDmitry Safonov 	tcp_get_current_key(sk, &key);
1328051ba674SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
13291e03d32bSDmitry Safonov 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &key);
1330051ba674SEric Dumazet 	} else {
13311e03d32bSDmitry Safonov 		tcp_options_size = tcp_established_options(sk, skb, &opts, &key);
1332051ba674SEric Dumazet 		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1333051ba674SEric Dumazet 		 * at the receiver: this slightly improves GRO performance.
1334051ba674SEric Dumazet 		 * Note that we do not force the PSH flag for non GSO packets,
1335051ba674SEric Dumazet 		 * because they might be sent under high congestion events,
1336051ba674SEric Dumazet 		 * and in this case it is better to delay the delivery of 1-MSS
1337051ba674SEric Dumazet 		 * packets and thus the corresponding ACK packet that would
1338051ba674SEric Dumazet 		 * release the following packet.
1339051ba674SEric Dumazet 		 */
1340051ba674SEric Dumazet 		if (tcp_skb_pcount(skb) > 1)
1341051ba674SEric Dumazet 			tcb->tcp_flags |= TCPHDR_PSH;
1342051ba674SEric Dumazet 	}
134333ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
13441da177e4SLinus Torvalds 
1345726e9e8bSEric Dumazet 	/* We set skb->ooo_okay to one if this packet can select
1346726e9e8bSEric Dumazet 	 * a different TX queue than prior packets of this flow,
1347726e9e8bSEric Dumazet 	 * to avoid self-inflicted reorders.
1348726e9e8bSEric Dumazet 	 * The 'other' queue decision is based on current cpu number
1349726e9e8bSEric Dumazet 	 * if XPS is enabled, or sk->sk_txhash otherwise.
1350726e9e8bSEric Dumazet 	 * We can switch to another (and better) queue if:
1351726e9e8bSEric Dumazet 	 * 1) No packet with payload is in qdisc/device queues.
1352726e9e8bSEric Dumazet 	 *    Delays in TX completion can defeat the test
1353726e9e8bSEric Dumazet 	 *    even if packets were already sent.
1354726e9e8bSEric Dumazet 	 * 2) Or rtx queue is empty.
1355726e9e8bSEric Dumazet 	 *    This mitigates above case if ACK packets for
1356726e9e8bSEric Dumazet 	 *    all prior packets were already processed.
1357547669d4SEric Dumazet 	 */
1358726e9e8bSEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) ||
1359726e9e8bSEric Dumazet 			tcp_rtx_queue_empty(sk);
13601da177e4SLinus Torvalds 
136138ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
136238ab52e8SEric Dumazet 	 * this might cause drops if the packet is looped back:
136338ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
136438ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
136538ab52e8SEric Dumazet 	 */
136638ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
136738ab52e8SEric Dumazet 
1368aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1369aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
137046d3ceabSEric Dumazet 
137146d3ceabSEric Dumazet 	skb_orphan(skb);
137246d3ceabSEric Dumazet 	skb->sk = sk;
13731d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
137414afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
13751da177e4SLinus Torvalds 
1376eb44ad4eSEric Dumazet 	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
1377c3a2e837SJulian Anastasov 
13781da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1379ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1380c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1381c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
13821da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
13832987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1384df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
13854de075e0SEric Dumazet 					tcb->tcp_flags);
1386dfb4b9dcSDavid S. Miller 
13871da177e4SLinus Torvalds 	th->check		= 0;
13881da177e4SLinus Torvalds 	th->urg_ptr		= 0;
13891da177e4SLinus Torvalds 
139033f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
13917691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
13927691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
13931da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
13941da177e4SLinus Torvalds 			th->urg = 1;
13957691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
13960eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
13977691367dSHerbert Xu 			th->urg = 1;
13987691367dSHerbert Xu 		}
13991da177e4SLinus Torvalds 	}
14001da177e4SLinus Torvalds 
140151466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1402ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1403ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1404ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1405ea1627c2SEric Dumazet 	} else {
1406ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1407ea1627c2SEric Dumazet 		 * is never scaled.
1408ea1627c2SEric Dumazet 		 */
1409ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1410ea1627c2SEric Dumazet 	}
1411fa3fe2b1SFlorian Westphal 
141206b22ef2SDmitry Safonov 	tcp_options_write(th, tp, NULL, &opts, &key);
1413fa3fe2b1SFlorian Westphal 
14141e03d32bSDmitry Safonov 	if (tcp_key_is_md5(&key)) {
1415cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1416cfb6eeb4SYOSHIFUJI Hideaki 		/* Calculate the MD5 hash, as we have all we need now */
1417aba54656SEric Dumazet 		sk_gso_disable(sk);
1418bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
14191e03d32bSDmitry Safonov 					       key.md5_key, sk, skb);
1420cfb6eeb4SYOSHIFUJI Hideaki #endif
14211e03d32bSDmitry Safonov 	} else if (tcp_key_is_ao(&key)) {
14221e03d32bSDmitry Safonov 		int err;
14231e03d32bSDmitry Safonov 
14241e03d32bSDmitry Safonov 		err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th,
14251e03d32bSDmitry Safonov 					  opts.hash_location);
14261e03d32bSDmitry Safonov 		if (err) {
14271e03d32bSDmitry Safonov 			kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
14281e03d32bSDmitry Safonov 			return -ENOMEM;
14291e03d32bSDmitry Safonov 		}
14301e03d32bSDmitry Safonov 	}
1431cfb6eeb4SYOSHIFUJI Hideaki 
1432331fca43SMartin KaFai Lau 	/* BPF prog is the last one writing header option */
1433331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1434331fca43SMartin KaFai Lau 
1435dd2e0b86SEric Dumazet 	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1436dd2e0b86SEric Dumazet 			   tcp_v6_send_check, tcp_v4_send_check,
1437dd2e0b86SEric Dumazet 			   sk, skb);
14381da177e4SLinus Torvalds 
14394de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1440059217c1SNeal Cardwell 		tcp_event_ack_sent(sk, rcv_nxt);
14411da177e4SLinus Torvalds 
1442a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1443cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1444a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1445ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1446a44d6eacSMartin KaFai Lau 	}
14471da177e4SLinus Torvalds 
1448bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1449aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1450aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
14511da177e4SLinus Torvalds 
14522efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
14530ae5b43dSYuchung Cheng 	skb_set_hash_from_sk(skb, sk);
1454f69ad292SEric Dumazet 	/* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */
1455cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1456f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1457cd7d8498SEric Dumazet 
1458d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1459971f10ecSEric Dumazet 
1460971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1461971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1462971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1463971f10ecSEric Dumazet 
1464a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1465a842fe14SEric Dumazet 
146605e22e83SEric Dumazet 	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
146705e22e83SEric Dumazet 				 inet6_csk_xmit, ip_queue_xmit,
146805e22e83SEric Dumazet 				 sk, skb, &inet->cork.fl);
14697faee5c0SEric Dumazet 
14708c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
14715ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
14728c72c65bSEric Dumazet 		err = net_xmit_eval(err);
14738c72c65bSEric Dumazet 	}
1474fc225799SEric Dumazet 	if (!err && oskb) {
1475a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1476fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1477fc225799SEric Dumazet 	}
14788c72c65bSEric Dumazet 	return err;
14791da177e4SLinus Torvalds }
14801da177e4SLinus Torvalds 
14812987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
14822987babbSYuchung Cheng 			    gfp_t gfp_mask)
14832987babbSYuchung Cheng {
14842987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
14852987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
14862987babbSYuchung Cheng }
14872987babbSYuchung Cheng 
148867edfef7SAndi Kleen /* This routine just queues the buffer for sending.
14891da177e4SLinus Torvalds  *
14901da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked; do not forget tcp_push_pending_frames,
14911da177e4SLinus Torvalds  * otherwise socket can stall.
14921da177e4SLinus Torvalds  */
14931da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
14941da177e4SLinus Torvalds {
14951da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
14961da177e4SLinus Torvalds 
14971da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
14980f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1499f4a775d1SEric Dumazet 	__skb_header_release(skb);
1500fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
1501ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
15023ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
15031da177e4SLinus Torvalds }
15041da177e4SLinus Torvalds 
150567edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
1506d5b38a71SEric Dumazet static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1507f6302d1dSDavid S. Miller {
1508d5b38a71SEric Dumazet 	int tso_segs;
1509d5b38a71SEric Dumazet 
15104a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1511f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1512f6302d1dSDavid S. Miller 		 * non-TSO case.
1513f6302d1dSDavid S. Miller 		 */
1514f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1515d5b38a71SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1516d5b38a71SEric Dumazet 		return 1;
15171da177e4SLinus Torvalds 	}
1518d5b38a71SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1519d5b38a71SEric Dumazet 	tso_segs = DIV_ROUND_UP(skb->len, mss_now);
1520d5b38a71SEric Dumazet 	tcp_skb_pcount_set(skb, tso_segs);
1521d5b38a71SEric Dumazet 	return tso_segs;
15221da177e4SLinus Torvalds }
15231da177e4SLinus Torvalds 
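/* Worked example: with mss_now = 1460, a 4000-byte skb yields
 *	tso_segs = DIV_ROUND_UP(4000, 1460) = 3
 * with tcp_gso_size set to 1460, while a 1000-byte skb takes the cheap
 * path and counts as a single non-TSO segment (tcp_gso_size = 0).
 */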
1524797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed; we need to do various
1525797108d1SIlpo Järvinen  * tweaks to fix counters.
1526797108d1SIlpo Järvinen  */
1527cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1528797108d1SIlpo Järvinen {
1529797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1530797108d1SIlpo Järvinen 
1531797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1532797108d1SIlpo Järvinen 
1533797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1534797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1535797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1536797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1537797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1538797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1539797108d1SIlpo Järvinen 
1540797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1541797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1542797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1543797108d1SIlpo Järvinen 
1544797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1545797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1546713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1547797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1548797108d1SIlpo Järvinen 
1549797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1550797108d1SIlpo Järvinen }
1551797108d1SIlpo Järvinen 
15520a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
15530a2cf20cSSoheil Hassas Yeganeh {
15540a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
15550a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
15560a2cf20cSSoheil Hassas Yeganeh }
15570a2cf20cSSoheil Hassas Yeganeh 
1558490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1559490cc7d0SWillem de Bruijn {
1560490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1561490cc7d0SWillem de Bruijn 
15620a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1563490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1564490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1565490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1566490cc7d0SWillem de Bruijn 
1567490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1568490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1569490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1570b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1571b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1572490cc7d0SWillem de Bruijn 	}
1573490cc7d0SWillem de Bruijn }
1574490cc7d0SWillem de Bruijn 
1575a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1576a166140eSMartin KaFai Lau {
1577a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1578a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1579a166140eSMartin KaFai Lau }
1580a166140eSMartin KaFai Lau 
158175c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
158275c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
158375c119afSEric Dumazet 					 struct sk_buff *buff,
158475c119afSEric Dumazet 					 struct sock *sk,
158575c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
158675c119afSEric Dumazet {
158775c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
158875c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
158975c119afSEric Dumazet 	else
159075c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
159175c119afSEric Dumazet }
159275c119afSEric Dumazet 
15931da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
15941da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
15951da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
15961da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
15971da177e4SLinus Torvalds  */
159875c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
159975c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
16006cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
16011da177e4SLinus Torvalds {
16021da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16031da177e4SLinus Torvalds 	struct sk_buff *buff;
1604b4a24397SEric Dumazet 	int old_factor;
1605b617158dSEric Dumazet 	long limit;
1606b60b49eaSHerbert Xu 	int nlen;
16079ce01461SIlpo Järvinen 	u8 flags;
16081da177e4SLinus Torvalds 
16092fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
16102fceec13SIlpo Järvinen 		return -EINVAL;
16116a438bbeSStephen Hemminger 
1612b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
16131da177e4SLinus Torvalds 
1614b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full-size skb.
1615b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1616b617158dSEric Dumazet 	 * SO_SNDBUF values.
1617b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1618b617158dSEric Dumazet 	 */
16197c4e983cSAlexander Duyck 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1620b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1621b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1622b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1623b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1624f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1625f070ef2aSEric Dumazet 		return -ENOMEM;
1626f070ef2aSEric Dumazet 	}
1627f070ef2aSEric Dumazet 
1628c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, gfp))
16291da177e4SLinus Torvalds 		return -ENOMEM;
16301da177e4SLinus Torvalds 
16311da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
16325882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
163351456b29SIan Morris 	if (!buff)
16341da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
163541477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
16365a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
1637ef5cb973SHerbert Xu 
1638ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
16393ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1640b4a24397SEric Dumazet 	nlen = skb->len - len;
1641b60b49eaSHerbert Xu 	buff->truesize += nlen;
1642b60b49eaSHerbert Xu 	skb->truesize -= nlen;
16431da177e4SLinus Torvalds 
16441da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
16451da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
16461da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
16471da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
16481da177e4SLinus Torvalds 
16491da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
16504de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
16514de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
16524de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1653e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1654a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
16551da177e4SLinus Torvalds 
16561da177e4SLinus Torvalds 	skb_split(skb, buff, len);
16571da177e4SLinus Torvalds 
1658a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(buff, skb->tstamp, true);
1659490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
16601da177e4SLinus Torvalds 
16616475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
16626475be16SDavid S. Miller 
16631da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
16645bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
16655bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
16661da177e4SLinus Torvalds 
1667b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1668b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1669b9f64820SYuchung Cheng 
16706475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
16716475be16SDavid S. Miller 	 * adjust the various packet counters.
16726475be16SDavid S. Miller 	 */
1673cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
16746475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
16756475be16SDavid S. Miller 			tcp_skb_pcount(buff);
16761da177e4SLinus Torvalds 
1677797108d1SIlpo Järvinen 		if (diff)
1678797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
16791da177e4SLinus Torvalds 	}
16801da177e4SLinus Torvalds 
16811da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1682f4a775d1SEric Dumazet 	__skb_header_release(buff);
168375c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1684f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1685e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
16861da177e4SLinus Torvalds 
16871da177e4SLinus Torvalds 	return 0;
16881da177e4SLinus Torvalds }
16891da177e4SLinus Torvalds 
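/* Sketch of a typical tcp_fragment() call, e.g. when only part of a
 * 2920-byte (2 x 1460) rtx-queue skb was SACKed and the rest must be
 * retransmitted separately (a hypothetical call site for illustration):
 */
#if 0	/* illustrative only, never compiled */
	if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, 1460,
			 mss_now, GFP_ATOMIC))
		return -ENOMEM;
	/* skb now covers bytes [seq, seq + 1460) without FIN/PSH;
	 * the new buff covers [seq + 1460, end_seq) and carries those flags.
	 */
#endif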
1690f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1691f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
16921da177e4SLinus Torvalds  */
16937162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
16941da177e4SLinus Torvalds {
16957b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
16961da177e4SLinus Torvalds 	int i, k, eat;
16971da177e4SLinus Torvalds 
1698b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
16991da177e4SLinus Torvalds 	eat = len;
17001da177e4SLinus Torvalds 	k = 0;
17017b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
17027b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
17037b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
17049e903e08SEric Dumazet 
17059e903e08SEric Dumazet 		if (size <= eat) {
1706aff65da0SIan Campbell 			skb_frag_unref(skb, i);
17079e903e08SEric Dumazet 			eat -= size;
17081da177e4SLinus Torvalds 		} else {
17097b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
17101da177e4SLinus Torvalds 			if (eat) {
1711b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[k], eat);
17127b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
17131da177e4SLinus Torvalds 				eat = 0;
17141da177e4SLinus Torvalds 			}
17151da177e4SLinus Torvalds 			k++;
17161da177e4SLinus Torvalds 		}
17171da177e4SLinus Torvalds 	}
17187b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
17191da177e4SLinus Torvalds 
17201da177e4SLinus Torvalds 	skb->data_len -= len;
17211da177e4SLinus Torvalds 	skb->len = skb->data_len;
17227162fb24SEric Dumazet 	return len;
17231da177e4SLinus Torvalds }
17241da177e4SLinus Torvalds 
172567edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
17261da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
17271da177e4SLinus Torvalds {
17287162fb24SEric Dumazet 	u32 delta_truesize;
17297162fb24SEric Dumazet 
1730c4777efaSEric Dumazet 	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
17311da177e4SLinus Torvalds 		return -ENOMEM;
17321da177e4SLinus Torvalds 
17337162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
17361da177e4SLinus Torvalds 
17377162fb24SEric Dumazet 	skb->truesize	   -= delta_truesize;
1738ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, -delta_truesize);
17399b65b17dSTalal Ahmad 	if (!skb_zcopy_pure(skb))
17407162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
17411da177e4SLinus Torvalds 
17425b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
17431da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
17445bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
17451da177e4SLinus Torvalds 
17461da177e4SLinus Torvalds 	return 0;
17471da177e4SLinus Torvalds }
17481da177e4SLinus Torvalds 
17491b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
17501b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
17515d424d5aSJohn Heffner {
1752cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1753cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
17545d424d5aSJohn Heffner 	int mss_now;
17555d424d5aSJohn Heffner 
17565d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
17575d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
17585d424d5aSJohn Heffner 	 */
17595d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
17605d424d5aSJohn Heffner 
17615d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
17625d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
17635d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
17645d424d5aSJohn Heffner 
17655d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
17665d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
17675d424d5aSJohn Heffner 
17685d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
176978eb166cSKuniyuki Iwashima 	mss_now = max(mss_now,
177078eb166cSKuniyuki Iwashima 		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
17715d424d5aSJohn Heffner 	return mss_now;
17725d424d5aSJohn Heffner }
17735d424d5aSJohn Heffner 
17741b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
17751b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
17761b63edd6SYuchung Cheng {
17771b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
17781b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
17791b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
17801b63edd6SYuchung Cheng }
1781c7bb4b89SEric Dumazet EXPORT_SYMBOL(tcp_mtu_to_mss);
17821b63edd6SYuchung Cheng 
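/* Worked example: for an IPv4 socket with no IP options and pmtu = 1500,
 *	__tcp_mtu_to_mss() = 1500 - 20 (IP) - 20 (TCP) = 1460,
 * and with timestamps negotiated (tcp_header_len = 20 + 12),
 *	tcp_mtu_to_mss() = 1460 - 12 = 1448 bytes of payload per segment.
 */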
17835d424d5aSJohn Heffner /* Inverse of above */
178467469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
17855d424d5aSJohn Heffner {
1786cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1787cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
17885d424d5aSJohn Heffner 
1789e57a3447SYan Zhai 	return mss +
17905d424d5aSJohn Heffner 	      tp->tcp_header_len +
17915d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
17925d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
17935d424d5aSJohn Heffner }
1794556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
17955d424d5aSJohn Heffner 
179667edfef7SAndi Kleen /* MTU probing init per socket */
17975d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
17985d424d5aSJohn Heffner {
17995d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
18005d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1801b0f9ca53SFan Du 	struct net *net = sock_net(sk);
18025d424d5aSJohn Heffner 
1803f47d00e0SKuniyuki Iwashima 	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
18045d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
18055d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
180688d78bc0SKuniyuki Iwashima 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
18075d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
180805cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1809c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
18105d424d5aSJohn Heffner }
18114bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
18125d424d5aSJohn Heffner 
18131da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
18141da177e4SLinus Torvalds 
18151da177e4SLinus Torvalds    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
18161da177e4SLinus Torvalds    for TCP options, but includes only the bare TCP header.
18171da177e4SLinus Torvalds 
18181da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1819caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
18201da177e4SLinus Torvalds    It also does not include TCP options.
18211da177e4SLinus Torvalds 
1822d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
18231da177e4SLinus Torvalds 
18241da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
18251da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
18261da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
18271da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
18301da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
18311da177e4SLinus Torvalds 
1832d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1833d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
18341da177e4SLinus Torvalds  */
18351da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
18361da177e4SLinus Torvalds {
18371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1838d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
18395d424d5aSJohn Heffner 	int mss_now;
18401da177e4SLinus Torvalds 
18415d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
18425d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
18431da177e4SLinus Torvalds 
18445d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1845409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds 	/* And store cached results */
1848d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
18495d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
18505d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1851c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
18521da177e4SLinus Torvalds 
18531da177e4SLinus Torvalds 	return mss_now;
18541da177e4SLinus Torvalds }
18554bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
18561da177e4SLinus Torvalds 
18571da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
18581da177e4SLinus Torvalds  * and even PMTU discovery events into account.
18591da177e4SLinus Torvalds  */
18600c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
18611da177e4SLinus Torvalds {
1862cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1863cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1864c1b4a7e6SDavid S. Miller 	u32 mss_now;
186595c96174SEric Dumazet 	unsigned int header_len;
186633ad798cSAdam Langley 	struct tcp_out_options opts;
18671e03d32bSDmitry Safonov 	struct tcp_key key;
18681da177e4SLinus Torvalds 
1869c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1870c1b4a7e6SDavid S. Miller 
18711da177e4SLinus Torvalds 	if (dst) {
18721da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1873d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
18741da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
18751da177e4SLinus Torvalds 	}
18761e03d32bSDmitry Safonov 	tcp_get_current_key(sk, &key);
18771e03d32bSDmitry Safonov 	header_len = tcp_established_options(sk, NULL, &opts, &key) +
187833ad798cSAdam Langley 		     sizeof(struct tcphdr);
187933ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
188033ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
188133ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
188233ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
188333ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
188433ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
188533ad798cSAdam Langley 		mss_now -= delta;
188633ad798cSAdam Langley 	}
1887cfb6eeb4SYOSHIFUJI Hideaki 
18881da177e4SLinus Torvalds 	return mss_now;
18891da177e4SLinus Torvalds }
18901da177e4SLinus Torvalds 
189186fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
189286fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
189386fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
189486fd14adSWeiping Pan  */
189586fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1896a762a980SDavid S. Miller {
18979e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1898a762a980SDavid S. Miller 
189986fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
190086fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
190186fd14adSWeiping Pan 		/* Limited by application or receiver window. */
190286fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
190386fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
190440570375SEric Dumazet 		if (win_used < tcp_snd_cwnd(tp)) {
190586fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
190640570375SEric Dumazet 			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
190786fd14adSWeiping Pan 		}
190886fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
190986fd14adSWeiping Pan 	}
1910c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
191186fd14adSWeiping Pan }
191286fd14adSWeiping Pan 
1913ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1914a762a980SDavid S. Miller {
19151b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1916a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1917a762a980SDavid S. Miller 
1918f4ce91ceSNeal Cardwell 	/* Track the strongest available signal of the degree to which the cwnd
1919f4ce91ceSNeal Cardwell 	 * is fully utilized. If cwnd-limited then remember that fact for the
1920f4ce91ceSNeal Cardwell 	 * current window. If not cwnd-limited then track the maximum number of
1921f4ce91ceSNeal Cardwell 	 * outstanding packets in the current window. (If cwnd-limited then we
1922f4ce91ceSNeal Cardwell 	 * chose to not update tp->max_packets_out to avoid an extra else
1923f4ce91ceSNeal Cardwell 	 * clause with no functional impact.)
1924ca8a2263SNeal Cardwell 	 */
1925f4ce91ceSNeal Cardwell 	if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1926f4ce91ceSNeal Cardwell 	    is_cwnd_limited ||
1927f4ce91ceSNeal Cardwell 	    (!tp->is_cwnd_limited &&
1928f4ce91ceSNeal Cardwell 	     tp->packets_out > tp->max_packets_out)) {
1929ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1930f4ce91ceSNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1931f4ce91ceSNeal Cardwell 		tp->cwnd_usage_seq = tp->snd_nxt;
1932ca8a2263SNeal Cardwell 	}
1933e114a710SEric Dumazet 
193424901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1935a762a980SDavid S. Miller 		/* Network is fed fully. */
1936a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1937c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1938a762a980SDavid S. Miller 	} else {
1939a762a980SDavid S. Miller 		/* Network starves. */
1940a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1941a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1942a762a980SDavid S. Miller 
19434845b571SKuniyuki Iwashima 		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
1944c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
19451b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1946a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1947b0f71bd3SFrancis Yan 
1948b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1949b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1950b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1951b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
195275c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1953b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1954b0f71bd3SFrancis Yan 		 */
195575c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1956b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1957b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1958b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1959a762a980SDavid S. Miller 	}
1960a762a980SDavid S. Miller }
1961a762a980SDavid S. Miller 
1962d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1963d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1964d4589926SEric Dumazet {
1965d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1966d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1967d4589926SEric Dumazet }
1968d4589926SEric Dumazet 
1969d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1970d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1971d4589926SEric Dumazet  * The test is really:
1972d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1973d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1974d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1975d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
19760e3a4803SIlpo Järvinen  */
1977d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1978d4589926SEric Dumazet 				const struct sk_buff *skb)
1979d4589926SEric Dumazet {
1980d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1981d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1982d4589926SEric Dumazet }
1983d4589926SEric Dumazet 
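/* Worked example: with mss_now = 1460, a 2920-byte skb (pcount 2) ends
 * exactly on an mss boundary (2920 == 2 * 1460) and leaves snd_sml alone,
 * while a 3000-byte skb (pcount 3) has an 80-byte tail (3000 < 3 * 1460)
 * and records its end_seq in snd_sml.
 */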
1984d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1985d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1986d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1987d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1988d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1989d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1990d4589926SEric Dumazet  */
1991d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1992cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1993d4589926SEric Dumazet {
1994d4589926SEric Dumazet 	return partial &&
1995d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1996d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1997d4589926SEric Dumazet }
1998605ad7f1SEric Dumazet 
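/* Example: with Nagle enabled (nonagle == 0), a sub-mss tail is held back
 * whenever earlier data is still in flight and the last small segment is
 * not yet acked (tcp_minshall_check()); with TCP_NODELAY it is sent
 * immediately, and with TCP_CORK any partial segment is always deferred.
 */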
1999605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
200065466904SEric Dumazet  * depending on current pacing rate, and how close the peer is.
200165466904SEric Dumazet  *
200265466904SEric Dumazet  * Rationale is:
200365466904SEric Dumazet  * - For close peers, we'd rather send bigger packets to reduce
200465466904SEric Dumazet  *   cpu costs, because occasional losses will be repaired fast.
200565466904SEric Dumazet  * - For long distance/rtt flows, we would like to get ACK clocking
200665466904SEric Dumazet  *   with 1 ACK per ms.
200765466904SEric Dumazet  *
200865466904SEric Dumazet  * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
200965466904SEric Dumazet  * in bigger TSO bursts. We cut the RTT-based allowance in half
201065466904SEric Dumazet  * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
201165466904SEric Dumazet  * is below 1500 bytes after 6 * ~500 usec = 3ms.
2012605ad7f1SEric Dumazet  */
2013dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
20141b3878caSNeal Cardwell 			    int min_tso_segs)
2015605ad7f1SEric Dumazet {
201665466904SEric Dumazet 	unsigned long bytes;
201765466904SEric Dumazet 	u32 r;
2018605ad7f1SEric Dumazet 
201928b24f90SEric Dumazet 	bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
2020605ad7f1SEric Dumazet 
20212455e61bSKuniyuki Iwashima 	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
202265466904SEric Dumazet 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
202365466904SEric Dumazet 		bytes += sk->sk_gso_max_size >> r;
2024605ad7f1SEric Dumazet 
202565466904SEric Dumazet 	bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
202665466904SEric Dumazet 
202765466904SEric Dumazet 	return max_t(u32, bytes / mss_now, min_tso_segs);
2028605ad7f1SEric Dumazet }
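/*
 * Illustrative worked example (added for clarity; the numbers are
 * assumptions, not values from this file): with
 * sk_pacing_rate = 12,500,000 B/s (~100 Mbit/s), sk_pacing_shift = 10,
 * tcp_tso_rtt_log = 9 and sk_gso_max_size = 65,536:
 *   bytes = 12,500,000 >> 10 = 12,207      (about 1 ms worth of data)
 *   min_rtt = 512 us -> r = 1 -> bytes += 65,536 >> 1 = 32,768
 *   bytes = min(44,975, 65,536) = 44,975
 * so with mss_now = 1448 this asks for 44,975 / 1448 = 31 segments.
 */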
2029605ad7f1SEric Dumazet 
2030ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
2031ed6e7268SNeal Cardwell  * See if the congestion control module wants to decide; otherwise, autosize.
2032ed6e7268SNeal Cardwell  */
2033ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
2034ed6e7268SNeal Cardwell {
2035ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2036dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
2037ed6e7268SNeal Cardwell 
2038dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
2039dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
2040e0bb4ab9SKuniyuki Iwashima 			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
2041dcb8c9b4SEric Dumazet 
2042dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
2043350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
2044ed6e7268SNeal Cardwell }
2045ed6e7268SNeal Cardwell 
2046d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
2047d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
2048d4589926SEric Dumazet 					const struct sk_buff *skb,
2049d4589926SEric Dumazet 					unsigned int mss_now,
2050d4589926SEric Dumazet 					unsigned int max_segs,
2051d4589926SEric Dumazet 					int nonagle)
2052c1b4a7e6SDavid S. Miller {
2053cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2054d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
2055c1b4a7e6SDavid S. Miller 
205690840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
20571485348dSBen Hutchings 	max_len = mss_now * max_segs;
20580e3a4803SIlpo Järvinen 
20591485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
20601485348dSBen Hutchings 		return max_len;
20610e3a4803SIlpo Järvinen 
20625ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
20635ea3a748SIlpo Järvinen 
20641485348dSBen Hutchings 	if (max_len <= needed)
20651485348dSBen Hutchings 		return max_len;
20660e3a4803SIlpo Järvinen 
2067d4589926SEric Dumazet 	partial = needed % mss_now;
2068d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
2069d4589926SEric Dumazet 	 * to include this last segment in this skb.
2070d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary
2071d4589926SEric Dumazet 	 */
2072cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
2073d4589926SEric Dumazet 		return needed - partial;
2074d4589926SEric Dumazet 
2075d4589926SEric Dumazet 	return needed;
2076c1b4a7e6SDavid S. Miller }
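/*
 * Illustrative worked example (added for clarity; assumed numbers):
 * with mss_now = 1448, a large window, and the tail skb holding
 * skb->len = 5000 bytes with max_segs = 4 (max_len = 5792):
 *   needed  = 5000, partial = 5000 % 1448 = 656
 * If tcp_nagle_check() defers the sub-mss tail, we send
 * 5000 - 656 = 4344 bytes (three full segments); otherwise all 5000.
 */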
2077c1b4a7e6SDavid S. Miller 
2078c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
2079c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
2080c1b4a7e6SDavid S. Miller  */
208122555032SEric Dumazet static u32 tcp_cwnd_test(const struct tcp_sock *tp)
2082c1b4a7e6SDavid S. Miller {
2083d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
2084c1b4a7e6SDavid S. Miller 
2085c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
208640570375SEric Dumazet 	cwnd = tcp_snd_cwnd(tp);
2087d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
2088c1b4a7e6SDavid S. Miller 		return 0;
2089d649a7a8SEric Dumazet 
2090d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
2091d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
2092d649a7a8SEric Dumazet 	 */
2093d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
2094d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
2095c1b4a7e6SDavid S. Miller }
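/*
 * Illustrative worked example (added for clarity): with
 * tcp_snd_cwnd(tp) = 10, halfcwnd = 5, so
 *   in_flight = 7 -> quota = min(5, 3) = 3 segments,
 *   in_flight = 2 -> quota = min(5, 8) = 5 segments;
 * a single burst never commits more than half the cwnd.
 */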
2096c1b4a7e6SDavid S. Miller 
2097b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
209867edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
2099c1b4a7e6SDavid S. Miller  * SKB onto the wire.
2100c1b4a7e6SDavid S. Miller  */
21015bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2102c1b4a7e6SDavid S. Miller {
2103c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
2104c1b4a7e6SDavid S. Miller 
2105d5b38a71SEric Dumazet 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now))
2106d5b38a71SEric Dumazet 		return tcp_set_skb_tso_segs(skb, mss_now);
2107d5b38a71SEric Dumazet 
2108c1b4a7e6SDavid S. Miller 	return tso_segs;
2109c1b4a7e6SDavid S. Miller }
2110c1b4a7e6SDavid S. Miller 
2111c1b4a7e6SDavid S. Miller 
2112a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
2113c1b4a7e6SDavid S. Miller  * sent now.
2114c1b4a7e6SDavid S. Miller  */
2115a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2116c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
2117c1b4a7e6SDavid S. Miller {
2118c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
2119c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
2120c1b4a7e6SDavid S. Miller 	 *
2121c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
2122c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
2123c1b4a7e6SDavid S. Miller 	 */
2124c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
2125a2a385d6SEric Dumazet 		return true;
2126c1b4a7e6SDavid S. Miller 
21279b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
21289b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2129a2a385d6SEric Dumazet 		return true;
2130c1b4a7e6SDavid S. Miller 
2131cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2132a2a385d6SEric Dumazet 		return true;
2133c1b4a7e6SDavid S. Miller 
2134a2a385d6SEric Dumazet 	return false;
2135c1b4a7e6SDavid S. Miller }
2136c1b4a7e6SDavid S. Miller 
2137c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
2138a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2139a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
2140056834d9SIlpo Järvinen 			     unsigned int cur_mss)
2141c1b4a7e6SDavid S. Miller {
2142c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2143c1b4a7e6SDavid S. Miller 
2144c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
2145c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2146c1b4a7e6SDavid S. Miller 
214790840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
2148c1b4a7e6SDavid S. Miller }
2149c1b4a7e6SDavid S. Miller 
2150c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2151c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
2152c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
2153c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
2154c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
2155c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
2156c1b4a7e6SDavid S. Miller  */
215756483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2158c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
2159c1b4a7e6SDavid S. Miller {
2160c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
216156483341SEric Dumazet 	struct sk_buff *buff;
21629ce01461SIlpo Järvinen 	u8 flags;
2163c1b4a7e6SDavid S. Miller 
2164c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
2165b4a24397SEric Dumazet 	DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len);
2166c1b4a7e6SDavid S. Miller 
21675882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, gfp, true);
216851456b29SIan Morris 	if (unlikely(!buff))
2169c1b4a7e6SDavid S. Miller 		return -ENOMEM;
217041477662SJakub Kicinski 	skb_copy_decrypted(buff, skb);
21715a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(buff, skb);
2172c1b4a7e6SDavid S. Miller 
2173ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, buff->truesize);
21743ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
2175b60b49eaSHerbert Xu 	buff->truesize += nlen;
2176c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
2177c1b4a7e6SDavid S. Miller 
2178c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
2179c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2180c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2181c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2182c1b4a7e6SDavid S. Miller 
2183c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
21844de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
21854de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
21864de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
2187c1b4a7e6SDavid S. Miller 
2188a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
2189a166140eSMartin KaFai Lau 
2190c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
2191490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
2192c1b4a7e6SDavid S. Miller 
2193c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
21945bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
21955bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
2196c1b4a7e6SDavid S. Miller 
2197c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
2198f4a775d1SEric Dumazet 	__skb_header_release(buff);
219956483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2200c1b4a7e6SDavid S. Miller 
2201c1b4a7e6SDavid S. Miller 	return 0;
2202c1b4a7e6SDavid S. Miller }
2203c1b4a7e6SDavid S. Miller 
2204c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
2205c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2206c1b4a7e6SDavid S. Miller  *
2207c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
2208c1b4a7e6SDavid S. Miller  */
2209ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2210f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
2211f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
2212f9bfe4e6SEric Dumazet 				 u32 max_segs)
2213c1b4a7e6SDavid S. Miller {
22146687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2215f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
221650c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
221750c8339eSEric Dumazet 	struct sk_buff *head;
2218ad9f4f50SEric Dumazet 	int win_divisor;
2219f1c6ea38SEric Dumazet 	s64 delta;
2220c1b4a7e6SDavid S. Miller 
222199d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2222ae8064acSJohn Heffner 		goto send_now;
2223ae8064acSJohn Heffner 
22245f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing deferral
2225a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
2226a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2227a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
22285f852eb5SEric Dumazet 	 */
2229a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2230a682850aSEric Dumazet 	if (delta > 0)
2231ae8064acSJohn Heffner 		goto send_now;
2232908a75c1SDavid S. Miller 
2233c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
2234c1b4a7e6SDavid S. Miller 
2235c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
223640570375SEric Dumazet 	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2237c1b4a7e6SDavid S. Miller 
223890840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2239c1b4a7e6SDavid S. Miller 
2240c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
224140570375SEric Dumazet 	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2242c1b4a7e6SDavid S. Miller 
2243c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
2244c1b4a7e6SDavid S. Miller 
2245ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
2246605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
2247ae8064acSJohn Heffner 		goto send_now;
2248ba244fe9SDavid S. Miller 
224962ad2761SIlpo Järvinen 	/* An skb in the middle of the queue won't get more data; already fully sendable? */
225062ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
225162ad2761SIlpo Järvinen 		goto send_now;
225262ad2761SIlpo Järvinen 
22535bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2254ad9f4f50SEric Dumazet 	if (win_divisor) {
225540570375SEric Dumazet 		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2256c1b4a7e6SDavid S. Miller 
2257c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
2258c1b4a7e6SDavid S. Miller 		 * just use it.
2259c1b4a7e6SDavid S. Miller 		 */
2260ad9f4f50SEric Dumazet 		chunk /= win_divisor;
2261c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
2262ae8064acSJohn Heffner 			goto send_now;
2263c1b4a7e6SDavid S. Miller 	} else {
2264c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
2265c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
2266c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
2267c1b4a7e6SDavid S. Miller 		 * then send now.
2268c1b4a7e6SDavid S. Miller 		 */
22696b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2270ae8064acSJohn Heffner 			goto send_now;
2271c1b4a7e6SDavid S. Miller 	}
2272c1b4a7e6SDavid S. Miller 
227375c119afSEric Dumazet 	/* TODO: use tsorted_sent_queue? */
227475c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
227575c119afSEric Dumazet 	if (!head)
227675c119afSEric Dumazet 		goto send_now;
2277f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
227850c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
2279f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
228050c8339eSEric Dumazet 		goto send_now;
228150c8339eSEric Dumazet 
2282f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
2283f9bfe4e6SEric Dumazet 	 * Three cases are tracked:
2284f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
2285f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
2286f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
2287f9bfe4e6SEric Dumazet 	 */
2288f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
2289f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
2290ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2291f9bfe4e6SEric Dumazet 			return true;
2292f9bfe4e6SEric Dumazet 		}
2293f9bfe4e6SEric Dumazet 	} else {
2294f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2295f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2296f9bfe4e6SEric Dumazet 			return true;
2297f9bfe4e6SEric Dumazet 		}
2298f9bfe4e6SEric Dumazet 	}
2299f9bfe4e6SEric Dumazet 
2300f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2301d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2302d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2303f9bfe4e6SEric Dumazet 		goto send_now;
2304ca8a2263SNeal Cardwell 
2305a2a385d6SEric Dumazet 	return true;
2306ae8064acSJohn Heffner 
2307ae8064acSJohn Heffner send_now:
2308a2a385d6SEric Dumazet 	return false;
2309c1b4a7e6SDavid S. Miller }
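/*
 * Illustrative worked example (added for clarity; assumed numbers):
 * with mss_cache = 1448, tcp_snd_cwnd(tp) = 10, in_flight = 4,
 * tp->snd_wnd = send_win = 60,000 and tcp_tso_win_divisor = 3:
 *   cong_win = (10 - 4) * 1448 = 8,688, limit = min(60,000, 8,688)
 *   a full-sized burst (say 45 * 1448 = 65,160) does not fit, but
 *   chunk = min(60,000, 10 * 1448) / 3 = 4,826 and limit >= chunk,
 * so the win_divisor heuristic sends now instead of deferring.
 */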
2310c1b4a7e6SDavid S. Miller 
231105cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
231205cbc0dbSFan Du {
231305cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
231405cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
231505cbc0dbSFan Du 	struct net *net = sock_net(sk);
231605cbc0dbSFan Du 	u32 interval;
231705cbc0dbSFan Du 	s32 delta;
231805cbc0dbSFan Du 
23192a85388fSKuniyuki Iwashima 	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2320c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
232105cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
232205cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
232305cbc0dbSFan Du 
232405cbc0dbSFan Du 		/* Update current search range */
232505cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
232605cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
232705cbc0dbSFan Du 			sizeof(struct tcphdr) +
232805cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
232905cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
233005cbc0dbSFan Du 
233105cbc0dbSFan Du 		/* Update probe time stamp */
2332c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
233305cbc0dbSFan Du 	}
233405cbc0dbSFan Du }
233505cbc0dbSFan Du 
2336808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2337808cf9e3SIlya Lesokhin {
2338808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2339808cf9e3SIlya Lesokhin 
2340808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2341808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2342808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2343808cf9e3SIlya Lesokhin 			break;
2344808cf9e3SIlya Lesokhin 
23459b65b17dSTalal Ahmad 		if (unlikely(TCP_SKB_CB(skb)->eor) ||
23469b65b17dSTalal Ahmad 		    tcp_has_tx_tstamp(skb) ||
23479b65b17dSTalal Ahmad 		    !skb_pure_zcopy_same(skb, next))
2348808cf9e3SIlya Lesokhin 			return false;
2349808cf9e3SIlya Lesokhin 
2350808cf9e3SIlya Lesokhin 		len -= skb->len;
2351808cf9e3SIlya Lesokhin 	}
2352808cf9e3SIlya Lesokhin 
2353808cf9e3SIlya Lesokhin 	return true;
2354808cf9e3SIlya Lesokhin }
2355808cf9e3SIlya Lesokhin 
235673601329SEric Dumazet static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
235773601329SEric Dumazet 			     int probe_size)
235873601329SEric Dumazet {
235973601329SEric Dumazet 	skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
236073601329SEric Dumazet 	int i, todo, len = 0, nr_frags = 0;
236173601329SEric Dumazet 	const struct sk_buff *skb;
236273601329SEric Dumazet 
236373601329SEric Dumazet 	if (!sk_wmem_schedule(sk, to->truesize + probe_size))
236473601329SEric Dumazet 		return -ENOMEM;
236573601329SEric Dumazet 
236673601329SEric Dumazet 	skb_queue_walk(&sk->sk_write_queue, skb) {
236773601329SEric Dumazet 		const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
236873601329SEric Dumazet 
236973601329SEric Dumazet 		if (skb_headlen(skb))
237073601329SEric Dumazet 			return -EINVAL;
237173601329SEric Dumazet 
237273601329SEric Dumazet 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
237373601329SEric Dumazet 			if (len >= probe_size)
237473601329SEric Dumazet 				goto commit;
237573601329SEric Dumazet 			todo = min_t(int, skb_frag_size(fragfrom),
237673601329SEric Dumazet 				     probe_size - len);
237773601329SEric Dumazet 			len += todo;
237873601329SEric Dumazet 			if (lastfrag &&
237973601329SEric Dumazet 			    skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
238073601329SEric Dumazet 			    skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
238173601329SEric Dumazet 						      skb_frag_size(lastfrag)) {
238273601329SEric Dumazet 				skb_frag_size_add(lastfrag, todo);
238373601329SEric Dumazet 				continue;
238473601329SEric Dumazet 			}
238573601329SEric Dumazet 			if (unlikely(nr_frags == MAX_SKB_FRAGS))
238673601329SEric Dumazet 				return -E2BIG;
238773601329SEric Dumazet 			skb_frag_page_copy(fragto, fragfrom);
238873601329SEric Dumazet 			skb_frag_off_copy(fragto, fragfrom);
238973601329SEric Dumazet 			skb_frag_size_set(fragto, todo);
239073601329SEric Dumazet 			nr_frags++;
239173601329SEric Dumazet 			lastfrag = fragto++;
239273601329SEric Dumazet 		}
239373601329SEric Dumazet 	}
239473601329SEric Dumazet commit:
239573601329SEric Dumazet 	WARN_ON_ONCE(len != probe_size);
239673601329SEric Dumazet 	for (i = 0; i < nr_frags; i++)
239773601329SEric Dumazet 		skb_frag_ref(to, i);
239873601329SEric Dumazet 
239973601329SEric Dumazet 	skb_shinfo(to)->nr_frags = nr_frags;
240073601329SEric Dumazet 	to->truesize += probe_size;
240173601329SEric Dumazet 	to->len += probe_size;
240273601329SEric Dumazet 	to->data_len += probe_size;
240373601329SEric Dumazet 	__skb_header_release(to);
240473601329SEric Dumazet 	return 0;
240573601329SEric Dumazet }
240673601329SEric Dumazet 
24071bede0a1SEric Dumazet /* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if
24081bede0a1SEric Dumazet  * all its payload was moved to another one (dst).
24091bede0a1SEric Dumazet  * Make sure to transfer tcp_flags, eor, and tstamp.
24101bede0a1SEric Dumazet  */
24111bede0a1SEric Dumazet static void tcp_eat_one_skb(struct sock *sk,
24121bede0a1SEric Dumazet 			    struct sk_buff *dst,
24131bede0a1SEric Dumazet 			    struct sk_buff *src)
24141bede0a1SEric Dumazet {
24151bede0a1SEric Dumazet 	TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags;
24161bede0a1SEric Dumazet 	TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor;
24171bede0a1SEric Dumazet 	tcp_skb_collapse_tstamp(dst, src);
24181bede0a1SEric Dumazet 	tcp_unlink_write_queue(src, sk);
24191bede0a1SEric Dumazet 	tcp_wmem_free_skb(sk, src);
24201bede0a1SEric Dumazet }
24211bede0a1SEric Dumazet 
24225d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
242367edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
242467edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
242567edfef7SAndi Kleen  * changes resulting in larger path MTUs.
242667edfef7SAndi Kleen  *
24275d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
24285d424d5aSJohn Heffner  *         1 if a probe was sent,
2429056834d9SIlpo Järvinen  *         -1 otherwise
2430056834d9SIlpo Järvinen  */
24315d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
24325d424d5aSJohn Heffner {
24335d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
243412a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
24355d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
24366b58e0a5SFan Du 	struct net *net = sock_net(sk);
24375d424d5aSJohn Heffner 	int probe_size;
243891cc17c0SIlpo Järvinen 	int size_needed;
243912a59abcSEric Dumazet 	int copy, len;
24405d424d5aSJohn Heffner 	int mss_now;
24416b58e0a5SFan Du 	int interval;
24425d424d5aSJohn Heffner 
24435d424d5aSJohn Heffner 	/* Not currently probing/verifying,
24445d424d5aSJohn Heffner 	 * not in recovery,
24455d424d5aSJohn Heffner 	 * have enough cwnd, and
244612a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
244712a59abcSEric Dumazet 	 */
244812a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
24495d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
24505d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
245140570375SEric Dumazet 		   tcp_snd_cwnd(tp) < 11 ||
245212a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
24535d424d5aSJohn Heffner 		return -1;
24545d424d5aSJohn Heffner 
24556b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
24566b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
24576b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
24586b58e0a5SFan Du 	 */
24590c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
24606b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
24616b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
246291cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
24636b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
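	/* Illustrative numbers (added for clarity; assuming common
	 * defaults rather than values from this file): with
	 * search_low = 1064 (tcp_base_mss 1024 plus ~40 bytes of headers),
	 * search_high = 1500, mss_cache = 1448 and reordering = 3:
	 *   probe_size  ~= tcp_mtu_to_mss(sk, (1500 + 1064) / 2) ~= 1242
	 *   size_needed  = 1242 + 4 * 1448 = 7034 bytes
	 *   interval     = 1500 - 1064 = 436
	 */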
246405cbc0dbSFan Du 	/* When misfortune happens, we are actively reprobing and the
246505cbc0dbSFan Du 	 * reprobe timer has expired. Stick with the current probing
246605cbc0dbSFan Du 	 * process by not resetting the search range to its original value.
246705cbc0dbSFan Du 	 */
24686b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
246992c0aa41SKuniyuki Iwashima 	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
247005cbc0dbSFan Du 		/* Check whether enough time has elapsed for
247105cbc0dbSFan Du 		 * another round of probing.
247205cbc0dbSFan Du 		 */
247305cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
24745d424d5aSJohn Heffner 		return -1;
24755d424d5aSJohn Heffner 	}
24765d424d5aSJohn Heffner 
24775d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
24787f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
24795d424d5aSJohn Heffner 		return -1;
24805d424d5aSJohn Heffner 
248191cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
24825d424d5aSJohn Heffner 		return -1;
248390840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
24845d424d5aSJohn Heffner 		return 0;
24855d424d5aSJohn Heffner 
2486d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
248740570375SEric Dumazet 	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2488d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
24895d424d5aSJohn Heffner 			return -1;
24905d424d5aSJohn Heffner 		else
24915d424d5aSJohn Heffner 			return 0;
24925d424d5aSJohn Heffner 	}
24935d424d5aSJohn Heffner 
2494808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2495808cf9e3SIlya Lesokhin 		return -1;
2496808cf9e3SIlya Lesokhin 
24975d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
24985882efffSEric Dumazet 	nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
249951456b29SIan Morris 	if (!nskb)
25005d424d5aSJohn Heffner 		return -1;
250173601329SEric Dumazet 
250273601329SEric Dumazet 	/* Build the payload, and be prepared to abort if this fails. */
250373601329SEric Dumazet 	if (tcp_clone_payload(sk, nskb, probe_size)) {
250471c299c7SJakub Kicinski 		tcp_skb_tsorted_anchor_cleanup(nskb);
250573601329SEric Dumazet 		consume_skb(nskb);
250673601329SEric Dumazet 		return -1;
250773601329SEric Dumazet 	}
2508ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, nskb->truesize);
25093ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
25105d424d5aSJohn Heffner 
2511fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
251241477662SJakub Kicinski 	skb_copy_decrypted(nskb, skb);
25135a369ca6SPaolo Abeni 	mptcp_skb_ext_copy(nskb, skb);
25145d424d5aSJohn Heffner 
25155d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
25165d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
25174de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
25185d424d5aSJohn Heffner 
251950c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
25202b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
252150c4817eSIlpo Järvinen 
25225d424d5aSJohn Heffner 	len = 0;
2523234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
25245d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
25255d424d5aSJohn Heffner 
25265d424d5aSJohn Heffner 		if (skb->len <= copy) {
25271bede0a1SEric Dumazet 			tcp_eat_one_skb(sk, nskb, skb);
25285d424d5aSJohn Heffner 		} else {
25294de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2530a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
25315d424d5aSJohn Heffner 			__pskb_trim_head(skb, copy);
25325bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss_now);
25335d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
25345d424d5aSJohn Heffner 		}
25355d424d5aSJohn Heffner 
25365d424d5aSJohn Heffner 		len += copy;
2537234b6860SIlpo Järvinen 
2538234b6860SIlpo Järvinen 		if (len >= probe_size)
2539234b6860SIlpo Järvinen 			break;
25405d424d5aSJohn Heffner 	}
25415bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
25425d424d5aSJohn Heffner 
25435d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
25447faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
25457faee5c0SEric Dumazet 	 */
25465d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
25475d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
25485d424d5aSJohn Heffner 		 * effectively two packets. */
254940570375SEric Dumazet 		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
255066f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
25515d424d5aSJohn Heffner 
25525d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
25530e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
25540e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
25555d424d5aSJohn Heffner 
25565d424d5aSJohn Heffner 		return 1;
25575d424d5aSJohn Heffner 	}
25585d424d5aSJohn Heffner 
25595d424d5aSJohn Heffner 	return -1;
25605d424d5aSJohn Heffner }
25615d424d5aSJohn Heffner 
2562864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2563218af599SEric Dumazet {
2564864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2565864e5c09SEric Dumazet 
2566864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2567864e5c09SEric Dumazet 		return false;
2568864e5c09SEric Dumazet 
2569864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2570864e5c09SEric Dumazet 		return false;
2571864e5c09SEric Dumazet 
2572864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2573864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2574864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2575864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2576864e5c09SEric Dumazet 		sock_hold(sk);
2577864e5c09SEric Dumazet 	}
2578864e5c09SEric Dumazet 	return true;
2579218af599SEric Dumazet }
2580218af599SEric Dumazet 
2581f921a4a5SEric Dumazet static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
2582f921a4a5SEric Dumazet {
2583f921a4a5SEric Dumazet 	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
2584f921a4a5SEric Dumazet 
2585f921a4a5SEric Dumazet 	/* No skb in the rtx queue. */
2586f921a4a5SEric Dumazet 	if (!node)
2587f921a4a5SEric Dumazet 		return true;
2588f921a4a5SEric Dumazet 
2589f921a4a5SEric Dumazet 	/* Only one skb in rtx queue. */
2590f921a4a5SEric Dumazet 	return !node->rb_left && !node->rb_right;
2591f921a4a5SEric Dumazet }
2592f921a4a5SEric Dumazet 
2593f9616c35SEric Dumazet /* TCP Small Queues :
2594f9616c35SEric Dumazet  * Control the number of packets in qdisc/device queues: two packets, or ~1 ms worth of data.
2595f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2596f9616c35SEric Dumazet  * This allows for :
2597f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2598f9616c35SEric Dumazet  *  - faster recovery
2599f9616c35SEric Dumazet  *  - high rates
2600f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2601f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2602f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2603f9616c35SEric Dumazet  */
2604f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2605f9616c35SEric Dumazet 				  unsigned int factor)
2606f9616c35SEric Dumazet {
260776a9ebe8SEric Dumazet 	unsigned long limit;
2608f9616c35SEric Dumazet 
260976a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
261076a9ebe8SEric Dumazet 		      2 * skb->truesize,
261128b24f90SEric Dumazet 		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
2612c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
261376a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
26149fb90193SKuniyuki Iwashima 			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2615f9616c35SEric Dumazet 	limit <<= factor;
2616f9616c35SEric Dumazet 
2617a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2618a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
261928b24f90SEric Dumazet 		u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
262028b24f90SEric Dumazet 				  tcp_sk(sk)->tcp_tx_delay;
2621a842fe14SEric Dumazet 
2622a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2623a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2624a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2625a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2626a842fe14SEric Dumazet 		 */
2627a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2628a842fe14SEric Dumazet 		limit += extra_bytes;
2629a842fe14SEric Dumazet 	}
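	/* Numeric sanity check of the shift above (added for clarity;
	 * illustrative numbers): with sk_pacing_rate = 125,000,000 B/s
	 * and tcp_tx_delay = 1000 us, the exact extra budget would be
	 * rate * delay * 2 / USEC_PER_SEC = 250,000 bytes, while
	 * (125,000,000 * 1000) >> 19 ~= 238,418 bytes; the power-of-two
	 * divisor (524,288 vs 500,000) undershoots by about 5%.
	 */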
263014afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2631f921a4a5SEric Dumazet 		/* Always send skb if rtx queue is empty or has one skb.
263375eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
263475eefc6cSEric Dumazet 		 * after a softirq/tasklet schedule.
263475eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
263575eefc6cSEric Dumazet 		 */
2636f921a4a5SEric Dumazet 		if (tcp_rtx_queue_empty_or_single_skb(sk))
263775eefc6cSEric Dumazet 			return false;
263875eefc6cSEric Dumazet 
26397aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2640f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2641f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2642f9616c35SEric Dumazet 		 * test again the condition.
2643f9616c35SEric Dumazet 		 */
2644f9616c35SEric Dumazet 		smp_mb__after_atomic();
2645ce8299b6SEric Dumazet 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2646f9616c35SEric Dumazet 			return true;
2647f9616c35SEric Dumazet 	}
2648f9616c35SEric Dumazet 	return false;
2649f9616c35SEric Dumazet }
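/*
 * Illustrative worked example (added for clarity; assumed numbers):
 * with sk_pacing_rate = 125,000,000 B/s, sk_pacing_shift = 10 and
 * factor = 0, limit = max(2 * skb->truesize, 125,000,000 >> 10)
 * ~= 122,070 bytes, about 1 ms of data at that rate; retransmit
 * callers pass factor = 1, doubling the allowance as noted above.
 */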
2650f9616c35SEric Dumazet 
265105b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
265205b055e8SFrancis Yan {
2653628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2654efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
265505b055e8SFrancis Yan 
2656efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2657efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
265805b055e8SFrancis Yan 	tp->chrono_start = now;
265905b055e8SFrancis Yan 	tp->chrono_type = new;
266005b055e8SFrancis Yan }
266105b055e8SFrancis Yan 
266205b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
266305b055e8SFrancis Yan {
266405b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
266505b055e8SFrancis Yan 
266605b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
26670f87230dSFrancis Yan 	 * chronograph, the highest priority enum takes precedence
26680f87230dSFrancis Yan 	 * over the other conditions, so that if something "more interesting"
266905b055e8SFrancis Yan 	 * starts happening, we stop the previous chrono and start a new one.
267005b055e8SFrancis Yan 	 */
267105b055e8SFrancis Yan 	if (type > tp->chrono_type)
267205b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
267305b055e8SFrancis Yan }
267405b055e8SFrancis Yan 
267505b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
267605b055e8SFrancis Yan {
267705b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
267805b055e8SFrancis Yan 
26790f87230dSFrancis Yan 
26800f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
26810f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
26820f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
26830f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
26840f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
26850f87230dSFrancis Yan 	 * tracking and starts busy chrono if we have pending data.
26860f87230dSFrancis Yan 	 */
268775c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
268805b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
26890f87230dSFrancis Yan 	else if (type == tp->chrono_type)
26900f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
269105b055e8SFrancis Yan }
269205b055e8SFrancis Yan 
26938ee602c6SEric Dumazet /* First skb in the write queue is smaller than the ideal packet size.
26948ee602c6SEric Dumazet  * Check if we can move payload from the second skb in the queue.
26958ee602c6SEric Dumazet  */
26968ee602c6SEric Dumazet static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount)
26978ee602c6SEric Dumazet {
26988ee602c6SEric Dumazet 	struct sk_buff *next_skb = skb->next;
26998ee602c6SEric Dumazet 	unsigned int nlen;
27008ee602c6SEric Dumazet 
27018ee602c6SEric Dumazet 	if (tcp_skb_is_last(sk, skb))
27028ee602c6SEric Dumazet 		return;
27038ee602c6SEric Dumazet 
27048ee602c6SEric Dumazet 	if (!tcp_skb_can_collapse(skb, next_skb))
27058ee602c6SEric Dumazet 		return;
27068ee602c6SEric Dumazet 
27078ee602c6SEric Dumazet 	nlen = min_t(u32, amount, next_skb->len);
27088ee602c6SEric Dumazet 	if (!nlen || !skb_shift(skb, next_skb, nlen))
27098ee602c6SEric Dumazet 		return;
27108ee602c6SEric Dumazet 
27118ee602c6SEric Dumazet 	TCP_SKB_CB(skb)->end_seq += nlen;
27128ee602c6SEric Dumazet 	TCP_SKB_CB(next_skb)->seq += nlen;
27138ee602c6SEric Dumazet 
27148ee602c6SEric Dumazet 	if (!next_skb->len) {
27151bede0a1SEric Dumazet 		/* In case FIN is set, we need to update end_seq */
27168ee602c6SEric Dumazet 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27171bede0a1SEric Dumazet 
27181bede0a1SEric Dumazet 		tcp_eat_one_skb(sk, skb, next_skb);
27198ee602c6SEric Dumazet 	}
27208ee602c6SEric Dumazet }
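/*
 * Illustrative worked example (added for clarity; assumed numbers):
 * if the caller may send cwnd_quota = 2 segments of mss 1448 but the
 * head skb holds only 1000 bytes, tcp_write_xmit() asks for the
 * missing 1896 bytes; skb_shift() moves up to that much payload from
 * the next skb, and a donor skb left empty is freed via
 * tcp_eat_one_skb(), carrying over its FIN/EOR/tstamp state.
 */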
27218ee602c6SEric Dumazet 
27221da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
27231da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
27241da177e4SLinus Torvalds  * window for us.
27251da177e4SLinus Torvalds  *
2726f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill; only frames between
2727f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2728f8269a49SIlpo Järvinen  * account the rare use of URG, this is not a big flaw.
2729f8269a49SIlpo Järvinen  *
27306ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
27316ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
27326ba8a3b1SNandita Dukkipati  *
2733a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2734a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
27351da177e4SLinus Torvalds  */
2736a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2737d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
27381da177e4SLinus Torvalds {
27391da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
274092df7b51SDavid S. Miller 	struct sk_buff *skb;
2741c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
274222555032SEric Dumazet 	u32 cwnd_quota, max_segs;
27435d424d5aSJohn Heffner 	int result;
27445615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
27451da177e4SLinus Torvalds 
2746c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
27475d424d5aSJohn Heffner 
2748ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2749d5dd9175SIlpo Järvinen 	if (!push_one) {
27505d424d5aSJohn Heffner 		/* Do MTU probing. */
2751d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2752d5dd9175SIlpo Järvinen 		if (!result) {
2753a2a385d6SEric Dumazet 			return false;
27545d424d5aSJohn Heffner 		} else if (result > 0) {
27555d424d5aSJohn Heffner 			sent_pkts = 1;
27565d424d5aSJohn Heffner 		}
2757d5dd9175SIlpo Järvinen 	}
27585d424d5aSJohn Heffner 
2759ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2760fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2761c8ac3774SHerbert Xu 		unsigned int limit;
27628ee602c6SEric Dumazet 		int missing_bytes;
2763c8ac3774SHerbert Xu 
276479861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
276579861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2766a1ac9c8aSMartin KaFai Lau 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2767a1ac9c8aSMartin KaFai Lau 			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
276879861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2769bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
277079861919SEric Dumazet 			goto repair; /* Skip network transmission */
277179861919SEric Dumazet 		}
277279861919SEric Dumazet 
2773218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2774218af599SEric Dumazet 			break;
2775218af599SEric Dumazet 
277622555032SEric Dumazet 		cwnd_quota = tcp_cwnd_test(tp);
27776ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
27786ba8a3b1SNandita Dukkipati 			if (push_one == 2)
27796ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
27806ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
27816ba8a3b1SNandita Dukkipati 			else
2782b68e9f85SHerbert Xu 				break;
27836ba8a3b1SNandita Dukkipati 		}
27848ee602c6SEric Dumazet 		cwnd_quota = min(cwnd_quota, max_segs);
27858ee602c6SEric Dumazet 		missing_bytes = cwnd_quota * mss_now - skb->len;
27868ee602c6SEric Dumazet 		if (missing_bytes > 0)
27878ee602c6SEric Dumazet 			tcp_grow_skb(sk, skb, missing_bytes);
2788b68e9f85SHerbert Xu 
2789d5b38a71SEric Dumazet 		tso_segs = tcp_set_skb_tso_segs(skb, mss_now);
2790d5b38a71SEric Dumazet 
27915615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
27925615f886SFrancis Yan 			is_rwnd_limited = true;
2793b68e9f85SHerbert Xu 			break;
27945615f886SFrancis Yan 		}
2795b68e9f85SHerbert Xu 
2796d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2797aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2798aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2799aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2800aa93466bSDavid S. Miller 				break;
2801c1b4a7e6SDavid S. Miller 		} else {
2802ca8a2263SNeal Cardwell 			if (!push_one &&
2803605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2804f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2805aa93466bSDavid S. Miller 				break;
2806c1b4a7e6SDavid S. Miller 		}
2807aa93466bSDavid S. Miller 
2808605ad7f1SEric Dumazet 		limit = mss_now;
2809d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2810605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
28118ee602c6SEric Dumazet 						    cwnd_quota,
2812605ad7f1SEric Dumazet 						    nonagle);
2813605ad7f1SEric Dumazet 
2814605ad7f1SEric Dumazet 		if (skb->len > limit &&
281556483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2816605ad7f1SEric Dumazet 			break;
2817605ad7f1SEric Dumazet 
2818f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
281946d3ceabSEric Dumazet 			break;
2820c9eeec26SEric Dumazet 
28211f85e626SEric Dumazet 		/* Argh, we hit an empty skb(), presumably a thread
28221f85e626SEric Dumazet 		 * is sleeping in sendmsg()/sk_stream_wait_memory().
28231f85e626SEric Dumazet 		 * We do not want to send a pure-ack packet and have
28241f85e626SEric Dumazet 		 * a strange looking rtx queue with empty packet(s).
28251f85e626SEric Dumazet 		 */
28261f85e626SEric Dumazet 		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
28271f85e626SEric Dumazet 			break;
28281f85e626SEric Dumazet 
2829d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
28301da177e4SLinus Torvalds 			break;
28311da177e4SLinus Torvalds 
2832ec342325SAndrew Vagin repair:
28331da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
28341da177e4SLinus Torvalds 		 * This call will increment packets_out.
28351da177e4SLinus Torvalds 		 */
283666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
28371da177e4SLinus Torvalds 
28381da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2839a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2840d5dd9175SIlpo Järvinen 
2841d5dd9175SIlpo Järvinen 		if (push_one)
2842d5dd9175SIlpo Järvinen 			break;
28431da177e4SLinus Torvalds 	}
28441da177e4SLinus Torvalds 
28455615f886SFrancis Yan 	if (is_rwnd_limited)
28465615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
28475615f886SFrancis Yan 	else
28485615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
28495615f886SFrancis Yan 
285040570375SEric Dumazet 	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
2851299bcb55SNeal Cardwell 	if (likely(sent_pkts || is_cwnd_limited))
2852299bcb55SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2853299bcb55SNeal Cardwell 
2854aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2855684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2856684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
28576ba8a3b1SNandita Dukkipati 
28586ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
28596ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2860ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2861a2a385d6SEric Dumazet 		return false;
28621da177e4SLinus Torvalds 	}
286375c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
28646ba8a3b1SNandita Dukkipati }
28656ba8a3b1SNandita Dukkipati 
2866ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
28676ba8a3b1SNandita Dukkipati {
28686ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
28696ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
28701c2709cfSNeal Cardwell 	u32 timeout, timeout_us, rto_delta_us;
28712ae21cf5SEric Dumazet 	int early_retrans;
28726ba8a3b1SNandita Dukkipati 
28736ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
28746ba8a3b1SNandita Dukkipati 	 * finishes.
28756ba8a3b1SNandita Dukkipati 	 */
2876d983ea6fSEric Dumazet 	if (rcu_access_pointer(tp->fastopen_rsk))
28776ba8a3b1SNandita Dukkipati 		return false;
28786ba8a3b1SNandita Dukkipati 
287952e65865SKuniyuki Iwashima 	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
28806ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2881b4f70c3dSNeal Cardwell 	 * not in loss recovery that are limited by either cwnd or the application.
28826ba8a3b1SNandita Dukkipati 	 */
28832ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2884bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2885b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2886b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
28876ba8a3b1SNandita Dukkipati 		return false;
28886ba8a3b1SNandita Dukkipati 
2889bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2890f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2891f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
28926ba8a3b1SNandita Dukkipati 	 */
2893bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
28941c2709cfSNeal Cardwell 		timeout_us = tp->srtt_us >> 2;
28956ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
28961c2709cfSNeal Cardwell 			timeout_us += tcp_rto_min_us(sk);
2897bb4d991aSYuchung Cheng 		else
28981c2709cfSNeal Cardwell 			timeout_us += TCP_TIMEOUT_MIN_US;
28991c2709cfSNeal Cardwell 		timeout = usecs_to_jiffies(timeout_us);
2900bb4d991aSYuchung Cheng 	} else {
2901bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2902bb4d991aSYuchung Cheng 	}
29036ba8a3b1SNandita Dukkipati 
2904a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2905ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2906ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2907ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2908a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2909a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
29106ba8a3b1SNandita Dukkipati 
29118dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
29126ba8a3b1SNandita Dukkipati 	return true;
29136ba8a3b1SNandita Dukkipati }
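/*
 * Illustrative worked example (added for clarity; assumed numbers):
 * with a smoothed RTT of 5 ms, tp->srtt_us = 5000 << 3 = 40,000 and
 * timeout_us starts at 40,000 >> 2 = 10,000 us, i.e. 2 * srtt.
 * With exactly one packet outstanding and a typical 200 ms min RTO,
 * the probe timeout lands around 210 ms, and it is then clamped so
 * it never fires later than the pending RTO would.
 */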
29146ba8a3b1SNandita Dukkipati 
29151f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
29161f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
29171f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
29181f3279aeSEric Dumazet  */
2919f4dae54eSEric Dumazet static bool skb_still_in_host_queue(struct sock *sk,
29201f3279aeSEric Dumazet 				    const struct sk_buff *skb)
29211f3279aeSEric Dumazet {
292239bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2923f4dae54eSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2924f4dae54eSEric Dumazet 		smp_mb__after_atomic();
2925f4dae54eSEric Dumazet 		if (skb_fclone_busy(sk, skb)) {
2926c10d9310SEric Dumazet 			NET_INC_STATS(sock_net(sk),
29271f3279aeSEric Dumazet 				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
29281f3279aeSEric Dumazet 			return true;
29291f3279aeSEric Dumazet 		}
2930f4dae54eSEric Dumazet 	}
29311f3279aeSEric Dumazet 	return false;
29321f3279aeSEric Dumazet }
29331f3279aeSEric Dumazet 
2934b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
29356ba8a3b1SNandita Dukkipati  * retransmit the last segment.
29366ba8a3b1SNandita Dukkipati  */
29376ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
29386ba8a3b1SNandita Dukkipati {
29399b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
29406ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
29416ba8a3b1SNandita Dukkipati 	int pcount;
29426ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
29436ba8a3b1SNandita Dukkipati 
294476be93fcSYuchung Cheng 	/* At most one outstanding TLP */
294576be93fcSYuchung Cheng 	if (tp->tlp_high_seq)
294676be93fcSYuchung Cheng 		goto rearm_timer;
294776be93fcSYuchung Cheng 
294876be93fcSYuchung Cheng 	tp->tlp_retrans = 0;
2949b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
295075c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2951b340b264SYuchung Cheng 		pcount = tp->packets_out;
2952b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2953b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2954b340b264SYuchung Cheng 			goto probe_sent;
29556ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29566ba8a3b1SNandita Dukkipati 	}
295775c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2958b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2959b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2960b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
296140570375SEric Dumazet 			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
2962b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2963b2b7af86SYuchung Cheng 		return;
2964b2b7af86SYuchung Cheng 	}
29656ba8a3b1SNandita Dukkipati 
29661f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
29671f3279aeSEric Dumazet 		goto rearm_timer;
29681f3279aeSEric Dumazet 
29696ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
29706ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
29716ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29726ba8a3b1SNandita Dukkipati 
29736ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
297475c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
297575c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
29766cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
29776ba8a3b1SNandita Dukkipati 			goto rearm_timer;
297875c119afSEric Dumazet 		skb = skb_rb_next(skb);
29796ba8a3b1SNandita Dukkipati 	}
29806ba8a3b1SNandita Dukkipati 
29816ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
29826ba8a3b1SNandita Dukkipati 		goto rearm_timer;
29836ba8a3b1SNandita Dukkipati 
298410d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2985b340b264SYuchung Cheng 		goto rearm_timer;
29866ba8a3b1SNandita Dukkipati 
298776be93fcSYuchung Cheng 	tp->tlp_retrans = 1;
298876be93fcSYuchung Cheng 
298976be93fcSYuchung Cheng probe_sent:
29909b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
29919b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
29929b717a8dSNandita Dukkipati 
2993c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2994fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2995fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2996b340b264SYuchung Cheng rearm_timer:
2997fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
29981da177e4SLinus Torvalds }
29991da177e4SLinus Torvalds 
3000a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
3001a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
3002a762a980SDavid S. Miller  * The socket must be locked by the caller.
3003a762a980SDavid S. Miller  */
30049e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
30059e412ba7SIlpo Järvinen 			       int nonagle)
3006a762a980SDavid S. Miller {
3007726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
3008726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
3009726e07a8SIlpo Järvinen 	 * all will be happy.
3010726e07a8SIlpo Järvinen 	 */
3011726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
3012726e07a8SIlpo Järvinen 		return;
3013726e07a8SIlpo Järvinen 
301499a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
30157450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
30169e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
3017a762a980SDavid S. Miller }
3018a762a980SDavid S. Miller 
3019c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
3020c1b4a7e6SDavid S. Miller  * a true push of pending frames to set up the probe timer etc.
3021c1b4a7e6SDavid S. Miller  */
3022c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
3023c1b4a7e6SDavid S. Miller {
3024fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
3025c1b4a7e6SDavid S. Miller 
3026c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
3027c1b4a7e6SDavid S. Miller 
3028d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
3029c1b4a7e6SDavid S. Miller }
3030c1b4a7e6SDavid S. Miller 
30311da177e4SLinus Torvalds /* This function returns the amount that we can raise the
30321da177e4SLinus Torvalds  * usable window based on the following constraints
30331da177e4SLinus Torvalds  *
30341da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
30351da177e4SLinus Torvalds  * 2. We limit memory per socket
30361da177e4SLinus Torvalds  *
30371da177e4SLinus Torvalds  * RFC 1122:
30381da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
30391da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
30401da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
30411da177e4SLinus Torvalds  *
30421da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
30431da177e4SLinus Torvalds  * it at least MSS bytes.
30441da177e4SLinus Torvalds  *
30451da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
30461da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
30471da177e4SLinus Torvalds  *
30481da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
30491da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
30501da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
30511da177e4SLinus Torvalds  * window to always advance by a single byte.
30521da177e4SLinus Torvalds  *
30531da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
30541da177e4SLinus Torvalds  * then this will not be a problem.
30551da177e4SLinus Torvalds  *
30561da177e4SLinus Torvalds  * BSD seems to make the following compromise:
30571da177e4SLinus Torvalds  *
30581da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
30591da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
30601da177e4SLinus Torvalds  *	then set the window to 0.
30611da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
30621da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
30631da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
30641da177e4SLinus Torvalds  *
30651da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
30661da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
30671da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
30681da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
30691da177e4SLinus Torvalds  * because the pipeline is full.
30701da177e4SLinus Torvalds  *
30711da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
30721da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
30731da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
30741da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
30751da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
30761da177e4SLinus Torvalds  *
30771da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
30781da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
30791da177e4SLinus Torvalds  *
30801da177e4SLinus Torvalds  * Note, we don't "adjust" for variable-size options such as SACK;
30811da177e4SLinus Torvalds  * regular options like TIMESTAMP are taken into account.
30821da177e4SLinus Torvalds  */
30831da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
30841da177e4SLinus Torvalds {
3085463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
30861da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3087b650d953Smfreemon@cloudflare.com 	struct net *net = sock_net(sk);
3088caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
30891da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
30901da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
30911da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
30921da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
30931da177e4SLinus Torvalds 	 */
3094463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
30951da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
309686c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
3097071c8ed6SFlorian Westphal 	int full_space, window;
3098071c8ed6SFlorian Westphal 
3099071c8ed6SFlorian Westphal 	if (sk_is_mptcp(sk))
3100071c8ed6SFlorian Westphal 		mptcp_space(sk, &free_space, &allowed_space);
3101071c8ed6SFlorian Westphal 
3102071c8ed6SFlorian Westphal 	full_space = min_t(int, tp->window_clamp, allowed_space);
31031da177e4SLinus Torvalds 
310406425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
31051da177e4SLinus Torvalds 		mss = full_space;
310606425c30SEric Dumazet 		if (mss <= 0)
310706425c30SEric Dumazet 			return 0;
310806425c30SEric Dumazet 	}
3109b650d953Smfreemon@cloudflare.com 
3110b650d953Smfreemon@cloudflare.com 	/* Only allow window shrink if the sysctl is enabled and we have
3111b650d953Smfreemon@cloudflare.com 	 * a non-zero scaling factor in effect.
3112b650d953Smfreemon@cloudflare.com 	 */
3113b650d953Smfreemon@cloudflare.com 	if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3114b650d953Smfreemon@cloudflare.com 		goto shrink_window_allowed;
3115b650d953Smfreemon@cloudflare.com 
3116b650d953Smfreemon@cloudflare.com 	/* do not allow window to shrink */
3117b650d953Smfreemon@cloudflare.com 
3118b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
3119463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
31201da177e4SLinus Torvalds 
3121b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
3122053f3684SWei Wang 			tcp_adjust_rcv_ssthresh(sk);
31231da177e4SLinus Torvalds 
312486c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
312586c1a045SFlorian Westphal 		 * increase it due to wscale.
312686c1a045SFlorian Westphal 		 */
312786c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
312886c1a045SFlorian Westphal 
312986c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
313086c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
313186c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
313286c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
313386c1a045SFlorian Westphal 		 * With a large window, the mss test triggers way too late
313486c1a045SFlorian Westphal 		 * to announce a zero window before the rmem limit kicks in.
313586c1a045SFlorian Westphal 		 */
313686c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
31371da177e4SLinus Torvalds 			return 0;
31381da177e4SLinus Torvalds 	}
31391da177e4SLinus Torvalds 
31401da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
31411da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
31421da177e4SLinus Torvalds 
31431da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
31441da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
31451da177e4SLinus Torvalds 	 */
31461da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
31471da177e4SLinus Torvalds 		window = free_space;
31481da177e4SLinus Torvalds 
31491da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
31501da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
31511da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
31521da177e4SLinus Torvalds 		 */
31531935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
31541da177e4SLinus Torvalds 	} else {
31551935299dSGao Feng 		window = tp->rcv_wnd;
31561da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
31571da177e4SLinus Torvalds 		 * Window clamp already applied above.
31581da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
31591da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
31601da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
31611da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
31621da177e4SLinus Torvalds 		 * is too small.
31631da177e4SLinus Torvalds 		 */
31641da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
31651935299dSGao Feng 			window = rounddown(free_space, mss);
316684565070SJohn Heffner 		else if (mss == full_space &&
3167b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
316884565070SJohn Heffner 			window = free_space;
31691da177e4SLinus Torvalds 	}
31701da177e4SLinus Torvalds 
31711da177e4SLinus Torvalds 	return window;
3172b650d953Smfreemon@cloudflare.com 
3173b650d953Smfreemon@cloudflare.com shrink_window_allowed:
3174b650d953Smfreemon@cloudflare.com 	/* new window should always be an exact multiple of scaling factor */
3175b650d953Smfreemon@cloudflare.com 	free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3176b650d953Smfreemon@cloudflare.com 
3177b650d953Smfreemon@cloudflare.com 	if (free_space < (full_space >> 1)) {
3178b650d953Smfreemon@cloudflare.com 		icsk->icsk_ack.quick = 0;
3179b650d953Smfreemon@cloudflare.com 
3180b650d953Smfreemon@cloudflare.com 		if (tcp_under_memory_pressure(sk))
3181b650d953Smfreemon@cloudflare.com 			tcp_adjust_rcv_ssthresh(sk);
3182b650d953Smfreemon@cloudflare.com 
3183b650d953Smfreemon@cloudflare.com 		/* if free space is too low, return a zero window */
3184b650d953Smfreemon@cloudflare.com 		if (free_space < (allowed_space >> 4) || free_space < mss ||
3185b650d953Smfreemon@cloudflare.com 			free_space < (1 << tp->rx_opt.rcv_wscale))
3186b650d953Smfreemon@cloudflare.com 			return 0;
3187b650d953Smfreemon@cloudflare.com 	}
3188b650d953Smfreemon@cloudflare.com 
3189b650d953Smfreemon@cloudflare.com 	if (free_space > tp->rcv_ssthresh) {
3190b650d953Smfreemon@cloudflare.com 		free_space = tp->rcv_ssthresh;
3191b650d953Smfreemon@cloudflare.com 		/* new window should always be an exact multiple of scaling factor
3192b650d953Smfreemon@cloudflare.com 		 *
3193b650d953Smfreemon@cloudflare.com 		 * For this case, we ALIGN "up" (increase free_space) because
3194b650d953Smfreemon@cloudflare.com 		 * we know free_space is not zero here, it has been reduced from
3195b650d953Smfreemon@cloudflare.com 		 * the memory-based limit, and rcv_ssthresh is not a hard limit
3196b650d953Smfreemon@cloudflare.com 		 * (unlike sk_rcvbuf).
3197b650d953Smfreemon@cloudflare.com 		 */
3198b650d953Smfreemon@cloudflare.com 		free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
3199b650d953Smfreemon@cloudflare.com 	}
3200b650d953Smfreemon@cloudflare.com 
3201b650d953Smfreemon@cloudflare.com 	return free_space;
32021da177e4SLinus Torvalds }
32031da177e4SLinus Torvalds 
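/*
 * Illustrative sketch (not kernel code, kept under #if 0; simplified,
 * it omits the full_space special case): the no-wscale branch above
 * rounds the offer down to a whole number of segments.  With
 * mss = 1460 and free_space = 10000, rounddown(10000, 1460) yields
 * 6 * 1460 = 8760; an existing offer already within one mss of
 * free_space (e.g. 8760) is kept as-is, avoiding the divide/multiply.
 */
#if 0
static int example_window(int window, int free_space, int mss)
{
	/* Keep the current offer when it is within one mss of the free
	 * space; otherwise trim free space to a multiple of mss.
	 */
	if (window <= free_space - mss || window > free_space)
		window = free_space - (free_space % mss); /* rounddown() */
	return window;
}
#endif
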
3204cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3205082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
3206082ac2d5SMartin KaFai Lau {
32070a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
32080a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
32090a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
3210082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
3211082ac2d5SMartin KaFai Lau 
32120a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3213082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
32142de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
32152de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
3216082ac2d5SMartin KaFai Lau 	}
3217082ac2d5SMartin KaFai Lau }
3218082ac2d5SMartin KaFai Lau 
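/*
 * Note (illustrative): carrying tx_flags and tskey over to the
 * surviving skb keeps SO_TIMESTAMPING completions intact across a
 * collapse - the tskey that userspace matches on the error queue would
 * otherwise be freed together with next_skb.
 */
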
32194a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
3220f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
32211da177e4SLinus Torvalds {
32221da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
322375c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
322413dde04fSWei Yongjun 	int next_skb_size;
32251da177e4SLinus Torvalds 
3226058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
32271da177e4SLinus Torvalds 
3228058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
32291da177e4SLinus Torvalds 
3230bd446314SEric Dumazet 	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3231f8071cdeSEric Dumazet 		return false;
3232bd446314SEric Dumazet 
32332b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
3234a6963a6bSIlpo Järvinen 
32351da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
32361da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
32371da177e4SLinus Torvalds 
3238e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
32394de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
32401da177e4SLinus Torvalds 
32411da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
32421da177e4SLinus Torvalds 	 * packet counting does not break.
32431da177e4SLinus Torvalds 	 */
32444828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3245a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3246b7689205SIlpo Järvinen 
3247b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
3248ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
3249ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
3250ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
3251b7689205SIlpo Järvinen 
3252797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3253797108d1SIlpo Järvinen 
3254082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
3255082ac2d5SMartin KaFai Lau 
325675c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3257f8071cdeSEric Dumazet 	return true;
32581da177e4SLinus Torvalds }
32591da177e4SLinus Torvalds 
326067edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
3261a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
32624a17fc3aSIlpo Järvinen {
32634a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
3264a2a385d6SEric Dumazet 		return false;
32654a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
3266a2a385d6SEric Dumazet 		return false;
32672331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
32684a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3269a2a385d6SEric Dumazet 		return false;
32704a17fc3aSIlpo Järvinen 
3271a2a385d6SEric Dumazet 	return true;
32724a17fc3aSIlpo Järvinen }
32734a17fc3aSIlpo Järvinen 
327467edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
327567edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
327667edfef7SAndi Kleen  */
32774a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
32784a17fc3aSIlpo Järvinen 				     int space)
32794a17fc3aSIlpo Järvinen {
32804a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
32814a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
3282a2a385d6SEric Dumazet 	bool first = true;
32834a17fc3aSIlpo Järvinen 
32841a63cb91SKuniyuki Iwashima 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
32854a17fc3aSIlpo Järvinen 		return;
32864de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
32874a17fc3aSIlpo Järvinen 		return;
32884a17fc3aSIlpo Järvinen 
328975c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
32904a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
32914a17fc3aSIlpo Järvinen 			break;
32924a17fc3aSIlpo Järvinen 
329385712484SMat Martineau 		if (!tcp_skb_can_collapse(to, skb))
3294a643b5d4SMartin KaFai Lau 			break;
3295a643b5d4SMartin KaFai Lau 
32964a17fc3aSIlpo Järvinen 		space -= skb->len;
32974a17fc3aSIlpo Järvinen 
32984a17fc3aSIlpo Järvinen 		if (first) {
3299a2a385d6SEric Dumazet 			first = false;
33004a17fc3aSIlpo Järvinen 			continue;
33014a17fc3aSIlpo Järvinen 		}
33024a17fc3aSIlpo Järvinen 
33034a17fc3aSIlpo Järvinen 		if (space < 0)
33044a17fc3aSIlpo Järvinen 			break;
33054a17fc3aSIlpo Järvinen 
33064a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
33074a17fc3aSIlpo Järvinen 			break;
33084a17fc3aSIlpo Järvinen 
3309f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
3310f8071cdeSEric Dumazet 			break;
33114a17fc3aSIlpo Järvinen 	}
33124a17fc3aSIlpo Järvinen }
33134a17fc3aSIlpo Järvinen 
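/*
 * Worked example (illustrative, not part of the kernel source): with
 * three 500-byte skbs at the retransmit head and space = 1448,
 * tcp_retrans_try_collapse() first charges the head skb (1448 -> 948),
 * merges the second into it (948 -> 448), then stops before the third,
 * which would drive the budget negative - so the rebuilt packet stays
 * within roughly one MSS of payload.
 */
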
33141da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
33151da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
33161da177e4SLinus Torvalds  * error occurred which prevented the send.
33171da177e4SLinus Torvalds  */
331810d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
33191da177e4SLinus Torvalds {
33205d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
332110d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
33227d227cd2SSridhar Samudrala 	unsigned int cur_mss;
332310d3be56SEric Dumazet 	int diff, len, err;
3324536a6c8eSYonglong Li 	int avail_wnd;
332510d3be56SEric Dumazet 
332610d3be56SEric Dumazet 	/* Inconclusive MTU probe */
332710d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
33285d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
33295d424d5aSJohn Heffner 
33301f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
33311f3279aeSEric Dumazet 		return -EBUSY;
33321f3279aeSEric Dumazet 
3333f99cd562SDong Chenchen start:
33341da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3335f99cd562SDong Chenchen 		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3336f99cd562SDong Chenchen 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
3337f99cd562SDong Chenchen 			TCP_SKB_CB(skb)->seq++;
3338f99cd562SDong Chenchen 			goto start;
3339f99cd562SDong Chenchen 		}
33407f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
33417f582b24SEric Dumazet 			WARN_ON_ONCE(1);
33427f582b24SEric Dumazet 			return -EINVAL;
33437f582b24SEric Dumazet 		}
33441da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
33451da177e4SLinus Torvalds 			return -ENOMEM;
33461da177e4SLinus Torvalds 	}
33471da177e4SLinus Torvalds 
33487d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
33497d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
33507d227cd2SSridhar Samudrala 
33510c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
3352536a6c8eSYonglong Li 	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
33537d227cd2SSridhar Samudrala 
33541da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
33551da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
33561da177e4SLinus Torvalds 	 * case when the window is shrunk to zero; in this case
3357536a6c8eSYonglong Li 	 * our retransmit of one segment serves as a zero window probe.
33581da177e4SLinus Torvalds 	 */
3359536a6c8eSYonglong Li 	if (avail_wnd <= 0) {
3360536a6c8eSYonglong Li 		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
33611da177e4SLinus Torvalds 			return -EAGAIN;
3362536a6c8eSYonglong Li 		avail_wnd = cur_mss;
3363536a6c8eSYonglong Li 	}
33641da177e4SLinus Torvalds 
336510d3be56SEric Dumazet 	len = cur_mss * segs;
3366536a6c8eSYonglong Li 	if (len > avail_wnd) {
3367536a6c8eSYonglong Li 		len = rounddown(avail_wnd, cur_mss);
3368536a6c8eSYonglong Li 		if (!len)
3369536a6c8eSYonglong Li 			len = avail_wnd;
3370536a6c8eSYonglong Li 	}
337110d3be56SEric Dumazet 	if (skb->len > len) {
337275c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
337375c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
33741da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
337502276f3cSIlpo Järvinen 	} else {
3376c4777efaSEric Dumazet 		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
3377c52e2421SEric Dumazet 			return -ENOMEM;
337810d3be56SEric Dumazet 
337910d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
338010d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
338110d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
338210d3be56SEric Dumazet 		if (diff)
338310d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
3384536a6c8eSYonglong Li 		avail_wnd = min_t(int, avail_wnd, cur_mss);
3385536a6c8eSYonglong Li 		if (skb->len < avail_wnd)
3386536a6c8eSYonglong Li 			tcp_retrans_try_collapse(sk, skb, avail_wnd);
33871da177e4SLinus Torvalds 	}
33881da177e4SLinus Torvalds 
338949213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
339049213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
339149213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
339249213555SDaniel Borkmann 
3393678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
3394678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
3395678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3396678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3397678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3398678550c6SYuchung Cheng 	tp->total_retrans += segs;
3399fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
3400678550c6SYuchung Cheng 
340150bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
340250bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
340350bceae9SThomas Graf 	 * beyond what csum_start can cover.
340450bceae9SThomas Graf 	 */
340550bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
340650bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
340710a81980SEric Dumazet 		struct sk_buff *nskb;
340810a81980SEric Dumazet 
3409e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
341010a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
341107f8e4d0SFlorian Westphal 			if (nskb) {
341207f8e4d0SFlorian Westphal 				nskb->dev = NULL;
341307f8e4d0SFlorian Westphal 				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
341407f8e4d0SFlorian Westphal 			} else {
341507f8e4d0SFlorian Westphal 				err = -ENOBUFS;
341607f8e4d0SFlorian Westphal 			}
3417e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
3418e2080072SEric Dumazet 
34195889e2c0SYousuk Seung 		if (!err) {
3420a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
34215889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
34225889e2c0SYousuk Seung 		}
3423117632e6SEric Dumazet 	} else {
3424c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3425117632e6SEric Dumazet 	}
3426c84a5711SYuchung Cheng 
3427a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3428a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3429a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
3430a31ad29eSLawrence Brakmo 
3431fc9f3501SEric Dumazet 	if (likely(!err)) {
3432e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
3433678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
3434ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3435fc9f3501SEric Dumazet 	}
34362bf90a57SPhilo Lu 
34372bf90a57SPhilo Lu 	/* To avoid taking spuriously low RTT samples based on a timestamp
34382bf90a57SPhilo Lu 	 * for a transmit that never happened, always mark EVER_RETRANS
34392bf90a57SPhilo Lu 	 */
34402bf90a57SPhilo Lu 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
34412bf90a57SPhilo Lu 
3442c84a5711SYuchung Cheng 	return err;
344393b174adSYuchung Cheng }
344493b174adSYuchung Cheng 
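/*
 * Worked example (illustrative, not part of the kernel source) of the
 * length clamp in __tcp_retransmit_skb() above: with cur_mss = 1000,
 * segs = 3 and avail_wnd = 2500, len = 3000 exceeds the window and is
 * rounded down to 2000; a longer skb is then split by tcp_fragment()
 * so only two full segments are retransmitted now.
 */
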
344510d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
344693b174adSYuchung Cheng {
344793b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
344810d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
34491da177e4SLinus Torvalds 
34501da177e4SLinus Torvalds 	if (err == 0) {
34511da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
34521da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3453e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
34541da177e4SLinus Torvalds 		}
34551da177e4SLinus Torvalds #endif
34561da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
34571da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
34587ae18975SYuchung Cheng 	}
34591da177e4SLinus Torvalds 
34607ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
34611da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
3462614e8316SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
34631da177e4SLinus Torvalds 
34646e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
34656e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
34666e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
34671da177e4SLinus Torvalds 	return err;
34681da177e4SLinus Torvalds }
34691da177e4SLinus Torvalds 
34701da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
34711da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
34721da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
34731da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
34741da177e4SLinus Torvalds  */
34751da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
34761da177e4SLinus Torvalds {
34776687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3478b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
34791da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3480916e6d1aSEric Dumazet 	bool rearm_timer = false;
3481840a3cbeSYuchung Cheng 	u32 max_segs;
348261eb55f4SIlpo Järvinen 	int mib_idx;
34836a438bbeSStephen Hemminger 
348445e77d31SIlpo Järvinen 	if (!tp->packets_out)
348545e77d31SIlpo Järvinen 		return;
348645e77d31SIlpo Järvinen 
348775c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3488b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3489ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
349075c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3491dca0aaf8SEric Dumazet 		__u8 sacked;
349210d3be56SEric Dumazet 		int segs;
34931da177e4SLinus Torvalds 
3494218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3495218af599SEric Dumazet 			break;
3496218af599SEric Dumazet 
34976a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
349851456b29SIan Morris 		if (!hole)
34996a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
35006a438bbeSStephen Hemminger 
350140570375SEric Dumazet 		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
350210d3be56SEric Dumazet 		if (segs <= 0)
3503916e6d1aSEric Dumazet 			break;
3504dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3505a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3506a3d2e9f8SEric Dumazet 		 * we need to make sure we are not sending overly big TSO packets
3507a3d2e9f8SEric Dumazet 		 */
3508a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
35090e1c54c2SIlpo Järvinen 
3510840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3511006f582cSIlpo Järvinen 			break;
35120e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
351351456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
35140e1c54c2SIlpo Järvinen 				hole = skb;
351561eb55f4SIlpo Järvinen 			continue;
35161da177e4SLinus Torvalds 
35170e1c54c2SIlpo Järvinen 		} else {
35180e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
35190e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
35200e1c54c2SIlpo Järvinen 			else
35210e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
35220e1c54c2SIlpo Järvinen 		}
35230e1c54c2SIlpo Järvinen 
35240e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
352561eb55f4SIlpo Järvinen 			continue;
352640b215e5SPavel Emelyanov 
3527f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3528916e6d1aSEric Dumazet 			break;
3529f9616c35SEric Dumazet 
353010d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
3531916e6d1aSEric Dumazet 			break;
353224ab6becSYuchung Cheng 
3533de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
35341da177e4SLinus Torvalds 
3535684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3536a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3537a262f0cdSNandita Dukkipati 
353875c119afSEric Dumazet 		if (skb == rtx_head &&
353957dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3540916e6d1aSEric Dumazet 			rearm_timer = true;
3541916e6d1aSEric Dumazet 
3542916e6d1aSEric Dumazet 	}
3543916e6d1aSEric Dumazet 	if (rearm_timer)
35443f80e08fSEric Dumazet 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35453f421baaSArnaldo Carvalho de Melo 				     inet_csk(sk)->icsk_rto,
35468dc242adSEric Dumazet 				     TCP_RTO_MAX);
35471da177e4SLinus Torvalds }
35481da177e4SLinus Torvalds 
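/*
 * Worked example (illustrative, not part of the kernel source): each
 * pass of the walk above budgets
 * segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp), capped at
 * max_segs.  With cwnd = 10, 8 packets in flight and max_segs = 4, at
 * most two segments are retransmitted before the budget runs out; the
 * RTO timer is re-armed only if the rtx queue head itself was resent
 * and no RACK reorder timer (ICSK_TIME_REO_TIMEOUT) is pending.
 */
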
3549d83769a5SEric Dumazet /* We allow FIN packets to exceed memory limits to expedite
3550d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3551845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay the FIN
3552845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3553a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3554a6c5ea4cSEric Dumazet  * with edge-triggered epoll()
3555d83769a5SEric Dumazet  */
3556a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3557d83769a5SEric Dumazet {
3558c4ee1185SEric Dumazet 	int delta, amt;
3559d83769a5SEric Dumazet 
3560c4ee1185SEric Dumazet 	delta = size - sk->sk_forward_alloc;
3561c4ee1185SEric Dumazet 	if (delta <= 0)
3562d83769a5SEric Dumazet 		return;
3563c4ee1185SEric Dumazet 	amt = sk_mem_pages(delta);
35645e6300e7SEric Dumazet 	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3565e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3566e805605cSJohannes Weiner 
3567baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
35684b1327beSWei Wang 		mem_cgroup_charge_skmem(sk->sk_memcg, amt,
35694b1327beSWei Wang 					gfp_memcg_charge() | __GFP_NOFAIL);
3570d83769a5SEric Dumazet }
3571d83769a5SEric Dumazet 
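/*
 * Worked example (illustrative, not part of the kernel source): with
 * sk->sk_forward_alloc = 1000 and size = 3000, delta = 2000 and
 * sk_mem_pages(2000) = 1 on a 4K-page system, so forward_alloc grows
 * by one page (4096 bytes) and the same amount is charged to the
 * protocol (and, if enabled, memcg) counters with __GFP_NOFAIL - the
 * charge itself can never fail.
 */
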
3572845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3573845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
35741da177e4SLinus Torvalds  */
35751da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
35761da177e4SLinus Torvalds {
3577ee2aabd3SEric Dumazet 	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
35781da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35791da177e4SLinus Torvalds 
3580845704a5SEric Dumazet 	/* Optimization, tack on the FIN if we have one skb in write queue and
3581845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
3582845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3583845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
35841da177e4SLinus Torvalds 	 */
3585ee2aabd3SEric Dumazet 	tskb = tail;
358675c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
358775c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
358875c119afSEric Dumazet 
358975c119afSEric Dumazet 	if (tskb) {
3590845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3591845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
35921da177e4SLinus Torvalds 		tp->write_seq++;
3593ee2aabd3SEric Dumazet 		if (!tail) {
3594845704a5SEric Dumazet 			/* This means tskb was already sent.
3595845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3596845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3597845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3598845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3599845704a5SEric Dumazet 			 */
3600e0d694d6SEric Dumazet 			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3601845704a5SEric Dumazet 			return;
3602845704a5SEric Dumazet 		}
36031da177e4SLinus Torvalds 	} else {
360494062790SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER,
360594062790SEric Dumazet 				       sk_gfp_mask(sk, GFP_ATOMIC |
360694062790SEric Dumazet 						       __GFP_NOWARN));
3607d1edc085SColin Ian King 		if (unlikely(!skb))
3608845704a5SEric Dumazet 			return;
3609d1edc085SColin Ian King 
3610e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3611d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3612a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
36131da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3614e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3615a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
36161da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
36171da177e4SLinus Torvalds 	}
3618845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
36191da177e4SLinus Torvalds }
36201da177e4SLinus Torvalds 
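/*
 * Summary (illustrative): tcp_send_fin() has two paths - if a suitable
 * tail skb exists (unsent data, or the last rtx skb under memory
 * pressure), the FIN flag rides on it for free; otherwise a fresh skb
 * is allocated and charged via sk_forced_mem_schedule() so that
 * close() cannot be defeated by memory limits.
 */
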
36211da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
36221da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
36231da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
362465bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
36251da177e4SLinus Torvalds  */
36265691276bSJason Xing void tcp_send_active_reset(struct sock *sk, gfp_t priority,
36275691276bSJason Xing 			   enum sk_rst_reason reason)
36281da177e4SLinus Torvalds {
36291da177e4SLinus Torvalds 	struct sk_buff *skb;
36301da177e4SLinus Torvalds 
36317cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
36327cc2b043SGao Feng 
36331da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
36341da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
36351da177e4SLinus Torvalds 	if (!skb) {
36364e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
36371da177e4SLinus Torvalds 		return;
36381da177e4SLinus Torvalds 	}
36391da177e4SLinus Torvalds 
36401da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36411da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3642e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3643a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
36449a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
36451da177e4SLinus Torvalds 	/* Send it off. */
3646dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
36474e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3648c24b14c4SSong Liu 
3649c24b14c4SSong Liu 	/* The skb argument of trace_tcp_send_reset() keeps the skb that
3650c24b14c4SSong Liu 	 * caused the RST; the skb here is a different one, so pass NULL
3651c24b14c4SSong Liu 	 */
3652b533fb9cSJason Xing 	trace_tcp_send_reset(sk, NULL, SK_RST_REASON_NOT_SPECIFIED);
36531da177e4SLinus Torvalds }
36541da177e4SLinus Torvalds 
365567edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
365667edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
36571da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
36581da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
36591da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
36601da177e4SLinus Torvalds  */
36611da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
36621da177e4SLinus Torvalds {
36631da177e4SLinus Torvalds 	struct sk_buff *skb;
36641da177e4SLinus Torvalds 
366575c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
366651456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
366775c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
36681da177e4SLinus Torvalds 		return -EFAULT;
36691da177e4SLinus Torvalds 	}
36704de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
36711da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3672e2080072SEric Dumazet 			struct sk_buff *nskb;
3673e2080072SEric Dumazet 
3674e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3675e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3676e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
367751456b29SIan Morris 			if (!nskb)
36781da177e4SLinus Torvalds 				return -ENOMEM;
3679e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
36802bec445fSEric Dumazet 			tcp_highest_sack_replace(sk, skb, nskb);
368175c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3682f4a775d1SEric Dumazet 			__skb_header_release(nskb);
368375c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3684ab4e846aSEric Dumazet 			sk_wmem_queued_add(sk, nskb->truesize);
36853ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
36861da177e4SLinus Torvalds 			skb = nskb;
36871da177e4SLinus Torvalds 		}
36881da177e4SLinus Torvalds 
36894de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3690735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
36911da177e4SLinus Torvalds 	}
3692dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
36931da177e4SLinus Torvalds }
36941da177e4SLinus Torvalds 
36954aea39c1SEric Dumazet /**
3696331fca43SMartin KaFai Lau  * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3697331fca43SMartin KaFai Lau  * @sk: listener socket
3698331fca43SMartin KaFai Lau  * @dst: dst entry attached to the SYNACK. It is consumed and caller
3699331fca43SMartin KaFai Lau  *       should not use it again.
3700331fca43SMartin KaFai Lau  * @req: request_sock pointer
3701331fca43SMartin KaFai Lau  * @foc: cookie for tcp fast open
3702331fca43SMartin KaFai Lau  * @synack_type: Type of synack to prepare
3703331fca43SMartin KaFai Lau  * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
37044aea39c1SEric Dumazet  */
37055d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3706e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3707ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3708331fca43SMartin KaFai Lau 				enum tcp_synack_type synack_type,
3709331fca43SMartin KaFai Lau 				struct sk_buff *syn_skb)
37101da177e4SLinus Torvalds {
37112e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
37125d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
37135d062de7SEric Dumazet 	struct tcp_out_options opts;
37141e03d32bSDmitry Safonov 	struct tcp_key key = {};
37155d062de7SEric Dumazet 	struct sk_buff *skb;
3716bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
37175d062de7SEric Dumazet 	struct tcphdr *th;
3718f5fff5dcSTom Quetchenbach 	int mss;
3719a842fe14SEric Dumazet 	u64 now;
37201da177e4SLinus Torvalds 
3721ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
37224aea39c1SEric Dumazet 	if (unlikely(!skb)) {
37234aea39c1SEric Dumazet 		dst_release(dst);
37241da177e4SLinus Torvalds 		return NULL;
37254aea39c1SEric Dumazet 	}
37261da177e4SLinus Torvalds 	/* Reserve space for headers. */
37271da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
37281da177e4SLinus Torvalds 
3729b3d05147SEric Dumazet 	switch (synack_type) {
3730b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
37319e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3732b3d05147SEric Dumazet 		break;
3733b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3734b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3735b3d05147SEric Dumazet 		 * to avoid false sharing.
3736b3d05147SEric Dumazet 		 */
3737b3d05147SEric Dumazet 		break;
3738b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3739ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3740ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3741ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3742ca6fb065SEric Dumazet 		 */
3743ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3744b3d05147SEric Dumazet 		break;
3745ca6fb065SEric Dumazet 	}
37464aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
37471da177e4SLinus Torvalds 
37483541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3749f5fff5dcSTom Quetchenbach 
375033ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3751a842fe14SEric Dumazet 	now = tcp_clock_ns();
37528b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
3753f8ace8d9SFlorian Westphal 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3754a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3755a1ac9c8aSMartin KaFai Lau 				      true);
37568b5f12d0SFlorian Westphal 	else
37578b5f12d0SFlorian Westphal #endif
37589e450c1eSYuchung Cheng 	{
3759a1ac9c8aSMartin KaFai Lau 		skb_set_delivery_time(skb, now, true);
37609e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
37619e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
37629e450c1eSYuchung Cheng 	}
376380f03e27SEric Dumazet 
37649427c6aaSDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
376580f03e27SEric Dumazet 	rcu_read_lock();
37669427c6aaSDmitry Safonov #endif
37679427c6aaSDmitry Safonov 	if (tcp_rsk_used_ao(req)) {
37689427c6aaSDmitry Safonov #ifdef CONFIG_TCP_AO
37699427c6aaSDmitry Safonov 		struct tcp_ao_key *ao_key = NULL;
37709427c6aaSDmitry Safonov 		u8 keyid = tcp_rsk(req)->ao_keyid;
37719427c6aaSDmitry Safonov 
37729427c6aaSDmitry Safonov 		ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
37739427c6aaSDmitry Safonov 							    keyid, -1);
37749427c6aaSDmitry Safonov 		/* If there is no matching key - avoid sending anything,
37759427c6aaSDmitry Safonov 		 * especially unsigned segments. It could try harder and look
37769427c6aaSDmitry Safonov 		 * up another peer-matching key, but the peer has requested
37779427c6aaSDmitry Safonov 		 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
37789427c6aaSDmitry Safonov 		 */
37799396c4eeSDmitry Safonov 		if (unlikely(!ao_key)) {
37809427c6aaSDmitry Safonov 			rcu_read_unlock();
37819427c6aaSDmitry Safonov 			kfree_skb(skb);
37829396c4eeSDmitry Safonov 			net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
37839396c4eeSDmitry Safonov 					     keyid);
37849427c6aaSDmitry Safonov 			return NULL;
37859427c6aaSDmitry Safonov 		}
37869427c6aaSDmitry Safonov 		key.ao_key = ao_key;
37879427c6aaSDmitry Safonov 		key.type = TCP_KEY_AO;
37889427c6aaSDmitry Safonov #endif
37899427c6aaSDmitry Safonov 	} else {
37909427c6aaSDmitry Safonov #ifdef CONFIG_TCP_MD5SIG
37919427c6aaSDmitry Safonov 		key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
37929427c6aaSDmitry Safonov 					req_to_sk(req));
37939427c6aaSDmitry Safonov 		if (key.md5_key)
37941e03d32bSDmitry Safonov 			key.type = TCP_KEY_MD5;
379580f03e27SEric Dumazet #endif
37969427c6aaSDmitry Safonov 	}
37975e526552SEric Dumazet 	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
3798331fca43SMartin KaFai Lau 	/* bpf program will be interested in the tcp_flags */
3799331fca43SMartin KaFai Lau 	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
38009427c6aaSDmitry Safonov 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts,
38019427c6aaSDmitry Safonov 					     &key, foc, synack_type, syn_skb)
38029427c6aaSDmitry Safonov 					+ sizeof(*th);
380333ad798cSAdam Langley 
3804aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3805aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
38061da177e4SLinus Torvalds 
3807ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
38081da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
38091da177e4SLinus Torvalds 	th->syn = 1;
38101da177e4SLinus Torvalds 	th->ack = 1;
38116ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3812b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3813634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3814e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
38153b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
38163b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
38178336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
38188336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
38191da177e4SLinus Torvalds 
38201da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3821ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
38229427c6aaSDmitry Safonov 	tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key);
38231da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
3824bced3f7dSBreno Leitao 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3825cfb6eeb4SYOSHIFUJI Hideaki 
3826cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
38279427c6aaSDmitry Safonov 	if (tcp_key_is_md5(&key)) {
38289427c6aaSDmitry Safonov #ifdef CONFIG_TCP_MD5SIG
3829bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
38309427c6aaSDmitry Safonov 					key.md5_key, req_to_sk(req), skb);
38319427c6aaSDmitry Safonov #endif
38329427c6aaSDmitry Safonov 	} else if (tcp_key_is_ao(&key)) {
38339427c6aaSDmitry Safonov #ifdef CONFIG_TCP_AO
38349427c6aaSDmitry Safonov 		tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location,
38359427c6aaSDmitry Safonov 					key.ao_key, req, skb,
38369427c6aaSDmitry Safonov 					opts.hash_location - (u8 *)th, 0);
38379427c6aaSDmitry Safonov #endif
38389427c6aaSDmitry Safonov 	}
38399427c6aaSDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
384080f03e27SEric Dumazet 	rcu_read_unlock();
3841cfb6eeb4SYOSHIFUJI Hideaki #endif
3842cfb6eeb4SYOSHIFUJI Hideaki 
3843331fca43SMartin KaFai Lau 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3844331fca43SMartin KaFai Lau 				synack_type, &opts);
3845331fca43SMartin KaFai Lau 
3846a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(skb, now, true);
3847a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3848a842fe14SEric Dumazet 
38491da177e4SLinus Torvalds 	return skb;
38501da177e4SLinus Torvalds }
38514bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
38521da177e4SLinus Torvalds 
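/*
 * Note (illustrative): tcp_make_synack() selects at most one signing
 * key for the SYNACK - TCP-AO when the SYN used AO (tcp_rsk_used_ao()),
 * else MD5 when a matching key exists - matching the RFC 5925 rule
 * that AO and MD5 signatures never share a segment.
 */
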
385381164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
385481164413SDaniel Borkmann {
385581164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
385681164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
385781164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
385881164413SDaniel Borkmann 
385981164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
386081164413SDaniel Borkmann 		return;
386181164413SDaniel Borkmann 
386281164413SDaniel Borkmann 	rcu_read_lock();
386381164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
38640baf26b0SMartin KaFai Lau 	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
38650baf26b0SMartin KaFai Lau 		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
386681164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
386781164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
386881164413SDaniel Borkmann 	}
386981164413SDaniel Borkmann 	rcu_read_unlock();
387081164413SDaniel Borkmann }
387181164413SDaniel Borkmann 
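/*
 * Usage note (illustrative): the RTAX_CC_ALGO metric consumed by
 * tcp_ca_dst_init() above is typically installed from userspace with
 * iproute2, e.g.:
 *
 *	ip route replace 10.0.0.0/24 via 192.168.1.1 congctl bbr
 *
 * (using "congctl lock bbr" additionally makes tcp_ca_dst_locked()
 * true, so setsockopt(TCP_CONGESTION) cannot override it).  Sockets
 * routed over such a dst switch to that congestion module here.
 */
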
387267edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3873f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
38741da177e4SLinus Torvalds {
3875cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
38761da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
38771da177e4SLinus Torvalds 	__u8 rcv_wscale;
387813d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
38791da177e4SLinus Torvalds 
38801da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
38811da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
38821da177e4SLinus Torvalds 	 */
38835d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
38843666f666SKuniyuki Iwashima 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
38855d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
38861da177e4SLinus Torvalds 
38877c2ffaf2SDmitry Safonov 	tcp_ao_connect_init(sk);
38887c2ffaf2SDmitry Safonov 
38891da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it as the clamp */
38901da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
38911da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
38921da177e4SLinus Torvalds 	tp->max_window = 0;
38935d424d5aSJohn Heffner 	tcp_mtup_init(sk);
38941da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
38951da177e4SLinus Torvalds 
389681164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
389781164413SDaniel Borkmann 
38981da177e4SLinus Torvalds 	if (!tp->window_clamp)
3899f410cbeaSEric Dumazet 		WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
39003541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3901f5fff5dcSTom Quetchenbach 
39021da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
39031da177e4SLinus Torvalds 
3904e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3905e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3906e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3907f410cbeaSEric Dumazet 		WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));
3908e88c64f0SHagen Paul Pfeifer 
390913d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
391013d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
391113d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
391213d3b1ebSLawrence Brakmo 
3913ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
39141da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
39151da177e4SLinus Torvalds 				  &tp->rcv_wnd,
39161da177e4SLinus Torvalds 				  &tp->window_clamp,
39173666f666SKuniyuki Iwashima 				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
391831d12926Slaurent chavey 				  &rcv_wscale,
391913d3b1ebSLawrence Brakmo 				  rcv_wnd);
39201da177e4SLinus Torvalds 
39211da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
39221da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
39231da177e4SLinus Torvalds 
3924e13ec3daSEric Dumazet 	WRITE_ONCE(sk->sk_err, 0);
39251da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
39261da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3927ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
39287f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
39291da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
39301da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
393133f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3932e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3933ee995283SPavel Emelyanov 
3934ee995283SPavel Emelyanov 	if (likely(!tp->repair))
39351da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3936c7781a6eSAndrew Vagin 	else
393770eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3938ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
39397db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
39401da177e4SLinus Torvalds 
39418550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3942463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
39431da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
39441da177e4SLinus Torvalds }
39451da177e4SLinus Torvalds 
3946783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3947783237e8SYuchung Cheng {
3948783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3949783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3950783237e8SYuchung Cheng 
3951783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3952f4a775d1SEric Dumazet 	__skb_header_release(skb);
3953ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
3954783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
39550f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3956783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3957783237e8SYuchung Cheng }
3958783237e8SYuchung Cheng 
3959783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3960783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3961783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3962783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3963783237e8SYuchung Cheng  * If cookie is not cached or other error occurs, falls back to send a
3964783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to
3965783237e8SYuchung Cheng  * sending a regular SYN with the Fast Open cookie request option.
3966783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3967783237e8SYuchung Cheng {
3968ed0c99dcSJakub Kicinski 	struct inet_connection_sock *icsk = inet_csk(sk);
3969783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3970783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3971fbf93406SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
3972355a901eSEric Dumazet 	struct sk_buff *syn_data;
3973fbf93406SEric Dumazet 	int space, err = 0;
3974783237e8SYuchung Cheng 
397567da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3976065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3977783237e8SYuchung Cheng 		goto fallback;
3978783237e8SYuchung Cheng 
3979783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3980783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3981783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3982783237e8SYuchung Cheng 	 */
39833541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3984ed0c99dcSJakub Kicinski 	/* Sync mss_cache after updating the mss_clamp */
3985ed0c99dcSJakub Kicinski 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
39863541f9e8SEric Dumazet 
3987ed0c99dcSJakub Kicinski 	space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
3988783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3989783237e8SYuchung Cheng 
3990f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3991f5ddcbbbSEric Dumazet 
3992fbf93406SEric Dumazet 	if (space &&
3993fbf93406SEric Dumazet 	    !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
3994fbf93406SEric Dumazet 				  pfrag, sk->sk_allocation))
3995fbf93406SEric Dumazet 		goto fallback;
39965882efffSEric Dumazet 	syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false);
3997355a901eSEric Dumazet 	if (!syn_data)
3998783237e8SYuchung Cheng 		goto fallback;
3999355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
400007e100f9SEric Dumazet 	if (space) {
4001fbf93406SEric Dumazet 		space = min_t(size_t, space, pfrag->size - pfrag->offset);
4002fbf93406SEric Dumazet 		space = tcp_wmem_schedule(sk, space);
4003fbf93406SEric Dumazet 	}
4004fbf93406SEric Dumazet 	if (space) {
4005fbf93406SEric Dumazet 		space = copy_page_from_iter(pfrag->page, pfrag->offset,
4006fbf93406SEric Dumazet 					    space, &fo->data->msg_iter);
4007fbf93406SEric Dumazet 		if (unlikely(!space)) {
4008ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
4009355a901eSEric Dumazet 			kfree_skb(syn_data);
4010783237e8SYuchung Cheng 			goto fallback;
4011783237e8SYuchung Cheng 		}
4012fbf93406SEric Dumazet 		skb_fill_page_desc(syn_data, 0, pfrag->page,
4013fbf93406SEric Dumazet 				   pfrag->offset, space);
4014fbf93406SEric Dumazet 		page_ref_inc(pfrag->page);
4015fbf93406SEric Dumazet 		pfrag->offset += space;
4016fbf93406SEric Dumazet 		skb_len_add(syn_data, space);
4017f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
401807e100f9SEric Dumazet 	}
4019355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
4020355a901eSEric Dumazet 	if (space == fo->size)
4021355a901eSEric Dumazet 		fo->data = NULL;
4022355a901eSEric Dumazet 	fo->copied = space;
4023783237e8SYuchung Cheng 
4024355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
40250f87230dSFrancis Yan 	if (syn_data->len)
40260f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
4027355a901eSEric Dumazet 
4028355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
4029355a901eSEric Dumazet 
4030a1ac9c8aSMartin KaFai Lau 	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
4031355a901eSEric Dumazet 
4032355a901eSEric Dumazet 	/* Now the full SYN+DATA has been cloned and sent (or not);
4033355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
4034355a901eSEric Dumazet 	 * that we keep queued in case of a retransmit, as we
4035355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
4036431a9124SEric Dumazet 	 */
4037355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
4038355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
4039355a901eSEric Dumazet 	if (!err) {
404067da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
404175c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
4042f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
4043783237e8SYuchung Cheng 		goto done;
4044783237e8SYuchung Cheng 	}
4045783237e8SYuchung Cheng 
404675c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
404775c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
4048b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
4049b5b7db8dSEric Dumazet 
4050783237e8SYuchung Cheng fallback:
4051783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
4052783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
4053783237e8SYuchung Cheng 		fo->cookie.len = 0;
4054783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
4055783237e8SYuchung Cheng 	if (err)
4056783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
4057783237e8SYuchung Cheng done:
4058783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
4059783237e8SYuchung Cheng 	return err;
4060783237e8SYuchung Cheng }
4061783237e8SYuchung Cheng 
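/* For reference: the client-side path into tcp_send_syn_data() is driven
 * from userspace with MSG_FASTOPEN (given client Fast Open support in
 * net.ipv4.tcp_fastopen). A minimal illustrative sketch, not part of the
 * kernel; the helper name, address, port and payload are placeholders.
 */
#if 0	/* illustrative userspace example, never built with the kernel */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static ssize_t tfo_client_send(const char *payload)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
		.sin_addr = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* sendto() with MSG_FASTOPEN does the implicit connect(); the
	 * payload rides in the SYN once a cookie is cached, otherwise
	 * the kernel falls back to a regular SYN with a cookie request,
	 * exactly as the fallback label above does.
	 */
	return sendto(fd, payload, strlen(payload), MSG_FASTOPEN,
		      (struct sockaddr *)&addr, sizeof(addr));
}
#endif
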
406267edfef7SAndi Kleen /* Build a SYN and send it off. */
40631da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
40641da177e4SLinus Torvalds {
40651da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
40661da177e4SLinus Torvalds 	struct sk_buff *buff;
4067ee586811SEric Paris 	int err;
40681da177e4SLinus Torvalds 
4069de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
40708ba60924SEric Dumazet 
40710aadc739SDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
40720aadc739SDmitry Safonov 	/* Has to be checked late, after setting daddr/saddr/ops.
40720aadc739SDmitry Safonov 	 * Return an error if the peer has both an md5 and a tcp-ao key
40730aadc739SDmitry Safonov 	 * configured, as this is ambiguous.
40750aadc739SDmitry Safonov 	 */
40760aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->md5sig_info,
40770aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
40780aadc739SDmitry Safonov 		bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1);
40790aadc739SDmitry Safonov 		bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk);
40800aadc739SDmitry Safonov 		struct tcp_ao_info *ao_info;
40810aadc739SDmitry Safonov 
40820aadc739SDmitry Safonov 		ao_info = rcu_dereference_check(tp->ao_info,
40830aadc739SDmitry Safonov 						lockdep_sock_is_held(sk));
40840aadc739SDmitry Safonov 		if (ao_info) {
40850aadc739SDmitry Safonov 			/* This is an extra check: tcp_ao_required() in
40860aadc739SDmitry Safonov 			 * tcp_v{4,6}_parse_md5_keys() should prevent adding
40870aadc739SDmitry Safonov 			 * md5 keys on ao_required socket.
40880aadc739SDmitry Safonov 			 */
40890aadc739SDmitry Safonov 			needs_ao |= ao_info->ao_required;
40900aadc739SDmitry Safonov 			WARN_ON_ONCE(ao_info->ao_required && needs_md5);
40910aadc739SDmitry Safonov 		}
40920aadc739SDmitry Safonov 		if (needs_md5 && needs_ao)
40930aadc739SDmitry Safonov 			return -EKEYREJECTED;
40940aadc739SDmitry Safonov 
40950aadc739SDmitry Safonov 		/* If we have a matching md5 key and no matching tcp-ao key
40960aadc739SDmitry Safonov 		 * then free up ao_info if allocated.
40970aadc739SDmitry Safonov 		 */
40980aadc739SDmitry Safonov 		if (needs_md5) {
4099decde258SDmitry Safonov 			tcp_ao_destroy_sock(sk, false);
41000aadc739SDmitry Safonov 		} else if (needs_ao) {
41010aadc739SDmitry Safonov 			tcp_clear_md5_list(sk);
41020aadc739SDmitry Safonov 			kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
41030aadc739SDmitry Safonov 						  lockdep_sock_is_held(sk)));
41040aadc739SDmitry Safonov 		}
41050aadc739SDmitry Safonov 	}
41060aadc739SDmitry Safonov #endif
41070aadc739SDmitry Safonov #ifdef CONFIG_TCP_AO
41080aadc739SDmitry Safonov 	if (unlikely(rcu_dereference_protected(tp->ao_info,
41090aadc739SDmitry Safonov 					       lockdep_sock_is_held(sk)))) {
41100aadc739SDmitry Safonov 		/* Don't allow connecting if ao is configured but no
41110aadc739SDmitry Safonov 		 * matching key is found.
41120aadc739SDmitry Safonov 		 */
41130aadc739SDmitry Safonov 		if (!tp->af_specific->ao_lookup(sk, sk, -1, -1))
41140aadc739SDmitry Safonov 			return -EKEYREJECTED;
41150aadc739SDmitry Safonov 	}
41160aadc739SDmitry Safonov #endif
41170aadc739SDmitry Safonov 
41188ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
41198ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
41208ba60924SEric Dumazet 
41211da177e4SLinus Torvalds 	tcp_connect_init(sk);
41221da177e4SLinus Torvalds 
41232b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
41242b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
41252b916477SAndrey Vagin 		return 0;
41262b916477SAndrey Vagin 	}
41272b916477SAndrey Vagin 
41285882efffSEric Dumazet 	buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
4129355a901eSEric Dumazet 	if (unlikely(!buff))
41301da177e4SLinus Torvalds 		return -ENOBUFS;
41311da177e4SLinus Torvalds 
4132a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
41339a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
41349d0c00f5SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp_ts(tp);
4135783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
4136735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
413775c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
41381da177e4SLinus Torvalds 
4139783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
4140783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
4141783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
4142ee586811SEric Paris 	if (err == -ECONNREFUSED)
4143ee586811SEric Paris 		return err;
4144bd37a088SWei Yongjun 
4145bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
4146bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
4147bd37a088SWei Yongjun 	 */
4148e0d694d6SEric Dumazet 	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4149bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
4150b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
4151b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
4152e0d694d6SEric Dumazet 		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
4153b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
4154b5b7db8dSEric Dumazet 	}
415581cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
41561da177e4SLinus Torvalds 
41571da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
41583f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
41593f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
41601da177e4SLinus Torvalds 	return 0;
41611da177e4SLinus Torvalds }
41624bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
41631da177e4SLinus Torvalds 
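/* tcp_connect() is also entered with tp->repair set, in which case it
 * returns right after tcp_finish_connect() without emitting a SYN. A
 * minimal illustrative sketch of that userspace path (used by
 * checkpoint/restore tools); a real restore must also replay sequence
 * numbers and queues, and the caller needs CAP_NET_ADMIN.
 */
#if 0	/* illustrative userspace example, never built with the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int repair_connect(int fd, const struct sockaddr_in *peer)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)))
		return -1;
	/* No SYN is sent; the socket moves straight to ESTABLISHED. */
	return connect(fd, (const struct sockaddr *)peer, sizeof(*peer));
}
#endif
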
4164bbf80d71SEric Dumazet u32 tcp_delack_max(const struct sock *sk)
4165bbf80d71SEric Dumazet {
4166bbf80d71SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
4167bbf80d71SEric Dumazet 	u32 delack_max = inet_csk(sk)->icsk_delack_max;
4168bbf80d71SEric Dumazet 
4169bbf80d71SEric Dumazet 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
4170bbf80d71SEric Dumazet 		u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
4171bbf80d71SEric Dumazet 		u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
4172bbf80d71SEric Dumazet 
4173bbf80d71SEric Dumazet 		delack_max = min_t(u32, delack_max, delack_from_rto_min);
4174bbf80d71SEric Dumazet 	}
4175bbf80d71SEric Dumazet 	return delack_max;
4176bbf80d71SEric Dumazet }
4177bbf80d71SEric Dumazet 
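/* Worked example: if the route locks RTAX_RTO_MIN at 200ms (e.g.
 * "ip route ... rto_min lock 200ms"), delack_from_rto_min is 200ms
 * minus one jiffy, so a delayed ACK can never consume a full minimum
 * RTO and provoke a spurious retransmit from the peer.
 */
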
41781da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
41791da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
41801da177e4SLinus Torvalds  * for details.
41811da177e4SLinus Torvalds  */
41821da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
41831da177e4SLinus Torvalds {
4184463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
4185463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
41861da177e4SLinus Torvalds 	unsigned long timeout;
41871da177e4SLinus Torvalds 
41881da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
4189463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
41901da177e4SLinus Torvalds 		int max_ato = HZ / 2;
41911da177e4SLinus Torvalds 
419231954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
4193056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
41941da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
41951da177e4SLinus Torvalds 
41961da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
41971da177e4SLinus Torvalds 
41981da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
4199463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
42001da177e4SLinus Torvalds 		 * directly.
42011da177e4SLinus Torvalds 		 */
4202740b0f18SEric Dumazet 		if (tp->srtt_us) {
4203740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
4204740b0f18SEric Dumazet 					TCP_DELACK_MIN);
42051da177e4SLinus Torvalds 
42061da177e4SLinus Torvalds 			if (rtt < max_ato)
42071da177e4SLinus Torvalds 				max_ato = rtt;
42081da177e4SLinus Torvalds 		}
42091da177e4SLinus Torvalds 
42101da177e4SLinus Torvalds 		ato = min(ato, max_ato);
42111da177e4SLinus Torvalds 	}
42121da177e4SLinus Torvalds 
4213bbf80d71SEric Dumazet 	ato = min_t(u32, ato, tcp_delack_max(sk));
42142b8ee4f0SMartin KaFai Lau 
42151da177e4SLinus Torvalds 	/* Stay within the limit we were given */
42161da177e4SLinus Torvalds 	timeout = jiffies + ato;
42171da177e4SLinus Torvalds 
42181da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
4219463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
4220b6b6d653SEric Dumazet 		/* If delack timer is about to expire, send ACK now. */
4221b6b6d653SEric Dumazet 		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
42221da177e4SLinus Torvalds 			tcp_send_ack(sk);
42231da177e4SLinus Torvalds 			return;
42241da177e4SLinus Torvalds 		}
42251da177e4SLinus Torvalds 
4226463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
4227463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
42281da177e4SLinus Torvalds 	}
4229463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
4230463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
4231463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
42321da177e4SLinus Torvalds }
42331da177e4SLinus Torvalds 
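/* Worked example (HZ == 1000): with srtt_us == 40000, i.e. a 5ms
 * smoothed RTT stored left-shifted by 3, usecs_to_jiffies(40000 >> 3)
 * is 5 jiffies, which max_t() raises to TCP_DELACK_MIN (HZ / 25 == 40),
 * so a large ato is clamped to 40ms before the tcp_delack_max() cap.
 */
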
42341da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
42352987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
42361da177e4SLinus Torvalds {
42371da177e4SLinus Torvalds 	struct sk_buff *buff;
42381da177e4SLinus Torvalds 
4239058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
4240058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4241058dc334SIlpo Järvinen 		return;
4242058dc334SIlpo Järvinen 
42431da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
42441da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
42451da177e4SLinus Torvalds 	 * sock.
42461da177e4SLinus Torvalds 	 */
42477450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
42487450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
42497450aaf6SEric Dumazet 	if (unlikely(!buff)) {
4250a37c2134SEric Dumazet 		struct inet_connection_sock *icsk = inet_csk(sk);
4251a37c2134SEric Dumazet 		unsigned long delay;
4252a37c2134SEric Dumazet 
4253a37c2134SEric Dumazet 		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4254a37c2134SEric Dumazet 		if (delay < TCP_RTO_MAX)
4255a37c2134SEric Dumazet 			icsk->icsk_ack.retry++;
4256463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
4257a37c2134SEric Dumazet 		icsk->icsk_ack.ato = TCP_ATO_MIN;
4258a37c2134SEric Dumazet 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
42591da177e4SLinus Torvalds 		return;
42601da177e4SLinus Torvalds 	}
42611da177e4SLinus Torvalds 
42621da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
42631da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
4264a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
42651da177e4SLinus Torvalds 
426698781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
426798781965SEric Dumazet 	 * too much.
426898781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
426998781965SEric Dumazet 	 */
427098781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
427198781965SEric Dumazet 
42721da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
42732987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
42741da177e4SLinus Torvalds }
427527cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
42762987babbSYuchung Cheng 
42772987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
42782987babbSYuchung Cheng {
42792987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
42801da177e4SLinus Torvalds }
42811da177e4SLinus Torvalds 
42821da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
42831da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
42841da177e4SLinus Torvalds  *
42851da177e4SLinus Torvalds  * Question: what should we do while in urgent mode?
42861da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
42871da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
42881da177e4SLinus Torvalds  *
42891da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
42901da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
42911da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
42921da177e4SLinus Torvalds  */
4293e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
42941da177e4SLinus Torvalds {
42951da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
42961da177e4SLinus Torvalds 	struct sk_buff *skb;
42971da177e4SLinus Torvalds 
42981da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
42997450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
43007450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
430151456b29SIan Morris 	if (!skb)
43021da177e4SLinus Torvalds 		return -1;
43031da177e4SLinus Torvalds 
43041da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
43051da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
43061da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
43071da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
43081da177e4SLinus Torvalds 	 * send it.
43091da177e4SLinus Torvalds 	 */
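	/* snd_una - !urgent: a plain window probe (urgent == 0) uses the
	 * already-acked sequence snd_una - 1, which is out of window and
	 * merely elicits an ACK; urgent mode (urgent == 1) uses snd_una
	 * itself so the urgent pointer is delivered.
	 */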
4310a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4311e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
43127450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
43131da177e4SLinus Torvalds }
43141da177e4SLinus Torvalds 
4315385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
4316ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
4317ee995283SPavel Emelyanov {
4318ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
4319ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
43209a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
4321e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4322ee995283SPavel Emelyanov 	}
4323ee995283SPavel Emelyanov }
4324ee995283SPavel Emelyanov 
432567edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
4326e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
43271da177e4SLinus Torvalds {
43281da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
43291da177e4SLinus Torvalds 	struct sk_buff *skb;
43301da177e4SLinus Torvalds 
4331058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
4332058dc334SIlpo Järvinen 		return -1;
4333058dc334SIlpo Järvinen 
433400db4124SIan Morris 	skb = tcp_send_head(sk);
433500db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
43361da177e4SLinus Torvalds 		int err;
43370c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
433890840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
43391da177e4SLinus Torvalds 
43401da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
43411da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
43421da177e4SLinus Torvalds 
43431da177e4SLinus Torvalds 		/* We are probing the opening of a window
43441da177e4SLinus Torvalds 		 * but the window size is != 0; this must have been
43451da177e4SLinus Torvalds 		 * the result of SWS avoidance (sender side).
43461da177e4SLinus Torvalds 		 */
43471da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
43481da177e4SLinus Torvalds 		    skb->len > mss) {
43491da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
43504de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
435175c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
435275c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
43531da177e4SLinus Torvalds 				return -1;
43541da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
43555bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
43561da177e4SLinus Torvalds 
43574de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4358dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
435966f5fe62SIlpo Järvinen 		if (!err)
436066f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
43611da177e4SLinus Torvalds 		return err;
43621da177e4SLinus Torvalds 	} else {
436333f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4364e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
4365e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
43661da177e4SLinus Torvalds 	}
43671da177e4SLinus Torvalds }
43681da177e4SLinus Torvalds 
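/* tcp_write_wakeup() also serves keepalive: the keepalive timer invokes
 * it with LINUX_MIB_TCPKEEPALIVE. A minimal illustrative sketch of the
 * userspace knobs that arm that timer; helper name and values are
 * placeholders.
 */
#if 0	/* illustrative userspace example, never built with the kernel */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
		return -1;
	/* Probe after 60s of idle, every 10s, give up after 5 misses. */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
#endif
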
43691da177e4SLinus Torvalds /* A window probe timeout has occurred.  If window is not closed send
43701da177e4SLinus Torvalds  * a partial packet else a zero probe.
43711da177e4SLinus Torvalds  */
43721da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
43731da177e4SLinus Torvalds {
4374463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
43751da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4376c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
4377c1d5674fSYuchung Cheng 	unsigned long timeout;
43781da177e4SLinus Torvalds 	int err;
43791da177e4SLinus Torvalds 
4380e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
43811da177e4SLinus Torvalds 
438275c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
43831da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
43846687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
4385463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
43869d9b1ee0SEnke Chen 		icsk->icsk_probes_tstamp = 0;
43871da177e4SLinus Torvalds 		return;
43881da177e4SLinus Torvalds 	}
43891da177e4SLinus Torvalds 
4390c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
43911da177e4SLinus Torvalds 	if (err <= 0) {
439239e24435SKuniyuki Iwashima 		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
4393463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
4394c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
43951da177e4SLinus Torvalds 	} else {
43961da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
4397c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
43981da177e4SLinus Torvalds 		 */
4399c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
44001da177e4SLinus Torvalds 	}
4401344db93aSEnke Chen 
4402344db93aSEnke Chen 	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
44038dc242adSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
44041da177e4SLinus Torvalds }
44055db92c99SOctavian Purdila 
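/* Backoff illustration: tcp_probe0_when() scales the base RTO by
 * 2^icsk_backoff, so with a 200ms RTO the probes fire after roughly
 * 200ms, 400ms, 800ms, ..., capped at TCP_RTO_MAX and further clamped
 * by tcp_clamp_probe0_to_user_timeout() when TCP_USER_TIMEOUT is set.
 */
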
4406ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
44075db92c99SOctavian Purdila {
44085db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
44095db92c99SOctavian Purdila 	struct flowi fl;
44105db92c99SOctavian Purdila 	int res;
44115db92c99SOctavian Purdila 
4412cb6cd2ceSAkhmat Karakotov 	/* Paired with WRITE_ONCE() in sock_setsockopt() */
4413cb6cd2ceSAkhmat Karakotov 	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
44145e526552SEric Dumazet 		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
4415331fca43SMartin KaFai Lau 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4416331fca43SMartin KaFai Lau 				  NULL);
44175db92c99SOctavian Purdila 	if (!res) {
44180a375c82SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
44190a375c82SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4420e9d9da91SEric Dumazet 		if (unlikely(tcp_passive_fastopen(sk))) {
4421e9d9da91SEric Dumazet 			/* sk has const attribute because listeners are lockless.
4422e9d9da91SEric Dumazet 			 * However in this case, we are dealing with a passive fastopen
4423e9d9da91SEric Dumazet 			 * socket thus we can change total_retrans value.
4424e9d9da91SEric Dumazet 			 */
4425e9d9da91SEric Dumazet 			tcp_sk_rw(sk)->total_retrans++;
4426e9d9da91SEric Dumazet 		}
4427cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
44285db92c99SOctavian Purdila 	}
44295db92c99SOctavian Purdila 	return res;
44305db92c99SOctavian Purdila }
44315db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
4432