xref: /linux/net/ipv4/tcp_output.c (revision b617158dc096709d8600c53b6052144d12b89fab)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}
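
/* Worked example (illustrative note, not part of the original source):
 * if tcp_clock_ns() returns 5,000,000,123 ns, tcp_clock_cache keeps the
 * full nanosecond value while tp->tcp_mstamp becomes 5,000,000 us, since
 * div_u64() truncates. Both are refreshed together so later readers see
 * a consistent pair.
 */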

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* Return SND.NXT if the window was not shrunk, or if the amount it shrunk
 * by is less than one window-scaling factor (i.e. mere loss of precision).
 * If the window has been shrunk, it is not at all clear what we should send:
 * using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND may already be
 * invalid as well. OK, let's settle on this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
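
/* Illustrative numbers (not part of the original source): with
 * rcv_wscale = 7 the peer sees the window in units of 1 << 7 = 128 bytes,
 * so snd_nxt overrunning the window end by fewer than 128 bytes is mere
 * rounding noise and SND.NXT is still returned above.
 */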

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the
 *    first-hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
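
/* Illustrative (not from the original source): if tp->advmss starts at
 * 65495 (65535 - 40) and the cached route carries an advmss metric of
 * 1460, the comparison above makes us advertise, and remember, 1460.
 */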

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
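
/* Worked example (illustrative only): snd_cwnd = 80, restart_cwnd = 10
 * and an idle time of exactly 3 * RTO. The loop halves cwnd while more
 * than one full RTO of idle time remains: 80 -> 40 -> 20 (delta reaches 0
 * before a third halving), and max(20, 10) leaves snd_cwnd at 20.
 */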

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* If this is the first data packet sent in response to the
	 * previously received data, and it is sent within the ACK
	 * timeout (ato) of the last received packet, increase the
	 * pingpong count.
	 */
	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);

	tp->lsndtime = now;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
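
/* Worked example (illustrative, not part of the original source): with
 * wscale_ok and an effective space of 4 MiB (2^22), ilog2(space) = 22, so
 * rcv_wscale = clamp(22 - 15, 0, TCP_MAX_WSCALE) = 7, and the 16-bit
 * window field can then represent up to 65535 << 7 bytes (~8 MiB).
 */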

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
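
/* Illustrative (not from the original source): if __tcp_select_window()
 * would shrink a current window of 10000 bytes and rcv_wscale = 7, the
 * ALIGN() above instead rounds 10000 up to the next multiple of 128 the
 * peer can represent, i.e. we keep advertising 10112.
 */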

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN-ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Construct the common control bits of a non-data skb. If SYN/FIN is
 * present, auto-increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
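
/* Illustrative (example added here, not part of the original source): a
 * bare SYN built with seq = 100 gets seq = 100 and end_seq = 101, since
 * SYN (like FIN) consumes one unit of sequence space; a pure ACK keeps
 * end_seq == seq.
 */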

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody, as there isn't
 * any particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);
}
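
/* Wire-format example (illustrative): when only timestamps are in use,
 * the OPTION_TS branch above emits the single word 0x0101080a, i.e.
 * NOP (1), NOP (1), TCPOPT_TIMESTAMP (8), TCPOLEN_TIMESTAMP (10),
 * followed by the two 32-bit tsval/tsecr words.
 */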

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be counted in
	 * the MSS we advertise.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.  So account for
	 * this fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
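
/* Budget example (illustrative, not from the original source): with MD5
 * off and timestamps, window scaling and SACK all enabled, a SYN consumes
 * 4 (MSS) + 12 (TS, whose word also carries SACK_PERM on the wire) +
 * 4 (wscale) = 20 bytes, so the function returns 20 and a Fast Open
 * cookie still fits in the remaining 20 of the 40-byte option space.
 */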

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
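
/* Budget example (illustrative, not from the original source): with
 * timestamps on and MD5 off, size starts at 12, leaving remaining = 28;
 * (28 - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK =
 * (28 - 4) / 8 = 3, hence at most three SACK blocks ride on an
 * established-socket segment in this configuration.
 */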


/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event an skb needs to be reallocated in a driver.
 * The invariant being that skb->truesize is subtracted from
 * sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non-NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol-dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part:
	 * We are called from release_sock() with:
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under soft irq.
 * We can call the TCP stack right away, unless the socket is owned by the user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace first 10 MSS
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
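
/* Pacing example (illustrative, not from the original source): at
 * sk_pacing_rate = 125,000,000 bytes/s (~1 Gbit/s), a 65,000-byte TSO
 * skb yields len_ns = 520,000 ns; after subtracting up to half of that
 * as already-elapsed credit, tcp_wstamp_ns advances by 260-520 us.
 */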

/* This routine actually transmits TCP packets queued up by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct sk_buff *oskb = NULL;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	u64 prior_wstamp;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);
	prior_wstamp = tp->tcp_wstamp_ns;
	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		oskb = skb;

		tcp_skb_tsorted_save(oskb) {
			if (unlikely(skb_cloned(oskb)))
				skb = pskb_copy(oskb, gfp_mask);
			else
				skb = skb_clone(oskb, gfp_mask);
		} tcp_skb_tsorted_restore(oskb);

		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if packet is looped back :
	 * Other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
	skb->pfmemalloc = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1086c3a2e837SJulian Anastasov 
10871da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1088ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1089c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1090c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10911da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10922987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1093df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10944de075e0SEric Dumazet 					tcb->tcp_flags);
1095dfb4b9dcSDavid S. Miller 
10961da177e4SLinus Torvalds 	th->check		= 0;
10971da177e4SLinus Torvalds 	th->urg_ptr		= 0;
10981da177e4SLinus Torvalds 
109933f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below snd_una win probe */
11007691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11017691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11021da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11031da177e4SLinus Torvalds 			th->urg = 1;
11047691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11050eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11067691367dSHerbert Xu 			th->urg = 1;
11077691367dSHerbert Xu 		}
11081da177e4SLinus Torvalds 	}
11091da177e4SLinus Torvalds 
1110bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
111151466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1112ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1113ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1114ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1115ea1627c2SEric Dumazet 	} else {
1116ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1117ea1627c2SEric Dumazet 		 * is never scaled.
1118ea1627c2SEric Dumazet 		 */
1119ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1120ea1627c2SEric Dumazet 	}
1121cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1122cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1123cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1124a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1125bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
112639f8e58eSEric Dumazet 					       md5, sk, skb);
1127cfb6eeb4SYOSHIFUJI Hideaki 	}
1128cfb6eeb4SYOSHIFUJI Hideaki #endif
1129cfb6eeb4SYOSHIFUJI Hideaki 
1130bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11311da177e4SLinus Torvalds 
11324de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
113327cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11341da177e4SLinus Torvalds 
1135a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1136cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1137a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1138ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1139a44d6eacSMartin KaFai Lau 	}
11401da177e4SLinus Torvalds 
1141bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1142aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1143aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11441da177e4SLinus Torvalds 
11452efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1146f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1147cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1148f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1149cd7d8498SEric Dumazet 
1150d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1151971f10ecSEric Dumazet 
1152971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1153971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1154971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1155971f10ecSEric Dumazet 
1156a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
1157a842fe14SEric Dumazet 
1158b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11597faee5c0SEric Dumazet 
11608c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11615ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11628c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11638c72c65bSEric Dumazet 	}
1164fc225799SEric Dumazet 	if (!err && oskb) {
1165a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1166fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1167fc225799SEric Dumazet 	}
11688c72c65bSEric Dumazet 	return err;
11691da177e4SLinus Torvalds }
11701da177e4SLinus Torvalds 
11712987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11722987babbSYuchung Cheng 			    gfp_t gfp_mask)
11732987babbSYuchung Cheng {
11742987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11752987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11762987babbSYuchung Cheng }
11772987babbSYuchung Cheng 
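The combined store through `*(((__be16 *)th) + 6)` in __tcp_transmit_skb() packs the 4-bit data offset and the flag bits into one 16-bit header word. A standalone sketch of the same packing (tcp_doff_flags is a hypothetical helper; the flag value 0x18 = ACK|PSH is assumed for the demo):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: pack the TCP data offset (header size in 32-bit words)
 * and the flag byte into the 16-bit word at offset 12 of the header.
 */
static uint16_t tcp_doff_flags(unsigned int tcp_header_size, uint8_t flags)
{
	return htons(((tcp_header_size >> 2) << 12) | flags);
}

int main(void)
{
	/* 32-byte header (doff = 8) carrying ACK|PSH (0x18) */
	printf("0x%04x\n", ntohs(tcp_doff_flags(32, 0x18)));	/* 0x8018 */
	return 0;
}
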
117867edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11791da177e4SLinus Torvalds  *
11801da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked; do not forget tcp_push_pending_frames,
11811da177e4SLinus Torvalds  * otherwise socket can stall.
11821da177e4SLinus Torvalds  */
11831da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11841da177e4SLinus Torvalds {
11851da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11861da177e4SLinus Torvalds 
11871da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11881da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1189f4a775d1SEric Dumazet 	__skb_header_release(skb);
1190fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11913ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11923ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11931da177e4SLinus Torvalds }
11941da177e4SLinus Torvalds 
119567edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
11965bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1197f6302d1dSDavid S. Miller {
11984a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1199f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1200f6302d1dSDavid S. Miller 		 * non-TSO case.
1201f6302d1dSDavid S. Miller 		 */
1202cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1203f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1204f6302d1dSDavid S. Miller 	} else {
1205cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1206f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12071da177e4SLinus Torvalds 	}
12081da177e4SLinus Torvalds }
12091da177e4SLinus Torvalds 
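Equivalently, the pcount chosen by tcp_set_skb_tso_segs() is ceil(len / mss) with a fast path for the single-segment case. A small userspace sketch (illustrative names):

#include <stdio.h>

/* Sketch of the pcount rule in tcp_set_skb_tso_segs():
 * one segment when len <= mss, otherwise ceil(len / mss).
 */
static unsigned int tso_pcount(unsigned int len, unsigned int mss)
{
	return len <= mss ? 1 : (len + mss - 1) / mss;
}

int main(void)
{
	printf("%u\n", tso_pcount(1000, 1460));		/* 1  */
	printf("%u\n", tso_pcount(64000, 1460));	/* 44 */
	return 0;
}
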
1210797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed; we need to do various
1211797108d1SIlpo Järvinen  * tweaks to fix the counters
1212797108d1SIlpo Järvinen  */
1213cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1214797108d1SIlpo Järvinen {
1215797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1216797108d1SIlpo Järvinen 
1217797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1218797108d1SIlpo Järvinen 
1219797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1220797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1221797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1222797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1223797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1224797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1225797108d1SIlpo Järvinen 
1226797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1227797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1228797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1229797108d1SIlpo Järvinen 
1230797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1231797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1232713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1233797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1234797108d1SIlpo Järvinen 
1235797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1236797108d1SIlpo Järvinen }
1237797108d1SIlpo Järvinen 
12380a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12390a2cf20cSSoheil Hassas Yeganeh {
12400a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12410a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12420a2cf20cSSoheil Hassas Yeganeh }
12430a2cf20cSSoheil Hassas Yeganeh 
1244490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1245490cc7d0SWillem de Bruijn {
1246490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1247490cc7d0SWillem de Bruijn 
12480a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1249490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1250490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1251490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1252490cc7d0SWillem de Bruijn 
1253490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1254490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1255490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1256b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1257b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1258490cc7d0SWillem de Bruijn 	}
1259490cc7d0SWillem de Bruijn }
1260490cc7d0SWillem de Bruijn 
1261a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1262a166140eSMartin KaFai Lau {
1263a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1264a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1265a166140eSMartin KaFai Lau }
1266a166140eSMartin KaFai Lau 
126775c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
126875c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
126975c119afSEric Dumazet 					 struct sk_buff *buff,
127075c119afSEric Dumazet 					 struct sock *sk,
127175c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
127275c119afSEric Dumazet {
127375c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
127475c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
127575c119afSEric Dumazet 	else
127675c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
127775c119afSEric Dumazet }
127875c119afSEric Dumazet 
12791da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12801da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12811da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12821da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12831da177e4SLinus Torvalds  */
128475c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
128575c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12866cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12871da177e4SLinus Torvalds {
12881da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12891da177e4SLinus Torvalds 	struct sk_buff *buff;
12906475be16SDavid S. Miller 	int nsize, old_factor;
1291*b617158dSEric Dumazet 	long limit;
1292b60b49eaSHerbert Xu 	int nlen;
12939ce01461SIlpo Järvinen 	u8 flags;
12941da177e4SLinus Torvalds 
12952fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12962fceec13SIlpo Järvinen 		return -EINVAL;
12976a438bbeSStephen Hemminger 
12981da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
12991da177e4SLinus Torvalds 	if (nsize < 0)
13001da177e4SLinus Torvalds 		nsize = 0;
13011da177e4SLinus Torvalds 
1302*b617158dSEric Dumazet 	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1303*b617158dSEric Dumazet 	 * We need some allowance to not penalize applications setting small
1304*b617158dSEric Dumazet 	 * SO_SNDBUF values.
1305*b617158dSEric Dumazet 	 * Also allow first and last skb in retransmit queue to be split.
1306*b617158dSEric Dumazet 	 */
1307*b617158dSEric Dumazet 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1308*b617158dSEric Dumazet 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1309*b617158dSEric Dumazet 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1310*b617158dSEric Dumazet 		     skb != tcp_rtx_queue_head(sk) &&
1311*b617158dSEric Dumazet 		     skb != tcp_rtx_queue_tail(sk))) {
1312f070ef2aSEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1313f070ef2aSEric Dumazet 		return -ENOMEM;
1314f070ef2aSEric Dumazet 	}
1315f070ef2aSEric Dumazet 
13166cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13171da177e4SLinus Torvalds 		return -ENOMEM;
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1320eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
132151456b29SIan Morris 	if (!buff)
13221da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1323ef5cb973SHerbert Xu 
13243ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
13253ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1326b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1327b60b49eaSHerbert Xu 	buff->truesize += nlen;
1328b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13311da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13321da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13331da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13341da177e4SLinus Torvalds 
13351da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13364de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13374de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13384de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1339e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1340a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13411da177e4SLinus Torvalds 
13421da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13431da177e4SLinus Torvalds 
134498be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13451da177e4SLinus Torvalds 
1346a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1347490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13481da177e4SLinus Torvalds 
13496475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13506475be16SDavid S. Miller 
13511da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13525bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13535bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13541da177e4SLinus Torvalds 
1355b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1356b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1357b9f64820SYuchung Cheng 
13586475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13596475be16SDavid S. Miller 	 * adjust the various packet counters.
13606475be16SDavid S. Miller 	 */
1361cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13626475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13636475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13641da177e4SLinus Torvalds 
1365797108d1SIlpo Järvinen 		if (diff)
1366797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13671da177e4SLinus Torvalds 	}
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1370f4a775d1SEric Dumazet 	__skb_header_release(buff);
137175c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1372f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1373e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13741da177e4SLinus Torvalds 
13751da177e4SLinus Torvalds 	return 0;
13761da177e4SLinus Torvalds }
13771da177e4SLinus Torvalds 
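The overshoot guard near the top of tcp_fragment() can be restated numerically: splitting is refused once queued write memory exceeds twice (sndbuf + two maximal GSO truesizes). A sketch with stand-in constants (the SKB_TRUESIZE overhead used here is a rough assumption, not the kernel's exact value):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the tcp_fragment() memory guard. */
#define GSO_MAX_SIZE	65536
#define SKB_TRUESIZE(x)	((x) + 512)	/* rough: payload + skb overhead */

static bool fragment_would_overshoot(long wmem_queued, long sndbuf)
{
	long limit = sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

	return (wmem_queued >> 1) > limit;
}

int main(void)
{
	/* default-ish sndbuf of 212992 bytes */
	printf("%d\n", fragment_would_overshoot(500000, 212992));	/* 0 */
	printf("%d\n", fragment_would_overshoot(900000, 212992));	/* 1 */
	return 0;
}
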
1378f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1379f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13801da177e4SLinus Torvalds  */
13817162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13821da177e4SLinus Torvalds {
13837b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13841da177e4SLinus Torvalds 	int i, k, eat;
13851da177e4SLinus Torvalds 
13864fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13874fa48bf3SEric Dumazet 	if (eat) {
13884fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13894fa48bf3SEric Dumazet 		len -= eat;
13904fa48bf3SEric Dumazet 		if (!len)
13917162fb24SEric Dumazet 			return 0;
13924fa48bf3SEric Dumazet 	}
13931da177e4SLinus Torvalds 	eat = len;
13941da177e4SLinus Torvalds 	k = 0;
13957b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13967b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13977b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
13989e903e08SEric Dumazet 
13999e903e08SEric Dumazet 		if (size <= eat) {
1400aff65da0SIan Campbell 			skb_frag_unref(skb, i);
14019e903e08SEric Dumazet 			eat -= size;
14021da177e4SLinus Torvalds 		} else {
14037b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
14041da177e4SLinus Torvalds 			if (eat) {
14057b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
14067b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
14071da177e4SLinus Torvalds 				eat = 0;
14081da177e4SLinus Torvalds 			}
14091da177e4SLinus Torvalds 			k++;
14101da177e4SLinus Torvalds 		}
14111da177e4SLinus Torvalds 	}
14127b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds 	skb->data_len -= len;
14151da177e4SLinus Torvalds 	skb->len = skb->data_len;
14167162fb24SEric Dumazet 	return len;
14171da177e4SLinus Torvalds }
14181da177e4SLinus Torvalds 
141967edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14201da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14211da177e4SLinus Torvalds {
14227162fb24SEric Dumazet 	u32 delta_truesize;
14237162fb24SEric Dumazet 
142414bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14251da177e4SLinus Torvalds 		return -ENOMEM;
14261da177e4SLinus Torvalds 
14277162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14281da177e4SLinus Torvalds 
14291da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
143084fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14311da177e4SLinus Torvalds 
14327162fb24SEric Dumazet 	if (delta_truesize) {
14337162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14347162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14357162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14361da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14377162fb24SEric Dumazet 	}
14381da177e4SLinus Torvalds 
14395b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14401da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14415bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14421da177e4SLinus Torvalds 
14431da177e4SLinus Torvalds 	return 0;
14441da177e4SLinus Torvalds }
14451da177e4SLinus Torvalds 
14461b63edd6SYuchung Cheng /* Calculate MSS not accounting for any TCP options.  */
14471b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14485d424d5aSJohn Heffner {
1449cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1450cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14515d424d5aSJohn Heffner 	int mss_now;
14525d424d5aSJohn Heffner 
14535d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14545d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
14555d424d5aSJohn Heffner 	 */
14565d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14575d424d5aSJohn Heffner 
145867469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
145967469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
146067469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
146167469601SEric Dumazet 
146267469601SEric Dumazet 		if (dst && dst_allfrag(dst))
146367469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
146467469601SEric Dumazet 	}
146567469601SEric Dumazet 
14665d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14675d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14685d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14695d424d5aSJohn Heffner 
14705d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14715d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14725d424d5aSJohn Heffner 
14735d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14745f3e2bf0SEric Dumazet 	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
14755d424d5aSJohn Heffner 	return mss_now;
14765d424d5aSJohn Heffner }
14775d424d5aSJohn Heffner 
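For a concrete instance of the subtraction above: with a plain IPv4 path MTU of 1500 and no extension headers, the base MSS comes out to the familiar 1460. A trivial sketch (mss_clamp and the sysctl floor are omitted):

#include <stdio.h>

int main(void)
{
	int pmtu = 1500;
	int mss = pmtu - 20 /* IPv4 header */ - 20 /* bare TCP header */;

	printf("base mss = %d\n", mss);	/* 1460 */
	return 0;
}
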
14781b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14791b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14801b63edd6SYuchung Cheng {
14811b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14821b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14831b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14841b63edd6SYuchung Cheng }
14851b63edd6SYuchung Cheng 
14865d424d5aSJohn Heffner /* Inverse of above */
148767469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14885d424d5aSJohn Heffner {
1489cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1490cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14915d424d5aSJohn Heffner 	int mtu;
14925d424d5aSJohn Heffner 
14935d424d5aSJohn Heffner 	mtu = mss +
14945d424d5aSJohn Heffner 	      tp->tcp_header_len +
14955d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14965d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14975d424d5aSJohn Heffner 
149867469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
149967469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
150067469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
150167469601SEric Dumazet 
150267469601SEric Dumazet 		if (dst && dst_allfrag(dst))
150367469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
150467469601SEric Dumazet 	}
15055d424d5aSJohn Heffner 	return mtu;
15065d424d5aSJohn Heffner }
1507556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
15085d424d5aSJohn Heffner 
150967edfef7SAndi Kleen /* MTU probing init per socket */
15105d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
15115d424d5aSJohn Heffner {
15125d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15135d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1514b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15155d424d5aSJohn Heffner 
1516b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15175d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15185d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1519b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15205d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
152105cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1522c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15235d424d5aSJohn Heffner }
15244bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15255d424d5aSJohn Heffner 
15261da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15291da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1532caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
15331da177e4SLinus Torvalds    It also does not include TCP options.
15341da177e4SLinus Torvalds 
1535d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15361da177e4SLinus Torvalds 
15371da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15381da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15391da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15401da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15431da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15441da177e4SLinus Torvalds 
1545d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1546d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15471da177e4SLinus Torvalds  */
15481da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15491da177e4SLinus Torvalds {
15501da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1551d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15525d424d5aSJohn Heffner 	int mss_now;
15531da177e4SLinus Torvalds 
15545d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15555d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15561da177e4SLinus Torvalds 
15575d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1558409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15591da177e4SLinus Torvalds 
15601da177e4SLinus Torvalds 	/* And store cached results */
1561d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15625d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15635d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1564c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	return mss_now;
15671da177e4SLinus Torvalds }
15684bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15711da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15721da177e4SLinus Torvalds  */
15730c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15741da177e4SLinus Torvalds {
1575cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1576cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1577c1b4a7e6SDavid S. Miller 	u32 mss_now;
157895c96174SEric Dumazet 	unsigned int header_len;
157933ad798cSAdam Langley 	struct tcp_out_options opts;
158033ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15811da177e4SLinus Torvalds 
1582c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1583c1b4a7e6SDavid S. Miller 
15841da177e4SLinus Torvalds 	if (dst) {
15851da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1586d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15871da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15881da177e4SLinus Torvalds 	}
15891da177e4SLinus Torvalds 
159033ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
159133ad798cSAdam Langley 		     sizeof(struct tcphdr);
159233ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
159333ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
159433ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
159533ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
159633ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
159733ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
159833ad798cSAdam Langley 		mss_now -= delta;
159933ad798cSAdam Langley 	}
1600cfb6eeb4SYOSHIFUJI Hideaki 
16011da177e4SLinus Torvalds 	return mss_now;
16021da177e4SLinus Torvalds }
16031da177e4SLinus Torvalds 
160486fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
160586fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
160686fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
160786fd14adSWeiping Pan  */
160886fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1609a762a980SDavid S. Miller {
16109e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1611a762a980SDavid S. Miller 
161286fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
161386fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
161486fd14adSWeiping Pan 		/* Limited by application or receiver window. */
161586fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
161686fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
161786fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
161886fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
161986fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
162086fd14adSWeiping Pan 		}
162186fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
162286fd14adSWeiping Pan 	}
1623c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
162486fd14adSWeiping Pan }
162586fd14adSWeiping Pan 
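The decay step above moves cwnd halfway toward the window that was actually used. A sketch of just that arithmetic (cwnd_app_limited is a hypothetical helper; the ssthresh bookkeeping is omitted):

#include <stdio.h>

/* Sketch of the RFC 2861 decay in tcp_cwnd_application_limited():
 * cwnd collapses halfway toward what was actually used.
 */
static unsigned int cwnd_app_limited(unsigned int snd_cwnd,
				     unsigned int win_used)
{
	return win_used < snd_cwnd ? (snd_cwnd + win_used) >> 1 : snd_cwnd;
}

int main(void)
{
	printf("%u\n", cwnd_app_limited(100, 20));	/* 60 */
	return 0;
}
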
1626ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1627a762a980SDavid S. Miller {
16281b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1629a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1630a762a980SDavid S. Miller 
1631ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1632ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1633ca8a2263SNeal Cardwell 	 */
1634ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1635ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1636ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1637ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1638ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1639ca8a2263SNeal Cardwell 	}
1640e114a710SEric Dumazet 
164124901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1642a762a980SDavid S. Miller 		/* Network is fed fully. */
1643a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1644c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1645a762a980SDavid S. Miller 	} else {
1646a762a980SDavid S. Miller 		/* Network starves. */
1647a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1648a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1649a762a980SDavid S. Miller 
1650b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1651c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16521b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1653a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1654b0f71bd3SFrancis Yan 
1655b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1656b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1657b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1658b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
165975c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1660b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1661b0f71bd3SFrancis Yan 		 */
166275c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1663b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1664b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1665b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1666a762a980SDavid S. Miller 	}
1667a762a980SDavid S. Miller }
1668a762a980SDavid S. Miller 
1669d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1670d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1671d4589926SEric Dumazet {
1672d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1673d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1674d4589926SEric Dumazet }
1675d4589926SEric Dumazet 
1676d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1677d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1678d4589926SEric Dumazet  * The test is really :
1679d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1680d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1681d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1682d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16830e3a4803SIlpo Järvinen  */
1684d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1685d4589926SEric Dumazet 				const struct sk_buff *skb)
1686d4589926SEric Dumazet {
1687d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1688d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1689d4589926SEric Dumazet }
1690d4589926SEric Dumazet 
1691d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1692d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1693d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1694d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1695d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1696d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1697d4589926SEric Dumazet  */
1698d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1699cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1700d4589926SEric Dumazet {
1701d4589926SEric Dumazet 	return partial &&
1702d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1703d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1704d4589926SEric Dumazet }
1705605ad7f1SEric Dumazet 
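Restated as a standalone predicate, tcp_nagle_check() holds back a sub-mss tail when corked, or when Nagle is enabled and small packets are still unacknowledged. A sketch with the Minshall test reduced to a boolean input (names and the flag value are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TCP_NAGLE_CORK 2	/* illustrative stand-in for the kernel flag */

/* Sketch of tcp_nagle_check(): defer a partial (sub-mss) tail segment? */
static bool nagle_defers(bool partial, int nonagle, bool packets_out,
			 bool minshall_small_unacked)
{
	return partial &&
	       ((nonagle & TCP_NAGLE_CORK) ||
		(!nonagle && packets_out && minshall_small_unacked));
}

int main(void)
{
	printf("%d\n", nagle_defers(true, 0, true, true));	/* 1: defer */
	printf("%d\n", nagle_defers(false, 0, true, true));	/* 0: full mss */
	return 0;
}
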
1706605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1707605ad7f1SEric Dumazet  * to send one TSO packet per ms
1708605ad7f1SEric Dumazet  */
1709dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
17101b3878caSNeal Cardwell 			    int min_tso_segs)
1711605ad7f1SEric Dumazet {
1712605ad7f1SEric Dumazet 	u32 bytes, segs;
1713605ad7f1SEric Dumazet 
171476a9ebe8SEric Dumazet 	bytes = min_t(unsigned long,
171576a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift,
1716605ad7f1SEric Dumazet 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1717605ad7f1SEric Dumazet 
1718605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1719605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1720605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1721605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1722605ad7f1SEric Dumazet 	 */
17231b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1724605ad7f1SEric Dumazet 
1725350c9f48SEric Dumazet 	return segs;
1726605ad7f1SEric Dumazet }
1727605ad7f1SEric Dumazet 
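Numerically, tcp_tso_autosize() targets roughly 2^-sk_pacing_shift seconds of data per TSO burst. A sketch with the gso_max_size clamp omitted (values illustrative; the default pacing shift of 10 gives about 1 ms):

#include <stdio.h>

/* Sketch of tcp_tso_autosize(): size a burst to ~1 ms at the pacing
 * rate, with a floor of min_tso_segs segments.
 */
static unsigned int tso_autosize(unsigned long pacing_rate, int pacing_shift,
				 unsigned int mss_now, unsigned int min_tso_segs)
{
	unsigned long bytes = pacing_rate >> pacing_shift;
	unsigned int segs = bytes / mss_now;

	return segs > min_tso_segs ? segs : min_tso_segs;
}

int main(void)
{
	/* 125 MB/s, shift 10 (~1 ms of data), 1460-byte mss -> 83 segs */
	printf("%u\n", tso_autosize(125000000UL, 10, 1460, 2));
	return 0;
}
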
1728ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1729ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1730ed6e7268SNeal Cardwell  */
1731ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1732ed6e7268SNeal Cardwell {
1733ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1734dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1735ed6e7268SNeal Cardwell 
1736dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1737dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1738dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1739dcb8c9b4SEric Dumazet 
1740dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1741350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1742ed6e7268SNeal Cardwell }
1743ed6e7268SNeal Cardwell 
1744d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1745d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1746d4589926SEric Dumazet 					const struct sk_buff *skb,
1747d4589926SEric Dumazet 					unsigned int mss_now,
1748d4589926SEric Dumazet 					unsigned int max_segs,
1749d4589926SEric Dumazet 					int nonagle)
1750c1b4a7e6SDavid S. Miller {
1751cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1752d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1753c1b4a7e6SDavid S. Miller 
175490840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17551485348dSBen Hutchings 	max_len = mss_now * max_segs;
17560e3a4803SIlpo Järvinen 
17571485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17581485348dSBen Hutchings 		return max_len;
17590e3a4803SIlpo Järvinen 
17605ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17615ea3a748SIlpo Järvinen 
17621485348dSBen Hutchings 	if (max_len <= needed)
17631485348dSBen Hutchings 		return max_len;
17640e3a4803SIlpo Järvinen 
1765d4589926SEric Dumazet 	partial = needed % mss_now;
1766d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1767d4589926SEric Dumazet 	 * to include this last segment in this skb.
1768d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1769d4589926SEric Dumazet 	 */
1770cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1771d4589926SEric Dumazet 		return needed - partial;
1772d4589926SEric Dumazet 
1773d4589926SEric Dumazet 	return needed;
1774c1b4a7e6SDavid S. Miller }
1775c1b4a7e6SDavid S. Miller 
1776c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1777c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1778c1b4a7e6SDavid S. Miller  */
1779cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1780cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1781c1b4a7e6SDavid S. Miller {
1782d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1783c1b4a7e6SDavid S. Miller 
1784c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17854de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17864de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1787c1b4a7e6SDavid S. Miller 		return 1;
1788c1b4a7e6SDavid S. Miller 
1789c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1790c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1791d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1792c1b4a7e6SDavid S. Miller 		return 0;
1793d649a7a8SEric Dumazet 
1794d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1795d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1796d649a7a8SEric Dumazet 	 */
1797d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1798d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1799c1b4a7e6SDavid S. Miller }
1800c1b4a7e6SDavid S. Miller 
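The quota returned by tcp_cwnd_test() is therefore min(cwnd - in_flight, max(cwnd/2, 1)). A sketch of that computation (cwnd_quota is a hypothetical helper name; the FIN special case is omitted):

#include <stdio.h>

/* Sketch of tcp_cwnd_test(): segments the congestion window allows
 * right now, capped at half the window for smoother scheduling.
 */
static unsigned int cwnd_quota(unsigned int cwnd, unsigned int in_flight)
{
	unsigned int halfcwnd;

	if (in_flight >= cwnd)
		return 0;
	halfcwnd = cwnd / 2 > 1 ? cwnd / 2 : 1;

	return halfcwnd < cwnd - in_flight ? halfcwnd : cwnd - in_flight;
}

int main(void)
{
	printf("%u\n", cwnd_quota(10, 7));	/* 3 */
	printf("%u\n", cwnd_quota(10, 2));	/* 5 (half-cwnd cap) */
	return 0;
}
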
1801b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
180267edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1803c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1804c1b4a7e6SDavid S. Miller  */
18055bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1806c1b4a7e6SDavid S. Miller {
1807c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1808c1b4a7e6SDavid S. Miller 
1809f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
18105bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1811c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1812c1b4a7e6SDavid S. Miller 	}
1813c1b4a7e6SDavid S. Miller 	return tso_segs;
1814c1b4a7e6SDavid S. Miller }
1815c1b4a7e6SDavid S. Miller 
1816c1b4a7e6SDavid S. Miller 
1817a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1818c1b4a7e6SDavid S. Miller  * sent now.
1819c1b4a7e6SDavid S. Miller  */
1820a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1821c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1822c1b4a7e6SDavid S. Miller {
1823c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
1824c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1825c1b4a7e6SDavid S. Miller 	 *
1826c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1827c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1828c1b4a7e6SDavid S. Miller 	 */
1829c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1830a2a385d6SEric Dumazet 		return true;
1831c1b4a7e6SDavid S. Miller 
18329b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18339b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1834a2a385d6SEric Dumazet 		return true;
1835c1b4a7e6SDavid S. Miller 
1836cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1837a2a385d6SEric Dumazet 		return true;
1838c1b4a7e6SDavid S. Miller 
1839a2a385d6SEric Dumazet 	return false;
1840c1b4a7e6SDavid S. Miller }
1841c1b4a7e6SDavid S. Miller 
1842c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1843a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1844a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1845056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1846c1b4a7e6SDavid S. Miller {
1847c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1848c1b4a7e6SDavid S. Miller 
1849c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1850c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1851c1b4a7e6SDavid S. Miller 
185290840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1853c1b4a7e6SDavid S. Miller }
1854c1b4a7e6SDavid S. Miller 
1855c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1856c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1857c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1858c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1859c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1860c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1861c1b4a7e6SDavid S. Miller  */
186256483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1863c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1864c1b4a7e6SDavid S. Miller {
1865c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
186656483341SEric Dumazet 	struct sk_buff *buff;
18679ce01461SIlpo Järvinen 	u8 flags;
1868c1b4a7e6SDavid S. Miller 
1869c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1870c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
187156483341SEric Dumazet 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
187256483341SEric Dumazet 				    skb, len, mss_now, gfp);
1873c1b4a7e6SDavid S. Miller 
1874eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
187551456b29SIan Morris 	if (unlikely(!buff))
1876c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1877c1b4a7e6SDavid S. Miller 
18783ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18793ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1880b60b49eaSHerbert Xu 	buff->truesize += nlen;
1881c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1882c1b4a7e6SDavid S. Miller 
1883c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1884c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1885c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1886c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1887c1b4a7e6SDavid S. Miller 
1888c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18894de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18904de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18914de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1892c1b4a7e6SDavid S. Miller 
1893c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1894c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1895c1b4a7e6SDavid S. Miller 
1896a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1897a166140eSMartin KaFai Lau 
189898be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1899c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1900490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1901c1b4a7e6SDavid S. Miller 
1902c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
19035bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
19045bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1905c1b4a7e6SDavid S. Miller 
1906c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1907f4a775d1SEric Dumazet 	__skb_header_release(buff);
190856483341SEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
1909c1b4a7e6SDavid S. Miller 
1910c1b4a7e6SDavid S. Miller 	return 0;
1911c1b4a7e6SDavid S. Miller }
1912c1b4a7e6SDavid S. Miller 
1913c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1914c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1915c1b4a7e6SDavid S. Miller  *
1916c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1917c1b4a7e6SDavid S. Miller  */
1918ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1919f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
1920f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
1921f9bfe4e6SEric Dumazet 				 u32 max_segs)
1922c1b4a7e6SDavid S. Miller {
19236687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1924f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
192550c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
192650c8339eSEric Dumazet 	struct sk_buff *head;
1927ad9f4f50SEric Dumazet 	int win_divisor;
1928f1c6ea38SEric Dumazet 	s64 delta;
1929c1b4a7e6SDavid S. Miller 
193099d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1931ae8064acSJohn Heffner 		goto send_now;
1932ae8064acSJohn Heffner 
19335f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
1934a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
1935a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
1936a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
19375f852eb5SEric Dumazet 	 */
1938a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1939a682850aSEric Dumazet 	if (delta > 0)
1940ae8064acSJohn Heffner 		goto send_now;
1941908a75c1SDavid S. Miller 
1942c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1943c1b4a7e6SDavid S. Miller 
1944c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1945c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1946c1b4a7e6SDavid S. Miller 
194790840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1948c1b4a7e6SDavid S. Miller 
1949c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1950c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1951c1b4a7e6SDavid S. Miller 
1952c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1953c1b4a7e6SDavid S. Miller 
1954ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1955605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1956ae8064acSJohn Heffner 		goto send_now;
1957ba244fe9SDavid S. Miller 
195862ad2761SIlpo Järvinen 	/* Middle of queue won't get any more data; fully sendable already? */
195962ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
196062ad2761SIlpo Järvinen 		goto send_now;
196162ad2761SIlpo Järvinen 
19625bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1963ad9f4f50SEric Dumazet 	if (win_divisor) {
1964c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1965c1b4a7e6SDavid S. Miller 
1966c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1967c1b4a7e6SDavid S. Miller 		 * just use it.
1968c1b4a7e6SDavid S. Miller 		 */
1969ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1970c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1971ae8064acSJohn Heffner 			goto send_now;
1972c1b4a7e6SDavid S. Miller 	} else {
1973c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1974c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1975c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1976c1b4a7e6SDavid S. Miller 		 * then send now.
1977c1b4a7e6SDavid S. Miller 		 */
19786b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1979ae8064acSJohn Heffner 			goto send_now;
1980c1b4a7e6SDavid S. Miller 	}
1981c1b4a7e6SDavid S. Miller 
198275c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
198375c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
198475c119afSEric Dumazet 	if (!head)
198575c119afSEric Dumazet 		goto send_now;
1986f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
198750c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
1988f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
198950c8339eSEric Dumazet 		goto send_now;
199050c8339eSEric Dumazet 
1991f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1992f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
1993f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
1994f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
1995f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
1996f9bfe4e6SEric Dumazet 	 */
1997f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
1998f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
1999ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
2000f9bfe4e6SEric Dumazet 			return true;
2001f9bfe4e6SEric Dumazet 		}
2002f9bfe4e6SEric Dumazet 	} else {
2003f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
2004f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
2005f9bfe4e6SEric Dumazet 			return true;
2006f9bfe4e6SEric Dumazet 		}
2007f9bfe4e6SEric Dumazet 	}
2008f9bfe4e6SEric Dumazet 
2009f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
2010d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2011d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2012f9bfe4e6SEric Dumazet 		goto send_now;
2013ca8a2263SNeal Cardwell 
2014a2a385d6SEric Dumazet 	return true;
2015ae8064acSJohn Heffner 
2016ae8064acSJohn Heffner send_now:
2017a2a385d6SEric Dumazet 	return false;
2018c1b4a7e6SDavid S. Miller }
2019c1b4a7e6SDavid S. Miller 
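Stripped of its timing heuristics, the cwnd/rwnd branch at the end of tcp_tso_should_defer() reduces to: defer when the smaller of the two windows cannot carry the whole skb, recording which window limited us. A sketch (illustrative only, not the full deferral logic):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the final tcp_tso_should_defer() decision: defer when the
 * binding window (congestion or receive) is smaller than the skb.
 */
static bool tso_defer(unsigned int send_win, unsigned int cong_win,
		      unsigned int skb_len, bool *cwnd_limited,
		      bool *rwnd_limited)
{
	if (cong_win < send_win) {
		if (cong_win <= skb_len) {
			*cwnd_limited = true;
			return true;
		}
	} else if (send_win <= skb_len) {
		*rwnd_limited = true;
		return true;
	}
	return false;
}

int main(void)
{
	bool c = false, r = false;

	/* 8000-byte skb, 20000-byte send window, 4000-byte cong window */
	printf("defer=%d cwnd_limited=%d rwnd_limited=%d\n",
	       tso_defer(20000, 4000, 8000, &c, &r), c, r);
	return 0;
}
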
202005cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
202105cbc0dbSFan Du {
202205cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
202305cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
202405cbc0dbSFan Du 	struct net *net = sock_net(sk);
202505cbc0dbSFan Du 	u32 interval;
202605cbc0dbSFan Du 	s32 delta;
202705cbc0dbSFan Du 
202805cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
2029c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
203005cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
203105cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
203205cbc0dbSFan Du 
203305cbc0dbSFan Du 		/* Update current search range */
203405cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
203505cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
203605cbc0dbSFan Du 			sizeof(struct tcphdr) +
203705cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
203805cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
203905cbc0dbSFan Du 
204005cbc0dbSFan Du 		/* Update probe time stamp */
2041c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
204205cbc0dbSFan Du 	}
204305cbc0dbSFan Du }
204405cbc0dbSFan Du 
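/* Worked example for tcp_mtu_check_reprobe() (illustrative, assuming
 * the documented default of net.ipv4.tcp_probe_interval = 600 and
 * HZ = 1000): the search range is reset only once tcp_jiffies32 has
 * advanced 600 * 1000 = 600000 jiffies past probe_timestamp, after
 * which search_low/search_high are re-derived from the current MSS.
 */
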
2045808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2046808cf9e3SIlya Lesokhin {
2047808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2048808cf9e3SIlya Lesokhin 
2049808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2050808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2051808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2052808cf9e3SIlya Lesokhin 			break;
2053808cf9e3SIlya Lesokhin 
2054808cf9e3SIlya Lesokhin 		if (unlikely(TCP_SKB_CB(skb)->eor))
2055808cf9e3SIlya Lesokhin 			return false;
2056808cf9e3SIlya Lesokhin 
2057808cf9e3SIlya Lesokhin 		len -= skb->len;
2058808cf9e3SIlya Lesokhin 	}
2059808cf9e3SIlya Lesokhin 
2060808cf9e3SIlya Lesokhin 	return true;
2061808cf9e3SIlya Lesokhin }
2062808cf9e3SIlya Lesokhin 
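/* Example walk of the check above (illustrative sizes): for len = 1200
 * with a 500 byte head skb followed by an 800 byte skb, the first pass
 * leaves len = 700, the second sees 700 <= 800 and returns true; an
 * EOR mark on the 500 byte skb would instead return false, since a
 * probe must not be coalesced across a record boundary.
 */
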
20635d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
206467edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
206567edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
206667edfef7SAndi Kleen  * changes that result in a larger path MTU.
206767edfef7SAndi Kleen  *
20685d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20695d424d5aSJohn Heffner  *         1 if a probe was sent,
2070056834d9SIlpo Järvinen  *         -1 otherwise
2071056834d9SIlpo Järvinen  */
20725d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20735d424d5aSJohn Heffner {
20745d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
207512a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20765d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20776b58e0a5SFan Du 	struct net *net = sock_net(sk);
20785d424d5aSJohn Heffner 	int probe_size;
207991cc17c0SIlpo Järvinen 	int size_needed;
208012a59abcSEric Dumazet 	int copy, len;
20815d424d5aSJohn Heffner 	int mss_now;
20826b58e0a5SFan Du 	int interval;
20835d424d5aSJohn Heffner 
20845d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20855d424d5aSJohn Heffner 	 * not in recovery,
20865d424d5aSJohn Heffner 	 * have enough cwnd, and
208712a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
208812a59abcSEric Dumazet 	 */
208912a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20905d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20915d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20925d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
209312a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20945d424d5aSJohn Heffner 		return -1;
20955d424d5aSJohn Heffner 
20966b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_base_mss
20976b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
20986b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20996b58e0a5SFan Du 	 */
21000c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
21016b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
21026b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
210391cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
21046b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
210505cbc0dbSFan Du 	/* When misfortune strikes while we are actively reprobing,
210605cbc0dbSFan Du 	 * the reprobe timer may have expired; we stick with the current
210705cbc0dbSFan Du 	 * probing process by not resetting the search range to its original value.
210805cbc0dbSFan Du 	 */
21096b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
211005cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
211105cbc0dbSFan Du 		/* Check whether enough time has elapsed for
211205cbc0dbSFan Du 		 * another round of probing.
211305cbc0dbSFan Du 		 */
211405cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
21155d424d5aSJohn Heffner 		return -1;
21165d424d5aSJohn Heffner 	}
21175d424d5aSJohn Heffner 
21185d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
21197f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
21205d424d5aSJohn Heffner 		return -1;
21215d424d5aSJohn Heffner 
212291cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
21235d424d5aSJohn Heffner 		return -1;
212490840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21255d424d5aSJohn Heffner 		return 0;
21265d424d5aSJohn Heffner 
2127d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2128d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2129d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
21305d424d5aSJohn Heffner 			return -1;
21315d424d5aSJohn Heffner 		else
21325d424d5aSJohn Heffner 			return 0;
21335d424d5aSJohn Heffner 	}
21345d424d5aSJohn Heffner 
2135808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2136808cf9e3SIlya Lesokhin 		return -1;
2137808cf9e3SIlya Lesokhin 
21385d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2139eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
214051456b29SIan Morris 	if (!nskb)
21415d424d5aSJohn Heffner 		return -1;
21423ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
21433ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21445d424d5aSJohn Heffner 
2145fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
21465d424d5aSJohn Heffner 
21475d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21485d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21494de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21505d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21515d424d5aSJohn Heffner 	nskb->csum = 0;
215298be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21535d424d5aSJohn Heffner 
215450c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21552b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
215650c4817eSIlpo Järvinen 
21575d424d5aSJohn Heffner 	len = 0;
2158234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21595d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21605d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21615d424d5aSJohn Heffner 
21625d424d5aSJohn Heffner 		if (skb->len <= copy) {
21635d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21645d424d5aSJohn Heffner 			 * Throw it away. */
21654de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2166808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2167808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2168808cf9e3SIlya Lesokhin 			 */
2169808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2170fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21713ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21725d424d5aSJohn Heffner 		} else {
21734de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2174a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21755d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21765d424d5aSJohn Heffner 				skb_pull(skb, copy);
21775d424d5aSJohn Heffner 			} else {
21785d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21795bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21805d424d5aSJohn Heffner 			}
21815d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21825d424d5aSJohn Heffner 		}
21835d424d5aSJohn Heffner 
21845d424d5aSJohn Heffner 		len += copy;
2185234b6860SIlpo Järvinen 
2186234b6860SIlpo Järvinen 		if (len >= probe_size)
2187234b6860SIlpo Järvinen 			break;
21885d424d5aSJohn Heffner 	}
21895bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21905d424d5aSJohn Heffner 
21915d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21927faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21937faee5c0SEric Dumazet 	 */
21945d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21955d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21965d424d5aSJohn Heffner 		 * effectively two packets. */
21975d424d5aSJohn Heffner 		tp->snd_cwnd--;
219866f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21995d424d5aSJohn Heffner 
22005d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
22010e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
22020e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
22035d424d5aSJohn Heffner 
22045d424d5aSJohn Heffner 		return 1;
22055d424d5aSJohn Heffner 	}
22065d424d5aSJohn Heffner 
22075d424d5aSJohn Heffner 	return -1;
22085d424d5aSJohn Heffner }
22095d424d5aSJohn Heffner 
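/* Worked example of the probe sizing above (illustrative figures,
 * assuming 40 bytes of IPv4 + TCP headers and no options): with
 * search_low = 1024 and search_high = 1500, the probe targets the
 * midpoint MTU of 1262, i.e. a 1222 byte payload.  With
 * reordering = 3 and mss_cache = 1400, size_needed =
 * 1222 + 4 * 1400 = 6822 bytes must be queued and fit inside snd_wnd
 * before the probe is built.
 */
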
2210864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2211218af599SEric Dumazet {
2212864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2213864e5c09SEric Dumazet 
2214864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2215864e5c09SEric Dumazet 		return false;
2216864e5c09SEric Dumazet 
2217864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2218864e5c09SEric Dumazet 		return false;
2219864e5c09SEric Dumazet 
2220864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2221864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2222864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2223864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2224864e5c09SEric Dumazet 		sock_hold(sk);
2225864e5c09SEric Dumazet 	}
2226864e5c09SEric Dumazet 	return true;
2227218af599SEric Dumazet }
2228218af599SEric Dumazet 
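/* Example timeline for tcp_pacing_check() (illustrative): if the
 * pacer stamped tcp_wstamp_ns 500 usec beyond tcp_clock_cache, the
 * check arms the pacing hrtimer for that absolute time, takes a
 * socket reference, and returns true so the write loop stops until
 * the timer fires and transmission resumes.
 */
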
2229f9616c35SEric Dumazet /* TCP Small Queues:
2230f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets or ~1 ms worth of data.
2231f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2232f9616c35SEric Dumazet  * This allows for :
2233f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2234f9616c35SEric Dumazet  *  - faster recovery
2235f9616c35SEric Dumazet  *  - high rates
2236f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2237f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2238f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2239f9616c35SEric Dumazet  */
2240f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2241f9616c35SEric Dumazet 				  unsigned int factor)
2242f9616c35SEric Dumazet {
224376a9ebe8SEric Dumazet 	unsigned long limit;
2244f9616c35SEric Dumazet 
224576a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
224676a9ebe8SEric Dumazet 		      2 * skb->truesize,
224776a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
2248c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
224976a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
22509184d8bbSEric Dumazet 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2251f9616c35SEric Dumazet 	limit <<= factor;
2252f9616c35SEric Dumazet 
2253a842fe14SEric Dumazet 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2254a842fe14SEric Dumazet 	    tcp_sk(sk)->tcp_tx_delay) {
2255a842fe14SEric Dumazet 		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2256a842fe14SEric Dumazet 
2257a842fe14SEric Dumazet 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2258a842fe14SEric Dumazet 		 * approximate our needs assuming an ~100% skb->truesize overhead.
2259a842fe14SEric Dumazet 		 * USEC_PER_SEC is approximated by 2^20.
2260a842fe14SEric Dumazet 		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2261a842fe14SEric Dumazet 		 */
2262a842fe14SEric Dumazet 		extra_bytes >>= (20 - 1);
2263a842fe14SEric Dumazet 		limit += extra_bytes;
2264a842fe14SEric Dumazet 	}
226514afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
226675c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
226775eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
226875eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
226975eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
227075eefc6cSEric Dumazet 		 */
227175c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
227275eefc6cSEric Dumazet 			return false;
227375eefc6cSEric Dumazet 
22747aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2275f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2276f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2277f9616c35SEric Dumazet 		 * test again the condition.
2278f9616c35SEric Dumazet 		 */
2279f9616c35SEric Dumazet 		smp_mb__after_atomic();
228014afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2281f9616c35SEric Dumazet 			return true;
2282f9616c35SEric Dumazet 	}
2283f9616c35SEric Dumazet 	return false;
2284f9616c35SEric Dumazet }
2285f9616c35SEric Dumazet 
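/* Worked example of the limit above (illustrative figures): at
 * sk_pacing_rate = 125000000 B/s (1 Gbit/s) with the default
 * sk_pacing_shift of 10, the budget starts at 125000000 >> 10 =
 * 122070 bytes (~1 ms of data), is clamped by tcp_limit_output_bytes
 * when pacing is off, and is doubled by factor = 1 for retransmits.
 * With tcp_tx_delay = 10000 usec the extra term is
 * (125000000 * 10000) >> 19 ~= 2.4 MB, matching rate * delay * 2
 * with USEC_PER_SEC / 2 approximated by 2^19.
 */
#if 0	/* illustrative userspace sketch, not part of the kernel build */
static unsigned long tsq_limit_sketch(unsigned long truesize,
				      unsigned long pacing_rate,
				      int pacing_shift,
				      unsigned int factor)
{
	/* Base budget: ~1 ms of data at the pacing rate, never below
	 * two in-flight skbs; the sysctl clamp and tx_delay term are
	 * omitted here for brevity.
	 */
	unsigned long limit = pacing_rate >> pacing_shift;

	if (limit < 2 * truesize)
		limit = 2 * truesize;
	return limit << factor;	/* doubled for retransmits */
}
#endif
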
228605b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
228705b055e8SFrancis Yan {
2288628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2289efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
229005b055e8SFrancis Yan 
2291efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2292efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
229305b055e8SFrancis Yan 	tp->chrono_start = now;
229405b055e8SFrancis Yan 	tp->chrono_type = new;
229505b055e8SFrancis Yan }
229605b055e8SFrancis Yan 
229705b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
229805b055e8SFrancis Yan {
229905b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
230005b055e8SFrancis Yan 
230105b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
23020f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
23030f87230dSFrancis Yan 	 * over the other conditions, so if something "more interesting"
230405b055e8SFrancis Yan 	 * starts happening, we stop the previous chrono and start a new one.
230505b055e8SFrancis Yan 	 */
230605b055e8SFrancis Yan 	if (type > tp->chrono_type)
230705b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
230805b055e8SFrancis Yan }
230905b055e8SFrancis Yan 
231005b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
231105b055e8SFrancis Yan {
231205b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
231305b055e8SFrancis Yan 
23150f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
23160f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
23170f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
23180f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
23190f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
23200f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
23210f87230dSFrancis Yan 	 */
232275c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
232305b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
23240f87230dSFrancis Yan 	else if (type == tp->chrono_type)
23250f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
232605b055e8SFrancis Yan }
232705b055e8SFrancis Yan 
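/* Example of the chrono precedence (illustrative): a flow that is
 * BUSY and then hits a zero receive window switches to RWND_LIMITED,
 * as the higher-valued enum wins in tcp_chrono_start(); when the
 * window reopens with data still queued, tcp_chrono_stop() falls
 * back to BUSY, and only an empty queue returns the state to UNSPEC.
 */
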
23281da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
23291da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
23301da177e4SLinus Torvalds  * window for us.
23311da177e4SLinus Torvalds  *
2332f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2333f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2334f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2335f8269a49SIlpo Järvinen  *
23366ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
23376ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
23386ba8a3b1SNandita Dukkipati  *
2339a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2340a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
23411da177e4SLinus Torvalds  */
2342a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2343d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
23441da177e4SLinus Torvalds {
23451da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
234692df7b51SDavid S. Miller 	struct sk_buff *skb;
2347c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2348c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23495d424d5aSJohn Heffner 	int result;
23505615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2351605ad7f1SEric Dumazet 	u32 max_segs;
23521da177e4SLinus Torvalds 
2353c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23545d424d5aSJohn Heffner 
2355ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2356d5dd9175SIlpo Järvinen 	if (!push_one) {
23575d424d5aSJohn Heffner 		/* Do MTU probing. */
2358d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2359d5dd9175SIlpo Järvinen 		if (!result) {
2360a2a385d6SEric Dumazet 			return false;
23615d424d5aSJohn Heffner 		} else if (result > 0) {
23625d424d5aSJohn Heffner 			sent_pkts = 1;
23635d424d5aSJohn Heffner 		}
2364d5dd9175SIlpo Järvinen 	}
23655d424d5aSJohn Heffner 
2366ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2367fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2368c8ac3774SHerbert Xu 		unsigned int limit;
2369c8ac3774SHerbert Xu 
237079861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
237179861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
237279861919SEric Dumazet 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
237379861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2374bf50b606SEric Dumazet 			tcp_init_tso_segs(skb, mss_now);
237579861919SEric Dumazet 			goto repair; /* Skip network transmission */
237679861919SEric Dumazet 		}
237779861919SEric Dumazet 
2378218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2379218af599SEric Dumazet 			break;
2380218af599SEric Dumazet 
23815bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2382c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2383c1b4a7e6SDavid S. Miller 
2384b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
23856ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
23866ba8a3b1SNandita Dukkipati 			if (push_one == 2)
23876ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
23886ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23896ba8a3b1SNandita Dukkipati 			else
2390b68e9f85SHerbert Xu 				break;
23916ba8a3b1SNandita Dukkipati 		}
2392b68e9f85SHerbert Xu 
23935615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23945615f886SFrancis Yan 			is_rwnd_limited = true;
2395b68e9f85SHerbert Xu 			break;
23965615f886SFrancis Yan 		}
2397b68e9f85SHerbert Xu 
2398d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2399aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2400aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2401aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2402aa93466bSDavid S. Miller 				break;
2403c1b4a7e6SDavid S. Miller 		} else {
2404ca8a2263SNeal Cardwell 			if (!push_one &&
2405605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2406f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2407aa93466bSDavid S. Miller 				break;
2408c1b4a7e6SDavid S. Miller 		}
2409aa93466bSDavid S. Miller 
2410605ad7f1SEric Dumazet 		limit = mss_now;
2411d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2412605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2413605ad7f1SEric Dumazet 						    min_t(unsigned int,
2414605ad7f1SEric Dumazet 							  cwnd_quota,
2415605ad7f1SEric Dumazet 							  max_segs),
2416605ad7f1SEric Dumazet 						    nonagle);
2417605ad7f1SEric Dumazet 
2418605ad7f1SEric Dumazet 		if (skb->len > limit &&
241956483341SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2420605ad7f1SEric Dumazet 			break;
2421605ad7f1SEric Dumazet 
2422f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
242346d3ceabSEric Dumazet 			break;
2424c9eeec26SEric Dumazet 
2425d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
24261da177e4SLinus Torvalds 			break;
24271da177e4SLinus Torvalds 
2428ec342325SAndrew Vagin repair:
24291da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
24301da177e4SLinus Torvalds 		 * This call will increment packets_out.
24311da177e4SLinus Torvalds 		 */
243266f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
24331da177e4SLinus Torvalds 
24341da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2435a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2436d5dd9175SIlpo Järvinen 
2437d5dd9175SIlpo Järvinen 		if (push_one)
2438d5dd9175SIlpo Järvinen 			break;
24391da177e4SLinus Torvalds 	}
24401da177e4SLinus Torvalds 
24415615f886SFrancis Yan 	if (is_rwnd_limited)
24425615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
24435615f886SFrancis Yan 	else
24445615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
24455615f886SFrancis Yan 
2446aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2447684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2448684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24496ba8a3b1SNandita Dukkipati 
24506ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24516ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2452ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2453d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2454ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2455a2a385d6SEric Dumazet 		return false;
24561da177e4SLinus Torvalds 	}
245775c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24586ba8a3b1SNandita Dukkipati }
24596ba8a3b1SNandita Dukkipati 
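/* Summary of the per-skb gates in tcp_write_xmit() above, in order:
 * repair short-circuit, internal pacing (tcp_pacing_check), cwnd
 * quota (tcp_cwnd_test), receive window (tcp_snd_wnd_test), Nagle or
 * TSO autodefer, split to the cwnd/MSS budget (tso_fragment), TCP
 * Small Queues (tcp_small_queue_check), then tcp_transmit_skb().
 * A break at any gate records whether we ended up cwnd- or
 * rwnd-limited for the chronograph accounting at the end.
 */
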
2460ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24616ba8a3b1SNandita Dukkipati {
24626ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24636ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2464a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24652ae21cf5SEric Dumazet 	int early_retrans;
24666ba8a3b1SNandita Dukkipati 
24676ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24686ba8a3b1SNandita Dukkipati 	 * finishes.
24696ba8a3b1SNandita Dukkipati 	 */
2470f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
24716ba8a3b1SNandita Dukkipati 		return false;
24726ba8a3b1SNandita Dukkipati 
24732ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
24746ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2475b4f70c3dSNeal Cardwell 	 * not in loss recovery that are either cwnd-limited or application-limited.
24766ba8a3b1SNandita Dukkipati 	 */
24772ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2478bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2479b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2480b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
24816ba8a3b1SNandita Dukkipati 		return false;
24826ba8a3b1SNandita Dukkipati 
2483bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2484f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2485f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24866ba8a3b1SNandita Dukkipati 	 */
2487bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2488bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
24896ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2490bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2491bb4d991aSYuchung Cheng 		else
2492bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2493bb4d991aSYuchung Cheng 	} else {
2494bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2495bb4d991aSYuchung Cheng 	}
24966ba8a3b1SNandita Dukkipati 
2497a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2498ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2499ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2500ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2501a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2502a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
25036ba8a3b1SNandita Dukkipati 
25043f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
25053f80e08fSEric Dumazet 			     TCP_RTO_MAX, NULL);
25066ba8a3b1SNandita Dukkipati 	return true;
25076ba8a3b1SNandita Dukkipati }
25086ba8a3b1SNandita Dukkipati 
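/* Worked example of the PTO above (illustrative figures): tp->srtt_us
 * stores 8 * SRTT, so srtt_us >> 2 equals 2 * SRTT.  With SRTT = 10 ms
 * (srtt_us = 80000) the base timeout is 20 ms; with exactly one
 * packet in flight we add TCP_RTO_MIN (200 ms) to ride out a delayed
 * ACK, and the result is capped by the time remaining until the RTO.
 */
#if 0	/* illustrative userspace sketch, not part of the kernel build */
static unsigned int tlp_timeout_ms_sketch(unsigned int srtt_us,
					  unsigned int packets_out)
{
	if (!srtt_us)			/* no RTT sample yet */
		return 1000;		/* TCP_TIMEOUT_INIT: one second */
	/* srtt_us holds 8 * SRTT in usec; >> 2 yields 2 * SRTT.
	 * 200 ms models TCP_RTO_MIN, 2 ms models TCP_TIMEOUT_MIN
	 * (2 jiffies, HZ = 1000 assumed).
	 */
	return (srtt_us >> 2) / 1000 + (packets_out == 1 ? 200 : 2);
}
#endif
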
25091f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
25101f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
25111f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
25121f3279aeSEric Dumazet  */
25131f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
25141f3279aeSEric Dumazet 				    const struct sk_buff *skb)
25151f3279aeSEric Dumazet {
251639bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2517c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
25181f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
25191f3279aeSEric Dumazet 		return true;
25201f3279aeSEric Dumazet 	}
25211f3279aeSEric Dumazet 	return false;
25221f3279aeSEric Dumazet }
25231f3279aeSEric Dumazet 
2524b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try send a new segment if possible, else
25256ba8a3b1SNandita Dukkipati  * retransmit the last segment.
25266ba8a3b1SNandita Dukkipati  */
25276ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
25286ba8a3b1SNandita Dukkipati {
25299b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
25306ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
25316ba8a3b1SNandita Dukkipati 	int pcount;
25326ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
25336ba8a3b1SNandita Dukkipati 
2534b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
253575c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2536b340b264SYuchung Cheng 		pcount = tp->packets_out;
2537b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2538b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2539b340b264SYuchung Cheng 			goto probe_sent;
25406ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25416ba8a3b1SNandita Dukkipati 	}
254275c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2543b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2544b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2545b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2546b2b7af86SYuchung Cheng 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2547b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2548b2b7af86SYuchung Cheng 		return;
2549b2b7af86SYuchung Cheng 	}
25506ba8a3b1SNandita Dukkipati 
25519b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
25529b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25539b717a8dSNandita Dukkipati 		goto rearm_timer;
25549b717a8dSNandita Dukkipati 
25551f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25561f3279aeSEric Dumazet 		goto rearm_timer;
25571f3279aeSEric Dumazet 
25586ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25596ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25606ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25616ba8a3b1SNandita Dukkipati 
25626ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
256375c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
256475c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25656cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25666ba8a3b1SNandita Dukkipati 			goto rearm_timer;
256775c119afSEric Dumazet 		skb = skb_rb_next(skb);
25686ba8a3b1SNandita Dukkipati 	}
25696ba8a3b1SNandita Dukkipati 
25706ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25716ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25726ba8a3b1SNandita Dukkipati 
257310d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2574b340b264SYuchung Cheng 		goto rearm_timer;
25756ba8a3b1SNandita Dukkipati 
25769b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
25779b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
25789b717a8dSNandita Dukkipati 
2579b340b264SYuchung Cheng probe_sent:
2580c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2581fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2582fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2583b340b264SYuchung Cheng rearm_timer:
2584fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
25851da177e4SLinus Torvalds }
25861da177e4SLinus Torvalds 
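/* Example of the TLP flow above (illustrative): with a 3 * MSS tail
 * skb in the rtx queue and no new data to send, the probe splits off
 * the final MSS via tcp_fragment() and retransmits just that segment,
 * recording snd_nxt in tlp_high_seq so a later ACK can tell whether
 * the probe plugged a real tail loss.
 */
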
2587a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2588a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2589a762a980SDavid S. Miller  * The socket must be locked by the caller.
2590a762a980SDavid S. Miller  */
25919e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25929e412ba7SIlpo Järvinen 			       int nonagle)
2593a762a980SDavid S. Miller {
2594726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2595726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2596726e07a8SIlpo Järvinen 	 * all will be happy.
2597726e07a8SIlpo Järvinen 	 */
2598726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2599726e07a8SIlpo Järvinen 		return;
2600726e07a8SIlpo Järvinen 
260199a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
26027450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
26039e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2604a762a980SDavid S. Miller }
2605a762a980SDavid S. Miller 
2606c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2607c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2608c1b4a7e6SDavid S. Miller  */
2609c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2610c1b4a7e6SDavid S. Miller {
2611fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2612c1b4a7e6SDavid S. Miller 
2613c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2614c1b4a7e6SDavid S. Miller 
2615d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2616c1b4a7e6SDavid S. Miller }
2617c1b4a7e6SDavid S. Miller 
26181da177e4SLinus Torvalds /* This function returns the amount that we can raise the
26191da177e4SLinus Torvalds  * usable window based on the following constraints:
26201da177e4SLinus Torvalds  *
26211da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
26221da177e4SLinus Torvalds  * 2. We limit memory per socket
26231da177e4SLinus Torvalds  *
26241da177e4SLinus Torvalds  * RFC 1122:
26251da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
26261da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
26271da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
26281da177e4SLinus Torvalds  *
26291da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
26301da177e4SLinus Torvalds  * it at least MSS bytes.
26311da177e4SLinus Torvalds  *
26321da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
26331da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
26341da177e4SLinus Torvalds  *
26351da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
26361da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
26371da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
26381da177e4SLinus Torvalds  * window to always advance by a single byte.
26391da177e4SLinus Torvalds  *
26401da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
26411da177e4SLinus Torvalds  * then this will not be a problem.
26421da177e4SLinus Torvalds  *
26431da177e4SLinus Torvalds  * BSD seems to make the following compromise:
26441da177e4SLinus Torvalds  *
26451da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
26461da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
26471da177e4SLinus Torvalds  *	then set the window to 0.
26481da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
26491da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
26501da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26511da177e4SLinus Torvalds  *
26521da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26531da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26541da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26551da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26561da177e4SLinus Torvalds  * because the pipeline is full.
26571da177e4SLinus Torvalds  *
26581da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26591da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26601da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26611da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26621da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26631da177e4SLinus Torvalds  *
26641da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26651da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26661da177e4SLinus Torvalds  *
26671da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26681da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26691da177e4SLinus Torvalds  */
26701da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26711da177e4SLinus Torvalds {
2672463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
26731da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2674caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
26751da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
26761da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
26771da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
26781da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
26791da177e4SLinus Torvalds 	 */
2680463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
26811da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
268286c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
268386c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
26841da177e4SLinus Torvalds 	int window;
26851da177e4SLinus Torvalds 
268606425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
26871da177e4SLinus Torvalds 		mss = full_space;
268806425c30SEric Dumazet 		if (mss <= 0)
268906425c30SEric Dumazet 			return 0;
269006425c30SEric Dumazet 	}
2691b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2692463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26931da177e4SLinus Torvalds 
2694b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2695056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2696056834d9SIlpo Järvinen 					       4U * tp->advmss);
26971da177e4SLinus Torvalds 
269886c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
269986c1a045SFlorian Westphal 		 * increase it due to wscale.
270086c1a045SFlorian Westphal 		 */
270186c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
270286c1a045SFlorian Westphal 
270386c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
270486c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
270586c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
270686c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
270786c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
270886c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
270986c1a045SFlorian Westphal 		 */
271086c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
27111da177e4SLinus Torvalds 			return 0;
27121da177e4SLinus Torvalds 	}
27131da177e4SLinus Torvalds 
27141da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
27151da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
27161da177e4SLinus Torvalds 
27171da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
27181da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
27191da177e4SLinus Torvalds 	 */
27201da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
27211da177e4SLinus Torvalds 		window = free_space;
27221da177e4SLinus Torvalds 
27231da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
27241da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
27251da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
27261da177e4SLinus Torvalds 		 */
27271935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
27281da177e4SLinus Torvalds 	} else {
27291935299dSGao Feng 		window = tp->rcv_wnd;
27301da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
27311da177e4SLinus Torvalds 		 * Window clamp already applied above.
27321da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
27331da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
27341da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
27351da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
27361da177e4SLinus Torvalds 		 * is too small.
27371da177e4SLinus Torvalds 		 */
27381da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
27391935299dSGao Feng 			window = rounddown(free_space, mss);
274084565070SJohn Heffner 		else if (mss == full_space &&
2741b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
274284565070SJohn Heffner 			window = free_space;
27431da177e4SLinus Torvalds 	}
27441da177e4SLinus Torvalds 
27451da177e4SLinus Torvalds 	return window;
27461da177e4SLinus Torvalds }
27471da177e4SLinus Torvalds 
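/* Worked example of the rounding above (illustrative figures): with
 * rcv_wscale = 7 the advertised window moves in 128 byte units, so
 * ALIGN() rounds free_space = 5000 up to 5120 to keep it from being
 * scaled away; without scaling and mss = 1460, rounddown() would
 * instead advertise 4380, the largest MSS multiple that fits.
 */
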
2748cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2749082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2750082ac2d5SMartin KaFai Lau {
27510a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27520a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27530a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2754082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2755082ac2d5SMartin KaFai Lau 
27560a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2757082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27582de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27592de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2760082ac2d5SMartin KaFai Lau 	}
2761082ac2d5SMartin KaFai Lau }
2762082ac2d5SMartin KaFai Lau 
27634a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2764f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
276775c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
276813dde04fSWei Yongjun 	int next_skb_size;
27691da177e4SLinus Torvalds 
2770058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27711da177e4SLinus Torvalds 
2772058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
27731da177e4SLinus Torvalds 
2774f8071cdeSEric Dumazet 	if (next_skb_size) {
2775f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2776f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2777f8071cdeSEric Dumazet 				      next_skb_size);
27783b4929f6SEric Dumazet 		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
2779f8071cdeSEric Dumazet 			return false;
2780f8071cdeSEric Dumazet 	}
27812b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2782a6963a6bSIlpo Järvinen 
27831da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27841da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27851da177e4SLinus Torvalds 
2786e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27874de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27881da177e4SLinus Torvalds 
27891da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27901da177e4SLinus Torvalds 	 * packet counting does not break.
27911da177e4SLinus Torvalds 	 */
27924828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2793a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2794b7689205SIlpo Järvinen 
2795b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2796ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2797ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2798ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2799b7689205SIlpo Järvinen 
2800797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2801797108d1SIlpo Järvinen 
2802082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2803082ac2d5SMartin KaFai Lau 
280475c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2805f8071cdeSEric Dumazet 	return true;
28061da177e4SLinus Torvalds }
28071da177e4SLinus Torvalds 
280867edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2809a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
28104a17fc3aSIlpo Järvinen {
28114a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2812a2a385d6SEric Dumazet 		return false;
28134a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2814a2a385d6SEric Dumazet 		return false;
28152331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd data could be invented */
28164a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2817a2a385d6SEric Dumazet 		return false;
28184a17fc3aSIlpo Järvinen 
2819a2a385d6SEric Dumazet 	return true;
28204a17fc3aSIlpo Järvinen }
28214a17fc3aSIlpo Järvinen 
282267edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
282367edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
282467edfef7SAndi Kleen  */
28254a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
28264a17fc3aSIlpo Järvinen 				     int space)
28274a17fc3aSIlpo Järvinen {
28284a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
28294a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2830a2a385d6SEric Dumazet 	bool first = true;
28314a17fc3aSIlpo Järvinen 
2832e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
28334a17fc3aSIlpo Järvinen 		return;
28344de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
28354a17fc3aSIlpo Järvinen 		return;
28364a17fc3aSIlpo Järvinen 
283775c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
28384a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
28394a17fc3aSIlpo Järvinen 			break;
28404a17fc3aSIlpo Järvinen 
2841a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2842a643b5d4SMartin KaFai Lau 			break;
2843a643b5d4SMartin KaFai Lau 
28444a17fc3aSIlpo Järvinen 		space -= skb->len;
28454a17fc3aSIlpo Järvinen 
28464a17fc3aSIlpo Järvinen 		if (first) {
2847a2a385d6SEric Dumazet 			first = false;
28484a17fc3aSIlpo Järvinen 			continue;
28494a17fc3aSIlpo Järvinen 		}
28504a17fc3aSIlpo Järvinen 
28514a17fc3aSIlpo Järvinen 		if (space < 0)
28524a17fc3aSIlpo Järvinen 			break;
28534a17fc3aSIlpo Järvinen 
28544a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28554a17fc3aSIlpo Järvinen 			break;
28564a17fc3aSIlpo Järvinen 
2857f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2858f8071cdeSEric Dumazet 			break;
28594a17fc3aSIlpo Järvinen 	}
28604a17fc3aSIlpo Järvinen }
28614a17fc3aSIlpo Järvinen 
28621da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28631da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28641da177e4SLinus Torvalds  * error occurred which prevented the send.
28651da177e4SLinus Torvalds  */
286610d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28671da177e4SLinus Torvalds {
28685d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
286910d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28707d227cd2SSridhar Samudrala 	unsigned int cur_mss;
287110d3be56SEric Dumazet 	int diff, len, err;
287310d3be56SEric Dumazet 
287410d3be56SEric Dumazet 	/* Inconclusive MTU probe */
287510d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
28765d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
28775d424d5aSJohn Heffner 
28781da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2879caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
28801da177e4SLinus Torvalds 	 */
288114afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2882ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2883ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28841da177e4SLinus Torvalds 		return -EAGAIN;
28851da177e4SLinus Torvalds 
28861f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28871f3279aeSEric Dumazet 		return -EBUSY;
28881f3279aeSEric Dumazet 
28891da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28907f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
28917f582b24SEric Dumazet 			WARN_ON_ONCE(1);
28927f582b24SEric Dumazet 			return -EINVAL;
28937f582b24SEric Dumazet 		}
28941da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28951da177e4SLinus Torvalds 			return -ENOMEM;
28961da177e4SLinus Torvalds 	}
28971da177e4SLinus Torvalds 
28987d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28997d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
29007d227cd2SSridhar Samudrala 
29010c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
29027d227cd2SSridhar Samudrala 
29031da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
29041da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
29051da177e4SLinus Torvalds 	 * case when the window is shrunk to zero, in which case
29061da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
29071da177e4SLinus Torvalds 	 */
29089d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
29099d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
29101da177e4SLinus Torvalds 		return -EAGAIN;
29111da177e4SLinus Torvalds 
291210d3be56SEric Dumazet 	len = cur_mss * segs;
291310d3be56SEric Dumazet 	if (skb->len > len) {
291475c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
291575c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
29161da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
291702276f3cSIlpo Järvinen 	} else {
2918c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2919c52e2421SEric Dumazet 			return -ENOMEM;
292010d3be56SEric Dumazet 
292110d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
292210d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
292310d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
292410d3be56SEric Dumazet 		if (diff)
292510d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
292610d3be56SEric Dumazet 		if (skb->len < cur_mss)
292710d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
29281da177e4SLinus Torvalds 	}
29291da177e4SLinus Torvalds 
293049213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
293149213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
293249213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
293349213555SDaniel Borkmann 
2934678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2935678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2936678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2937678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2938678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2939678550c6SYuchung Cheng 	tp->total_retrans += segs;
2940fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2941678550c6SYuchung Cheng 
294250bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
294350bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
294450bceae9SThomas Graf 	 * beyond what csum_start can cover.
294550bceae9SThomas Graf 	 */
294650bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
294750bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
294810a81980SEric Dumazet 		struct sk_buff *nskb;
294910a81980SEric Dumazet 
2950e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
295110a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2952c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2953117632e6SEric Dumazet 				     -ENOBUFS;
2954e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2955e2080072SEric Dumazet 
29565889e2c0SYousuk Seung 		if (!err) {
2957a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
29585889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29595889e2c0SYousuk Seung 		}
2960117632e6SEric Dumazet 	} else {
2961c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2962117632e6SEric Dumazet 	}
2963c84a5711SYuchung Cheng 
29647f12422cSYuchung Cheng 	/* To avoid taking spuriously low RTT samples based on a timestamp
29657f12422cSYuchung Cheng 	 * for a transmit that never happened, always mark EVER_RETRANS
29667f12422cSYuchung Cheng 	 */
29677f12422cSYuchung Cheng 	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
29687f12422cSYuchung Cheng 
2969a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2970a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2971a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2972a31ad29eSLawrence Brakmo 
2973fc9f3501SEric Dumazet 	if (likely(!err)) {
2974e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2975678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2976ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
2977fc9f3501SEric Dumazet 	}
2978c84a5711SYuchung Cheng 	return err;
297993b174adSYuchung Cheng }
298093b174adSYuchung Cheng 
298110d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
298293b174adSYuchung Cheng {
298393b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
298410d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
29851da177e4SLinus Torvalds 
29861da177e4SLinus Torvalds 	if (err == 0) {
29871da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
29881da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2989e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
29901da177e4SLinus Torvalds 		}
29911da177e4SLinus Torvalds #endif
29921da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
29931da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
29947ae18975SYuchung Cheng 	}
29951da177e4SLinus Torvalds 
29967ae18975SYuchung Cheng 	/* Save stamp of the first (attempted) retransmit. */
29971da177e4SLinus Torvalds 	if (!tp->retrans_stamp)
29987faee5c0SEric Dumazet 		tp->retrans_stamp = tcp_skb_timestamp(skb);
29991da177e4SLinus Torvalds 
30006e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
30016e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
30026e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
30031da177e4SLinus Torvalds 	return err;
30041da177e4SLinus Torvalds }
30051da177e4SLinus Torvalds 
30061da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
30071da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
30081da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
30091da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
30101da177e4SLinus Torvalds  */
30111da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
30121da177e4SLinus Torvalds {
30136687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3014b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
30151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3016840a3cbeSYuchung Cheng 	u32 max_segs;
301761eb55f4SIlpo Järvinen 	int mib_idx;
30186a438bbeSStephen Hemminger 
301945e77d31SIlpo Järvinen 	if (!tp->packets_out)
302045e77d31SIlpo Järvinen 		return;
302145e77d31SIlpo Järvinen 
302275c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3023b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3024ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
302575c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3026dca0aaf8SEric Dumazet 		__u8 sacked;
302710d3be56SEric Dumazet 		int segs;
30281da177e4SLinus Torvalds 
3029218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3030218af599SEric Dumazet 			break;
3031218af599SEric Dumazet 
30326a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
303351456b29SIan Morris 		if (!hole)
30346a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
30356a438bbeSStephen Hemminger 
303610d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
303710d3be56SEric Dumazet 		if (segs <= 0)
30381da177e4SLinus Torvalds 			return;
3039dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3040a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3041a3d2e9f8SEric Dumazet 		 * we need to make sure we are not sending too big TSO packets.
3042a3d2e9f8SEric Dumazet 		 */
3043a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
30440e1c54c2SIlpo Järvinen 
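		/* Nothing more to do once every segment marked lost has a
		 * retransmission in flight already.  Segments not marked lost
		 * are merely skipped; the first one that is neither SACKed nor
		 * retransmitted is remembered in @hole as the point the hint
		 * above may restart from.
		 */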
3045840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3046006f582cSIlpo Järvinen 			break;
30470e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
304851456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
30490e1c54c2SIlpo Järvinen 				hole = skb;
305061eb55f4SIlpo Järvinen 			continue;
30511da177e4SLinus Torvalds 
30520e1c54c2SIlpo Järvinen 		} else {
30530e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30540e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30550e1c54c2SIlpo Järvinen 			else
30560e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30570e1c54c2SIlpo Järvinen 		}
30580e1c54c2SIlpo Järvinen 
30590e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
306061eb55f4SIlpo Järvinen 			continue;
306140b215e5SPavel Emelyanov 
3062f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3063f9616c35SEric Dumazet 			return;
3064f9616c35SEric Dumazet 
306510d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30661da177e4SLinus Torvalds 			return;
306724ab6becSYuchung Cheng 
3068de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30691da177e4SLinus Torvalds 
3070684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3071a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3072a262f0cdSNandita Dukkipati 
307375c119afSEric Dumazet 		if (skb == rtx_head &&
307457dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
30753f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30763f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
30773f80e08fSEric Dumazet 					     TCP_RTO_MAX,
30783f80e08fSEric Dumazet 					     skb);
30791da177e4SLinus Torvalds 	}
30801da177e4SLinus Torvalds }
30811da177e4SLinus Torvalds 
3082d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3083d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3084845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted either to delay the FIN
3085845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3086a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3087a6c5ea4cSEric Dumazet  * with edge-triggered epoll().
3088d83769a5SEric Dumazet  */
3089a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3090d83769a5SEric Dumazet {
3091e805605cSJohannes Weiner 	int amt;
3092d83769a5SEric Dumazet 
3093d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3094d83769a5SEric Dumazet 		return;
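	/* Round the request up to whole SK_MEM_QUANTUM pages and charge them
	 * unconditionally, bypassing the usual tcp_mem pressure checks.
	 */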
3095d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3096d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3097e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3098e805605cSJohannes Weiner 
3099baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3100baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3101d83769a5SEric Dumazet }
3102d83769a5SEric Dumazet 
3103845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3104845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
31051da177e4SLinus Torvalds  */
31061da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
31071da177e4SLinus Torvalds {
3108845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
31091da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31101da177e4SLinus Torvalds 
3111845704a5SEric Dumazet 	/* Optimization: tack the FIN onto the tail skb if there is one in the
3112845704a5SEric Dumazet 	 * write queue that was not yet sent, or if we are under memory pressure.
3113845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3114845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
31151da177e4SLinus Torvalds 	 */
311675c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
311775c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
311875c119afSEric Dumazet 
311975c119afSEric Dumazet 	if (tskb) {
3120845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3121845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
31221da177e4SLinus Torvalds 		tp->write_seq++;
312375c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3124845704a5SEric Dumazet 			/* This means tskb was already sent.
3125845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3126845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3127845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3128845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3129845704a5SEric Dumazet 			 */
3130845704a5SEric Dumazet 			tp->snd_nxt++;
3131845704a5SEric Dumazet 			return;
3132845704a5SEric Dumazet 		}
31331da177e4SLinus Torvalds 	} else {
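		/* No skb to piggyback the FIN on: allocate a fresh one.  The
		 * forced memory charge below bypasses socket memory limits,
		 * so accounting can never prevent the flow from getting a FIN.
		 */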
3134845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3135d1edc085SColin Ian King 		if (unlikely(!skb))
3136845704a5SEric Dumazet 			return;
3137d1edc085SColin Ian King 
3138e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3139d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3140a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
31411da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3142e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3143a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
31441da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
31451da177e4SLinus Torvalds 	}
3146845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
31471da177e4SLinus Torvalds }
31481da177e4SLinus Torvalds 
31491da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
31501da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
31511da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
315265bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31531da177e4SLinus Torvalds  */
3154dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31551da177e4SLinus Torvalds {
31561da177e4SLinus Torvalds 	struct sk_buff *skb;
31571da177e4SLinus Torvalds 
31587cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31597cc2b043SGao Feng 
31601da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31611da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31621da177e4SLinus Torvalds 	if (!skb) {
31634e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31641da177e4SLinus Torvalds 		return;
31651da177e4SLinus Torvalds 	}
31661da177e4SLinus Torvalds 
31671da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31681da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3169e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3170a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31719a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
31721da177e4SLinus Torvalds 	/* Send it off. */
3173dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
31744e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3175c24b14c4SSong Liu 
3176c24b14c4SSong Liu 	/* The skb argument of trace_tcp_send_reset() is the skb that caused
3177c24b14c4SSong Liu 	 * the RST; the skb here is a different one, so pass NULL.
3178c24b14c4SSong Liu 	 */
3179c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
31801da177e4SLinus Torvalds }
31811da177e4SLinus Torvalds 
318267edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
318367edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
31841da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
31851da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
31861da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
31871da177e4SLinus Torvalds  */
31881da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
31891da177e4SLinus Torvalds {
31901da177e4SLinus Torvalds 	struct sk_buff *skb;
31911da177e4SLinus Torvalds 
319275c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
319351456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
319475c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31951da177e4SLinus Torvalds 		return -EFAULT;
31961da177e4SLinus Torvalds 	}
31974de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31981da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3199e2080072SEric Dumazet 			struct sk_buff *nskb;
3200e2080072SEric Dumazet 
3201e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3202e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3203e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
320451456b29SIan Morris 			if (!nskb)
32051da177e4SLinus Torvalds 				return -ENOMEM;
3206e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
320775c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3208f4a775d1SEric Dumazet 			__skb_header_release(nskb);
320975c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
32103ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
32113ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
32121da177e4SLinus Torvalds 			skb = nskb;
32131da177e4SLinus Torvalds 		}
32141da177e4SLinus Torvalds 
32154de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3216735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
32171da177e4SLinus Torvalds 	}
3218dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
32191da177e4SLinus Torvalds }
32201da177e4SLinus Torvalds 
32214aea39c1SEric Dumazet /**
32224aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
32234aea39c1SEric Dumazet  * @sk: listener socket
32244aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
32254aea39c1SEric Dumazet  * @req: request_sock pointer
 * @foc: cached Fast Open cookie to include, may be NULL
 * @synack_type: normal, Fast Open or syncookie SYNACK (controls skb ownership)
32264aea39c1SEric Dumazet  *
32274aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
32284aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
32294aea39c1SEric Dumazet  */
32305d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3231e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3232ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3233b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
32341da177e4SLinus Torvalds {
32352e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
32365d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
323780f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
32385d062de7SEric Dumazet 	struct tcp_out_options opts;
32395d062de7SEric Dumazet 	struct sk_buff *skb;
3240bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
32415d062de7SEric Dumazet 	struct tcphdr *th;
3242f5fff5dcSTom Quetchenbach 	int mss;
3243a842fe14SEric Dumazet 	u64 now;
32441da177e4SLinus Torvalds 
3245ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
32464aea39c1SEric Dumazet 	if (unlikely(!skb)) {
32474aea39c1SEric Dumazet 		dst_release(dst);
32481da177e4SLinus Torvalds 		return NULL;
32494aea39c1SEric Dumazet 	}
32501da177e4SLinus Torvalds 	/* Reserve space for headers. */
32511da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32521da177e4SLinus Torvalds 
3253b3d05147SEric Dumazet 	switch (synack_type) {
3254b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32559e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3256b3d05147SEric Dumazet 		break;
3257b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3258b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3259b3d05147SEric Dumazet 		 * to avoid false sharing.
3260b3d05147SEric Dumazet 		 */
3261b3d05147SEric Dumazet 		break;
3262b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3263ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3264ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3265ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3266ca6fb065SEric Dumazet 		 */
3267ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3268b3d05147SEric Dumazet 		break;
3269ca6fb065SEric Dumazet 	}
32704aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32711da177e4SLinus Torvalds 
32723541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3273f5fff5dcSTom Quetchenbach 
327433ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3275a842fe14SEric Dumazet 	now = tcp_clock_ns();
32768b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
32778b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3278d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
32798b5f12d0SFlorian Westphal 	else
32808b5f12d0SFlorian Westphal #endif
32819e450c1eSYuchung Cheng 	{
3282a842fe14SEric Dumazet 		skb->skb_mstamp_ns = now;
32839e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
32849e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
32859e450c1eSYuchung Cheng 	}
328680f03e27SEric Dumazet 
328780f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
328880f03e27SEric Dumazet 	rcu_read_lock();
3289fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
329080f03e27SEric Dumazet #endif
329158d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
329260e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
329360e2a778SUrsula Braun 					     foc) + sizeof(*th);
329433ad798cSAdam Langley 
3295aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3296aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
32971da177e4SLinus Torvalds 
3298ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32991da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
33001da177e4SLinus Torvalds 	th->syn = 1;
33011da177e4SLinus Torvalds 	th->ack = 1;
33026ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3303b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3304634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3305e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
33063b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
33073b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
33088336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
33098336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
33101da177e4SLinus Torvalds 
33111da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3312ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
33135d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
33141da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
331590bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3316cfb6eeb4SYOSHIFUJI Hideaki 
3317cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3318cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
331980f03e27SEric Dumazet 	if (md5)
3320bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
332139f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
332280f03e27SEric Dumazet 	rcu_read_unlock();
3323cfb6eeb4SYOSHIFUJI Hideaki #endif
3324cfb6eeb4SYOSHIFUJI Hideaki 
3325a842fe14SEric Dumazet 	skb->skb_mstamp_ns = now;
3326a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3327a842fe14SEric Dumazet 
33281da177e4SLinus Torvalds 	return skb;
33291da177e4SLinus Torvalds }
33304bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
33311da177e4SLinus Torvalds 
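/* Switch this socket to the congestion control algorithm pinned to the
 * route, if any.  Such a per-route algorithm is typically configured with
 * iproute2, e.g. (a sketch, assuming the "congctl" route option is available):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 congctl lock dctcp
 *
 * where "lock" is what tcp_ca_dst_locked() reports below.
 */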
333281164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
333381164413SDaniel Borkmann {
333481164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
333581164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
333681164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
333781164413SDaniel Borkmann 
333881164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
333981164413SDaniel Borkmann 		return;
334081164413SDaniel Borkmann 
334181164413SDaniel Borkmann 	rcu_read_lock();
334281164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
334381164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
334481164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
334581164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
334681164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
334781164413SDaniel Borkmann 	}
334881164413SDaniel Borkmann 	rcu_read_unlock();
334981164413SDaniel Borkmann }
335081164413SDaniel Borkmann 
335167edfef7SAndi Kleen /* Do all connect socket setups that can be done AF-independently. */
3352f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
33531da177e4SLinus Torvalds {
3354cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
33551da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33561da177e4SLinus Torvalds 	__u8 rcv_wscale;
335713d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33581da177e4SLinus Torvalds 
33591da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33601da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33611da177e4SLinus Torvalds 	 */
33625d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33635d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33645d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33651da177e4SLinus Torvalds 
3366cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
336700db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3368cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3369cfb6eeb4SYOSHIFUJI Hideaki #endif
3370cfb6eeb4SYOSHIFUJI Hideaki 
33711da177e4SLinus Torvalds 	/* If the user gave us a TCP_MAXSEG, record it so we can clamp the MSS. */
33721da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33731da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33741da177e4SLinus Torvalds 	tp->max_window = 0;
33755d424d5aSJohn Heffner 	tcp_mtup_init(sk);
33761da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
33771da177e4SLinus Torvalds 
337881164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
337981164413SDaniel Borkmann 
33801da177e4SLinus Torvalds 	if (!tp->window_clamp)
33811da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
33823541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3383f5fff5dcSTom Quetchenbach 
33841da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
33851da177e4SLinus Torvalds 
3386e88c64f0SHagen Paul Pfeifer 	/* Limit the window selection if the user enforces a smaller rx buffer. */
3387e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3388e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3389e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3390e88c64f0SHagen Paul Pfeifer 
339113d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
339213d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
339313d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
339413d3b1ebSLawrence Brakmo 
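	/* Choose the initial receive window and window scale to advertise in
	 * the SYN, from the buffer space above and the BPF/route-provided
	 * initial rcv_wnd.
	 */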
3395ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
33961da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33971da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33981da177e4SLinus Torvalds 				  &tp->window_clamp,
33999bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
340031d12926Slaurent chavey 				  &rcv_wscale,
340113d3b1ebSLawrence Brakmo 				  rcv_wnd);
34021da177e4SLinus Torvalds 
34031da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
34041da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
34051da177e4SLinus Torvalds 
34061da177e4SLinus Torvalds 	sk->sk_err = 0;
34071da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
34081da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3409ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
34107f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
34111da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
34121da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
341333f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3414370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3415ee995283SPavel Emelyanov 
3416ee995283SPavel Emelyanov 	if (likely(!tp->repair))
34171da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3418c7781a6eSAndrew Vagin 	else
341970eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3420ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3421ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
34221da177e4SLinus Torvalds 
34238550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3424463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
34251da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
34261da177e4SLinus Torvalds }
34271da177e4SLinus Torvalds 
3428783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3429783237e8SYuchung Cheng {
3430783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3431783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3432783237e8SYuchung Cheng 
3433783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3434f4a775d1SEric Dumazet 	__skb_header_release(skb);
3435783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3436783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3437783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3438783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3439783237e8SYuchung Cheng }
3440783237e8SYuchung Cheng 
3441783237e8SYuchung Cheng /* Build and send a SYN with data and a (cached) Fast Open cookie. However,
3442783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3443783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3444783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3445783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3446783237e8SYuchung Cheng  * a regular SYN with the Fast Open cookie request option.
3447783237e8SYuchung Cheng  */
3448783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3449783237e8SYuchung Cheng {
3450783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3451783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3452065263f4SWei Wang 	int space, err = 0;
3453355a901eSEric Dumazet 	struct sk_buff *syn_data;
3454783237e8SYuchung Cheng 
345567da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3456065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3457783237e8SYuchung Cheng 		goto fallback;
3458783237e8SYuchung Cheng 
3459783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3460783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3461783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3462783237e8SYuchung Cheng 	 */
34633541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34643541f9e8SEric Dumazet 
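	/* Usable payload room in the SYN: the MSS implied by the cached PMTU,
	 * minus worst-case TCP option space.
	 */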
34651b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3466783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3467783237e8SYuchung Cheng 
3468f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3469f5ddcbbbSEric Dumazet 
3470f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3471f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3472f5ddcbbbSEric Dumazet 
3473eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3474355a901eSEric Dumazet 	if (!syn_data)
3475783237e8SYuchung Cheng 		goto fallback;
3476355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3477355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
347807e100f9SEric Dumazet 	if (space) {
347907e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
348057be5bdaSAl Viro 					    &fo->data->msg_iter);
348157be5bdaSAl Viro 		if (unlikely(!copied)) {
3482ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3483355a901eSEric Dumazet 			kfree_skb(syn_data);
3484783237e8SYuchung Cheng 			goto fallback;
3485783237e8SYuchung Cheng 		}
348657be5bdaSAl Viro 		if (copied != space) {
348757be5bdaSAl Viro 			skb_trim(syn_data, copied);
348857be5bdaSAl Viro 			space = copied;
348957be5bdaSAl Viro 		}
3490f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
349107e100f9SEric Dumazet 	}
3492355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3493355a901eSEric Dumazet 	if (space == fo->size)
3494355a901eSEric Dumazet 		fo->data = NULL;
3495355a901eSEric Dumazet 	fo->copied = space;
3496783237e8SYuchung Cheng 
3497355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34980f87230dSFrancis Yan 	if (syn_data->len)
34990f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3500355a901eSEric Dumazet 
3501355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3502355a901eSEric Dumazet 
3503d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3504355a901eSEric Dumazet 
3505355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3506355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3507355a901eSEric Dumazet 	 * that we keep in the write queue in case of a retransmit, as we
3508355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3509431a9124SEric Dumazet 	 */
3510355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3511355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3512355a901eSEric Dumazet 	if (!err) {
351367da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
351475c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3515f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3516783237e8SYuchung Cheng 		goto done;
3517783237e8SYuchung Cheng 	}
3518783237e8SYuchung Cheng 
351975c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
352075c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3521b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3522b5b7db8dSEric Dumazet 
3523783237e8SYuchung Cheng fallback:
3524783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3525783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3526783237e8SYuchung Cheng 		fo->cookie.len = 0;
3527783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3528783237e8SYuchung Cheng 	if (err)
3529783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3530783237e8SYuchung Cheng done:
3531783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3532783237e8SYuchung Cheng 	return err;
3533783237e8SYuchung Cheng }
3534783237e8SYuchung Cheng 
353567edfef7SAndi Kleen /* Build a SYN and send it off. */
35361da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
35371da177e4SLinus Torvalds {
35381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35391da177e4SLinus Torvalds 	struct sk_buff *buff;
3540ee586811SEric Paris 	int err;
35411da177e4SLinus Torvalds 
3542de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
35438ba60924SEric Dumazet 
35448ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
35458ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
35468ba60924SEric Dumazet 
35471da177e4SLinus Torvalds 	tcp_connect_init(sk);
35481da177e4SLinus Torvalds 
35492b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
35502b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
35512b916477SAndrey Vagin 		return 0;
35522b916477SAndrey Vagin 	}
35532b916477SAndrey Vagin 
3554eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3555355a901eSEric Dumazet 	if (unlikely(!buff))
35561da177e4SLinus Torvalds 		return -ENOBUFS;
35571da177e4SLinus Torvalds 
3558a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35599a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35609a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3561783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3562735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
356375c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35641da177e4SLinus Torvalds 
3565783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3566783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3567783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3568ee586811SEric Paris 	if (err == -ECONNREFUSED)
3569ee586811SEric Paris 		return err;
3570bd37a088SWei Yongjun 
3571bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3572bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3573bd37a088SWei Yongjun 	 */
3574bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3575bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3576b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3577b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3578b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3579b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3580b5b7db8dSEric Dumazet 	}
358181cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
35821da177e4SLinus Torvalds 
35831da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer arrives. */
35843f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35853f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
35861da177e4SLinus Torvalds 	return 0;
35871da177e4SLinus Torvalds }
35884bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
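
/* A minimal sketch of the calling context, assuming the usual AF_INET path:
 * connect(2) -> __inet_stream_connect() -> tcp_v4_connect(), which resolves
 * the route and then calls tcp_connect() above to emit the SYN.
 */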
35891da177e4SLinus Torvalds 
35901da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
35911da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
35921da177e4SLinus Torvalds  * for details.
35931da177e4SLinus Torvalds  */
35941da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
35951da177e4SLinus Torvalds {
3596463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3597463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35981da177e4SLinus Torvalds 	unsigned long timeout;
35991da177e4SLinus Torvalds 
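	/* ato is the delayed-ack timeout estimate; cap it below using the
	 * measured rtt and a mode-dependent ceiling (TCP_DELACK_MAX in
	 * pingpong mode, HZ/2 otherwise).
	 */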
36001da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3601463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
36021da177e4SLinus Torvalds 		int max_ato = HZ / 2;
36031da177e4SLinus Torvalds 
360431954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
3605056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
36061da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
36071da177e4SLinus Torvalds 
36081da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
36091da177e4SLinus Torvalds 
36101da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3611463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
36121da177e4SLinus Torvalds 		 * directly.
36131da177e4SLinus Torvalds 		 */
3614740b0f18SEric Dumazet 		if (tp->srtt_us) {
3615740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3616740b0f18SEric Dumazet 					TCP_DELACK_MIN);
36171da177e4SLinus Torvalds 
36181da177e4SLinus Torvalds 			if (rtt < max_ato)
36191da177e4SLinus Torvalds 				max_ato = rtt;
36201da177e4SLinus Torvalds 		}
36211da177e4SLinus Torvalds 
36221da177e4SLinus Torvalds 		ato = min(ato, max_ato);
36231da177e4SLinus Torvalds 	}
36241da177e4SLinus Torvalds 
36251da177e4SLinus Torvalds 	/* Stay within the limit we were given */
36261da177e4SLinus Torvalds 	timeout = jiffies + ato;
36271da177e4SLinus Torvalds 
36281da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3629463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
36301da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
36311da177e4SLinus Torvalds 		 * send ACK now.
36321da177e4SLinus Torvalds 		 */
3633463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3634463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
36351da177e4SLinus Torvalds 			tcp_send_ack(sk);
36361da177e4SLinus Torvalds 			return;
36371da177e4SLinus Torvalds 		}
36381da177e4SLinus Torvalds 
3639463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3640463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
36411da177e4SLinus Torvalds 	}
3642463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3643463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3644463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
36451da177e4SLinus Torvalds }
36461da177e4SLinus Torvalds 
36471da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
36482987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
36491da177e4SLinus Torvalds {
36501da177e4SLinus Torvalds 	struct sk_buff *buff;
36511da177e4SLinus Torvalds 
3652058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3653058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3654058dc334SIlpo Järvinen 		return;
3655058dc334SIlpo Järvinen 
36561da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36571da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36581da177e4SLinus Torvalds 	 * sock.
36591da177e4SLinus Torvalds 	 */
36607450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36617450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36627450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3663463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3664463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36653f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36663f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36671da177e4SLinus Torvalds 		return;
36681da177e4SLinus Torvalds 	}
36691da177e4SLinus Torvalds 
36701da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36711da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3672a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36731da177e4SLinus Torvalds 
367498781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
367598781965SEric Dumazet 	 * too much.
367698781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
367798781965SEric Dumazet 	 */
367898781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
367998781965SEric Dumazet 
36801da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
36812987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
36821da177e4SLinus Torvalds }
368327cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
36842987babbSYuchung Cheng 
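/* Common case: acknowledge everything received so far.  Callers that need
 * to ack at an explicit point (e.g. DCTCP around CE state changes) use
 * __tcp_send_ack() with their own rcv_nxt instead.
 */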
36852987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
36862987babbSYuchung Cheng {
36872987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
36881da177e4SLinus Torvalds }
36891da177e4SLinus Torvalds 
36901da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
36911da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
36921da177e4SLinus Torvalds  *
36931da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
36941da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
36951da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
36961da177e4SLinus Torvalds  *
36971da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode,
36981da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another
36991da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
37001da177e4SLinus Torvalds  */
3701e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
37021da177e4SLinus Torvalds {
37031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37041da177e4SLinus Torvalds 	struct sk_buff *skb;
37051da177e4SLinus Torvalds 
37061da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
37077450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
37087450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
370951456b29SIan Morris 	if (!skb)
37101da177e4SLinus Torvalds 		return -1;
37111da177e4SLinus Torvalds 
37121da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
37131da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
37141da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
37151da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
37161da177e4SLinus Torvalds 	 * send it.
37171da177e4SLinus Torvalds 	 */
3718a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3719e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
37207450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
37211da177e4SLinus Torvalds }
37221da177e4SLinus Torvalds 
3723385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3724ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3725ee995283SPavel Emelyanov {
3726ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3727ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
37289a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3729e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3730ee995283SPavel Emelyanov 	}
3731ee995283SPavel Emelyanov }
3732ee995283SPavel Emelyanov 
373367edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3734e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
37351da177e4SLinus Torvalds {
37361da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37371da177e4SLinus Torvalds 	struct sk_buff *skb;
37381da177e4SLinus Torvalds 
3739058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3740058dc334SIlpo Järvinen 		return -1;
3741058dc334SIlpo Järvinen 
374200db4124SIan Morris 	skb = tcp_send_head(sk);
374300db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
37441da177e4SLinus Torvalds 		int err;
37450c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
374690840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
37471da177e4SLinus Torvalds 
37481da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
37491da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
37501da177e4SLinus Torvalds 
37511da177e4SLinus Torvalds 		/* We are probing the opening of a window
37521da177e4SLinus Torvalds 		 * but the window size is != 0;
37531da177e4SLinus Torvalds 		 * this must have been a result of (sender-side) SWS avoidance.
37541da177e4SLinus Torvalds 		 */
37551da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
37561da177e4SLinus Torvalds 		    skb->len > mss) {
37571da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37584de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
375975c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
376075c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37611da177e4SLinus Torvalds 				return -1;
37621da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37635bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37641da177e4SLinus Torvalds 
37654de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3766dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
376766f5fe62SIlpo Järvinen 		if (!err)
376866f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37691da177e4SLinus Torvalds 		return err;
37701da177e4SLinus Torvalds 	} else {
377133f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3772e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3773e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37741da177e4SLinus Torvalds 	}
37751da177e4SLinus Torvalds }
37761da177e4SLinus Torvalds 
37771da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
37781da177e4SLinus Torvalds  * a partial packet, else a zero-window probe.
37791da177e4SLinus Torvalds  */
37801da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
37811da177e4SLinus Torvalds {
3782463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
37831da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3784c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3785c1d5674fSYuchung Cheng 	unsigned long timeout;
37861da177e4SLinus Torvalds 	int err;
37871da177e4SLinus Torvalds 
3788e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
37891da177e4SLinus Torvalds 
379075c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
37911da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
37926687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3793463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
37941da177e4SLinus Torvalds 		return;
37951da177e4SLinus Torvalds 	}
37961da177e4SLinus Torvalds 
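	/* err <= 0: the probe went out (or failed hard); keep backing off
	 * exponentially, bounded by tcp_retries2.  err > 0: local congestion,
	 * so retry after a short fixed interval without raising the backoff.
	 */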
3797c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
37981da177e4SLinus Torvalds 	if (err <= 0) {
3799c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3800463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
3801c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
38021da177e4SLinus Torvalds 	} else {
38031da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
3804c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
38051da177e4SLinus Torvalds 		 */
3806c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
38071da177e4SLinus Torvalds 	}
3808c1d5674fSYuchung Cheng 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
38091da177e4SLinus Torvalds }
38105db92c99SOctavian Purdila 
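/* Retransmit a SYN-ACK for a pending request socket, e.g. when the request
 * socket timer fires because the initial SYN-ACK was not answered.
 */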
3811ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
38125db92c99SOctavian Purdila {
38135db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
38145db92c99SOctavian Purdila 	struct flowi fl;
38155db92c99SOctavian Purdila 	int res;
38165db92c99SOctavian Purdila 
381758d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3818b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
38195db92c99SOctavian Purdila 	if (!res) {
382090bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
382102a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
38227e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
38237e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3824cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
38255db92c99SOctavian Purdila 	}
38265db92c99SOctavian Purdila 	return res;
38275db92c99SOctavian Purdila }
38285db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3829