xref: /linux/net/ipv4/tcp_output.c (revision d8ed257f313f64e9835e61d1365dea95a0a1c9c6)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	if (val > tp->tcp_clock_cache)
		tp->tcp_clock_cache = val;

	val = div_u64(val, NSEC_PER_USEC);
	if (val > tp->tcp_mstamp)
		tp->tcp_mstamp = val;
}
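
/* A minimal, user-space style sketch of the clamping pattern above, using
 * hypothetical names (the struct and helper are illustrative only, not
 * kernel API): a raw clock read may only move the cached stamps forward.
 *
 *	struct clock_cache {
 *		u64 ns;				// monotonic nanosecond view
 *		u64 us;				// monotonic microsecond view
 *	};
 *
 *	static void clock_cache_refresh(struct clock_cache *c, u64 raw_ns)
 *	{
 *		if (raw_ns > c->ns)		// never step backwards
 *			c->ns = raw_ns;
 *		if (raw_ns / 1000 > c->us)	// usec view, clamped the same way
 *			c->us = raw_ns / 1000;
 *	}
 */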

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* Returns SND.NXT, if the window was not shrunk or if the amount shrunk was
 * less than one window scaling factor (a tolerance due to loss of precision).
 * If the window has been shrunk, what should we send? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
 * invalid. OK, let's settle on this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
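
/* A worked example of the tolerance above (values are hypothetical):
 * with rcv_wscale = 7 the receiver quantizes its advertised window in
 * units of 1 << 7 = 128 bytes, so an apparent shrink of up to 127 bytes
 * can be pure rounding noise rather than a real window reduction.
 *
 *	u32 wnd_end = 1000;	// tcp_wnd_end(tp)
 *	u32 snd_nxt = 1100;	// 100 bytes "past" the window end
 *	u8  wscale  = 7;	// 128-byte granularity
 *
 *	// (snd_nxt - wnd_end) = 100 < 128, so snd_nxt is still acceptable
 */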

/* Calculate the MSS to advertise in the SYN segment.
 * RFC1122, RFC1063 and draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    a large MSS.
 * 4. We do not implement 3; instead we advertise an MSS calculated from
 *    the first hop device mtu, but allow it to be raised to
 *    ip_rt_min_advmss. This may be overridden via information stored in
 *    the routing table.
 * 5. The value 65535 for MSS is valid in IPv6 and means "as large as
 *    possible, probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
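
/* A worked example of the decay loop above (numbers are hypothetical):
 * with cwnd = 40, restart_cwnd = 10, icsk_rto = 200ms and an idle
 * delta of 500ms, the window is halved once per elapsed RTO:
 *
 *	delta -= 200  ->  delta = 300 > 0, cwnd 40 > 10  ->  cwnd = 20
 *	delta -= 200  ->  delta = 100 > 0, cwnd 20 > 10  ->  cwnd = 10
 *	delta -= 200  ->  delta < 0, loop stops
 *	tp->snd_cwnd = max(10, 10) = 10
 *
 * i.e. roughly one halving per RTO of idle time, never decaying below
 * the restart window.
 */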

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If it is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize the space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
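
/* A worked example of the wscale computation above (hypothetical sysctl
 * values): suppose the effective space after the max/min clamping is
 * 4 MB. Then
 *
 *	ilog2(4 * 1024 * 1024) = 22
 *	*rcv_wscale = clamp(22 - 15, 0, TCP_MAX_WSCALE) = 7
 *
 * i.e. the 16-bit window field shifted left by 7 can represent up to
 * 65535 << 7 (about 8 MB), enough to cover the 4 MB receive space.
 */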

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
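
/* A small illustration of the ALIGN() step above: when the newly computed
 * window would shrink the offer, we instead re-advertise the current
 * window rounded up to the receive-scale granularity (hypothetical
 * numbers):
 *
 *	cur_win = 1000, rcv_wscale = 7		// 128-byte units
 *	ALIGN(1000, 128) = 1024			// next multiple of 128
 *	new_win >>= 7  ->  1024 >> 7 = 8	// value put on the wire
 *
 * so the on-wire field times 2^rcv_wscale never moves backwards.
 */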

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
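
/* An illustration of the end_seq increment above: SYN and FIN each occupy
 * one unit of sequence space even though they carry no data, so a pure
 * SYN at seq 100 spans [100, 101) while a pure ACK at seq 100 spans
 * [100, 100):
 *
 *	tcp_init_nondata_skb(skb, 100, TCPHDR_SYN);
 *	// TCP_SKB_CB(skb)->seq == 100, ->end_seq == 101
 *
 *	tcp_init_nondata_skb(skb, 100, TCPHDR_ACK);
 *	// TCP_SKB_CB(skb)->seq == 100, ->end_seq == 100
 */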

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * any particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);
}
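
/* An illustration of the 32-bit option word packing above: the bare
 * timestamp option (no SACK_PERM piggyback) is emitted as
 *
 *	NOP | NOP | TIMESTAMP | TCPOLEN_TIMESTAMP
 *	 01 |  01 |    08     |       0a		(hex, network order)
 *
 * i.e. htonl((1 << 24) | (1 << 16) | (8 << 8) | 10) == htonl(0x0101080a),
 * followed by two more words carrying tsval and tsecr. The leading NOPs
 * keep the option 32-bit aligned.
 */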

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_key_false(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option.  If timestamps are used, the option
	 * bytes that will then appear in normal data packets must be
	 * accounted for in the advertised MSS.  But we subtract them from
	 * tp->mss_cache so that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we don't do this
	 * correctly, as a receiver we won't recognize data packets as being
	 * full sized when we should, and thus we won't abide by the delayed
	 * ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
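
/* A worked example of the space accounting above for a typical SYN with
 * timestamps, window scaling and SACK enabled, and no MD5 (assuming the
 * usual values MAX_TCP_OPTION_SPACE = 40, TCPOLEN_MSS_ALIGNED = 4,
 * TCPOLEN_TSTAMP_ALIGNED = 12, TCPOLEN_WSCALE_ALIGNED = 4):
 *
 *	remaining = 40
 *	MSS        -> remaining -= 4   (36 left)
 *	TS         -> remaining -= 12  (24 left)
 *	WSCALE     -> remaining -= 4   (20 left)
 *	SACK_PERM  -> folded into the TS words, no extra cost
 *
 * so the function returns 40 - 20 = 20 option bytes, leaving 20 bytes
 * of budget for e.g. a Fast Open cookie.
 */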

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_key_false(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
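
/* A worked example of the SACK budget above: with timestamps on,
 * size = 12 after the TS block, so remaining = 40 - 12 = 28 bytes.
 * Assuming the usual TCPOLEN_SACK_BASE_ALIGNED = 4 and
 * TCPOLEN_SACK_PERBLOCK = 8:
 *
 *	(28 - 4) / 8 = 3
 *
 * at most three SACK blocks fit alongside timestamps, so four pending
 * blocks (eff_sacks = 4) would be trimmed to three.
 */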

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc + dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is skb->truesize subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}
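
/* A minimal sketch of the drain pattern used above, with hypothetical
 * names: producers (skb destructors) append under local_irq_save(), and
 * the tasklet detaches the whole list in one splice so the walk
 * afterwards cannot race with new insertions.
 *
 *	static void drain(struct tsq_tasklet *tsq)
 *	{
 *		LIST_HEAD(batch);
 *		unsigned long flags;
 *
 *		local_irq_save(flags);		// producers run in irq context
 *		list_splice_init(&tsq->head, &batch);
 *		local_irq_restore(flags);
 *
 *		// walk 'batch' with irqs enabled; it is now private to us
 *	}
 */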

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol-dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part:
	 * We are called from release_sock() with:
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
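
/* A minimal sketch of the lock-free flag handoff above, with hypothetical
 * names: given some 'unsigned long word' of flag bits, the reader
 * atomically swaps out the deferred bits it is going to service,
 * retrying if a concurrent writer changed the word in between.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = READ_ONCE(word);
 *		if (!(old & INTERESTING_BITS))
 *			return;			// fast path, no atomics
 *		new = old & ~INTERESTING_BITS;
 *	} while (cmpxchg(&word, old, new) != old);
 *
 *	// 'old' now holds the bits we own exclusively and must service
 */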

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives:
	 * - fewer callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - a chance for an incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will eventually be set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under soft irq.
 * We can call the TCP stack right away, unless the socket is owned by the user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace the first 10 MSS.
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
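
/* A worked example of the pacing arithmetic above (hypothetical values):
 * a 1500-byte skb at a pacing rate of 12,000,000 bytes/sec costs
 *
 *	len_ns = 1500 * NSEC_PER_SEC / 12000000 = 125000 ns  (125 us)
 *
 * If 50 us have already elapsed since the previous send
 * (credit = 50000 ns), that credit is forgiven up to half the cost:
 *
 *	len_ns -= min(125000 / 2, 50000)  ->  125000 - 50000 = 75000 ns
 *
 * and tcp_wstamp_ns advances by 75 us instead of the full 125 us.
 */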
1002e2080072SEric Dumazet 
10031da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
10041da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
10051da177e4SLinus Torvalds  * transmission and possible later retransmissions.
10061da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
10071da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
10081da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
10091da177e4SLinus Torvalds  * device.
10101da177e4SLinus Torvalds  *
10111da177e4SLinus Torvalds  * We are working here with either a clone of the original
10121da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
10131da177e4SLinus Torvalds  */
10142987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
10152987babbSYuchung Cheng 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
10161da177e4SLinus Torvalds {
10176687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1018dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1019dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1020dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
102133ad798cSAdam Langley 	struct tcp_out_options opts;
102295c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
10238c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1024cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10251da177e4SLinus Torvalds 	struct tcphdr *th;
1026a7a25630SEric Dumazet 	u64 prior_wstamp;
10271da177e4SLinus Torvalds 	int err;
10281da177e4SLinus Torvalds 
1029dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
10306f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
1031dfb4b9dcSDavid S. Miller 
1032ccdbb6e9SEric Dumazet 	if (clone_it) {
10336f094b9eSLawrence Brakmo 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
10346f094b9eSLawrence Brakmo 			- tp->snd_una;
10358c72c65bSEric Dumazet 		oskb = skb;
1036e2080072SEric Dumazet 
1037e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1038e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1039e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1040dfb4b9dcSDavid S. Miller 			else
1041e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1042e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1043e2080072SEric Dumazet 
1044dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1045dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1046dfb4b9dcSDavid S. Miller 	}
10475f6188a8SEric Dumazet 
1048a7a25630SEric Dumazet 	prior_wstamp = tp->tcp_wstamp_ns;
10495f6188a8SEric Dumazet 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
10505f6188a8SEric Dumazet 
1051d3edd06eSEric Dumazet 	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
1052dfb4b9dcSDavid S. Miller 
1053dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1054dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
105533ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10561da177e4SLinus Torvalds 
10574de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
105833ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
105933ad798cSAdam Langley 	else
106033ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
106133ad798cSAdam Langley 							   &md5);
106233ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10631da177e4SLinus Torvalds 
1064547669d4SEric Dumazet 	/* If no packet is in the qdisc/device queue, then allow XPS to select
1065b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
106673a6bab5SEric Dumazet 	 * which holds one reference to sk.
1067b2532eb9SEric Dumazet 	 *
1068b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1069b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1070547669d4SEric Dumazet 	 */
1071b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10721da177e4SLinus Torvalds 
107338ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
107438ab52e8SEric Dumazet 	 * this might cause drops if the packet is looped back:
107538ab52e8SEric Dumazet 	 * the other socket might not have SOCK_MEMALLOC.
107638ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
107738ab52e8SEric Dumazet 	 */
107838ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
107938ab52e8SEric Dumazet 
1080aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1081aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
108246d3ceabSEric Dumazet 
108346d3ceabSEric Dumazet 	skb_orphan(skb);
108446d3ceabSEric Dumazet 	skb->sk = sk;
10851d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1086b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
108714afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
10881da177e4SLinus Torvalds 
1089c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1090c3a2e837SJulian Anastasov 
10911da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1092ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1093c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1094c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10951da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10962987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1097df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10984de075e0SEric Dumazet 					tcb->tcp_flags);
1099dfb4b9dcSDavid S. Miller 
11001da177e4SLinus Torvalds 	th->check		= 0;
11011da177e4SLinus Torvalds 	th->urg_ptr		= 0;
11021da177e4SLinus Torvalds 
110333f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below-snd_una window probe */
11047691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11057691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11061da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11071da177e4SLinus Torvalds 			th->urg = 1;
11087691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11090eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11107691367dSHerbert Xu 			th->urg = 1;
11117691367dSHerbert Xu 		}
11121da177e4SLinus Torvalds 	}
11131da177e4SLinus Torvalds 
1114bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
111551466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1116ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1117ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1118ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1119ea1627c2SEric Dumazet 	} else {
1120ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1121ea1627c2SEric Dumazet 		 * is never scaled.
1122ea1627c2SEric Dumazet 		 */
1123ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1124ea1627c2SEric Dumazet 	}
1125cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1126cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1127cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1128a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1129bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
113039f8e58eSEric Dumazet 					       md5, sk, skb);
1131cfb6eeb4SYOSHIFUJI Hideaki 	}
1132cfb6eeb4SYOSHIFUJI Hideaki #endif
1133cfb6eeb4SYOSHIFUJI Hideaki 
1134bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11351da177e4SLinus Torvalds 
11364de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
113727cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11381da177e4SLinus Torvalds 
1139a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1140cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1141a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1142ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1143a44d6eacSMartin KaFai Lau 	}
11441da177e4SLinus Torvalds 
1145bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1146aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1147aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11481da177e4SLinus Torvalds 
11492efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1150f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1151cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1152f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1153cd7d8498SEric Dumazet 
1154d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1155971f10ecSEric Dumazet 
1156971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1157971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1158971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1159971f10ecSEric Dumazet 
1160b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11617faee5c0SEric Dumazet 
11628c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11635ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11648c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11658c72c65bSEric Dumazet 	}
1166fc225799SEric Dumazet 	if (!err && oskb) {
1167a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1168fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1169fc225799SEric Dumazet 	}
11708c72c65bSEric Dumazet 	return err;
11711da177e4SLinus Torvalds }
11721da177e4SLinus Torvalds 
11732987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11742987babbSYuchung Cheng 			    gfp_t gfp_mask)
11752987babbSYuchung Cheng {
11762987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11772987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11782987babbSYuchung Cheng }
11792987babbSYuchung Cheng 
118067edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11811da177e4SLinus Torvalds  *
11821da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11831da177e4SLinus Torvalds  * otherwise socket can stall.
11841da177e4SLinus Torvalds  */
11851da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11861da177e4SLinus Torvalds {
11871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11901da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1191f4a775d1SEric Dumazet 	__skb_header_release(skb);
1192fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11933ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11943ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11951da177e4SLinus Torvalds }
11961da177e4SLinus Torvalds 
119767edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
11985bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1199f6302d1dSDavid S. Miller {
12004a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1201f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1202f6302d1dSDavid S. Miller 		 * non-TSO case.
1203f6302d1dSDavid S. Miller 		 */
1204cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1205f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1206f6302d1dSDavid S. Miller 	} else {
1207cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1208f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12091da177e4SLinus Torvalds 	}
12101da177e4SLinus Torvalds }
12111da177e4SLinus Torvalds 
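/* Illustrative sketch of the pcount math in tcp_set_skb_tso_segs();
 * this helper is hypothetical.  For len = 4000 and mss_now = 1448 it
 * returns 3 (and gso_size would be 1448); for len <= mss_now the
 * costly divide is skipped and the pcount is simply 1.
 */
static unsigned int example_tso_pcount(unsigned int len, unsigned int mss_now)
{
	if (len <= mss_now)
		return 1;
	return (len + mss_now - 1) / mss_now;	/* DIV_ROUND_UP(len, mss_now) */
}
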
1212797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed; we need to make
1213797108d1SIlpo Järvinen  * various tweaks to fix the counters.
1214797108d1SIlpo Järvinen  */
1215cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1216797108d1SIlpo Järvinen {
1217797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1218797108d1SIlpo Järvinen 
1219797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1220797108d1SIlpo Järvinen 
1221797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1222797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1223797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1224797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1225797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1226797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1227797108d1SIlpo Järvinen 
1228797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1229797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1230797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1231797108d1SIlpo Järvinen 
1232797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1233797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1234713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1235797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1236797108d1SIlpo Järvinen 
1237797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1238797108d1SIlpo Järvinen }
1239797108d1SIlpo Järvinen 
12400a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12410a2cf20cSSoheil Hassas Yeganeh {
12420a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12430a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12440a2cf20cSSoheil Hassas Yeganeh }
12450a2cf20cSSoheil Hassas Yeganeh 
1246490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1247490cc7d0SWillem de Bruijn {
1248490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1249490cc7d0SWillem de Bruijn 
12500a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1251490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1252490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1253490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1254490cc7d0SWillem de Bruijn 
1255490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1256490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1257490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1258b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1259b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1260490cc7d0SWillem de Bruijn 	}
1261490cc7d0SWillem de Bruijn }
1262490cc7d0SWillem de Bruijn 
1263a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1264a166140eSMartin KaFai Lau {
1265a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1266a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1267a166140eSMartin KaFai Lau }
1268a166140eSMartin KaFai Lau 
126975c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
127075c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
127175c119afSEric Dumazet 					 struct sk_buff *buff,
127275c119afSEric Dumazet 					 struct sock *sk,
127375c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
127475c119afSEric Dumazet {
127575c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
127675c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
127775c119afSEric Dumazet 	else
127875c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
127975c119afSEric Dumazet }
128075c119afSEric Dumazet 
12811da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12821da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12831da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12841da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12851da177e4SLinus Torvalds  */
128675c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
128775c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12886cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12891da177e4SLinus Torvalds {
12901da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12911da177e4SLinus Torvalds 	struct sk_buff *buff;
12926475be16SDavid S. Miller 	int nsize, old_factor;
1293b60b49eaSHerbert Xu 	int nlen;
12949ce01461SIlpo Järvinen 	u8 flags;
12951da177e4SLinus Torvalds 
12962fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12972fceec13SIlpo Järvinen 		return -EINVAL;
12986a438bbeSStephen Hemminger 
12991da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
13001da177e4SLinus Torvalds 	if (nsize < 0)
13011da177e4SLinus Torvalds 		nsize = 0;
13021da177e4SLinus Torvalds 
13036cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13041da177e4SLinus Torvalds 		return -ENOMEM;
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1307eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
130851456b29SIan Morris 	if (!buff)
13091da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1310ef5cb973SHerbert Xu 
13113ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
13123ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1313b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1314b60b49eaSHerbert Xu 	buff->truesize += nlen;
1315b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13181da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13191da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13201da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13234de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13244de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13254de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1326e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1327a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13301da177e4SLinus Torvalds 
133198be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13321da177e4SLinus Torvalds 
1333a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1334490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13351da177e4SLinus Torvalds 
13366475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13376475be16SDavid S. Miller 
13381da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13395bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13405bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13411da177e4SLinus Torvalds 
1342b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1343b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1344b9f64820SYuchung Cheng 
13456475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13466475be16SDavid S. Miller 	 * adjust the various packet counters.
13476475be16SDavid S. Miller 	 */
1348cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13496475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13506475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13511da177e4SLinus Torvalds 
1352797108d1SIlpo Järvinen 		if (diff)
1353797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13541da177e4SLinus Torvalds 	}
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1357f4a775d1SEric Dumazet 	__skb_header_release(buff);
135875c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1359f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1360e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13611da177e4SLinus Torvalds 
13621da177e4SLinus Torvalds 	return 0;
13631da177e4SLinus Torvalds }
13641da177e4SLinus Torvalds 
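/* Worked example of the sequence bookkeeping in tcp_fragment() above,
 * with hypothetical numbers: splitting an skb covering [1000, 4000)
 * at len = 1448 leaves skb = [1000, 2448) and buff = [2448, 4000);
 * PSH and FIN, if set, migrate to buff, the second packet.
 */
static void example_split_seq(unsigned int seq, unsigned int end_seq,
			      unsigned int len,
			      unsigned int *buff_seq, unsigned int *buff_end)
{
	*buff_seq = seq + len;	/* buff starts where the trimmed skb ends */
	*buff_end = end_seq;	/* and inherits the original end_seq */
}
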
1365f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1366f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13671da177e4SLinus Torvalds  */
13687162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13691da177e4SLinus Torvalds {
13707b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13711da177e4SLinus Torvalds 	int i, k, eat;
13721da177e4SLinus Torvalds 
13734fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13744fa48bf3SEric Dumazet 	if (eat) {
13754fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13764fa48bf3SEric Dumazet 		len -= eat;
13774fa48bf3SEric Dumazet 		if (!len)
13787162fb24SEric Dumazet 			return 0;
13794fa48bf3SEric Dumazet 	}
13801da177e4SLinus Torvalds 	eat = len;
13811da177e4SLinus Torvalds 	k = 0;
13827b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13837b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13847b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
13859e903e08SEric Dumazet 
13869e903e08SEric Dumazet 		if (size <= eat) {
1387aff65da0SIan Campbell 			skb_frag_unref(skb, i);
13889e903e08SEric Dumazet 			eat -= size;
13891da177e4SLinus Torvalds 		} else {
13907b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
13911da177e4SLinus Torvalds 			if (eat) {
13927b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
13937b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
13941da177e4SLinus Torvalds 				eat = 0;
13951da177e4SLinus Torvalds 			}
13961da177e4SLinus Torvalds 			k++;
13971da177e4SLinus Torvalds 		}
13981da177e4SLinus Torvalds 	}
13997b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 	skb->data_len -= len;
14021da177e4SLinus Torvalds 	skb->len = skb->data_len;
14037162fb24SEric Dumazet 	return len;
14041da177e4SLinus Torvalds }
14051da177e4SLinus Torvalds 
140667edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14071da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14081da177e4SLinus Torvalds {
14097162fb24SEric Dumazet 	u32 delta_truesize;
14107162fb24SEric Dumazet 
141114bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14121da177e4SLinus Torvalds 		return -ENOMEM;
14131da177e4SLinus Torvalds 
14147162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
141784fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14181da177e4SLinus Torvalds 
14197162fb24SEric Dumazet 	if (delta_truesize) {
14207162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14217162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14227162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14231da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14247162fb24SEric Dumazet 	}
14251da177e4SLinus Torvalds 
14265b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14271da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14285bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds 	return 0;
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds 
14331b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
14341b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14355d424d5aSJohn Heffner {
1436cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1437cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14385d424d5aSJohn Heffner 	int mss_now;
14395d424d5aSJohn Heffner 
14405d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14415d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
14425d424d5aSJohn Heffner 	 */
14435d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14445d424d5aSJohn Heffner 
144567469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
144667469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
144767469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
144867469601SEric Dumazet 
144967469601SEric Dumazet 		if (dst && dst_allfrag(dst))
145067469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
145167469601SEric Dumazet 	}
145267469601SEric Dumazet 
14535d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14545d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14555d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14565d424d5aSJohn Heffner 
14575d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14585d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14595d424d5aSJohn Heffner 
14605d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14615d424d5aSJohn Heffner 	if (mss_now < 48)
14625d424d5aSJohn Heffner 		mss_now = 48;
14635d424d5aSJohn Heffner 	return mss_now;
14645d424d5aSJohn Heffner }
14655d424d5aSJohn Heffner 
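/* Hypothetical IPv4 walk-through of __tcp_mtu_to_mss() above: for
 * pmtu = 1500, net_header_len = 20 and sizeof(struct tcphdr) = 20,
 * the base mss is 1460; it is then clamped to mss_clamp, reduced by
 * icsk_ext_hdr_len and floored at 48.  This helper is a sketch, not
 * kernel code.
 */
static int example_mtu_to_mss_v4(int pmtu, int ext_hdr_len, int mss_clamp)
{
	int mss = pmtu - 20 /* IPv4 header */ - 20 /* bare TCP header */;

	if (mss > mss_clamp)
		mss = mss_clamp;
	mss -= ext_hdr_len;
	return mss < 48 ? 48 : mss;
}
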
14661b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14671b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14681b63edd6SYuchung Cheng {
14691b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14701b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14711b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14721b63edd6SYuchung Cheng }
14731b63edd6SYuchung Cheng 
14745d424d5aSJohn Heffner /* Inverse of above */
147567469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14765d424d5aSJohn Heffner {
1477cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1478cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14795d424d5aSJohn Heffner 	int mtu;
14805d424d5aSJohn Heffner 
14815d424d5aSJohn Heffner 	mtu = mss +
14825d424d5aSJohn Heffner 	      tp->tcp_header_len +
14835d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14845d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14855d424d5aSJohn Heffner 
148667469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
148767469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
148867469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
148967469601SEric Dumazet 
149067469601SEric Dumazet 		if (dst && dst_allfrag(dst))
149167469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
149267469601SEric Dumazet 	}
14935d424d5aSJohn Heffner 	return mtu;
14945d424d5aSJohn Heffner }
1495556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
14965d424d5aSJohn Heffner 
149767edfef7SAndi Kleen /* MTU probing init per socket */
14985d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14995d424d5aSJohn Heffner {
15005d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15015d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1502b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15035d424d5aSJohn Heffner 
1504b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15055d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15065d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1507b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15085d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
150905cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1510c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15115d424d5aSJohn Heffner }
15124bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15135d424d5aSJohn Heffner 
15141da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
15151da177e4SLinus Torvalds 
15161da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15171da177e4SLinus Torvalds    account for TCP options, but includes only the bare TCP header.
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1520caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
15211da177e4SLinus Torvalds    It also does not include TCP options.
15221da177e4SLinus Torvalds 
1523d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15261da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15271da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15281da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15311da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15321da177e4SLinus Torvalds 
1533d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1534d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15351da177e4SLinus Torvalds  */
15361da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15371da177e4SLinus Torvalds {
15381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1539d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15405d424d5aSJohn Heffner 	int mss_now;
15411da177e4SLinus Torvalds 
15425d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15435d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15441da177e4SLinus Torvalds 
15455d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1546409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 	/* And store cached results */
1549d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15505d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15515d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1552c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	return mss_now;
15551da177e4SLinus Torvalds }
15564bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15591da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15601da177e4SLinus Torvalds  */
15610c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15621da177e4SLinus Torvalds {
1563cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1564cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1565c1b4a7e6SDavid S. Miller 	u32 mss_now;
156695c96174SEric Dumazet 	unsigned int header_len;
156733ad798cSAdam Langley 	struct tcp_out_options opts;
156833ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15691da177e4SLinus Torvalds 
1570c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1571c1b4a7e6SDavid S. Miller 
15721da177e4SLinus Torvalds 	if (dst) {
15731da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1574d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15751da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15761da177e4SLinus Torvalds 	}
15771da177e4SLinus Torvalds 
157833ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
157933ad798cSAdam Langley 		     sizeof(struct tcphdr);
158033ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
158133ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
158233ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
158333ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
158433ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
158533ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
158633ad798cSAdam Langley 		mss_now -= delta;
158733ad798cSAdam Langley 	}
1588cfb6eeb4SYOSHIFUJI Hideaki 
15891da177e4SLinus Torvalds 	return mss_now;
15901da177e4SLinus Torvalds }
15911da177e4SLinus Torvalds 
159286fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
159386fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one rto.
159486fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
159586fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
159686fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1597a762a980SDavid S. Miller {
15989e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1599a762a980SDavid S. Miller 
160086fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
160186fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
160286fd14adSWeiping Pan 		/* Limited by application or receiver window. */
160386fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
160486fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
160586fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
160686fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
160786fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
160886fd14adSWeiping Pan 		}
160986fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
161086fd14adSWeiping Pan 	}
1611c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
161286fd14adSWeiping Pan }
161386fd14adSWeiping Pan 
1614ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1615a762a980SDavid S. Miller {
16161b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1617a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1618a762a980SDavid S. Miller 
1619ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1620ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1621ca8a2263SNeal Cardwell 	 */
1622ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1623ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1624ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1625ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1626ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1627ca8a2263SNeal Cardwell 	}
1628e114a710SEric Dumazet 
162924901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1630a762a980SDavid S. Miller 		/* Network is fed fully. */
1631a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1632c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1633a762a980SDavid S. Miller 	} else {
1634a762a980SDavid S. Miller 		/* Network starves. */
1635a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1636a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1637a762a980SDavid S. Miller 
1638b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1639c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16401b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1641a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1642b0f71bd3SFrancis Yan 
1643b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1644b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1645b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1646b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
164775c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1648b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1649b0f71bd3SFrancis Yan 		 */
165075c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1651b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1652b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1653b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1654a762a980SDavid S. Miller 	}
1655a762a980SDavid S. Miller }
1656a762a980SDavid S. Miller 
1657d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1658d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1659d4589926SEric Dumazet {
1660d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1661d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1662d4589926SEric Dumazet }
1663d4589926SEric Dumazet 
1664d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1665d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1666d4589926SEric Dumazet  * The test is really :
1667d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1668d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1669d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1670d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16710e3a4803SIlpo Järvinen  */
1672d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1673d4589926SEric Dumazet 				const struct sk_buff *skb)
1674d4589926SEric Dumazet {
1675d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1676d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1677d4589926SEric Dumazet }
1678d4589926SEric Dumazet 
1679d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1680d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1681d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1682d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1683d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1684d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1685d4589926SEric Dumazet  */
1686d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1687cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1688d4589926SEric Dumazet {
1689d4589926SEric Dumazet 	return partial &&
1690d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1691d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1692d4589926SEric Dumazet }
1693605ad7f1SEric Dumazet 
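/* Sketch of the Nagle/Minshall decision above, with hypothetical
 * boolean inputs: a sub-mss tail (partial) is held back when TCP_CORK
 * is set, or when Nagle is enabled and a small packet is still in
 * flight; a full-sized segment is never held back by this test.
 */
static bool example_nagle_defers(bool partial, bool corked, bool nagle_on,
				 bool small_pkt_in_flight)
{
	return partial && (corked || (nagle_on && small_pkt_in_flight));
}
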
1694605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1695605ad7f1SEric Dumazet  * in order to send one TSO packet per ms.
1696605ad7f1SEric Dumazet  */
1697dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
16981b3878caSNeal Cardwell 			    int min_tso_segs)
1699605ad7f1SEric Dumazet {
1700605ad7f1SEric Dumazet 	u32 bytes, segs;
1701605ad7f1SEric Dumazet 
170276a9ebe8SEric Dumazet 	bytes = min_t(unsigned long,
170376a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift,
1704605ad7f1SEric Dumazet 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1705605ad7f1SEric Dumazet 
1706605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1707605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1708605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1709605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1710605ad7f1SEric Dumazet 	 */
17111b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1712605ad7f1SEric Dumazet 
1713350c9f48SEric Dumazet 	return segs;
1714605ad7f1SEric Dumazet }
1715605ad7f1SEric Dumazet 
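/* Hypothetical numbers for tcp_tso_autosize() above: with
 * sk_pacing_rate = 125000000 bytes/sec and sk_pacing_shift = 10,
 * bytes = 125000000 >> 10 = 122070, so mss_now = 1448 gives roughly
 * 84 segments per TSO skb, i.e. about one ~122KB packet per ms.  The
 * sk_gso_max_size cap from the real code is omitted in this sketch.
 */
static unsigned int example_autosize_segs(unsigned long pacing_rate,
					  int pacing_shift,
					  unsigned int mss_now,
					  unsigned int min_tso_segs)
{
	unsigned long bytes = pacing_rate >> pacing_shift;
	unsigned int segs = bytes / mss_now;

	return segs > min_tso_segs ? segs : min_tso_segs;
}
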
1716ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1717ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1718ed6e7268SNeal Cardwell  */
1719ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1720ed6e7268SNeal Cardwell {
1721ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1722dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1723ed6e7268SNeal Cardwell 
1724dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1725dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1726dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1727dcb8c9b4SEric Dumazet 
1728dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1729350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1730ed6e7268SNeal Cardwell }
1731ed6e7268SNeal Cardwell 
1732d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1733d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1734d4589926SEric Dumazet 					const struct sk_buff *skb,
1735d4589926SEric Dumazet 					unsigned int mss_now,
1736d4589926SEric Dumazet 					unsigned int max_segs,
1737d4589926SEric Dumazet 					int nonagle)
1738c1b4a7e6SDavid S. Miller {
1739cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1740d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1741c1b4a7e6SDavid S. Miller 
174290840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17431485348dSBen Hutchings 	max_len = mss_now * max_segs;
17440e3a4803SIlpo Järvinen 
17451485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17461485348dSBen Hutchings 		return max_len;
17470e3a4803SIlpo Järvinen 
17485ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17495ea3a748SIlpo Järvinen 
17501485348dSBen Hutchings 	if (max_len <= needed)
17511485348dSBen Hutchings 		return max_len;
17520e3a4803SIlpo Järvinen 
1753d4589926SEric Dumazet 	partial = needed % mss_now;
1754d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1755d4589926SEric Dumazet 	 * to include this last segment in this skb.
1756d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary.
1757d4589926SEric Dumazet 	 */
1758cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1759d4589926SEric Dumazet 		return needed - partial;
1760d4589926SEric Dumazet 
1761d4589926SEric Dumazet 	return needed;
1762c1b4a7e6SDavid S. Miller }
1763c1b4a7e6SDavid S. Miller 
1764c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1765c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1766c1b4a7e6SDavid S. Miller  */
1767cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1768cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1769c1b4a7e6SDavid S. Miller {
1770d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1771c1b4a7e6SDavid S. Miller 
1772c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17734de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17744de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1775c1b4a7e6SDavid S. Miller 		return 1;
1776c1b4a7e6SDavid S. Miller 
1777c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1778c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1779d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1780c1b4a7e6SDavid S. Miller 		return 0;
1781d649a7a8SEric Dumazet 
1782d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1783d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1784d649a7a8SEric Dumazet 	 */
1785d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1786d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1787c1b4a7e6SDavid S. Miller }
1788c1b4a7e6SDavid S. Miller 
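/* Sketch of the quota returned by tcp_cwnd_test() above: with
 * cwnd = 10 and in_flight = 7, halfcwnd = 5 and min(5, 10 - 7) = 3
 * segments may be sent; once in_flight >= cwnd the quota is 0 (only a
 * one-pcount FIN is exempt).  Names here are illustrative.
 */
static unsigned int example_cwnd_quota(unsigned int cwnd, unsigned int in_flight)
{
	unsigned int halfcwnd, room;

	if (in_flight >= cwnd)
		return 0;
	halfcwnd = cwnd >> 1 ? cwnd >> 1 : 1;	/* max(cwnd >> 1, 1U) */
	room = cwnd - in_flight;
	return halfcwnd < room ? halfcwnd : room;
}
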
1789b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
179067edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1791c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1792c1b4a7e6SDavid S. Miller  */
17935bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1794c1b4a7e6SDavid S. Miller {
1795c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1796c1b4a7e6SDavid S. Miller 
1797f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
17985bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1799c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1800c1b4a7e6SDavid S. Miller 	}
1801c1b4a7e6SDavid S. Miller 	return tso_segs;
1802c1b4a7e6SDavid S. Miller }
1803c1b4a7e6SDavid S. Miller 
1804c1b4a7e6SDavid S. Miller 
1805a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1806c1b4a7e6SDavid S. Miller  * sent now.
1807c1b4a7e6SDavid S. Miller  */
1808a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1809c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1810c1b4a7e6SDavid S. Miller {
1811c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1812c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1813c1b4a7e6SDavid S. Miller 	 *
1814c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1815c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1816c1b4a7e6SDavid S. Miller 	 */
1817c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1818a2a385d6SEric Dumazet 		return true;
1819c1b4a7e6SDavid S. Miller 
18209b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18219b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1822a2a385d6SEric Dumazet 		return true;
1823c1b4a7e6SDavid S. Miller 
1824cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1825a2a385d6SEric Dumazet 		return true;
1826c1b4a7e6SDavid S. Miller 
1827a2a385d6SEric Dumazet 	return false;
1828c1b4a7e6SDavid S. Miller }
1829c1b4a7e6SDavid S. Miller 
1830c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1831a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1832a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1833056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1834c1b4a7e6SDavid S. Miller {
1835c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1836c1b4a7e6SDavid S. Miller 
1837c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1838c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1839c1b4a7e6SDavid S. Miller 
184090840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1841c1b4a7e6SDavid S. Miller }
1842c1b4a7e6SDavid S. Miller 
1843c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1844c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1845c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1846c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1847c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1848c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1849c1b4a7e6SDavid S. Miller  */
185075c119afSEric Dumazet static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
185175c119afSEric Dumazet 			struct sk_buff *skb, unsigned int len,
1852c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1853c1b4a7e6SDavid S. Miller {
1854c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1855c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
18569ce01461SIlpo Järvinen 	u8 flags;
1857c1b4a7e6SDavid S. Miller 
1858c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1859c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
186075c119afSEric Dumazet 		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
1861c1b4a7e6SDavid S. Miller 
1862eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
186351456b29SIan Morris 	if (unlikely(!buff))
1864c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1865c1b4a7e6SDavid S. Miller 
18663ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18673ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1868b60b49eaSHerbert Xu 	buff->truesize += nlen;
1869c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1870c1b4a7e6SDavid S. Miller 
1871c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1872c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1873c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1874c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1875c1b4a7e6SDavid S. Miller 
1876c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18774de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18784de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18794de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1880c1b4a7e6SDavid S. Miller 
1881c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1882c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1883c1b4a7e6SDavid S. Miller 
1884a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1885a166140eSMartin KaFai Lau 
188698be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1887c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1888490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1889c1b4a7e6SDavid S. Miller 
1890c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
18915bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
18925bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1893c1b4a7e6SDavid S. Miller 
1894c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1895f4a775d1SEric Dumazet 	__skb_header_release(buff);
189675c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1897c1b4a7e6SDavid S. Miller 
1898c1b4a7e6SDavid S. Miller 	return 0;
1899c1b4a7e6SDavid S. Miller }
1900c1b4a7e6SDavid S. Miller 
1901c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1902c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1903c1b4a7e6SDavid S. Miller  *
1904c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1905c1b4a7e6SDavid S. Miller  */
1906ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1907f9bfe4e6SEric Dumazet 				 bool *is_cwnd_limited,
1908f9bfe4e6SEric Dumazet 				 bool *is_rwnd_limited,
1909f9bfe4e6SEric Dumazet 				 u32 max_segs)
1910c1b4a7e6SDavid S. Miller {
19116687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1912f1c6ea38SEric Dumazet 	u32 send_win, cong_win, limit, in_flight;
191350c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
191450c8339eSEric Dumazet 	struct sk_buff *head;
1915ad9f4f50SEric Dumazet 	int win_divisor;
1916f1c6ea38SEric Dumazet 	s64 delta;
1917c1b4a7e6SDavid S. Miller 
191899d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1919ae8064acSJohn Heffner 		goto send_now;
1920ae8064acSJohn Heffner 
19215f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
1922a682850aSEric Dumazet 	 * only if the last write was recent (1 ms).
1923a682850aSEric Dumazet 	 * Note that tp->tcp_wstamp_ns can be in the future if we have
1924a682850aSEric Dumazet 	 * packets waiting in a qdisc or device for EDT delivery.
19255f852eb5SEric Dumazet 	 */
1926a682850aSEric Dumazet 	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1927a682850aSEric Dumazet 	if (delta > 0)
1928ae8064acSJohn Heffner 		goto send_now;
1929908a75c1SDavid S. Miller 
1930c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1931c1b4a7e6SDavid S. Miller 
1932c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1933c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1934c1b4a7e6SDavid S. Miller 
193590840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1936c1b4a7e6SDavid S. Miller 
1937c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1938c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1939c1b4a7e6SDavid S. Miller 
1940c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1941c1b4a7e6SDavid S. Miller 
1942ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1943605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1944ae8064acSJohn Heffner 		goto send_now;
1945ba244fe9SDavid S. Miller 
194662ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
194762ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
194862ad2761SIlpo Järvinen 		goto send_now;
194962ad2761SIlpo Järvinen 
19505bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1951ad9f4f50SEric Dumazet 	if (win_divisor) {
1952c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1953c1b4a7e6SDavid S. Miller 
1954c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1955c1b4a7e6SDavid S. Miller 		 * just use it.
1956c1b4a7e6SDavid S. Miller 		 */
1957ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1958c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1959ae8064acSJohn Heffner 			goto send_now;
1960c1b4a7e6SDavid S. Miller 	} else {
1961c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1962c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1963c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1964c1b4a7e6SDavid S. Miller 		 * then send now.
1965c1b4a7e6SDavid S. Miller 		 */
19666b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1967ae8064acSJohn Heffner 			goto send_now;
1968c1b4a7e6SDavid S. Miller 	}
1969c1b4a7e6SDavid S. Miller 
197075c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
197175c119afSEric Dumazet 	/* TODO: use tsorted_sent_queue? */
197275c119afSEric Dumazet 	if (!head)
197375c119afSEric Dumazet 		goto send_now;
1974f1c6ea38SEric Dumazet 	delta = tp->tcp_clock_cache - head->tstamp;
197550c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
1976f1c6ea38SEric Dumazet 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
197750c8339eSEric Dumazet 		goto send_now;
197850c8339eSEric Dumazet 
1979f9bfe4e6SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1980f9bfe4e6SEric Dumazet 	 * Three cases are tracked :
1981f9bfe4e6SEric Dumazet 	 * 1) We are cwnd-limited
1982f9bfe4e6SEric Dumazet 	 * 2) We are rwnd-limited
1983f9bfe4e6SEric Dumazet 	 * 3) We are application limited.
1984f9bfe4e6SEric Dumazet 	 */
1985f9bfe4e6SEric Dumazet 	if (cong_win < send_win) {
1986f9bfe4e6SEric Dumazet 		if (cong_win <= skb->len) {
1987ca8a2263SNeal Cardwell 			*is_cwnd_limited = true;
1988f9bfe4e6SEric Dumazet 			return true;
1989f9bfe4e6SEric Dumazet 		}
1990f9bfe4e6SEric Dumazet 	} else {
1991f9bfe4e6SEric Dumazet 		if (send_win <= skb->len) {
1992f9bfe4e6SEric Dumazet 			*is_rwnd_limited = true;
1993f9bfe4e6SEric Dumazet 			return true;
1994f9bfe4e6SEric Dumazet 		}
1995f9bfe4e6SEric Dumazet 	}
1996f9bfe4e6SEric Dumazet 
1997f9bfe4e6SEric Dumazet 	/* If this packet won't get more data, do not wait. */
1998*d8ed257fSEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
1999*d8ed257fSEric Dumazet 	    TCP_SKB_CB(skb)->eor)
2000f9bfe4e6SEric Dumazet 		goto send_now;
2001ca8a2263SNeal Cardwell 
2002a2a385d6SEric Dumazet 	return true;
2003ae8064acSJohn Heffner 
2004ae8064acSJohn Heffner send_now:
2005a2a385d6SEric Dumazet 	return false;
2006c1b4a7e6SDavid S. Miller }
2007c1b4a7e6SDavid S. Miller 
200805cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
200905cbc0dbSFan Du {
201005cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
201105cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
201205cbc0dbSFan Du 	struct net *net = sock_net(sk);
201305cbc0dbSFan Du 	u32 interval;
201405cbc0dbSFan Du 	s32 delta;
201505cbc0dbSFan Du 
201605cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
2017c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
201805cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
201905cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
202005cbc0dbSFan Du 
202105cbc0dbSFan Du 		/* Update current search range */
202205cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
202305cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
202405cbc0dbSFan Du 			sizeof(struct tcphdr) +
202505cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
202605cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
202705cbc0dbSFan Du 
202805cbc0dbSFan Du 		/* Update probe time stamp */
2029c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
203005cbc0dbSFan Du 	}
203105cbc0dbSFan Du }
203205cbc0dbSFan Du 
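/* Sketch of the reprobe timing used by tcp_mtu_check_reprobe() above;
 * parameter names are hypothetical.  With sysctl_tcp_probe_interval =
 * 600 seconds, the search range is reset once tcp_jiffies32 has
 * advanced by at least 600 * HZ since the last probe_timestamp.
 */
static bool example_reprobe_due(unsigned int now_jiffies,
				unsigned int probe_stamp,
				unsigned int interval_secs,
				unsigned int hz)
{
	int delta = (int)(now_jiffies - probe_stamp);

	return delta >= (int)(interval_secs * hz);
}
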
2033808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2034808cf9e3SIlya Lesokhin {
2035808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2036808cf9e3SIlya Lesokhin 
2037808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2038808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2039808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2040808cf9e3SIlya Lesokhin 			break;
2041808cf9e3SIlya Lesokhin 
2042808cf9e3SIlya Lesokhin 		if (unlikely(TCP_SKB_CB(skb)->eor))
2043808cf9e3SIlya Lesokhin 			return false;
2044808cf9e3SIlya Lesokhin 
2045808cf9e3SIlya Lesokhin 		len -= skb->len;
2046808cf9e3SIlya Lesokhin 	}
2047808cf9e3SIlya Lesokhin 
2048808cf9e3SIlya Lesokhin 	return true;
2049808cf9e3SIlya Lesokhin }
2050808cf9e3SIlya Lesokhin 
20515d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
205267edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
205367edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
205467edfef7SAndi Kleen  * changes resulting in larger path MTUs.
205567edfef7SAndi Kleen  *
20565d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20575d424d5aSJohn Heffner  *         1 if a probe was sent,
2058056834d9SIlpo Järvinen  *         -1 otherwise
2059056834d9SIlpo Järvinen  */
20605d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20615d424d5aSJohn Heffner {
20625d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
206312a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20645d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20656b58e0a5SFan Du 	struct net *net = sock_net(sk);
20665d424d5aSJohn Heffner 	int probe_size;
206791cc17c0SIlpo Järvinen 	int size_needed;
206812a59abcSEric Dumazet 	int copy, len;
20695d424d5aSJohn Heffner 	int mss_now;
20706b58e0a5SFan Du 	int interval;
20715d424d5aSJohn Heffner 
20725d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20735d424d5aSJohn Heffner 	 * not in recovery,
20745d424d5aSJohn Heffner 	 * have enough cwnd, and
207512a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
207612a59abcSEric Dumazet 	 */
207712a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20785d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20795d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20805d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
208112a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20825d424d5aSJohn Heffner 		return -1;
20835d424d5aSJohn Heffner 
20846b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
20856b58e0a5SFan Du 	 * and the current mss_clamp.  If (search_high - search_low) is
20866b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20876b58e0a5SFan Du 	 */
20880c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
20896b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20906b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
209191cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
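	/* A worked example with hypothetical values: with search_low = 1024
	 * and search_high = 1500, the probe is built for the midpoint MTU
	 * (1500 + 1024) / 2 = 1262; with mss_cache = 1448 and reordering = 3,
	 * size_needed adds four more MSS of data behind the probe so its
	 * loss can still be detected via SACKs of the trailing segments.
	 */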
20926b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
209305cbc0dbSFan Du 	/* When misfortune happens and we are actively reprobing, we stick
209405cbc0dbSFan Du 	 * with the current probing process by not resetting the search
209505cbc0dbSFan Du 	 * range to its original values unless the reprobe timer has expired.
209605cbc0dbSFan Du 	 */
20976b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
209805cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
209905cbc0dbSFan Du 		/* Check whether enough time has elapsed for
210005cbc0dbSFan Du 		 * another round of probing.
210105cbc0dbSFan Du 		 */
210205cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
21035d424d5aSJohn Heffner 		return -1;
21045d424d5aSJohn Heffner 	}
21055d424d5aSJohn Heffner 
21065d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
21077f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
21085d424d5aSJohn Heffner 		return -1;
21095d424d5aSJohn Heffner 
211091cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
21115d424d5aSJohn Heffner 		return -1;
211290840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21135d424d5aSJohn Heffner 		return 0;
21145d424d5aSJohn Heffner 
2115d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2116d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2117d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
21185d424d5aSJohn Heffner 			return -1;
21195d424d5aSJohn Heffner 		else
21205d424d5aSJohn Heffner 			return 0;
21215d424d5aSJohn Heffner 	}
21225d424d5aSJohn Heffner 
2123808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2124808cf9e3SIlya Lesokhin 		return -1;
2125808cf9e3SIlya Lesokhin 
21265d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2127eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
212851456b29SIan Morris 	if (!nskb)
21295d424d5aSJohn Heffner 		return -1;
21303ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
21313ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21325d424d5aSJohn Heffner 
2133fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
21345d424d5aSJohn Heffner 
21355d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21365d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21374de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21385d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21395d424d5aSJohn Heffner 	nskb->csum = 0;
214098be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21415d424d5aSJohn Heffner 
214250c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21432b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
214450c4817eSIlpo Järvinen 
21455d424d5aSJohn Heffner 	len = 0;
2146234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21475d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21485d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21495d424d5aSJohn Heffner 
21505d424d5aSJohn Heffner 		if (skb->len <= copy) {
21515d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21525d424d5aSJohn Heffner 			 * Throw it away. */
21534de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2154808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set,
2155808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2156808cf9e3SIlya Lesokhin 			 */
2157808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2158fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21593ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21605d424d5aSJohn Heffner 		} else {
21614de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2162a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21635d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21645d424d5aSJohn Heffner 				skb_pull(skb, copy);
21655d424d5aSJohn Heffner 			} else {
21665d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21675bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21685d424d5aSJohn Heffner 			}
21695d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21705d424d5aSJohn Heffner 		}
21715d424d5aSJohn Heffner 
21725d424d5aSJohn Heffner 		len += copy;
2173234b6860SIlpo Järvinen 
2174234b6860SIlpo Järvinen 		if (len >= probe_size)
2175234b6860SIlpo Järvinen 			break;
21765d424d5aSJohn Heffner 	}
21775bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21785d424d5aSJohn Heffner 
21795d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21807faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21817faee5c0SEric Dumazet 	 */
21825d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21835d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21845d424d5aSJohn Heffner 		 * effectively two packets. */
21855d424d5aSJohn Heffner 		tp->snd_cwnd--;
218666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21875d424d5aSJohn Heffner 
21885d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
21890e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
21900e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
21915d424d5aSJohn Heffner 
21925d424d5aSJohn Heffner 		return 1;
21935d424d5aSJohn Heffner 	}
21945d424d5aSJohn Heffner 
21955d424d5aSJohn Heffner 	return -1;
21965d424d5aSJohn Heffner }
21975d424d5aSJohn Heffner 
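/* Return true if transmission must wait for the internal pacing clock:
 * tcp_wstamp_ns, the earliest allowed departure time, is still in the
 * future, so (re)arm the pacing hrtimer and hold a socket reference
 * for its handler.
 */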
2198864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2199218af599SEric Dumazet {
2200864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2201864e5c09SEric Dumazet 
2202864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2203864e5c09SEric Dumazet 		return false;
2204864e5c09SEric Dumazet 
2205864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2206864e5c09SEric Dumazet 		return false;
2207864e5c09SEric Dumazet 
2208864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2209864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2210864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2211864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2212864e5c09SEric Dumazet 		sock_hold(sk);
2213864e5c09SEric Dumazet 	}
2214864e5c09SEric Dumazet 	return true;
2215218af599SEric Dumazet }
2216218af599SEric Dumazet 
2217f9616c35SEric Dumazet /* TCP Small Queues :
2218f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets or ~1 ms.
2219f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2220f9616c35SEric Dumazet  * This allows for :
2221f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2222f9616c35SEric Dumazet  *  - faster recovery
2223f9616c35SEric Dumazet  *  - high rates
2224f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2225f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2226f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2227f9616c35SEric Dumazet  */
2228f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2229f9616c35SEric Dumazet 				  unsigned int factor)
2230f9616c35SEric Dumazet {
223176a9ebe8SEric Dumazet 	unsigned long limit;
2232f9616c35SEric Dumazet 
223376a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
223476a9ebe8SEric Dumazet 		      2 * skb->truesize,
223576a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
2236c73e5807SEric Dumazet 	if (sk->sk_pacing_status == SK_PACING_NONE)
223776a9ebe8SEric Dumazet 		limit = min_t(unsigned long, limit,
22389184d8bbSEric Dumazet 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2239f9616c35SEric Dumazet 	limit <<= factor;
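	/* Example: at sk_pacing_rate = 125 MB/s (1 Gbit/s) with the default
	 * sk_pacing_shift of 10, the base limit above is ~122 KB, i.e.
	 * roughly 1 ms worth of data; factor == 1 (retransmits) doubles it.
	 */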
2240f9616c35SEric Dumazet 
224114afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
224275c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
224375eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
224475eefc6cSEric Dumazet 		 * after a softirq/tasklet schedule.
224575eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
224675eefc6cSEric Dumazet 		 */
224775c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
224875eefc6cSEric Dumazet 			return false;
224975eefc6cSEric Dumazet 
22507aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2251f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2252f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2253f9616c35SEric Dumazet 		 * test again the condition.
2254f9616c35SEric Dumazet 		 */
2255f9616c35SEric Dumazet 		smp_mb__after_atomic();
225614afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2257f9616c35SEric Dumazet 			return true;
2258f9616c35SEric Dumazet 	}
2259f9616c35SEric Dumazet 	return false;
2260f9616c35SEric Dumazet }
2261f9616c35SEric Dumazet 
226205b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
226305b055e8SFrancis Yan {
2264628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2265efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
226605b055e8SFrancis Yan 
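	/* chrono_stat[] has no slot for TCP_CHRONO_UNSPEC, hence the
	 * "old - 1" index when accumulating time for the ending chrono.
	 */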
2267efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2268efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
226905b055e8SFrancis Yan 	tp->chrono_start = now;
227005b055e8SFrancis Yan 	tp->chrono_type = new;
227105b055e8SFrancis Yan }
227205b055e8SFrancis Yan 
227305b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
227405b055e8SFrancis Yan {
227505b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
227605b055e8SFrancis Yan 
227705b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
22780f87230dSFrancis Yan 	 * chronograph, then the highest priority enum takes precedence
22790f87230dSFrancis Yan 	 * over the other conditions, so that if something "more interesting"
228005b055e8SFrancis Yan 	 * starts happening, we stop the previous chrono and start a new one.
228105b055e8SFrancis Yan 	 */
228205b055e8SFrancis Yan 	if (type > tp->chrono_type)
228305b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
228405b055e8SFrancis Yan }
228505b055e8SFrancis Yan 
228605b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
228705b055e8SFrancis Yan {
228805b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
228905b055e8SFrancis Yan 
22910f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
22920f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
22930f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
22940f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
22950f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
22960f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
22970f87230dSFrancis Yan 	 */
229875c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
229905b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
23000f87230dSFrancis Yan 	else if (type == tp->chrono_type)
23010f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
230205b055e8SFrancis Yan }
230305b055e8SFrancis Yan 
23041da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
23051da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
23061da177e4SLinus Torvalds  * window for us.
23071da177e4SLinus Torvalds  *
2308f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2309f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2310f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2311f8269a49SIlpo Järvinen  *
23126ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
23136ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
23146ba8a3b1SNandita Dukkipati  *
2315a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2316a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
23171da177e4SLinus Torvalds  */
2318a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2319d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
23201da177e4SLinus Torvalds {
23211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
232292df7b51SDavid S. Miller 	struct sk_buff *skb;
2323c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2324c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23255d424d5aSJohn Heffner 	int result;
23265615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2327605ad7f1SEric Dumazet 	u32 max_segs;
23281da177e4SLinus Torvalds 
2329c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23305d424d5aSJohn Heffner 
2331ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2332d5dd9175SIlpo Järvinen 	if (!push_one) {
23335d424d5aSJohn Heffner 		/* Do MTU probing. */
2334d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2335d5dd9175SIlpo Järvinen 		if (!result) {
2336a2a385d6SEric Dumazet 			return false;
23375d424d5aSJohn Heffner 		} else if (result > 0) {
23385d424d5aSJohn Heffner 			sent_pkts = 1;
23395d424d5aSJohn Heffner 		}
2340d5dd9175SIlpo Järvinen 	}
23415d424d5aSJohn Heffner 
2342ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2343fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2344c8ac3774SHerbert Xu 		unsigned int limit;
2345c8ac3774SHerbert Xu 
234679861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
234779861919SEric Dumazet 			/* "skb_mstamp_ns" is used as the starting point for the retransmit timer */
234879861919SEric Dumazet 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
234979861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
235079861919SEric Dumazet 			goto repair; /* Skip network transmission */
235179861919SEric Dumazet 		}
235279861919SEric Dumazet 
2353218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2354218af599SEric Dumazet 			break;
2355218af599SEric Dumazet 
23565bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2357c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2358c1b4a7e6SDavid S. Miller 
2359b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
23606ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
23616ba8a3b1SNandita Dukkipati 			if (push_one == 2)
23626ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
23636ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23646ba8a3b1SNandita Dukkipati 			else
2365b68e9f85SHerbert Xu 				break;
23666ba8a3b1SNandita Dukkipati 		}
2367b68e9f85SHerbert Xu 
23685615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23695615f886SFrancis Yan 			is_rwnd_limited = true;
2370b68e9f85SHerbert Xu 			break;
23715615f886SFrancis Yan 		}
2372b68e9f85SHerbert Xu 
2373d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2374aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2375aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2376aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2377aa93466bSDavid S. Miller 				break;
2378c1b4a7e6SDavid S. Miller 		} else {
2379ca8a2263SNeal Cardwell 			if (!push_one &&
2380605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2381f9bfe4e6SEric Dumazet 						 &is_rwnd_limited, max_segs))
2382aa93466bSDavid S. Miller 				break;
2383c1b4a7e6SDavid S. Miller 		}
2384aa93466bSDavid S. Miller 
2385605ad7f1SEric Dumazet 		limit = mss_now;
2386d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2387605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2388605ad7f1SEric Dumazet 						    min_t(unsigned int,
2389605ad7f1SEric Dumazet 							  cwnd_quota,
2390605ad7f1SEric Dumazet 							  max_segs),
2391605ad7f1SEric Dumazet 						    nonagle);
2392605ad7f1SEric Dumazet 
2393605ad7f1SEric Dumazet 		if (skb->len > limit &&
239475c119afSEric Dumazet 		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
239575c119afSEric Dumazet 					  skb, limit, mss_now, gfp)))
2396605ad7f1SEric Dumazet 			break;
2397605ad7f1SEric Dumazet 
2398f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
239946d3ceabSEric Dumazet 			break;
2400c9eeec26SEric Dumazet 
2401d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
24021da177e4SLinus Torvalds 			break;
24031da177e4SLinus Torvalds 
2404ec342325SAndrew Vagin repair:
24051da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
24061da177e4SLinus Torvalds 		 * This call will increment packets_out.
24071da177e4SLinus Torvalds 		 */
240866f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
24091da177e4SLinus Torvalds 
24101da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2411a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2412d5dd9175SIlpo Järvinen 
2413d5dd9175SIlpo Järvinen 		if (push_one)
2414d5dd9175SIlpo Järvinen 			break;
24151da177e4SLinus Torvalds 	}
24161da177e4SLinus Torvalds 
24175615f886SFrancis Yan 	if (is_rwnd_limited)
24185615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
24195615f886SFrancis Yan 	else
24205615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
24215615f886SFrancis Yan 
2422aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2423684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2424684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24256ba8a3b1SNandita Dukkipati 
24266ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24276ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2428ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2429d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2430ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2431a2a385d6SEric Dumazet 		return false;
24321da177e4SLinus Torvalds 	}
243375c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24346ba8a3b1SNandita Dukkipati }
24356ba8a3b1SNandita Dukkipati 
2436ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24376ba8a3b1SNandita Dukkipati {
24386ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24396ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2440a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24412ae21cf5SEric Dumazet 	int early_retrans;
24426ba8a3b1SNandita Dukkipati 
24436ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24446ba8a3b1SNandita Dukkipati 	 * finishes.
24456ba8a3b1SNandita Dukkipati 	 */
2446f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
24476ba8a3b1SNandita Dukkipati 		return false;
24486ba8a3b1SNandita Dukkipati 
24492ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
24506ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2451b4f70c3dSNeal Cardwell 	 * not in loss recovery that are either cwnd-limited or application-limited.
24526ba8a3b1SNandita Dukkipati 	 */
24532ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2454bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2455b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2456b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
24576ba8a3b1SNandita Dukkipati 		return false;
24586ba8a3b1SNandita Dukkipati 
2459bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2460f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2461f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24626ba8a3b1SNandita Dukkipati 	 */
2463bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2464bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
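		/* (srtt_us holds 8 * SRTT, so the shift above yields 2 * SRTT.) */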
24656ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2466bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2467bb4d991aSYuchung Cheng 		else
2468bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2469bb4d991aSYuchung Cheng 	} else {
2470bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2471bb4d991aSYuchung Cheng 	}
24726ba8a3b1SNandita Dukkipati 
2473a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2474ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2475ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2476ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2477a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2478a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
24796ba8a3b1SNandita Dukkipati 
24803f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
24813f80e08fSEric Dumazet 			     TCP_RTO_MAX, NULL);
24826ba8a3b1SNandita Dukkipati 	return true;
24836ba8a3b1SNandita Dukkipati }
24846ba8a3b1SNandita Dukkipati 
24851f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
24861f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
24871f3279aeSEric Dumazet  * In this case, there is very little point doing a retransmit!
24881f3279aeSEric Dumazet  */
24891f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24901f3279aeSEric Dumazet 				    const struct sk_buff *skb)
24911f3279aeSEric Dumazet {
249239bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2493c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
24941f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24951f3279aeSEric Dumazet 		return true;
24961f3279aeSEric Dumazet 	}
24971f3279aeSEric Dumazet 	return false;
24981f3279aeSEric Dumazet }
24991f3279aeSEric Dumazet 
2500b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
25016ba8a3b1SNandita Dukkipati  * retransmit the last segment.
25026ba8a3b1SNandita Dukkipati  */
25036ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
25046ba8a3b1SNandita Dukkipati {
25059b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
25066ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
25076ba8a3b1SNandita Dukkipati 	int pcount;
25086ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
25096ba8a3b1SNandita Dukkipati 
2510b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
251175c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2512b340b264SYuchung Cheng 		pcount = tp->packets_out;
2513b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2514b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2515b340b264SYuchung Cheng 			goto probe_sent;
25166ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25176ba8a3b1SNandita Dukkipati 	}
251875c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2519b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2520b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2521b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2522b2b7af86SYuchung Cheng 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2523b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2524b2b7af86SYuchung Cheng 		return;
2525b2b7af86SYuchung Cheng 	}
25266ba8a3b1SNandita Dukkipati 
25279b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
25289b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25299b717a8dSNandita Dukkipati 		goto rearm_timer;
25309b717a8dSNandita Dukkipati 
25311f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25321f3279aeSEric Dumazet 		goto rearm_timer;
25331f3279aeSEric Dumazet 
25346ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25356ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25366ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25376ba8a3b1SNandita Dukkipati 
25386ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
253975c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
254075c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25416cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25426ba8a3b1SNandita Dukkipati 			goto rearm_timer;
254375c119afSEric Dumazet 		skb = skb_rb_next(skb);
25446ba8a3b1SNandita Dukkipati 	}
25456ba8a3b1SNandita Dukkipati 
25466ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25476ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25486ba8a3b1SNandita Dukkipati 
254910d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2550b340b264SYuchung Cheng 		goto rearm_timer;
25516ba8a3b1SNandita Dukkipati 
25529b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
25539b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
25549b717a8dSNandita Dukkipati 
2555b340b264SYuchung Cheng probe_sent:
2556c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2557fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2558fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2559b340b264SYuchung Cheng rearm_timer:
2560fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
25611da177e4SLinus Torvalds }
25621da177e4SLinus Torvalds 
2563a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2564a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
2565a762a980SDavid S. Miller  * The socket must be locked by the caller.
2566a762a980SDavid S. Miller  */
25679e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25689e412ba7SIlpo Järvinen 			       int nonagle)
2569a762a980SDavid S. Miller {
2570726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2571726e07a8SIlpo Järvinen 	 * In time closedown will finish; we will empty the write queue and
2572726e07a8SIlpo Järvinen 	 * all will be happy.
2573726e07a8SIlpo Järvinen 	 */
2574726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2575726e07a8SIlpo Järvinen 		return;
2576726e07a8SIlpo Järvinen 
257799a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
25787450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
25799e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2580a762a980SDavid S. Miller }
2581a762a980SDavid S. Miller 
2582c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2583c1b4a7e6SDavid S. Miller  * true push pending frames to set up the probe timer etc.
2584c1b4a7e6SDavid S. Miller  */
2585c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2586c1b4a7e6SDavid S. Miller {
2587fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2588c1b4a7e6SDavid S. Miller 
2589c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2590c1b4a7e6SDavid S. Miller 
2591d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2592c1b4a7e6SDavid S. Miller }
2593c1b4a7e6SDavid S. Miller 
25941da177e4SLinus Torvalds /* This function returns the amount that we can raise the
25951da177e4SLinus Torvalds  * usable window based on the following constraints
25961da177e4SLinus Torvalds  *
25971da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
25981da177e4SLinus Torvalds  * 2. We limit memory per socket
25991da177e4SLinus Torvalds  *
26001da177e4SLinus Torvalds  * RFC 1122:
26011da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
26021da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
26031da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
26041da177e4SLinus Torvalds  *
26051da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
26061da177e4SLinus Torvalds  * it at least MSS bytes.
26071da177e4SLinus Torvalds  *
26081da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
26091da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
26101da177e4SLinus Torvalds  *
26111da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
26121da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
26131da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
26141da177e4SLinus Torvalds  * window to always advance by a single byte.
26151da177e4SLinus Torvalds  *
26161da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
26171da177e4SLinus Torvalds  * then this will not be a problem.
26181da177e4SLinus Torvalds  *
26191da177e4SLinus Torvalds  * BSD seems to make the following compromise:
26201da177e4SLinus Torvalds  *
26211da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
26221da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
26231da177e4SLinus Torvalds  *	then set the window to 0.
26241da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
26251da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
26261da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26271da177e4SLinus Torvalds  *
26281da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26291da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26301da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26311da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26321da177e4SLinus Torvalds  * because the pipeline is full.
26331da177e4SLinus Torvalds  *
26341da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26351da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26361da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26371da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26381da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26391da177e4SLinus Torvalds  *
26401da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26411da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26421da177e4SLinus Torvalds  *
26431da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26441da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26451da177e4SLinus Torvalds  */
26461da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26471da177e4SLinus Torvalds {
2648463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
26491da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2650caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
26511da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
26521da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
26531da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
26541da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
26551da177e4SLinus Torvalds 	 */
2656463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
26571da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
265886c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
265986c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
26601da177e4SLinus Torvalds 	int window;
26611da177e4SLinus Torvalds 
266206425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
26631da177e4SLinus Torvalds 		mss = full_space;
266406425c30SEric Dumazet 		if (mss <= 0)
266506425c30SEric Dumazet 			return 0;
266606425c30SEric Dumazet 	}
2667b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2668463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26691da177e4SLinus Torvalds 
2670b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2671056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2672056834d9SIlpo Järvinen 					       4U * tp->advmss);
26731da177e4SLinus Torvalds 
267486c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
267586c1a045SFlorian Westphal 		 * increase it due to wscale.
267686c1a045SFlorian Westphal 		 */
267786c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
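		/* e.g. with rcv_wscale = 7 this truncates free_space to a
		 * multiple of 128 bytes, the granularity the peer will see
		 * after shifting the advertised window.
		 */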
267886c1a045SFlorian Westphal 
267986c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
268086c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
268186c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
268286c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
268386c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
268486c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
268586c1a045SFlorian Westphal 		 */
268686c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
26871da177e4SLinus Torvalds 			return 0;
26881da177e4SLinus Torvalds 	}
26891da177e4SLinus Torvalds 
26901da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
26911da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
26921da177e4SLinus Torvalds 
26931da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
26941da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
26951da177e4SLinus Torvalds 	 */
26961da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
26971da177e4SLinus Torvalds 		window = free_space;
26981da177e4SLinus Torvalds 
26991da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
27001da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
27011da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
27021da177e4SLinus Torvalds 		 */
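		/* ALIGN() rounds up to the next multiple of 1 << rcv_wscale,
		 * so the low-order bits lost to scaling never turn a small
		 * positive window into an unintended zero-window advert.
		 */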
27031935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
27041da177e4SLinus Torvalds 	} else {
27051935299dSGao Feng 		window = tp->rcv_wnd;
27061da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
27071da177e4SLinus Torvalds 		 * Window clamp already applied above.
27081da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
27091da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
27101da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
27111da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
27121da177e4SLinus Torvalds 		 * is too small.
27131da177e4SLinus Torvalds 		 */
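		/* e.g. free_space = 10000 with mss = 1448 rounds down to
		 * 6 * 1448 = 8688 bytes.
		 */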
27141da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
27151935299dSGao Feng 			window = rounddown(free_space, mss);
271684565070SJohn Heffner 		else if (mss == full_space &&
2717b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
271884565070SJohn Heffner 			window = free_space;
27191da177e4SLinus Torvalds 	}
27201da177e4SLinus Torvalds 
27211da177e4SLinus Torvalds 	return window;
27221da177e4SLinus Torvalds }
27231da177e4SLinus Torvalds 
2724cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2725082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2726082ac2d5SMartin KaFai Lau {
27270a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27280a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27290a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2730082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2731082ac2d5SMartin KaFai Lau 
27320a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2733082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27342de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27352de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2736082ac2d5SMartin KaFai Lau 	}
2737082ac2d5SMartin KaFai Lau }
2738082ac2d5SMartin KaFai Lau 
27394a17fc3aSIlpo Järvinen /* Collapses two adjacent SKBs during retransmission. */
2740f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27411da177e4SLinus Torvalds {
27421da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
274375c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
274413dde04fSWei Yongjun 	int next_skb_size;
27451da177e4SLinus Torvalds 
2746058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27471da177e4SLinus Torvalds 
2748058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
27491da177e4SLinus Torvalds 
2750f8071cdeSEric Dumazet 	if (next_skb_size) {
2751f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2752f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2753f8071cdeSEric Dumazet 				      next_skb_size);
2754f8071cdeSEric Dumazet 		else if (!skb_shift(skb, next_skb, next_skb_size))
2755f8071cdeSEric Dumazet 			return false;
2756f8071cdeSEric Dumazet 	}
27572b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2758a6963a6bSIlpo Järvinen 
27591da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27601da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27611da177e4SLinus Torvalds 
2762e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27634de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27641da177e4SLinus Torvalds 
27651da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27661da177e4SLinus Torvalds 	 * packet counting does not break.
27671da177e4SLinus Torvalds 	 */
27684828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2769a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2770b7689205SIlpo Järvinen 
2771b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2772ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2773ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2774ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2775b7689205SIlpo Järvinen 
2776797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2777797108d1SIlpo Järvinen 
2778082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2779082ac2d5SMartin KaFai Lau 
278075c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2781f8071cdeSEric Dumazet 	return true;
27821da177e4SLinus Torvalds }
27831da177e4SLinus Torvalds 
278467edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2785a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27864a17fc3aSIlpo Järvinen {
27874a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2788a2a385d6SEric Dumazet 		return false;
27894a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2790a2a385d6SEric Dumazet 		return false;
27912331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd data could be invented */
27924a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2793a2a385d6SEric Dumazet 		return false;
27944a17fc3aSIlpo Järvinen 
2795a2a385d6SEric Dumazet 	return true;
27964a17fc3aSIlpo Järvinen }
27974a17fc3aSIlpo Järvinen 
279867edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
279967edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
280067edfef7SAndi Kleen  */
28014a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
28024a17fc3aSIlpo Järvinen 				     int space)
28034a17fc3aSIlpo Järvinen {
28044a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
28054a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2806a2a385d6SEric Dumazet 	bool first = true;
28074a17fc3aSIlpo Järvinen 
2808e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
28094a17fc3aSIlpo Järvinen 		return;
28104de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
28114a17fc3aSIlpo Järvinen 		return;
28124a17fc3aSIlpo Järvinen 
281375c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
28144a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
28154a17fc3aSIlpo Järvinen 			break;
28164a17fc3aSIlpo Järvinen 
2817a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2818a643b5d4SMartin KaFai Lau 			break;
2819a643b5d4SMartin KaFai Lau 
28204a17fc3aSIlpo Järvinen 		space -= skb->len;
28214a17fc3aSIlpo Järvinen 
28224a17fc3aSIlpo Järvinen 		if (first) {
2823a2a385d6SEric Dumazet 			first = false;
28244a17fc3aSIlpo Järvinen 			continue;
28254a17fc3aSIlpo Järvinen 		}
28264a17fc3aSIlpo Järvinen 
28274a17fc3aSIlpo Järvinen 		if (space < 0)
28284a17fc3aSIlpo Järvinen 			break;
28294a17fc3aSIlpo Järvinen 
28304a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28314a17fc3aSIlpo Järvinen 			break;
28324a17fc3aSIlpo Järvinen 
2833f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2834f8071cdeSEric Dumazet 			break;
28354a17fc3aSIlpo Järvinen 	}
28364a17fc3aSIlpo Järvinen }
28374a17fc3aSIlpo Järvinen 
28381da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28391da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28401da177e4SLinus Torvalds  * error occurred which prevented the send.
28411da177e4SLinus Torvalds  */
284210d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28431da177e4SLinus Torvalds {
28445d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
284510d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28467d227cd2SSridhar Samudrala 	unsigned int cur_mss;
284710d3be56SEric Dumazet 	int diff, len, err;
28481da177e4SLinus Torvalds 
285010d3be56SEric Dumazet 	/* Inconclusive MTU probe */
285110d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
28525d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
28535d424d5aSJohn Heffner 
28541da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2855caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
28561da177e4SLinus Torvalds 	 */
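	/* i.e. give up when sk_wmem_alloc exceeds
	 * min(sk_wmem_queued * 5 / 4, sk_sndbuf).
	 */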
285714afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2858ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2859ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28601da177e4SLinus Torvalds 		return -EAGAIN;
28611da177e4SLinus Torvalds 
28621f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28631f3279aeSEric Dumazet 		return -EBUSY;
28641f3279aeSEric Dumazet 
28651da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28667f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
28677f582b24SEric Dumazet 			WARN_ON_ONCE(1);
28687f582b24SEric Dumazet 			return -EINVAL;
28697f582b24SEric Dumazet 		}
28701da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28711da177e4SLinus Torvalds 			return -ENOMEM;
28721da177e4SLinus Torvalds 	}
28731da177e4SLinus Torvalds 
28747d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28757d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
28767d227cd2SSridhar Samudrala 
28770c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
28787d227cd2SSridhar Samudrala 
28791da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
28801da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
28811da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
28821da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
28831da177e4SLinus Torvalds 	 */
28849d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
28859d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
28861da177e4SLinus Torvalds 		return -EAGAIN;
28871da177e4SLinus Torvalds 
288810d3be56SEric Dumazet 	len = cur_mss * segs;
288910d3be56SEric Dumazet 	if (skb->len > len) {
289075c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
289175c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
28921da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
289302276f3cSIlpo Järvinen 	} else {
2894c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2895c52e2421SEric Dumazet 			return -ENOMEM;
289610d3be56SEric Dumazet 
289710d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
289810d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
289910d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
290010d3be56SEric Dumazet 		if (diff)
290110d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
290210d3be56SEric Dumazet 		if (skb->len < cur_mss)
290310d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
29041da177e4SLinus Torvalds 	}
29051da177e4SLinus Torvalds 
290649213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
290749213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
290849213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
290949213555SDaniel Borkmann 
2910678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2911678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2912678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2913678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2914678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2915678550c6SYuchung Cheng 	tp->total_retrans += segs;
2916fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2917678550c6SYuchung Cheng 
291850bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
291950bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
292050bceae9SThomas Graf 	 * beyond what csum_start can cover.
292150bceae9SThomas Graf 	 */
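	/* (skb->csum_start is a 16-bit offset from skb->head, so headroom
	 * at or above 0xFFFF could not be represented.)
	 */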
292250bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
292350bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
292410a81980SEric Dumazet 		struct sk_buff *nskb;
292510a81980SEric Dumazet 
2926e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
292710a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2928c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2929117632e6SEric Dumazet 				     -ENOBUFS;
2930e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2931e2080072SEric Dumazet 
29325889e2c0SYousuk Seung 		if (!err) {
2933a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
29345889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29355889e2c0SYousuk Seung 		}
2936117632e6SEric Dumazet 	} else {
2937c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2938117632e6SEric Dumazet 	}
2939c84a5711SYuchung Cheng 
2940a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2941a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2942a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2943a31ad29eSLawrence Brakmo 
2944fc9f3501SEric Dumazet 	if (likely(!err)) {
2945c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2946e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2947678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2948ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
2949fc9f3501SEric Dumazet 	}
2950c84a5711SYuchung Cheng 	return err;
295193b174adSYuchung Cheng }
295293b174adSYuchung Cheng 
295310d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
295493b174adSYuchung Cheng {
295593b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
295610d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
29571da177e4SLinus Torvalds 
29581da177e4SLinus Torvalds 	if (err == 0) {
29591da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
29601da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2961e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
29621da177e4SLinus Torvalds 		}
29631da177e4SLinus Torvalds #endif
29641da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
29651da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
29661da177e4SLinus Torvalds 
29671da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
29681da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
29697faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
29701da177e4SLinus Torvalds 
29711da177e4SLinus Torvalds 	}
29726e08d5e3SYuchung Cheng 
29736e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
29746e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
29756e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
29761da177e4SLinus Torvalds 	return err;
29771da177e4SLinus Torvalds }
29781da177e4SLinus Torvalds 
29791da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29801da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
29811da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
29821da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
29831da177e4SLinus Torvalds  */
29841da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29851da177e4SLinus Torvalds {
29866687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2987b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
29881da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2989840a3cbeSYuchung Cheng 	u32 max_segs;
299061eb55f4SIlpo Järvinen 	int mib_idx;
29916a438bbeSStephen Hemminger 
299245e77d31SIlpo Järvinen 	if (!tp->packets_out)
299345e77d31SIlpo Järvinen 		return;
299445e77d31SIlpo Järvinen 
299575c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
2996b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
2997ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
299875c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
2999dca0aaf8SEric Dumazet 		__u8 sacked;
300010d3be56SEric Dumazet 		int segs;
30011da177e4SLinus Torvalds 
3002218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3003218af599SEric Dumazet 			break;
3004218af599SEric Dumazet 
30056a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
300651456b29SIan Morris 		if (!hole)
30076a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
30086a438bbeSStephen Hemminger 
300910d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
301010d3be56SEric Dumazet 		if (segs <= 0)
30111da177e4SLinus Torvalds 			return;
3012dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3013a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3014a3d2e9f8SEric Dumazet 		 * we need to make sure we are not sending overly big TSO packets
3015a3d2e9f8SEric Dumazet 		 */
3016a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
30170e1c54c2SIlpo Järvinen 
3018840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3019006f582cSIlpo Järvinen 			break;
30200e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
302151456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
30220e1c54c2SIlpo Järvinen 				hole = skb;
302361eb55f4SIlpo Järvinen 			continue;
30241da177e4SLinus Torvalds 
30250e1c54c2SIlpo Järvinen 		} else {
30260e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30270e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30280e1c54c2SIlpo Järvinen 			else
30290e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30300e1c54c2SIlpo Järvinen 		}
30310e1c54c2SIlpo Järvinen 
30320e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
303361eb55f4SIlpo Järvinen 			continue;
303440b215e5SPavel Emelyanov 
3035f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3036f9616c35SEric Dumazet 			return;
3037f9616c35SEric Dumazet 
303810d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30391da177e4SLinus Torvalds 			return;
304024ab6becSYuchung Cheng 
3041de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30421da177e4SLinus Torvalds 
3043684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3044a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3045a262f0cdSNandita Dukkipati 
304675c119afSEric Dumazet 		if (skb == rtx_head &&
304757dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
30483f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30493f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
30503f80e08fSEric Dumazet 					     TCP_RTO_MAX,
30513f80e08fSEric Dumazet 					     skb);
30521da177e4SLinus Torvalds 	}
30531da177e4SLinus Torvalds }
30541da177e4SLinus Torvalds 
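/* Editor's note: illustrative sketch only, not part of the original file.
 * It restates how the loop above budgets segments per retransmitted skb:
 * whatever the congestion window leaves over the in-flight data, capped
 * by the TSO segment limit computed once before the rbtree walk.
 */
#if 0
static int example_retrans_seg_budget(const struct tcp_sock *tp, u32 max_segs)
{
	int segs = tp->snd_cwnd - tcp_packets_in_flight(tp);

	if (segs <= 0)
		return 0;	/* cwnd exhausted: stop retransmitting */
	return min_t(int, segs, max_segs);
}
#endif
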
3055d83769a5SEric Dumazet /* We allow FIN packets to exceed memory limits, to expedite
3056d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3057845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay the FIN
3058845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
3059a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3060a6c5ea4cSEric Dumazet  * with edge-triggered epoll().
3061d83769a5SEric Dumazet  */
3062a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3063d83769a5SEric Dumazet {
3064e805605cSJohannes Weiner 	int amt;
3065d83769a5SEric Dumazet 
3066d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3067d83769a5SEric Dumazet 		return;
3068d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3069d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3070e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3071e805605cSJohannes Weiner 
3072baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3073baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3074d83769a5SEric Dumazet }
3075d83769a5SEric Dumazet 
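/* Editor's note: illustrative sketch only.  sk_forced_mem_schedule() above
 * rounds the request up to whole memory quanta (SK_MEM_QUANTUM, i.e. one
 * page) and charges them unconditionally, unlike the regular
 * sk_mem_schedule() path, which may fail under memory pressure.
 */
#if 0
static int example_pages_needed(int size)
{
	/* Same rounding as sk_mem_pages(): ceil(size / SK_MEM_QUANTUM). */
	return (size + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
#endif
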
3076845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3077845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
30781da177e4SLinus Torvalds  */
30791da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30801da177e4SLinus Torvalds {
3081845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30831da177e4SLinus Torvalds 
3084845704a5SEric Dumazet 	/* Optimization: tack the FIN onto an skb if we have one in the write
3085845704a5SEric Dumazet 	 * queue that was not yet sent, or if we are under memory pressure.
3086845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a
3087845704a5SEric Dumazet 	 * timeout, as the TCP stack thinks it has already been transmitted.
30881da177e4SLinus Torvalds 	 */
308975c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
309075c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
309175c119afSEric Dumazet 
309275c119afSEric Dumazet 	if (tskb) {
3093845704a5SEric Dumazet coalesce:
3094845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3095845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
30961da177e4SLinus Torvalds 		tp->write_seq++;
309775c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3098845704a5SEric Dumazet 			/* This means tskb was already sent.
3099845704a5SEric Dumazet 			 * Pretend we included the FIN on the previous transmit.
3100845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3101845704a5SEric Dumazet 			 * if the FIN had been sent, because the retransmit path
3102845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3103845704a5SEric Dumazet 			 */
3104845704a5SEric Dumazet 			tp->snd_nxt++;
3105845704a5SEric Dumazet 			return;
3106845704a5SEric Dumazet 		}
31071da177e4SLinus Torvalds 	} else {
3108845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3109845704a5SEric Dumazet 		if (unlikely(!skb)) {
3110845704a5SEric Dumazet 			if (tskb)
3111845704a5SEric Dumazet 				goto coalesce;
3112845704a5SEric Dumazet 			return;
31131da177e4SLinus Torvalds 		}
3114e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3115d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3116a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
31171da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3118e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3119a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
31201da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
31211da177e4SLinus Torvalds 	}
3122845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
31231da177e4SLinus Torvalds }
31241da177e4SLinus Torvalds 
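/* Editor's note: illustrative sketch only.  Coalescing the FIN onto an
 * existing skb, as tcp_send_fin() does above, costs one sequence number
 * and no new allocation: both the skb's end_seq and the socket's
 * write_seq advance by one.
 */
#if 0
static void example_coalesce_fin(struct tcp_sock *tp, struct sk_buff *tskb)
{
	TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
	TCP_SKB_CB(tskb)->end_seq++;	/* FIN consumes one sequence number */
	tp->write_seq++;
}
#endif
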
31251da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
31261da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
31271da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
312865bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31291da177e4SLinus Torvalds  */
3130dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31311da177e4SLinus Torvalds {
31321da177e4SLinus Torvalds 	struct sk_buff *skb;
31331da177e4SLinus Torvalds 
31347cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31357cc2b043SGao Feng 
31361da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31371da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31381da177e4SLinus Torvalds 	if (!skb) {
31394e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31401da177e4SLinus Torvalds 		return;
31411da177e4SLinus Torvalds 	}
31421da177e4SLinus Torvalds 
31431da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31441da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3145e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3146a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31479a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
31481da177e4SLinus Torvalds 	/* Send it off. */
3149dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
31504e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3151c24b14c4SSong Liu 
3152c24b14c4SSong Liu 	/* trace_tcp_send_reset() expects the skb that caused the RST;
3153c24b14c4SSong Liu 	 * the skb built here is a different one, so pass NULL.
3154c24b14c4SSong Liu 	 */
3155c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
31561da177e4SLinus Torvalds }
31571da177e4SLinus Torvalds 
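/* Editor's note: hypothetical, simplified caller sketch.  Roughly how the
 * close path uses tcp_send_active_reset() when unread data remains
 * (RFC 2525, section 2.17); the real tcp_close() does considerably more.
 */
#if 0
static void example_abortive_close(struct sock *sk)
{
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
	tcp_set_state(sk, TCP_CLOSE);
	tcp_send_active_reset(sk, sk->sk_allocation);
}
#endif
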
315867edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
315967edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
31601da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
31611da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
31621da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
31631da177e4SLinus Torvalds  */
31641da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
31651da177e4SLinus Torvalds {
31661da177e4SLinus Torvalds 	struct sk_buff *skb;
31671da177e4SLinus Torvalds 
316875c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
316951456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
317075c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31711da177e4SLinus Torvalds 		return -EFAULT;
31721da177e4SLinus Torvalds 	}
31734de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31741da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3175e2080072SEric Dumazet 			struct sk_buff *nskb;
3176e2080072SEric Dumazet 
3177e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3178e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3179e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
318051456b29SIan Morris 			if (!nskb)
31811da177e4SLinus Torvalds 				return -ENOMEM;
3182e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
318375c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3184f4a775d1SEric Dumazet 			__skb_header_release(nskb);
318575c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
31863ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
31873ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
31881da177e4SLinus Torvalds 			skb = nskb;
31891da177e4SLinus Torvalds 		}
31901da177e4SLinus Torvalds 
31914de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3192735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
31931da177e4SLinus Torvalds 	}
3194dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds 
31974aea39c1SEric Dumazet /**
31984aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
31994aea39c1SEric Dumazet  * @sk: listener socket
32004aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
32014aea39c1SEric Dumazet  * @req: request_sock pointer
32024aea39c1SEric Dumazet  *
32034aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
32044aea39c1SEric Dumazet  * @dst is consumed : Caller should not use it again.
32054aea39c1SEric Dumazet  */
32065d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3207e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3208ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3209b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
32101da177e4SLinus Torvalds {
32112e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
32125d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
321380f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
32145d062de7SEric Dumazet 	struct tcp_out_options opts;
32155d062de7SEric Dumazet 	struct sk_buff *skb;
3216bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
32175d062de7SEric Dumazet 	struct tcphdr *th;
3218f5fff5dcSTom Quetchenbach 	int mss;
32191da177e4SLinus Torvalds 
3220ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
32214aea39c1SEric Dumazet 	if (unlikely(!skb)) {
32224aea39c1SEric Dumazet 		dst_release(dst);
32231da177e4SLinus Torvalds 		return NULL;
32244aea39c1SEric Dumazet 	}
32251da177e4SLinus Torvalds 	/* Reserve space for headers. */
32261da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32271da177e4SLinus Torvalds 
3228b3d05147SEric Dumazet 	switch (synack_type) {
3229b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32309e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3231b3d05147SEric Dumazet 		break;
3232b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3233b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3234b3d05147SEric Dumazet 		 * to avoid false sharing.
3235b3d05147SEric Dumazet 		 */
3236b3d05147SEric Dumazet 		break;
3237b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3238ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3239ca6fb065SEric Dumazet 		 * multiple CPUs might call us concurrently.
3240ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3241ca6fb065SEric Dumazet 		 */
3242ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3243b3d05147SEric Dumazet 		break;
3244ca6fb065SEric Dumazet 	}
32454aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32461da177e4SLinus Torvalds 
32473541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3248f5fff5dcSTom Quetchenbach 
324933ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
32508b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
32518b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3252d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
32538b5f12d0SFlorian Westphal 	else
32548b5f12d0SFlorian Westphal #endif
3255d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = tcp_clock_ns();
325680f03e27SEric Dumazet 
325780f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
325880f03e27SEric Dumazet 	rcu_read_lock();
3259fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
326080f03e27SEric Dumazet #endif
326158d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
326260e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
326360e2a778SUrsula Braun 					     foc) + sizeof(*th);
326433ad798cSAdam Langley 
3265aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3266aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
32671da177e4SLinus Torvalds 
3268ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32691da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
32701da177e4SLinus Torvalds 	th->syn = 1;
32711da177e4SLinus Torvalds 	th->ack = 1;
32726ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3273b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3274634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3275e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
32763b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
32773b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
32788336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
32798336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32801da177e4SLinus Torvalds 
32811da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3282ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
32835d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
32841da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
328590bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3286cfb6eeb4SYOSHIFUJI Hideaki 
3287cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3288cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
328980f03e27SEric Dumazet 	if (md5)
3290bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
329139f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
329280f03e27SEric Dumazet 	rcu_read_unlock();
3293cfb6eeb4SYOSHIFUJI Hideaki #endif
3294cfb6eeb4SYOSHIFUJI Hideaki 
3295b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any); clean up our debris. */
32962456e855SThomas Gleixner 	skb->tstamp = 0;
32971da177e4SLinus Torvalds 	return skb;
32981da177e4SLinus Torvalds }
32994bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
33001da177e4SLinus Torvalds 
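/* Editor's note: hypothetical, simplified caller sketch.  An AF-specific
 * send_synack() implementation typically builds the skb here and then
 * transmits it; note that @dst is consumed by tcp_make_synack(), so the
 * caller must not touch it afterwards.
 */
#if 0
static int example_send_synack(const struct sock *sk, struct dst_entry *dst,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc)
{
	struct sk_buff *skb;

	skb = tcp_make_synack(sk, dst, req, foc, TCP_SYNACK_NORMAL);
	if (!skb)
		return -ENOMEM;
	/* The real callers fill in the network header and transmit,
	 * e.g. via ip_build_and_send_pkt() for IPv4.
	 */
	return 0;
}
#endif
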
330181164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
330281164413SDaniel Borkmann {
330381164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
330481164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
330581164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
330681164413SDaniel Borkmann 
330781164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
330881164413SDaniel Borkmann 		return;
330981164413SDaniel Borkmann 
331081164413SDaniel Borkmann 	rcu_read_lock();
331181164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
331281164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
331381164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
331481164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
331581164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
331681164413SDaniel Borkmann 	}
331781164413SDaniel Borkmann 	rcu_read_unlock();
331881164413SDaniel Borkmann }
331981164413SDaniel Borkmann 
332067edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3321f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
33221da177e4SLinus Torvalds {
3323cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
33241da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33251da177e4SLinus Torvalds 	__u8 rcv_wscale;
332613d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33271da177e4SLinus Torvalds 
33281da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33291da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33301da177e4SLinus Torvalds 	 */
33315d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33325d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33335d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33341da177e4SLinus Torvalds 
3335cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
333600db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3337cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3338cfb6eeb4SYOSHIFUJI Hideaki #endif
3339cfb6eeb4SYOSHIFUJI Hideaki 
33401da177e4SLinus Torvalds 	/* If the user set TCP_MAXSEG, record it as the MSS clamp. */
33411da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33421da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33431da177e4SLinus Torvalds 	tp->max_window = 0;
33445d424d5aSJohn Heffner 	tcp_mtup_init(sk);
33451da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
33461da177e4SLinus Torvalds 
334781164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
334881164413SDaniel Borkmann 
33491da177e4SLinus Torvalds 	if (!tp->window_clamp)
33501da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
33513541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3352f5fff5dcSTom Quetchenbach 
33531da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
33541da177e4SLinus Torvalds 
3355e88c64f0SHagen Paul Pfeifer 	/* Limit the window selection if the user enforces a smaller rx buffer. */
3356e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3357e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3358e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3359e88c64f0SHagen Paul Pfeifer 
336013d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
336113d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
336213d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
336313d3b1ebSLawrence Brakmo 
3364ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
33651da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33661da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33671da177e4SLinus Torvalds 				  &tp->window_clamp,
33689bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
336931d12926Slaurent chavey 				  &rcv_wscale,
337013d3b1ebSLawrence Brakmo 				  rcv_wnd);
33711da177e4SLinus Torvalds 
33721da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
33731da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
33741da177e4SLinus Torvalds 
33751da177e4SLinus Torvalds 	sk->sk_err = 0;
33761da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
33771da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3378ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
33797f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
33801da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
33811da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
338233f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3383370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3384ee995283SPavel Emelyanov 
3385ee995283SPavel Emelyanov 	if (likely(!tp->repair))
33861da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3387c7781a6eSAndrew Vagin 	else
338870eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3389ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3390ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
33911da177e4SLinus Torvalds 
33928550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3393463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
33941da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
33951da177e4SLinus Torvalds }
33961da177e4SLinus Torvalds 
3397783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3398783237e8SYuchung Cheng {
3399783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3400783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3401783237e8SYuchung Cheng 
3402783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3403f4a775d1SEric Dumazet 	__skb_header_release(skb);
3404783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3405783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3406783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3407783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3408783237e8SYuchung Cheng }
3409783237e8SYuchung Cheng 
3410783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3411783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3412783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3413783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3414783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3415783237e8SYuchung Cheng  * a regular SYN with a Fast Open cookie request option.
3416783237e8SYuchung Cheng  */
3417783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3418783237e8SYuchung Cheng {
3419783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3420783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3421065263f4SWei Wang 	int space, err = 0;
3422355a901eSEric Dumazet 	struct sk_buff *syn_data;
3423783237e8SYuchung Cheng 
342467da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3425065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3426783237e8SYuchung Cheng 		goto fallback;
3427783237e8SYuchung Cheng 
3428783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3429783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3430783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3431783237e8SYuchung Cheng 	 */
34323541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34333541f9e8SEric Dumazet 
34341b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3435783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3436783237e8SYuchung Cheng 
3437f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3438f5ddcbbbSEric Dumazet 
3439f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3440f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3441f5ddcbbbSEric Dumazet 
3442eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3443355a901eSEric Dumazet 	if (!syn_data)
3444783237e8SYuchung Cheng 		goto fallback;
3445355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3446355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
344707e100f9SEric Dumazet 	if (space) {
344807e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
344957be5bdaSAl Viro 					    &fo->data->msg_iter);
345057be5bdaSAl Viro 		if (unlikely(!copied)) {
3451ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3452355a901eSEric Dumazet 			kfree_skb(syn_data);
3453783237e8SYuchung Cheng 			goto fallback;
3454783237e8SYuchung Cheng 		}
345557be5bdaSAl Viro 		if (copied != space) {
345657be5bdaSAl Viro 			skb_trim(syn_data, copied);
345757be5bdaSAl Viro 			space = copied;
345857be5bdaSAl Viro 		}
345907e100f9SEric Dumazet 	}
3460355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3461355a901eSEric Dumazet 	if (space == fo->size)
3462355a901eSEric Dumazet 		fo->data = NULL;
3463355a901eSEric Dumazet 	fo->copied = space;
3464783237e8SYuchung Cheng 
3465355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34660f87230dSFrancis Yan 	if (syn_data->len)
34670f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3468355a901eSEric Dumazet 
3469355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3470355a901eSEric Dumazet 
3471d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3472355a901eSEric Dumazet 
3473355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3474355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data) that we keep
3475355a901eSEric Dumazet 	 * in the write queue in case of a retransmit, as we also have
3476355a901eSEric Dumazet 	 * the SYN packet (with no data) in the same queue.
3477431a9124SEric Dumazet 	 */
3478355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3479355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3480355a901eSEric Dumazet 	if (!err) {
348167da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
348275c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3483f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3484783237e8SYuchung Cheng 		goto done;
3485783237e8SYuchung Cheng 	}
3486783237e8SYuchung Cheng 
348775c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
348875c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3489b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3490b5b7db8dSEric Dumazet 
3491783237e8SYuchung Cheng fallback:
3492783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3493783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3494783237e8SYuchung Cheng 		fo->cookie.len = 0;
3495783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3496783237e8SYuchung Cheng 	if (err)
3497783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3498783237e8SYuchung Cheng done:
3499783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3500783237e8SYuchung Cheng 	return err;
3501783237e8SYuchung Cheng }
3502783237e8SYuchung Cheng 
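/* Editor's note: illustrative sketch only.  It restates how
 * tcp_send_syn_data() above bounds the payload of a SYN+data packet:
 * the MSS derived from the cached PMTU, minus the maximum option space
 * reserved for middleboxes, further capped by the Fast Open request size
 * and by what fits in an order-0 allocation.
 */
#if 0
static int example_syn_data_space(struct sock *sk, int fo_size)
{
	int space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
		    MAX_TCP_OPTION_SPACE;

	space = min_t(int, space, fo_size);
	return min_t(int, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
}
#endif
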
350367edfef7SAndi Kleen /* Build a SYN and send it off. */
35041da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
35051da177e4SLinus Torvalds {
35061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35071da177e4SLinus Torvalds 	struct sk_buff *buff;
3508ee586811SEric Paris 	int err;
35091da177e4SLinus Torvalds 
3510de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
35118ba60924SEric Dumazet 
35128ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
35138ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
35148ba60924SEric Dumazet 
35151da177e4SLinus Torvalds 	tcp_connect_init(sk);
35161da177e4SLinus Torvalds 
35172b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
35182b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
35192b916477SAndrey Vagin 		return 0;
35202b916477SAndrey Vagin 	}
35212b916477SAndrey Vagin 
3522eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3523355a901eSEric Dumazet 	if (unlikely(!buff))
35241da177e4SLinus Torvalds 		return -ENOBUFS;
35251da177e4SLinus Torvalds 
3526a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35279a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35289a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3529783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3530735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
353175c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35321da177e4SLinus Torvalds 
3533783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3534783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3535783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3536ee586811SEric Paris 	if (err == -ECONNREFUSED)
3537ee586811SEric Paris 		return err;
3538bd37a088SWei Yongjun 
3539bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3540bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3541bd37a088SWei Yongjun 	 */
3542bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3543bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3544b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3545b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3546b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3547b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3548b5b7db8dSEric Dumazet 	}
354981cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
35501da177e4SLinus Torvalds 
35511da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
35523f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35533f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
35541da177e4SLinus Torvalds 	return 0;
35551da177e4SLinus Torvalds }
35564bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
35571da177e4SLinus Torvalds 
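/* Editor's note: hypothetical, simplified caller sketch.  An AF-specific
 * connect(), such as tcp_v4_connect(), routes the flow, binds a source
 * port, picks an initial sequence number, and then hands off here.
 */
#if 0
static int example_af_connect(struct sock *sk)
{
	/* ... route lookup, port selection, ISN generation ... */
	return tcp_connect(sk);	/* builds and sends the SYN */
}
#endif
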
35581da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
35591da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
35601da177e4SLinus Torvalds  * for details.
35611da177e4SLinus Torvalds  */
35621da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
35631da177e4SLinus Torvalds {
3564463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3565463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35661da177e4SLinus Torvalds 	unsigned long timeout;
35671da177e4SLinus Torvalds 
35681da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3569463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
35701da177e4SLinus Torvalds 		int max_ato = HZ / 2;
35711da177e4SLinus Torvalds 
3572056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3573056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
35741da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
35751da177e4SLinus Torvalds 
35761da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
35771da177e4SLinus Torvalds 
35781da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3579463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
35801da177e4SLinus Torvalds 		 * directly.
35811da177e4SLinus Torvalds 		 */
3582740b0f18SEric Dumazet 		if (tp->srtt_us) {
3583740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3584740b0f18SEric Dumazet 					TCP_DELACK_MIN);
35851da177e4SLinus Torvalds 
35861da177e4SLinus Torvalds 			if (rtt < max_ato)
35871da177e4SLinus Torvalds 				max_ato = rtt;
35881da177e4SLinus Torvalds 		}
35891da177e4SLinus Torvalds 
35901da177e4SLinus Torvalds 		ato = min(ato, max_ato);
35911da177e4SLinus Torvalds 	}
35921da177e4SLinus Torvalds 
35931da177e4SLinus Torvalds 	/* Stay within the limit we were given */
35941da177e4SLinus Torvalds 	timeout = jiffies + ato;
35951da177e4SLinus Torvalds 
35961da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3597463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
35981da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
35991da177e4SLinus Torvalds 		 * send ACK now.
36001da177e4SLinus Torvalds 		 */
3601463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3602463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
36031da177e4SLinus Torvalds 			tcp_send_ack(sk);
36041da177e4SLinus Torvalds 			return;
36051da177e4SLinus Torvalds 		}
36061da177e4SLinus Torvalds 
3607463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3608463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
36091da177e4SLinus Torvalds 	}
3610463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3611463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3612463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
36131da177e4SLinus Torvalds }
36141da177e4SLinus Torvalds 
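/* Editor's note: illustrative sketch only.  It restates how the function
 * above bounds the delayed-ACK timeout: ato is clamped by HZ/2 (or by
 * TCP_DELACK_MAX in pingpong mode) and, when an RTT estimate exists, by
 * the smoothed RTT, so an ACK is never delayed much past one RTT.
 */
#if 0
static int example_clamp_ato(const struct tcp_sock *tp, int ato, int max_ato)
{
	if (tp->srtt_us) {
		int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
				TCP_DELACK_MIN);

		if (rtt < max_ato)
			max_ato = rtt;
	}
	return min(ato, max_ato);
}
#endif
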
36151da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
36162987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
36171da177e4SLinus Torvalds {
36181da177e4SLinus Torvalds 	struct sk_buff *buff;
36191da177e4SLinus Torvalds 
3620058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3621058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3622058dc334SIlpo Järvinen 		return;
3623058dc334SIlpo Järvinen 
36241da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36251da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36261da177e4SLinus Torvalds 	 * sock.
36271da177e4SLinus Torvalds 	 */
36287450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36297450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36307450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3631463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3632463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36333f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36343f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36351da177e4SLinus Torvalds 		return;
36361da177e4SLinus Torvalds 	}
36371da177e4SLinus Torvalds 
36381da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36391da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3640a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36411da177e4SLinus Torvalds 
364298781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
364398781965SEric Dumazet 	 * too much.
364498781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
364598781965SEric Dumazet 	 */
364698781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
364798781965SEric Dumazet 
36481da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
36492987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
36501da177e4SLinus Torvalds }
365127cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
36522987babbSYuchung Cheng 
36532987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
36542987babbSYuchung Cheng {
36552987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
36561da177e4SLinus Torvalds }
36571da177e4SLinus Torvalds 
36581da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
36591da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
36601da177e4SLinus Torvalds  *
36611da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
36621da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
36631da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
36641da177e4SLinus Torvalds  *
36651da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
36661da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
36671da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
36681da177e4SLinus Torvalds  */
3669e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
36701da177e4SLinus Torvalds {
36711da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36721da177e4SLinus Torvalds 	struct sk_buff *skb;
36731da177e4SLinus Torvalds 
36741da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
36757450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
36767450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
367751456b29SIan Morris 	if (!skb)
36781da177e4SLinus Torvalds 		return -1;
36791da177e4SLinus Torvalds 
36801da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
36811da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36821da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
36831da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
36841da177e4SLinus Torvalds 	 * send it.
36851da177e4SLinus Torvalds 	 */
3686a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3687e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
36887450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
36891da177e4SLinus Torvalds }
36901da177e4SLinus Torvalds 
3691385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3692ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3693ee995283SPavel Emelyanov {
3694ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3695ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
36969a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3697e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3698ee995283SPavel Emelyanov 	}
3699ee995283SPavel Emelyanov }
3700ee995283SPavel Emelyanov 
370167edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3702e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
37031da177e4SLinus Torvalds {
37041da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37051da177e4SLinus Torvalds 	struct sk_buff *skb;
37061da177e4SLinus Torvalds 
3707058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3708058dc334SIlpo Järvinen 		return -1;
3709058dc334SIlpo Järvinen 
371000db4124SIan Morris 	skb = tcp_send_head(sk);
371100db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
37121da177e4SLinus Torvalds 		int err;
37130c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
371490840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
37151da177e4SLinus Torvalds 
37161da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
37171da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
37181da177e4SLinus Torvalds 
37191da177e4SLinus Torvalds 		/* We are probing the opening of a window
37201da177e4SLinus Torvalds 		 * but the window size is != 0; this must have been
37211da177e4SLinus Torvalds 		 * a result of sender-side SWS avoidance.
37221da177e4SLinus Torvalds 		 */
37231da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
37241da177e4SLinus Torvalds 		    skb->len > mss) {
37251da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37264de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
372775c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
372875c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37291da177e4SLinus Torvalds 				return -1;
37301da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37315bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37321da177e4SLinus Torvalds 
37334de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3734dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
373566f5fe62SIlpo Järvinen 		if (!err)
373666f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37371da177e4SLinus Torvalds 		return err;
37381da177e4SLinus Torvalds 	} else {
373933f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3740e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3741e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37421da177e4SLinus Torvalds 	}
37431da177e4SLinus Torvalds }
37441da177e4SLinus Torvalds 
37451da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
37461da177e4SLinus Torvalds  * send a partial packet, else a zero-window probe.
37471da177e4SLinus Torvalds  */
37481da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
37491da177e4SLinus Torvalds {
3750463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
37511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3752c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3753fcdd1cf4SEric Dumazet 	unsigned long probe_max;
37541da177e4SLinus Torvalds 	int err;
37551da177e4SLinus Torvalds 
3756e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
37571da177e4SLinus Torvalds 
375875c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
37591da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
37606687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3761463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
37621da177e4SLinus Torvalds 		return;
37631da177e4SLinus Torvalds 	}
37641da177e4SLinus Torvalds 
37651da177e4SLinus Torvalds 	if (err <= 0) {
3766c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3767463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
37686687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3769fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
37701da177e4SLinus Torvalds 	} else {
37711da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
37726687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
37731da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
37741da177e4SLinus Torvalds 		 *
37751da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
37761da177e4SLinus Torvalds 		 */
37776687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
37786687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3779fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
37801da177e4SLinus Torvalds 	}
37813f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
378221c8fe99SEric Dumazet 			     tcp_probe0_when(sk, probe_max),
37833f80e08fSEric Dumazet 			     TCP_RTO_MAX,
37843f80e08fSEric Dumazet 			     NULL);
37851da177e4SLinus Torvalds }
37865db92c99SOctavian Purdila 
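/* Editor's note: illustrative sketch only.  The probe timer armed above
 * grows exponentially with icsk_backoff and is clamped to the supplied
 * maximum; this mirrors, roughly, what tcp_probe0_when() computes.
 */
#if 0
static unsigned long example_probe0_when(const struct sock *sk,
					 unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
#endif
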
3787ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
37885db92c99SOctavian Purdila {
37895db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
37905db92c99SOctavian Purdila 	struct flowi fl;
37915db92c99SOctavian Purdila 	int res;
37925db92c99SOctavian Purdila 
379358d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3794b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
37955db92c99SOctavian Purdila 	if (!res) {
379690bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
379702a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
37987e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
37997e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3800cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
38015db92c99SOctavian Purdila 	}
38025db92c99SOctavian Purdila 	return res;
38035db92c99SOctavian Purdila }
38045db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
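
/* Editor's note: hypothetical, simplified caller sketch.  The request-sock
 * SYN-ACK timer is the typical user of tcp_rtx_synack(); on success the
 * retransmit count is bumped, and the request is dropped once the
 * configured number of retries is exhausted.
 */
#if 0
static void example_synack_retransmit(const struct sock *sk_listener,
				      struct request_sock *req)
{
	if (!tcp_rtx_synack(sk_listener, req))
		req->num_retrans++;
}
#endif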
3805