/* xref: /linux/net/ipv4/tcp_output.c (revision ec641b39457e17774313b66697a8a1dc070257bd) */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	if (val > tp->tcp_clock_cache)
		tp->tcp_clock_cache = val;

	val = div_u64(val, NSEC_PER_USEC);
	if (val > tp->tcp_mstamp)
		tp->tcp_mstamp = val;
}
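
/* Worked example: if tcp_clock_ns() returns 5000123456, tcp_clock_cache
 * keeps the nanosecond value and tcp_mstamp becomes 5000123456 / 1000 =
 * 5000123 us. Both fields are only ever moved forward, which is what makes
 * the two clocks monotonic.
 */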

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* Return SND.NXT if the window was not shrunk, or if the amount shrunk was
 * less than one window scaling factor (a loss of precision).
 * If the window has been shrunk, what should we send? It is not clear at all.
 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's settle for this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
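
/* Example: with rcv_wscale = 7 the scaling granularity is 1 << 7 = 128
 * bytes, so a snd_nxt that overshoots tcp_wnd_end() by up to 127 bytes is
 * still returned as acceptable by the test above.
 */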

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    a large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
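
/* Worked example: with icsk_rto = 200 ms, snd_cwnd = 40 and
 * restart_cwnd = 10, an idle delta of 500 ms runs the halving loop twice
 * (40 -> 20 -> 10), so transmission restarts from 10 segments.
 */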

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If this packet is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
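
/* Worked example: for a 4 MB receive budget (space = 4194304) the loop
 * above picks rcv_wscale = 7, since 4194304 >> 6 = 65536 still exceeds
 * U16_MAX while 4194304 >> 7 = 32768 fits, so the peer must multiply our
 * advertised window field by 128.
 */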

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
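
/* Example of the "never shrink" rule above: if the previously advertised
 * window (cur_win) is 10000 bytes and __tcp_select_window() now computes
 * 8000, we keep advertising ALIGN(10000, 1 << rcv_wscale) instead, so the
 * receiver never sees the right edge of the window move left.
 */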

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
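
/* SYN and FIN each occupy one unit of sequence space (RFC 793), which is
 * why end_seq is bumped past seq for those flags even though the skb
 * carries no payload.
 */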

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * any particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);
}
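
/* Wire-format example: a timestamp option on an established connection is
 * emitted above as the classic 12-byte block NOP(1) NOP(1) kind=8 len=10
 * followed by the two 32-bit tsval/tsecr words, keeping the option list
 * 32-bit aligned.
 */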

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option.  The option bytes which will be seen
	 * in normal data packets (should timestamps be used) must be counted
	 * in the MSS advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.  So account for
	 * this fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when
	 * we should, and thus we won't abide by the delayed ACK rules
	 * correctly.  SACKs don't matter, we never delay an ACK when we have
	 * any of those going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
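
/* Space accounting example: MAX_TCP_OPTION_SPACE is 40 bytes; a SYN with
 * MSS (4) + timestamps (12 aligned) + window scale (4 aligned) + SACK
 * permitted (folded into the TS block) consumes 20 bytes, leaving 20 for
 * a Fast Open cookie and/or the experimental SMC option.
 */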

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
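
/* Example: with timestamps in use, size = 12, so remaining = 28 bytes and
 * at most (28 - 4) / 8 = 3 SACK blocks fit in an established-flow segment.
 */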

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc + dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
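
/* Flow sketch: tcp_wfree() marks a throttled socket TSQF_QUEUED and links
 * it onto this cpu's tsq->head; tcp_tasklet_func() later walks that list
 * and calls tcp_tsq_handler() for each socket, restarting transmission
 * outside the skb destructor context where xmit is forbidden.
 */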

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part:
	 * We are called from release_sock() with:
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
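
/* The cmpxchg() loop above claims all deferred-work bits in one atomic
 * step: e.g. if both TCPF_TSQ_DEFERRED and TCPF_DELACK_TIMER_DEFERRED are
 * set, a single successful exchange clears them together, and each handler
 * then drops the socket reference its timer/tasklet took earlier.
 */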

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives:
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}
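
/* Note the TSQF_THROTTLED -> TSQF_QUEUED transition above: only a socket
 * that was throttled and is not already queued gets linked onto the
 * per-cpu list, so each socket appears at most once per tasklet run.
 */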

/* Note: Called under soft irq.
 * We can call TCP stack right away, unless socket is owned by user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace first 10 MSS
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
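
/* Pacing arithmetic example: at sk_pacing_rate = 125000000 bytes/sec
 * (1 Gbit/s), a 64 KB skb yields len_ns = 65536 * 1e9 / 125e6 = 524288 ns,
 * so tcp_wstamp_ns advances roughly half a millisecond per such skb, minus
 * up to half of that as credit for time already elapsed.
 */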

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct sk_buff *oskb = NULL;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	u64 prior_wstamp;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);

	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		oskb = skb;

		tcp_skb_tsorted_save(oskb) {
			if (unlikely(skb_cloned(oskb)))
				skb = pskb_copy(oskb, gfp_mask);
			else
				skb = skb_clone(oskb, gfp_mask);
		} tcp_skb_tsorted_restore(oskb);

		if (unlikely(!skb))
			return -ENOBUFS;
	}

	prior_wstamp = tp->tcp_wstamp_ns;
	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);

	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if packet is looped back :
	 * Other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
	skb->pfmemalloc = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1090c3a2e837SJulian Anastasov 
10911da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1092ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1093c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1094c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10951da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10962987babbSYuchung Cheng 	th->ack_seq		= htonl(rcv_nxt);
1097df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10984de075e0SEric Dumazet 					tcb->tcp_flags);
1099dfb4b9dcSDavid S. Miller 
11001da177e4SLinus Torvalds 	th->check		= 0;
11011da177e4SLinus Torvalds 	th->urg_ptr		= 0;
11021da177e4SLinus Torvalds 
110333f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
11047691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
11057691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
11061da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
11071da177e4SLinus Torvalds 			th->urg = 1;
11087691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
11090eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
11107691367dSHerbert Xu 			th->urg = 1;
11117691367dSHerbert Xu 		}
11121da177e4SLinus Torvalds 	}
11131da177e4SLinus Torvalds 
1114bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
111551466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1116ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1117ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1118ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1119ea1627c2SEric Dumazet 	} else {
1120ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1121ea1627c2SEric Dumazet 		 * is never scaled.
1122ea1627c2SEric Dumazet 		 */
1123ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1124ea1627c2SEric Dumazet 	}
1125cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1126cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1127cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1128a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1129bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
113039f8e58eSEric Dumazet 					       md5, sk, skb);
1131cfb6eeb4SYOSHIFUJI Hideaki 	}
1132cfb6eeb4SYOSHIFUJI Hideaki #endif
1133cfb6eeb4SYOSHIFUJI Hideaki 
1134bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11351da177e4SLinus Torvalds 
11364de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
113727cde44aSYuchung Cheng 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
11381da177e4SLinus Torvalds 
1139a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1140cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1141a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1142ba113c3aSWei Wang 		tp->bytes_sent += skb->len - tcp_header_size;
1143a44d6eacSMartin KaFai Lau 	}
11441da177e4SLinus Torvalds 
1145bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1146aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1147aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11481da177e4SLinus Torvalds 
11492efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1150f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1151cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1152f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1153cd7d8498SEric Dumazet 
1154d3edd06eSEric Dumazet 	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1155971f10ecSEric Dumazet 
1156971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1157971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1158971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1159971f10ecSEric Dumazet 
1160b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11617faee5c0SEric Dumazet 
11628c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11635ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11648c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11658c72c65bSEric Dumazet 	}
1166fc225799SEric Dumazet 	if (!err && oskb) {
1167a7a25630SEric Dumazet 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1168fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1169fc225799SEric Dumazet 	}
11708c72c65bSEric Dumazet 	return err;
11711da177e4SLinus Torvalds }
11721da177e4SLinus Torvalds 
11732987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
11742987babbSYuchung Cheng 			    gfp_t gfp_mask)
11752987babbSYuchung Cheng {
11762987babbSYuchung Cheng 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
11772987babbSYuchung Cheng 				  tcp_sk(sk)->rcv_nxt);
11782987babbSYuchung Cheng }
11792987babbSYuchung Cheng 
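/* A standalone sketch (hypothetical helper) of the doff/flags write in
 * __tcp_transmit_skb() above: the 7th 16-bit word of the TCP header packs the
 * 4-bit data offset (header length in 32-bit words) into the top nibble, with
 * the flag bits below it. For a 32-byte header (20 bytes plus 12 of options)
 * carrying PSH|ACK (0x18), this yields htons(((32 >> 2) << 12) | 0x18) =
 * htons(0x8018).
 */
static __be16 tcp_doff_flags_word(unsigned int tcp_header_size, u8 tcp_flags)
{
	return htons(((tcp_header_size >> 2) << 12) | tcp_flags);
}
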
118067edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11811da177e4SLinus Torvalds  *
11821da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11831da177e4SLinus Torvalds  * otherwise socket can stall.
11841da177e4SLinus Torvalds  */
11851da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11861da177e4SLinus Torvalds {
11871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11901da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1191f4a775d1SEric Dumazet 	__skb_header_release(skb);
1192fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11933ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11943ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11951da177e4SLinus Torvalds }
11961da177e4SLinus Torvalds 
119767edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
11985bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1199f6302d1dSDavid S. Miller {
12004a64fd6cSEric Dumazet 	if (skb->len <= mss_now) {
1201f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1202f6302d1dSDavid S. Miller 		 * non-TSO case.
1203f6302d1dSDavid S. Miller 		 */
1204cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1205f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1206f6302d1dSDavid S. Miller 	} else {
1207cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1208f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
12091da177e4SLinus Torvalds 	}
12101da177e4SLinus Torvalds }
12111da177e4SLinus Torvalds 
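/* Worked example for tcp_set_skb_tso_segs() above: a 4000-byte skb at
 * mss_now = 1460 gets pcount = DIV_ROUND_UP(4000, 1460) = 3 and
 * tcp_gso_size = 1460 (two full segments plus a 1080-byte tail), while a
 * 1000-byte skb takes the cheap path: pcount = 1, tcp_gso_size = 0.
 */
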
1212797108d1SIlpo Järvinen /* The pcount of an skb in the middle of the write queue got changed; we must
1213797108d1SIlpo Järvinen  * tweak various counters to keep them consistent.
1214797108d1SIlpo Järvinen  */
1215cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1216797108d1SIlpo Järvinen {
1217797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1218797108d1SIlpo Järvinen 
1219797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1220797108d1SIlpo Järvinen 
1221797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1222797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1223797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1224797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1225797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1226797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1227797108d1SIlpo Järvinen 
1228797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1229797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1230797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1231797108d1SIlpo Järvinen 
1232797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1233797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1234713bafeaSYuchung Cheng 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1235797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1236797108d1SIlpo Järvinen 
1237797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1238797108d1SIlpo Järvinen }
1239797108d1SIlpo Järvinen 
12400a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12410a2cf20cSSoheil Hassas Yeganeh {
12420a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12430a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12440a2cf20cSSoheil Hassas Yeganeh }
12450a2cf20cSSoheil Hassas Yeganeh 
1246490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1247490cc7d0SWillem de Bruijn {
1248490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1249490cc7d0SWillem de Bruijn 
12500a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1251490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1252490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1253490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1254490cc7d0SWillem de Bruijn 
1255490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1256490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1257490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1258b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1259b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1260490cc7d0SWillem de Bruijn 	}
1261490cc7d0SWillem de Bruijn }
1262490cc7d0SWillem de Bruijn 
1263a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1264a166140eSMartin KaFai Lau {
1265a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1266a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1267a166140eSMartin KaFai Lau }
1268a166140eSMartin KaFai Lau 
126975c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
127075c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
127175c119afSEric Dumazet 					 struct sk_buff *buff,
127275c119afSEric Dumazet 					 struct sock *sk,
127375c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
127475c119afSEric Dumazet {
127575c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
127675c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
127775c119afSEric Dumazet 	else
127875c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
127975c119afSEric Dumazet }
128075c119afSEric Dumazet 
12811da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12821da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12831da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12841da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12851da177e4SLinus Torvalds  */
128675c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
128775c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12886cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12891da177e4SLinus Torvalds {
12901da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12911da177e4SLinus Torvalds 	struct sk_buff *buff;
12926475be16SDavid S. Miller 	int nsize, old_factor;
1293b60b49eaSHerbert Xu 	int nlen;
12949ce01461SIlpo Järvinen 	u8 flags;
12951da177e4SLinus Torvalds 
12962fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12972fceec13SIlpo Järvinen 		return -EINVAL;
12986a438bbeSStephen Hemminger 
12991da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
13001da177e4SLinus Torvalds 	if (nsize < 0)
13011da177e4SLinus Torvalds 		nsize = 0;
13021da177e4SLinus Torvalds 
13036cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
13041da177e4SLinus Torvalds 		return -ENOMEM;
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1307eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
130851456b29SIan Morris 	if (!buff)
13091da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1310ef5cb973SHerbert Xu 
13113ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
13123ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1313b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1314b60b49eaSHerbert Xu 	buff->truesize += nlen;
1315b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13181da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13191da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13201da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13234de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13244de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13254de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1326e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1327a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds 	skb_split(skb, buff, len);
13301da177e4SLinus Torvalds 
133198be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
13321da177e4SLinus Torvalds 
1333a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1334490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13351da177e4SLinus Torvalds 
13366475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13376475be16SDavid S. Miller 
13381da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13395bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13405bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13411da177e4SLinus Torvalds 
1342b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1343b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1344b9f64820SYuchung Cheng 
13456475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13466475be16SDavid S. Miller 	 * adjust the various packet counters.
13476475be16SDavid S. Miller 	 */
1348cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13496475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13506475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13511da177e4SLinus Torvalds 
1352797108d1SIlpo Järvinen 		if (diff)
1353797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13541da177e4SLinus Torvalds 	}
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1357f4a775d1SEric Dumazet 	__skb_header_release(buff);
135875c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1359f67971e6SEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1360e2080072SEric Dumazet 		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13611da177e4SLinus Torvalds 
13621da177e4SLinus Torvalds 	return 0;
13631da177e4SLinus Torvalds }
13641da177e4SLinus Torvalds 
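/* Worked example for tcp_fragment() above: splitting a 2920-byte skb covering
 * seq 1000..3920 (pcount 2 at mss_now = 1460) at len = 1460 shrinks skb to
 * seq 1000..2460 and links a new buff for 2460..3920 right after it; PSH/FIN,
 * if set, migrate to buff, and no pcount adjustment is needed here since
 * old_factor (2) equals 1 + 1 after the split.
 */
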
1365f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1366f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13671da177e4SLinus Torvalds  */
13687162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13691da177e4SLinus Torvalds {
13707b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13711da177e4SLinus Torvalds 	int i, k, eat;
13721da177e4SLinus Torvalds 
13734fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13744fa48bf3SEric Dumazet 	if (eat) {
13754fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13764fa48bf3SEric Dumazet 		len -= eat;
13774fa48bf3SEric Dumazet 		if (!len)
13787162fb24SEric Dumazet 			return 0;
13794fa48bf3SEric Dumazet 	}
13801da177e4SLinus Torvalds 	eat = len;
13811da177e4SLinus Torvalds 	k = 0;
13827b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13837b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13847b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
13859e903e08SEric Dumazet 
13869e903e08SEric Dumazet 		if (size <= eat) {
1387aff65da0SIan Campbell 			skb_frag_unref(skb, i);
13889e903e08SEric Dumazet 			eat -= size;
13891da177e4SLinus Torvalds 		} else {
13907b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
13911da177e4SLinus Torvalds 			if (eat) {
13927b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
13937b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
13941da177e4SLinus Torvalds 				eat = 0;
13951da177e4SLinus Torvalds 			}
13961da177e4SLinus Torvalds 			k++;
13971da177e4SLinus Torvalds 		}
13981da177e4SLinus Torvalds 	}
13997b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 	skb->data_len -= len;
14021da177e4SLinus Torvalds 	skb->len = skb->data_len;
14037162fb24SEric Dumazet 	return len;
14041da177e4SLinus Torvalds }
14051da177e4SLinus Torvalds 
140667edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14071da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14081da177e4SLinus Torvalds {
14097162fb24SEric Dumazet 	u32 delta_truesize;
14107162fb24SEric Dumazet 
141114bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14121da177e4SLinus Torvalds 		return -ENOMEM;
14131da177e4SLinus Torvalds 
14147162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
141784fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14181da177e4SLinus Torvalds 
14197162fb24SEric Dumazet 	if (delta_truesize) {
14207162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14217162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14227162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14231da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14247162fb24SEric Dumazet 	}
14251da177e4SLinus Torvalds 
14265b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14271da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14285bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds 	return 0;
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds 
14331b63edd6SYuchung Cheng /* Calculate MSS, not accounting for any TCP options.  */
14341b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14355d424d5aSJohn Heffner {
1436cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1437cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14385d424d5aSJohn Heffner 	int mss_now;
14395d424d5aSJohn Heffner 
14405d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14415d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr), per RFC 1122.
14425d424d5aSJohn Heffner 	 */
14435d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14445d424d5aSJohn Heffner 
144567469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
144667469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
144767469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
144867469601SEric Dumazet 
144967469601SEric Dumazet 		if (dst && dst_allfrag(dst))
145067469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
145167469601SEric Dumazet 	}
145267469601SEric Dumazet 
14535d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14545d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14555d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14565d424d5aSJohn Heffner 
14575d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14585d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14595d424d5aSJohn Heffner 
14605d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14615d424d5aSJohn Heffner 	if (mss_now < 48)
14625d424d5aSJohn Heffner 		mss_now = 48;
14635d424d5aSJohn Heffner 	return mss_now;
14645d424d5aSJohn Heffner }
14655d424d5aSJohn Heffner 
14661b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14671b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14681b63edd6SYuchung Cheng {
14691b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14701b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14711b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14721b63edd6SYuchung Cheng }
14731b63edd6SYuchung Cheng 
14745d424d5aSJohn Heffner /* Inverse of above */
147567469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14765d424d5aSJohn Heffner {
1477cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1478cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14795d424d5aSJohn Heffner 	int mtu;
14805d424d5aSJohn Heffner 
14815d424d5aSJohn Heffner 	mtu = mss +
14825d424d5aSJohn Heffner 	      tp->tcp_header_len +
14835d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14845d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14855d424d5aSJohn Heffner 
148667469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
148767469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
148867469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
148967469601SEric Dumazet 
149067469601SEric Dumazet 		if (dst && dst_allfrag(dst))
149167469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
149267469601SEric Dumazet 	}
14935d424d5aSJohn Heffner 	return mtu;
14945d424d5aSJohn Heffner }
1495556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
14965d424d5aSJohn Heffner 
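/* Worked example for the conversions above, assuming IPv4 (20-byte network
 * header), no extension headers, TCP timestamps enabled
 * (tp->tcp_header_len = 20 + 12 = 32), and an mss_clamp that does not bite:
 *
 *   __tcp_mtu_to_mss(sk, 1500) = 1500 - 20 - 20     = 1460
 *   tcp_mtu_to_mss(sk, 1500)   = 1460 - (32 - 20)   = 1448
 *   tcp_mss_to_mtu(sk, 1448)   = 1448 + 32 + 0 + 20 = 1500
 *
 * so the two directions round-trip as long as the same option set is assumed
 * on both sides.
 */
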
149767edfef7SAndi Kleen /* MTU probing init per socket */
14985d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14995d424d5aSJohn Heffner {
15005d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15015d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1502b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15035d424d5aSJohn Heffner 
1504b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15055d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15065d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1507b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15085d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
150905cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1510c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15115d424d5aSJohn Heffner }
15124bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15135d424d5aSJohn Heffner 
15141da177e4SLinus Torvalds /* This function synchronize snd mss to current pmtu/exthdr set.
15151da177e4SLinus Torvalds 
15161da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15171da177e4SLinus Torvalds    account for TCP options; it covers only the bare TCP header.
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1520caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with the SYN.
15211da177e4SLinus Torvalds    It also does not include TCP options.
15221da177e4SLinus Torvalds 
1523d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15261da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15271da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15281da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15311da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15321da177e4SLinus Torvalds 
1533d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1534d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15351da177e4SLinus Torvalds  */
15361da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15371da177e4SLinus Torvalds {
15381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1539d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15405d424d5aSJohn Heffner 	int mss_now;
15411da177e4SLinus Torvalds 
15425d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15435d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15441da177e4SLinus Torvalds 
15455d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1546409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 	/* And store cached results */
1549d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15505d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15515d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1552c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	return mss_now;
15551da177e4SLinus Torvalds }
15564bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15591da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15601da177e4SLinus Torvalds  */
15610c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15621da177e4SLinus Torvalds {
1563cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1564cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1565c1b4a7e6SDavid S. Miller 	u32 mss_now;
156695c96174SEric Dumazet 	unsigned int header_len;
156733ad798cSAdam Langley 	struct tcp_out_options opts;
156833ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15691da177e4SLinus Torvalds 
1570c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1571c1b4a7e6SDavid S. Miller 
15721da177e4SLinus Torvalds 	if (dst) {
15731da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1574d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15751da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15761da177e4SLinus Torvalds 	}
15771da177e4SLinus Torvalds 
157833ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
157933ad798cSAdam Langley 		     sizeof(struct tcphdr);
158033ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
158133ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
158233ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
158333ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
158433ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
158533ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
158633ad798cSAdam Langley 		mss_now -= delta;
158733ad798cSAdam Langley 	}
1588cfb6eeb4SYOSHIFUJI Hideaki 
15891da177e4SLinus Torvalds 	return mss_now;
15901da177e4SLinus Torvalds }
15911da177e4SLinus Torvalds 
159286fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
159386fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
159486fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
159586fd14adSWeiping Pan  */
159686fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1597a762a980SDavid S. Miller {
15989e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1599a762a980SDavid S. Miller 
160086fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
160186fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
160286fd14adSWeiping Pan 		/* Limited by application or receiver window. */
160386fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
160486fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
160586fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
160686fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
160786fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
160886fd14adSWeiping Pan 		}
160986fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
161086fd14adSWeiping Pan 	}
1611c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
161286fd14adSWeiping Pan }
161386fd14adSWeiping Pan 
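/* Worked example for the decay above: with snd_cwnd = 100 but only
 * win_used = 20 packets ever in flight during the idle RTO, the new cwnd
 * becomes (100 + 20) >> 1 = 60; cwnd moves halfway toward what was actually
 * used instead of collapsing to the restart window in one step.
 */
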
1614ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1615a762a980SDavid S. Miller {
16161b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1617a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1618a762a980SDavid S. Miller 
1619ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1620ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1621ca8a2263SNeal Cardwell 	 */
1622ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1623ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1624ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1625ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1626ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1627ca8a2263SNeal Cardwell 	}
1628e114a710SEric Dumazet 
162924901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1630a762a980SDavid S. Miller 		/* Network is fed fully. */
1631a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1632c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1633a762a980SDavid S. Miller 	} else {
1634a762a980SDavid S. Miller 		/* Network starves. */
1635a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1636a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1637a762a980SDavid S. Miller 
1638b510f0d2SEric Dumazet 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1639c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16401b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1641a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1642b0f71bd3SFrancis Yan 
1643b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1644b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1645b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1646b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
164775c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1648b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1649b0f71bd3SFrancis Yan 		 */
165075c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1651b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1652b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1653b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1654a762a980SDavid S. Miller 	}
1655a762a980SDavid S. Miller }
1656a762a980SDavid S. Miller 
1657d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1658d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1659d4589926SEric Dumazet {
1660d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1661d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1662d4589926SEric Dumazet }
1663d4589926SEric Dumazet 
1664d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1665d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1666d4589926SEric Dumazet  * The test is really :
1667d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1668d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1669d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1670d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16710e3a4803SIlpo Järvinen  */
1672d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1673d4589926SEric Dumazet 				const struct sk_buff *skb)
1674d4589926SEric Dumazet {
1675d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1676d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1677d4589926SEric Dumazet }
1678d4589926SEric Dumazet 
1679d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1680d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1681d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1682d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1683d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1684d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1685d4589926SEric Dumazet  */
1686d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1687cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1688d4589926SEric Dumazet {
1689d4589926SEric Dumazet 	return partial &&
1690d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1691d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1692d4589926SEric Dumazet }
1693605ad7f1SEric Dumazet 
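/* Decision table for tcp_nagle_check() above, given partial == true (the
 * candidate segment is not full sized):
 *
 *   TCP_CORK set                            -> defer (return true)
 *   TCP_NODELAY set (nonagle nonzero)       -> send  (return false)
 *   Nagle on, a small packet still unacked  -> defer (return true)
 *   Nagle on, all small packets ACKed       -> send  (return false)
 */
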
1694605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1695605ad7f1SEric Dumazet  * to send one TSO packet per ms
1696605ad7f1SEric Dumazet  */
1697dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
16981b3878caSNeal Cardwell 			    int min_tso_segs)
1699605ad7f1SEric Dumazet {
1700605ad7f1SEric Dumazet 	u32 bytes, segs;
1701605ad7f1SEric Dumazet 
170276a9ebe8SEric Dumazet 	bytes = min_t(unsigned long,
170376a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift,
1704605ad7f1SEric Dumazet 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1705605ad7f1SEric Dumazet 
1706605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1707605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1708605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1709605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1710605ad7f1SEric Dumazet 	 */
17111b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1712605ad7f1SEric Dumazet 
1713350c9f48SEric Dumazet 	return segs;
1714605ad7f1SEric Dumazet }
1715605ad7f1SEric Dumazet 
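/* Worked example for tcp_tso_autosize() above, assuming the default
 * sk_pacing_shift of 10 and mss_now = 1460: at sk_pacing_rate = 12500000
 * bytes/sec (100 Mbit/s), bytes = 12500000 >> 10 = 12207 and
 * segs = 12207 / 1460 = 8, so each TSO burst carries roughly 1/1024 s worth
 * of the pacing budget rather than one maximal 64 KB packet.
 */
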
1716ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1717ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1718ed6e7268SNeal Cardwell  */
1719ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1720ed6e7268SNeal Cardwell {
1721ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1722dcb8c9b4SEric Dumazet 	u32 min_tso, tso_segs;
1723ed6e7268SNeal Cardwell 
1724dcb8c9b4SEric Dumazet 	min_tso = ca_ops->min_tso_segs ?
1725dcb8c9b4SEric Dumazet 			ca_ops->min_tso_segs(sk) :
1726dcb8c9b4SEric Dumazet 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1727dcb8c9b4SEric Dumazet 
1728dcb8c9b4SEric Dumazet 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1729350c9f48SEric Dumazet 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1730ed6e7268SNeal Cardwell }
1731ed6e7268SNeal Cardwell 
1732d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1733d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1734d4589926SEric Dumazet 					const struct sk_buff *skb,
1735d4589926SEric Dumazet 					unsigned int mss_now,
1736d4589926SEric Dumazet 					unsigned int max_segs,
1737d4589926SEric Dumazet 					int nonagle)
1738c1b4a7e6SDavid S. Miller {
1739cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1740d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1741c1b4a7e6SDavid S. Miller 
174290840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17431485348dSBen Hutchings 	max_len = mss_now * max_segs;
17440e3a4803SIlpo Järvinen 
17451485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17461485348dSBen Hutchings 		return max_len;
17470e3a4803SIlpo Järvinen 
17485ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17495ea3a748SIlpo Järvinen 
17501485348dSBen Hutchings 	if (max_len <= needed)
17511485348dSBen Hutchings 		return max_len;
17520e3a4803SIlpo Järvinen 
1753d4589926SEric Dumazet 	partial = needed % mss_now;
1754d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1755d4589926SEric Dumazet 	 * to include this last segment in this skb.
1756d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1757d4589926SEric Dumazet 	 */
1758cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1759d4589926SEric Dumazet 		return needed - partial;
1760d4589926SEric Dumazet 
1761d4589926SEric Dumazet 	return needed;
1762c1b4a7e6SDavid S. Miller }
1763c1b4a7e6SDavid S. Miller 
1764c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1765c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1766c1b4a7e6SDavid S. Miller  */
1767cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1768cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1769c1b4a7e6SDavid S. Miller {
1770d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1771c1b4a7e6SDavid S. Miller 
1772c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17734de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17744de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1775c1b4a7e6SDavid S. Miller 		return 1;
1776c1b4a7e6SDavid S. Miller 
1777c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1778c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1779d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1780c1b4a7e6SDavid S. Miller 		return 0;
1781d649a7a8SEric Dumazet 
1782d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1783d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1784d649a7a8SEric Dumazet 	 */
1785d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1786d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1787c1b4a7e6SDavid S. Miller }
1788c1b4a7e6SDavid S. Miller 
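/* Worked example for tcp_cwnd_test() above: with snd_cwnd = 10 and
 * in_flight = 7, halfcwnd = max(10 >> 1, 1) = 5 and the result is
 * min(5, 10 - 7) = 3 sendable segments; capping at half the cwnd keeps at
 * least two GSO packets in flight for better scheduling.
 */
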
1789b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
179067edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1791c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1792c1b4a7e6SDavid S. Miller  */
17935bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1794c1b4a7e6SDavid S. Miller {
1795c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1796c1b4a7e6SDavid S. Miller 
1797f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
17985bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1799c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1800c1b4a7e6SDavid S. Miller 	}
1801c1b4a7e6SDavid S. Miller 	return tso_segs;
1802c1b4a7e6SDavid S. Miller }
1803c1b4a7e6SDavid S. Miller 
1804c1b4a7e6SDavid S. Miller 
1805a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1806c1b4a7e6SDavid S. Miller  * sent now.
1807c1b4a7e6SDavid S. Miller  */
1808a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1809c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1810c1b4a7e6SDavid S. Miller {
1811c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1812c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1813c1b4a7e6SDavid S. Miller 	 *
1814c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1815c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1816c1b4a7e6SDavid S. Miller 	 */
1817c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1818a2a385d6SEric Dumazet 		return true;
1819c1b4a7e6SDavid S. Miller 
18209b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18219b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1822a2a385d6SEric Dumazet 		return true;
1823c1b4a7e6SDavid S. Miller 
1824cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1825a2a385d6SEric Dumazet 		return true;
1826c1b4a7e6SDavid S. Miller 
1827a2a385d6SEric Dumazet 	return false;
1828c1b4a7e6SDavid S. Miller }
1829c1b4a7e6SDavid S. Miller 
1830c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1831a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1832a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1833056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1834c1b4a7e6SDavid S. Miller {
1835c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1836c1b4a7e6SDavid S. Miller 
1837c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1838c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1839c1b4a7e6SDavid S. Miller 
184090840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1841c1b4a7e6SDavid S. Miller }
1842c1b4a7e6SDavid S. Miller 
1843c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1844c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1845c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1846c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1847c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1848c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1849c1b4a7e6SDavid S. Miller  */
185075c119afSEric Dumazet static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
185175c119afSEric Dumazet 			struct sk_buff *skb, unsigned int len,
1852c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1853c1b4a7e6SDavid S. Miller {
1854c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1855c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
18569ce01461SIlpo Järvinen 	u8 flags;
1857c1b4a7e6SDavid S. Miller 
1858c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1859c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
186075c119afSEric Dumazet 		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
1861c1b4a7e6SDavid S. Miller 
1862eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
186351456b29SIan Morris 	if (unlikely(!buff))
1864c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1865c1b4a7e6SDavid S. Miller 
18663ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18673ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1868b60b49eaSHerbert Xu 	buff->truesize += nlen;
1869c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1870c1b4a7e6SDavid S. Miller 
1871c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1872c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1873c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1874c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1875c1b4a7e6SDavid S. Miller 
1876c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18774de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18784de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18794de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1880c1b4a7e6SDavid S. Miller 
1881c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1882c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1883c1b4a7e6SDavid S. Miller 
1884a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1885a166140eSMartin KaFai Lau 
188698be9b12SEric Dumazet 	buff->ip_summed = CHECKSUM_PARTIAL;
1887c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1888490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1889c1b4a7e6SDavid S. Miller 
1890c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
18915bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
18925bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1893c1b4a7e6SDavid S. Miller 
1894c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1895f4a775d1SEric Dumazet 	__skb_header_release(buff);
189675c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1897c1b4a7e6SDavid S. Miller 
1898c1b4a7e6SDavid S. Miller 	return 0;
1899c1b4a7e6SDavid S. Miller }
1900c1b4a7e6SDavid S. Miller 
1901c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1902c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1903c1b4a7e6SDavid S. Miller  *
1904c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1905c1b4a7e6SDavid S. Miller  */
1906ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1907605ad7f1SEric Dumazet 				 bool *is_cwnd_limited, u32 max_segs)
1908c1b4a7e6SDavid S. Miller {
19096687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
191050c8339eSEric Dumazet 	u32 age, send_win, cong_win, limit, in_flight;
191150c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
191250c8339eSEric Dumazet 	struct sk_buff *head;
1913ad9f4f50SEric Dumazet 	int win_divisor;
1914c1b4a7e6SDavid S. Miller 
19154de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1916ae8064acSJohn Heffner 		goto send_now;
1917c1b4a7e6SDavid S. Miller 
191899d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1919ae8064acSJohn Heffner 		goto send_now;
1920ae8064acSJohn Heffner 
19215f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing defer
19225f852eb5SEric Dumazet 	 * only if the last write was recent.
19235f852eb5SEric Dumazet 	 */
1924d635fbe2SEric Dumazet 	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
1925ae8064acSJohn Heffner 		goto send_now;
1926908a75c1SDavid S. Miller 
1927c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1928c1b4a7e6SDavid S. Miller 
1929c8c9aeb5SStefano Brivio 	BUG_ON(tcp_skb_pcount(skb) <= 1);
1930c8c9aeb5SStefano Brivio 	BUG_ON(tp->snd_cwnd <= in_flight);
1931c1b4a7e6SDavid S. Miller 
193290840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1933c1b4a7e6SDavid S. Miller 
1934c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1935c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1936c1b4a7e6SDavid S. Miller 
1937c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1938c1b4a7e6SDavid S. Miller 
1939ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1940605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1941ae8064acSJohn Heffner 		goto send_now;
1942ba244fe9SDavid S. Miller 
194362ad2761SIlpo Järvinen 	/* An skb in the middle of the queue won't get more data; is it fully sendable already? */
194462ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
194562ad2761SIlpo Järvinen 		goto send_now;
194662ad2761SIlpo Järvinen 
19475bbcc0f5SLinus Torvalds 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1948ad9f4f50SEric Dumazet 	if (win_divisor) {
1949c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1950c1b4a7e6SDavid S. Miller 
1951c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1952c1b4a7e6SDavid S. Miller 		 * just use it.
1953c1b4a7e6SDavid S. Miller 		 */
1954ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1955c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1956ae8064acSJohn Heffner 			goto send_now;
1957c1b4a7e6SDavid S. Miller 	} else {
1958c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1959c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1960c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1961c1b4a7e6SDavid S. Miller 		 * then send now.
1962c1b4a7e6SDavid S. Miller 		 */
19636b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1964ae8064acSJohn Heffner 			goto send_now;
1965c1b4a7e6SDavid S. Miller 	}
1966c1b4a7e6SDavid S. Miller 
196775c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
196875c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
196975c119afSEric Dumazet 	if (!head)
197075c119afSEric Dumazet 		goto send_now;
19712fd66ffbSEric Dumazet 	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
197250c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
197350c8339eSEric Dumazet 	if (age < (tp->srtt_us >> 4))
197450c8339eSEric Dumazet 		goto send_now;
197550c8339eSEric Dumazet 
19765f852eb5SEric Dumazet 	/* Ok, it looks like it is advisable to defer. */
1977ae8064acSJohn Heffner 
1978d2e1339fSBendik Rønning Opstad 	if (cong_win < send_win && cong_win <= skb->len)
1979ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1980ca8a2263SNeal Cardwell 
1981a2a385d6SEric Dumazet 	return true;
1982ae8064acSJohn Heffner 
1983ae8064acSJohn Heffner send_now:
1984a2a385d6SEric Dumazet 	return false;
1985c1b4a7e6SDavid S. Miller }
1986c1b4a7e6SDavid S. Miller 
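/* Worked example for tcp_tso_should_defer() above, assuming
 * mss_cache = 1460, tcp_tso_win_divisor = 3, send_win = 65536,
 * snd_cwnd = 40 and in_flight = 30: cong_win = (40 - 30) * 1460 = 14600 and
 * limit = min(65536, 14600) = 14600, while
 * chunk = min(snd_wnd, 40 * 1460) / 3 = 58400 / 3 = 19466. Since
 * limit < chunk (and limit is also below a full max_segs * mss_cache burst),
 * the skb is deferred, provided the head-age check also favors waiting.
 */
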
198705cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
198805cbc0dbSFan Du {
198905cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
199005cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
199105cbc0dbSFan Du 	struct net *net = sock_net(sk);
199205cbc0dbSFan Du 	u32 interval;
199305cbc0dbSFan Du 	s32 delta;
199405cbc0dbSFan Du 
199505cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
1996c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
199705cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
199805cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
199905cbc0dbSFan Du 
200005cbc0dbSFan Du 		/* Update current search range */
200105cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
200205cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
200305cbc0dbSFan Du 			sizeof(struct tcphdr) +
200405cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
200505cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
200605cbc0dbSFan Du 
200705cbc0dbSFan Du 		/* Update probe time stamp */
2008c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
200905cbc0dbSFan Du 	}
201005cbc0dbSFan Du }
201105cbc0dbSFan Du 
2012808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2013808cf9e3SIlya Lesokhin {
2014808cf9e3SIlya Lesokhin 	struct sk_buff *skb, *next;
2015808cf9e3SIlya Lesokhin 
2016808cf9e3SIlya Lesokhin 	skb = tcp_send_head(sk);
2017808cf9e3SIlya Lesokhin 	tcp_for_write_queue_from_safe(skb, next, sk) {
2018808cf9e3SIlya Lesokhin 		if (len <= skb->len)
2019808cf9e3SIlya Lesokhin 			break;
2020808cf9e3SIlya Lesokhin 
2021808cf9e3SIlya Lesokhin 		if (unlikely(TCP_SKB_CB(skb)->eor))
2022808cf9e3SIlya Lesokhin 			return false;
2023808cf9e3SIlya Lesokhin 
2024808cf9e3SIlya Lesokhin 		len -= skb->len;
2025808cf9e3SIlya Lesokhin 	}
2026808cf9e3SIlya Lesokhin 
2027808cf9e3SIlya Lesokhin 	return true;
2028808cf9e3SIlya Lesokhin }
2029808cf9e3SIlya Lesokhin 
20305d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
203167edfef7SAndi Kleen  * MTU probe is regularly attempting to increase the path MTU by
203267edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
203367edfef7SAndi Kleen  * changes resulting in larger path MTUs.
203467edfef7SAndi Kleen  *
20355d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20365d424d5aSJohn Heffner  *         1 if a probe was sent,
2037056834d9SIlpo Järvinen  *         -1 otherwise
2038056834d9SIlpo Järvinen  */
20395d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20405d424d5aSJohn Heffner {
20415d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
204212a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20435d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20446b58e0a5SFan Du 	struct net *net = sock_net(sk);
20455d424d5aSJohn Heffner 	int probe_size;
204691cc17c0SIlpo Järvinen 	int size_needed;
204712a59abcSEric Dumazet 	int copy, len;
20485d424d5aSJohn Heffner 	int mss_now;
20496b58e0a5SFan Du 	int interval;
20505d424d5aSJohn Heffner 
20515d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20525d424d5aSJohn Heffner 	 * not in recovery,
20535d424d5aSJohn Heffner 	 * have enough cwnd, and
205412a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
205512a59abcSEric Dumazet 	 */
205612a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20575d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20585d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20595d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
206012a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20615d424d5aSJohn Heffner 		return -1;
20625d424d5aSJohn Heffner 
20636b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
20646b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
20656b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20666b58e0a5SFan Du 	 */
20670c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
20686b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20696b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
207091cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
20716b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
207205cbc0dbSFan Du 	/* If the reprobe timer fires while we are still actively
207305cbc0dbSFan Du 	 * probing, we stick with the current probing process instead
207405cbc0dbSFan Du 	 * of resetting the search range to its original span.
207505cbc0dbSFan Du 	 */
20766b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
207705cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
207805cbc0dbSFan Du 		/* Check whether enough time has elapsed for
207905cbc0dbSFan Du 		 * another round of probing.
208005cbc0dbSFan Du 		 */
208105cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
20825d424d5aSJohn Heffner 		return -1;
20835d424d5aSJohn Heffner 	}
20845d424d5aSJohn Heffner 
20855d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
20867f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
20875d424d5aSJohn Heffner 		return -1;
20885d424d5aSJohn Heffner 
208991cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
20905d424d5aSJohn Heffner 		return -1;
209190840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
20925d424d5aSJohn Heffner 		return 0;
20935d424d5aSJohn Heffner 
2094d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2095d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2096d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
20975d424d5aSJohn Heffner 			return -1;
20985d424d5aSJohn Heffner 		else
20995d424d5aSJohn Heffner 			return 0;
21005d424d5aSJohn Heffner 	}
21015d424d5aSJohn Heffner 
2102808cf9e3SIlya Lesokhin 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2103808cf9e3SIlya Lesokhin 		return -1;
2104808cf9e3SIlya Lesokhin 
21055d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2106eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
210751456b29SIan Morris 	if (!nskb)
21085d424d5aSJohn Heffner 		return -1;
21093ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
21103ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
21115d424d5aSJohn Heffner 
2112fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
21135d424d5aSJohn Heffner 
21145d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
21155d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
21164de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
21175d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
21185d424d5aSJohn Heffner 	nskb->csum = 0;
211998be9b12SEric Dumazet 	nskb->ip_summed = CHECKSUM_PARTIAL;
21205d424d5aSJohn Heffner 
212150c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
21222b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, skb, nskb);
212350c4817eSIlpo Järvinen 
21245d424d5aSJohn Heffner 	len = 0;
2125234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
21265d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
21275d424d5aSJohn Heffner 		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21285d424d5aSJohn Heffner 
21295d424d5aSJohn Heffner 		if (skb->len <= copy) {
21305d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21315d424d5aSJohn Heffner 			 * Throw it away. */
21324de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2133808cf9e3SIlya Lesokhin 			/* If this is the last SKB we copy and eor is set
2134808cf9e3SIlya Lesokhin 			 * we need to propagate it to the new skb.
2135808cf9e3SIlya Lesokhin 			 */
2136808cf9e3SIlya Lesokhin 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2137fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21383ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21395d424d5aSJohn Heffner 		} else {
21404de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2141a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21425d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21435d424d5aSJohn Heffner 				skb_pull(skb, copy);
21445d424d5aSJohn Heffner 			} else {
21455d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21465bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21475d424d5aSJohn Heffner 			}
21485d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21495d424d5aSJohn Heffner 		}
21505d424d5aSJohn Heffner 
21515d424d5aSJohn Heffner 		len += copy;
2152234b6860SIlpo Järvinen 
2153234b6860SIlpo Järvinen 		if (len >= probe_size)
2154234b6860SIlpo Järvinen 			break;
21555d424d5aSJohn Heffner 	}
21565bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21575d424d5aSJohn Heffner 
21585d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21597faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21607faee5c0SEric Dumazet 	 */
21615d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21625d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21635d424d5aSJohn Heffner 		 * effectively two packets. */
21645d424d5aSJohn Heffner 		tp->snd_cwnd--;
216566f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21665d424d5aSJohn Heffner 
21675d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
21680e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
21690e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
21705d424d5aSJohn Heffner 
21715d424d5aSJohn Heffner 		return 1;
21725d424d5aSJohn Heffner 	}
21735d424d5aSJohn Heffner 
21745d424d5aSJohn Heffner 	return -1;
21755d424d5aSJohn Heffner }
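
/* Rough probe-sizing example (assumed numbers, plain IPv4, no TCP
 * options): with search_low = 1100 and search_high = 1500, the probe
 * targets the midpoint MTU 1300, i.e. probe_size = 1260 payload
 * bytes. With the default reordering of 3 and mss_cache = 1460,
 * size_needed = 1260 + 4 * 1460 = 7100 bytes must be queued and fit
 * within the peer's receive window before a probe is attempted.
 */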
21765d424d5aSJohn Heffner 
2177864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk)
2178218af599SEric Dumazet {
2179864e5c09SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
2180864e5c09SEric Dumazet 
2181864e5c09SEric Dumazet 	if (!tcp_needs_internal_pacing(sk))
2182864e5c09SEric Dumazet 		return false;
2183864e5c09SEric Dumazet 
2184864e5c09SEric Dumazet 	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2185864e5c09SEric Dumazet 		return false;
2186864e5c09SEric Dumazet 
2187864e5c09SEric Dumazet 	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2188864e5c09SEric Dumazet 		hrtimer_start(&tp->pacing_timer,
2189864e5c09SEric Dumazet 			      ns_to_ktime(tp->tcp_wstamp_ns),
2190864e5c09SEric Dumazet 			      HRTIMER_MODE_ABS_PINNED_SOFT);
2191864e5c09SEric Dumazet 		sock_hold(sk);
2192864e5c09SEric Dumazet 	}
2193864e5c09SEric Dumazet 	return true;
2194218af599SEric Dumazet }
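
/* Internal pacing: tp->tcp_wstamp_ns is the earliest permitted send
 * time of the next packet. If it is still in the future relative to
 * the cached clock, sending is deferred by arming the pacing hrtimer
 * for that instant; sock_hold() keeps a reference for the armed
 * timer. Callers treat a true return as "stop sending for now".
 */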
2195218af599SEric Dumazet 
2196f9616c35SEric Dumazet /* TCP Small Queues :
2197f9616c35SEric Dumazet  * Control the number of packets in qdisc/devices to two packets or ~1 ms worth.
2198f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2199f9616c35SEric Dumazet  * This allows for :
2200f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2201f9616c35SEric Dumazet  *  - faster recovery
2202f9616c35SEric Dumazet  *  - high rates
2203f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2204f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2205f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2206f9616c35SEric Dumazet  */
2207f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2208f9616c35SEric Dumazet 				  unsigned int factor)
2209f9616c35SEric Dumazet {
221076a9ebe8SEric Dumazet 	unsigned long limit;
2211f9616c35SEric Dumazet 
221276a9ebe8SEric Dumazet 	limit = max_t(unsigned long,
221376a9ebe8SEric Dumazet 		      2 * skb->truesize,
221476a9ebe8SEric Dumazet 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
221576a9ebe8SEric Dumazet 	limit = min_t(unsigned long, limit,
22169184d8bbSEric Dumazet 		      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2217f9616c35SEric Dumazet 	limit <<= factor;
2218f9616c35SEric Dumazet 
221914afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
222075c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
222175eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
222275eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
222375eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
222475eefc6cSEric Dumazet 		 */
222575c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
222675eefc6cSEric Dumazet 			return false;
222775eefc6cSEric Dumazet 
22287aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2229f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2230f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2231f9616c35SEric Dumazet 		 * test again the condition.
2232f9616c35SEric Dumazet 		 */
2233f9616c35SEric Dumazet 		smp_mb__after_atomic();
223414afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2235f9616c35SEric Dumazet 			return true;
2236f9616c35SEric Dumazet 	}
2237f9616c35SEric Dumazet 	return false;
2238f9616c35SEric Dumazet }
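
/* TSQ limit sizing example (assumed numbers): with the default
 * sk_pacing_shift of 10, "rate >> shift" is roughly the byte count
 * sent in 1/1024 s. At 1 Gbit/s (~125 MB/s) that is ~122 KB, so
 * about 1 ms worth of data may sit in qdisc/device queues, capped by
 * the sysctl above; factor = 1 doubles this for retransmits.
 */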
2239f9616c35SEric Dumazet 
224005b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
224105b055e8SFrancis Yan {
2242628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2243efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
224405b055e8SFrancis Yan 
2245efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2246efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
224705b055e8SFrancis Yan 	tp->chrono_start = now;
224805b055e8SFrancis Yan 	tp->chrono_type = new;
224905b055e8SFrancis Yan }
225005b055e8SFrancis Yan 
225105b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
225205b055e8SFrancis Yan {
225305b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
225405b055e8SFrancis Yan 
225505b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
22560f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
22570f87230dSFrancis Yan 	 * over the other conditions. So if something "more interesting"
225805b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
225905b055e8SFrancis Yan 	 */
226005b055e8SFrancis Yan 	if (type > tp->chrono_type)
226105b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
226205b055e8SFrancis Yan }
226305b055e8SFrancis Yan 
226405b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
226505b055e8SFrancis Yan {
226605b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
226705b055e8SFrancis Yan 
22690f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
22700f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
22710f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
22720f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
22730f87230dSFrancis Yan 	 * it is the "most interesting" (i.e. current) chrono we are
22740f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if data is still pending.
22750f87230dSFrancis Yan 	 */
227675c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
227705b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
22780f87230dSFrancis Yan 	else if (type == tp->chrono_type)
22790f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
228005b055e8SFrancis Yan }
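
/* The chrono types are ordered by priority: UNSPEC < BUSY <
 * RWND_LIMITED < SNDBUF_LIMITED. tcp_chrono_set() folds the elapsed
 * jiffies of the outgoing state into chrono_stat[old - 1]; these
 * counters are what tcp_get_info() reports as busy/rwnd-limited/
 * sndbuf-limited times.
 */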
228105b055e8SFrancis Yan 
22821da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
22831da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
22841da177e4SLinus Torvalds  * window for us.
22851da177e4SLinus Torvalds  *
2286f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2287f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2288f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2289f8269a49SIlpo Järvinen  *
22906ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
22916ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
22926ba8a3b1SNandita Dukkipati  *
2293a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2294a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
22951da177e4SLinus Torvalds  */
2296a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2297d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
22981da177e4SLinus Torvalds {
22991da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
230092df7b51SDavid S. Miller 	struct sk_buff *skb;
2301c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2302c1b4a7e6SDavid S. Miller 	int cwnd_quota;
23035d424d5aSJohn Heffner 	int result;
23045615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2305605ad7f1SEric Dumazet 	u32 max_segs;
23061da177e4SLinus Torvalds 
2307c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
23085d424d5aSJohn Heffner 
2309ee1836aeSEric Dumazet 	tcp_mstamp_refresh(tp);
2310d5dd9175SIlpo Järvinen 	if (!push_one) {
23115d424d5aSJohn Heffner 		/* Do MTU probing. */
2312d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2313d5dd9175SIlpo Järvinen 		if (!result) {
2314a2a385d6SEric Dumazet 			return false;
23155d424d5aSJohn Heffner 		} else if (result > 0) {
23165d424d5aSJohn Heffner 			sent_pkts = 1;
23175d424d5aSJohn Heffner 		}
2318d5dd9175SIlpo Järvinen 	}
23195d424d5aSJohn Heffner 
2320ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
2321fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2322c8ac3774SHerbert Xu 		unsigned int limit;
2323c8ac3774SHerbert Xu 
232479861919SEric Dumazet 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
232579861919SEric Dumazet 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
232679861919SEric Dumazet 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
232779861919SEric Dumazet 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
232879861919SEric Dumazet 			goto repair; /* Skip network transmission */
232979861919SEric Dumazet 		}
233079861919SEric Dumazet 
2331218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2332218af599SEric Dumazet 			break;
2333218af599SEric Dumazet 
23345bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2335c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2336c1b4a7e6SDavid S. Miller 
2337b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
23386ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
23396ba8a3b1SNandita Dukkipati 			if (push_one == 2)
23406ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
23416ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23426ba8a3b1SNandita Dukkipati 			else
2343b68e9f85SHerbert Xu 				break;
23446ba8a3b1SNandita Dukkipati 		}
2345b68e9f85SHerbert Xu 
23465615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23475615f886SFrancis Yan 			is_rwnd_limited = true;
2348b68e9f85SHerbert Xu 			break;
23495615f886SFrancis Yan 		}
2350b68e9f85SHerbert Xu 
2351d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2352aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2353aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2354aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2355aa93466bSDavid S. Miller 				break;
2356c1b4a7e6SDavid S. Miller 		} else {
2357ca8a2263SNeal Cardwell 			if (!push_one &&
2358605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2359605ad7f1SEric Dumazet 						 max_segs))
2360aa93466bSDavid S. Miller 				break;
2361c1b4a7e6SDavid S. Miller 		}
2362aa93466bSDavid S. Miller 
2363605ad7f1SEric Dumazet 		limit = mss_now;
2364d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2365605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2366605ad7f1SEric Dumazet 						    min_t(unsigned int,
2367605ad7f1SEric Dumazet 							  cwnd_quota,
2368605ad7f1SEric Dumazet 							  max_segs),
2369605ad7f1SEric Dumazet 						    nonagle);
2370605ad7f1SEric Dumazet 
2371605ad7f1SEric Dumazet 		if (skb->len > limit &&
237275c119afSEric Dumazet 		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
237375c119afSEric Dumazet 					  skb, limit, mss_now, gfp)))
2374605ad7f1SEric Dumazet 			break;
2375605ad7f1SEric Dumazet 
2376f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
237746d3ceabSEric Dumazet 			break;
2378c9eeec26SEric Dumazet 
2379d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
23801da177e4SLinus Torvalds 			break;
23811da177e4SLinus Torvalds 
2382ec342325SAndrew Vagin repair:
23831da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
23841da177e4SLinus Torvalds 		 * This call will increment packets_out.
23851da177e4SLinus Torvalds 		 */
238666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2389a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2390d5dd9175SIlpo Järvinen 
2391d5dd9175SIlpo Järvinen 		if (push_one)
2392d5dd9175SIlpo Järvinen 			break;
23931da177e4SLinus Torvalds 	}
23941da177e4SLinus Torvalds 
23955615f886SFrancis Yan 	if (is_rwnd_limited)
23965615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
23975615f886SFrancis Yan 	else
23985615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
23995615f886SFrancis Yan 
2400aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2401684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2402684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
24036ba8a3b1SNandita Dukkipati 
24046ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
24056ba8a3b1SNandita Dukkipati 		if (push_one != 2)
2406ed66dfafSNeal Cardwell 			tcp_schedule_loss_probe(sk, false);
2407d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2408ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2409a2a385d6SEric Dumazet 		return false;
24101da177e4SLinus Torvalds 	}
241175c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
24126ba8a3b1SNandita Dukkipati }
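
/* Per-skb gating order in tcp_write_xmit(), for reference: repair
 * short-circuit, pacing check, TSO segment init, cwnd quota (forced
 * to 1 when push_one == 2), receive window test, Nagle or TSO
 * deferral, split at the mss/cwnd boundary via tso_fragment(), TSQ
 * check, then tcp_transmit_skb(). The first failing gate stops the
 * loop.
 */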
24136ba8a3b1SNandita Dukkipati 
2414ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
24156ba8a3b1SNandita Dukkipati {
24166ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
24176ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2418a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
24192ae21cf5SEric Dumazet 	int early_retrans;
24206ba8a3b1SNandita Dukkipati 
24216ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
24226ba8a3b1SNandita Dukkipati 	 * finishes.
24236ba8a3b1SNandita Dukkipati 	 */
2424f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
24256ba8a3b1SNandita Dukkipati 		return false;
24266ba8a3b1SNandita Dukkipati 
24272ae21cf5SEric Dumazet 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
24286ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
2429b4f70c3dSNeal Cardwell 	 * not in loss recovery, that are either cwnd-limited or application-limited.
24306ba8a3b1SNandita Dukkipati 	 */
24312ae21cf5SEric Dumazet 	if ((early_retrans != 3 && early_retrans != 4) ||
2432bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2433b4f70c3dSNeal Cardwell 	    (icsk->icsk_ca_state != TCP_CA_Open &&
2434b4f70c3dSNeal Cardwell 	     icsk->icsk_ca_state != TCP_CA_CWR))
24356ba8a3b1SNandita Dukkipati 		return false;
24366ba8a3b1SNandita Dukkipati 
2437bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2438f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2439f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24406ba8a3b1SNandita Dukkipati 	 */
2441bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2442bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
24436ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2444bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2445bb4d991aSYuchung Cheng 		else
2446bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2447bb4d991aSYuchung Cheng 	} else {
2448bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2449bb4d991aSYuchung Cheng 	}
24506ba8a3b1SNandita Dukkipati 
2451a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2452ed66dfafSNeal Cardwell 	rto_delta_us = advancing_rto ?
2453ed66dfafSNeal Cardwell 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2454ed66dfafSNeal Cardwell 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2455a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2456a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
24576ba8a3b1SNandita Dukkipati 
24583f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
24593f80e08fSEric Dumazet 			     TCP_RTO_MAX, NULL);
24606ba8a3b1SNandita Dukkipati 	return true;
24616ba8a3b1SNandita Dukkipati }
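
/* Worked PTO example (assumed SRTT of 50 ms): tp->srtt_us stores
 * 8 * SRTT, so srtt_us >> 2 yields 2 * SRTT = 100 ms. With exactly
 * one packet in flight, TCP_RTO_MIN (200 ms) is added to ride out a
 * delayed ACK, giving a 300 ms probe timeout, possibly clamped to an
 * earlier pending RTO.
 */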
24626ba8a3b1SNandita Dukkipati 
24631f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
24641f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
24651f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
24661f3279aeSEric Dumazet  */
24671f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24681f3279aeSEric Dumazet 				    const struct sk_buff *skb)
24691f3279aeSEric Dumazet {
247039bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2471c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
24721f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24731f3279aeSEric Dumazet 		return true;
24741f3279aeSEric Dumazet 	}
24751f3279aeSEric Dumazet 	return false;
24761f3279aeSEric Dumazet }
24771f3279aeSEric Dumazet 
2478b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
24796ba8a3b1SNandita Dukkipati  * retransmit the last segment.
24806ba8a3b1SNandita Dukkipati  */
24816ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
24826ba8a3b1SNandita Dukkipati {
24839b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
24846ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
24856ba8a3b1SNandita Dukkipati 	int pcount;
24866ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
24876ba8a3b1SNandita Dukkipati 
2488b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
248975c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2490b340b264SYuchung Cheng 		pcount = tp->packets_out;
2491b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2492b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2493b340b264SYuchung Cheng 			goto probe_sent;
24946ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24956ba8a3b1SNandita Dukkipati 	}
249675c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
24976ba8a3b1SNandita Dukkipati 
24989b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
24999b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25009b717a8dSNandita Dukkipati 		goto rearm_timer;
25019b717a8dSNandita Dukkipati 
25026ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
25036ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
25046ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25056ba8a3b1SNandita Dukkipati 
25061f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25071f3279aeSEric Dumazet 		goto rearm_timer;
25081f3279aeSEric Dumazet 
25096ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25106ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25116ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25126ba8a3b1SNandita Dukkipati 
25136ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
251475c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
251575c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25166cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25176ba8a3b1SNandita Dukkipati 			goto rearm_timer;
251875c119afSEric Dumazet 		skb = skb_rb_next(skb);
25196ba8a3b1SNandita Dukkipati 	}
25206ba8a3b1SNandita Dukkipati 
25216ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25226ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25236ba8a3b1SNandita Dukkipati 
252410d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2525b340b264SYuchung Cheng 		goto rearm_timer;
25266ba8a3b1SNandita Dukkipati 
25279b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
25289b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
25299b717a8dSNandita Dukkipati 
2530b340b264SYuchung Cheng probe_sent:
2531c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2532fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2533fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2534b340b264SYuchung Cheng rearm_timer:
2535fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
25361da177e4SLinus Torvalds }
25371da177e4SLinus Torvalds 
2538a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2539a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2540a762a980SDavid S. Miller  * The socket must be locked by the caller.
2541a762a980SDavid S. Miller  */
25429e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25439e412ba7SIlpo Järvinen 			       int nonagle)
2544a762a980SDavid S. Miller {
2545726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2546726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2547726e07a8SIlpo Järvinen 	 * all will be happy.
2548726e07a8SIlpo Järvinen 	 */
2549726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2550726e07a8SIlpo Järvinen 		return;
2551726e07a8SIlpo Järvinen 
255299a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
25537450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
25549e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2555a762a980SDavid S. Miller }
2556a762a980SDavid S. Miller 
2557c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2558c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2559c1b4a7e6SDavid S. Miller  */
2560c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2561c1b4a7e6SDavid S. Miller {
2562fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2563c1b4a7e6SDavid S. Miller 
2564c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2565c1b4a7e6SDavid S. Miller 
2566d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2567c1b4a7e6SDavid S. Miller }
2568c1b4a7e6SDavid S. Miller 
25691da177e4SLinus Torvalds /* This function returns the amount that we can raise the
25701da177e4SLinus Torvalds  * usable window based on the following constraints
25711da177e4SLinus Torvalds  *
25721da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
25731da177e4SLinus Torvalds  * 2. We limit memory per socket
25741da177e4SLinus Torvalds  *
25751da177e4SLinus Torvalds  * RFC 1122:
25761da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
25771da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
25781da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
25791da177e4SLinus Torvalds  *
25801da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
25811da177e4SLinus Torvalds  * it at least MSS bytes.
25821da177e4SLinus Torvalds  *
25831da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
25841da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
25851da177e4SLinus Torvalds  *
25861da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
25871da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
25881da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
25891da177e4SLinus Torvalds  * window to always advance by a single byte.
25901da177e4SLinus Torvalds  *
25911da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
25921da177e4SLinus Torvalds  * then this will not be a problem.
25931da177e4SLinus Torvalds  *
25941da177e4SLinus Torvalds  * BSD seems to make the following compromise:
25951da177e4SLinus Torvalds  *
25961da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
25971da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
25981da177e4SLinus Torvalds  *	then set the window to 0.
25991da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
26001da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
26011da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
26021da177e4SLinus Torvalds  *
26031da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
26041da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
26051da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
26061da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
26071da177e4SLinus Torvalds  * because the pipeline is full.
26081da177e4SLinus Torvalds  *
26091da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
26101da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
26111da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
26121da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
26131da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
26141da177e4SLinus Torvalds  *
26151da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
26161da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
26171da177e4SLinus Torvalds  *
26181da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
26191da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
26201da177e4SLinus Torvalds  */
26211da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
26221da177e4SLinus Torvalds {
2623463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
26241da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2625caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
26261da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
26271da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
26281da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
26291da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
26301da177e4SLinus Torvalds 	 */
2631463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
26321da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
263386c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
263486c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
26351da177e4SLinus Torvalds 	int window;
26361da177e4SLinus Torvalds 
263706425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
26381da177e4SLinus Torvalds 		mss = full_space;
263906425c30SEric Dumazet 		if (mss <= 0)
264006425c30SEric Dumazet 			return 0;
264106425c30SEric Dumazet 	}
2642b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2643463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26441da177e4SLinus Torvalds 
2645b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2646056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2647056834d9SIlpo Järvinen 					       4U * tp->advmss);
26481da177e4SLinus Torvalds 
264986c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
265086c1a045SFlorian Westphal 		 * increase it due to wscale.
265186c1a045SFlorian Westphal 		 */
265286c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
265386c1a045SFlorian Westphal 
265486c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
265586c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
265686c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
265786c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
265886c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
265986c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
266086c1a045SFlorian Westphal 		 */
266186c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
26621da177e4SLinus Torvalds 			return 0;
26631da177e4SLinus Torvalds 	}
26641da177e4SLinus Torvalds 
26651da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
26661da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
26671da177e4SLinus Torvalds 
26681da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
26691da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
26701da177e4SLinus Torvalds 	 */
26711da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
26721da177e4SLinus Torvalds 		window = free_space;
26731da177e4SLinus Torvalds 
26741da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
26751da177e4SLinus Torvalds 		 * Import case: prevent zero window announcement if
26761da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
26771da177e4SLinus Torvalds 		 */
26781935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
26791da177e4SLinus Torvalds 	} else {
26801935299dSGao Feng 		window = tp->rcv_wnd;
26811da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
26821da177e4SLinus Torvalds 		 * Window clamp already applied above.
26831da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
26841da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
26851da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
26861da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
26871da177e4SLinus Torvalds 		 * is too small.
26881da177e4SLinus Torvalds 		 */
26891da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
26901935299dSGao Feng 			window = rounddown(free_space, mss);
269184565070SJohn Heffner 		else if (mss == full_space &&
2692b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
269384565070SJohn Heffner 			window = free_space;
26941da177e4SLinus Torvalds 	}
26951da177e4SLinus Torvalds 
26961da177e4SLinus Torvalds 	return window;
26971da177e4SLinus Torvalds }
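
/* Example of the non-wscale rounding above (assumed numbers): with
 * free_space = 10000 and mss = 1460, rounddown() advertises
 * 8760 (6 * mss), avoiding sub-mss window creep. With window
 * scaling, the window is instead ALIGNed up to a multiple of
 * 1 << rcv_wscale so the advertised value cannot scale down to zero.
 */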
26981da177e4SLinus Torvalds 
2699cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2700082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2701082ac2d5SMartin KaFai Lau {
27020a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
27030a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
27040a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2705082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2706082ac2d5SMartin KaFai Lau 
27070a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2708082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
27092de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
27102de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2711082ac2d5SMartin KaFai Lau 	}
2712082ac2d5SMartin KaFai Lau }
2713082ac2d5SMartin KaFai Lau 
27144a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2715f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
27161da177e4SLinus Torvalds {
27171da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
271875c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
271913dde04fSWei Yongjun 	int next_skb_size;
27201da177e4SLinus Torvalds 
2721058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
27221da177e4SLinus Torvalds 
2723058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
27241da177e4SLinus Torvalds 
2725f8071cdeSEric Dumazet 	if (next_skb_size) {
2726f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2727f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2728f8071cdeSEric Dumazet 				      next_skb_size);
2729f8071cdeSEric Dumazet 		else if (!skb_shift(skb, next_skb, next_skb_size))
2730f8071cdeSEric Dumazet 			return false;
2731f8071cdeSEric Dumazet 	}
27322b7cda9cSEric Dumazet 	tcp_highest_sack_replace(sk, next_skb, skb);
2733a6963a6bSIlpo Järvinen 
27341da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27351da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27361da177e4SLinus Torvalds 
2737e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27384de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27391da177e4SLinus Torvalds 
27401da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27411da177e4SLinus Torvalds 	 * packet counting does not break.
27421da177e4SLinus Torvalds 	 */
27434828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2744a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2745b7689205SIlpo Järvinen 
2746b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2747ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2748ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2749ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2750b7689205SIlpo Järvinen 
2751797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2752797108d1SIlpo Järvinen 
2753082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2754082ac2d5SMartin KaFai Lau 
275575c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2756f8071cdeSEric Dumazet 	return true;
27571da177e4SLinus Torvalds }
27581da177e4SLinus Torvalds 
275967edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2760a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27614a17fc3aSIlpo Järvinen {
27624a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2763a2a385d6SEric Dumazet 		return false;
27644a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2765a2a385d6SEric Dumazet 		return false;
27662331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
27674a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2768a2a385d6SEric Dumazet 		return false;
27694a17fc3aSIlpo Järvinen 
2770a2a385d6SEric Dumazet 	return true;
27714a17fc3aSIlpo Järvinen }
27724a17fc3aSIlpo Järvinen 
277367edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
277467edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
277567edfef7SAndi Kleen  */
27764a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
27774a17fc3aSIlpo Järvinen 				     int space)
27784a17fc3aSIlpo Järvinen {
27794a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
27804a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2781a2a385d6SEric Dumazet 	bool first = true;
27824a17fc3aSIlpo Järvinen 
2783e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
27844a17fc3aSIlpo Järvinen 		return;
27854de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
27864a17fc3aSIlpo Järvinen 		return;
27874a17fc3aSIlpo Järvinen 
278875c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
27894a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
27904a17fc3aSIlpo Järvinen 			break;
27914a17fc3aSIlpo Järvinen 
2792a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2793a643b5d4SMartin KaFai Lau 			break;
2794a643b5d4SMartin KaFai Lau 
27954a17fc3aSIlpo Järvinen 		space -= skb->len;
27964a17fc3aSIlpo Järvinen 
27974a17fc3aSIlpo Järvinen 		if (first) {
2798a2a385d6SEric Dumazet 			first = false;
27994a17fc3aSIlpo Järvinen 			continue;
28004a17fc3aSIlpo Järvinen 		}
28014a17fc3aSIlpo Järvinen 
28024a17fc3aSIlpo Järvinen 		if (space < 0)
28034a17fc3aSIlpo Järvinen 			break;
28044a17fc3aSIlpo Järvinen 
28054a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28064a17fc3aSIlpo Järvinen 			break;
28074a17fc3aSIlpo Järvinen 
2808f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2809f8071cdeSEric Dumazet 			break;
28104a17fc3aSIlpo Järvinen 	}
28114a17fc3aSIlpo Järvinen }
28124a17fc3aSIlpo Järvinen 
28131da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
28141da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
28151da177e4SLinus Torvalds  * error occurred which prevented the send.
28161da177e4SLinus Torvalds  */
281710d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28181da177e4SLinus Torvalds {
28195d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
282010d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28217d227cd2SSridhar Samudrala 	unsigned int cur_mss;
282210d3be56SEric Dumazet 	int diff, len, err;
28231da177e4SLinus Torvalds 
282410d3be56SEric Dumazet 
282510d3be56SEric Dumazet 	/* Inconclusive MTU probe */
282610d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
28275d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
28285d424d5aSJohn Heffner 
28291da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2830caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
28311da177e4SLinus Torvalds 	 */
283214afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2833ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2834ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28351da177e4SLinus Torvalds 		return -EAGAIN;
28361da177e4SLinus Torvalds 
28371f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28381f3279aeSEric Dumazet 		return -EBUSY;
28391f3279aeSEric Dumazet 
28401da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28417f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
28427f582b24SEric Dumazet 			WARN_ON_ONCE(1);
28437f582b24SEric Dumazet 			return -EINVAL;
28447f582b24SEric Dumazet 		}
28451da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28461da177e4SLinus Torvalds 			return -ENOMEM;
28471da177e4SLinus Torvalds 	}
28481da177e4SLinus Torvalds 
28497d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28507d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
28517d227cd2SSridhar Samudrala 
28520c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
28537d227cd2SSridhar Samudrala 
28541da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
28551da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
28561da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
28571da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
28581da177e4SLinus Torvalds 	 */
28599d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
28609d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
28611da177e4SLinus Torvalds 		return -EAGAIN;
28621da177e4SLinus Torvalds 
286310d3be56SEric Dumazet 	len = cur_mss * segs;
286410d3be56SEric Dumazet 	if (skb->len > len) {
286575c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
286675c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
28671da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
286802276f3cSIlpo Järvinen 	} else {
2869c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2870c52e2421SEric Dumazet 			return -ENOMEM;
287110d3be56SEric Dumazet 
287210d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
287310d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
287410d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
287510d3be56SEric Dumazet 		if (diff)
287610d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
287710d3be56SEric Dumazet 		if (skb->len < cur_mss)
287810d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
28791da177e4SLinus Torvalds 	}
28801da177e4SLinus Torvalds 
288149213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
288249213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
288349213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
288449213555SDaniel Borkmann 
2885678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2886678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2887678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2888678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2889678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2890678550c6SYuchung Cheng 	tp->total_retrans += segs;
2891fb31c9b9SWei Wang 	tp->bytes_retrans += skb->len;
2892678550c6SYuchung Cheng 
289350bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
289450bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
289550bceae9SThomas Graf 	 * beyond what csum_start can cover.
289650bceae9SThomas Graf 	 */
289750bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
289850bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
289910a81980SEric Dumazet 		struct sk_buff *nskb;
290010a81980SEric Dumazet 
2901e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
290210a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2903c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2904117632e6SEric Dumazet 				     -ENOBUFS;
2905e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2906e2080072SEric Dumazet 
29075889e2c0SYousuk Seung 		if (!err) {
2908a7a25630SEric Dumazet 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
29095889e2c0SYousuk Seung 			tcp_rate_skb_sent(sk, skb);
29105889e2c0SYousuk Seung 		}
2911117632e6SEric Dumazet 	} else {
2912c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2913117632e6SEric Dumazet 	}
2914c84a5711SYuchung Cheng 
2915a31ad29eSLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
2916a31ad29eSLawrence Brakmo 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
2917a31ad29eSLawrence Brakmo 				  TCP_SKB_CB(skb)->seq, segs, err);
2918a31ad29eSLawrence Brakmo 
2919fc9f3501SEric Dumazet 	if (likely(!err)) {
2920c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2921e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2922678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2923*ec641b39SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
2924fc9f3501SEric Dumazet 	}
2925c84a5711SYuchung Cheng 	return err;
292693b174adSYuchung Cheng }
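
/* The guard at the top of __tcp_retransmit_skb() refuses to
 * retransmit while sk_wmem_alloc exceeds min(sk_wmem_queued * 5/4,
 * sk_sndbuf): the extra quarter covers clone/copy overhead from
 * fragmentation, tunneling, mangling etc., so retransmits cannot
 * blow far past the send buffer.
 */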
292793b174adSYuchung Cheng 
292810d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
292993b174adSYuchung Cheng {
293093b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
293110d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
29321da177e4SLinus Torvalds 
29331da177e4SLinus Torvalds 	if (err == 0) {
29341da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
29351da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2936e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
29371da177e4SLinus Torvalds 		}
29381da177e4SLinus Torvalds #endif
29391da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
29401da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
29411da177e4SLinus Torvalds 
29421da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
29431da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
29447faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
29451da177e4SLinus Torvalds 
29461da177e4SLinus Torvalds 	}
29476e08d5e3SYuchung Cheng 
29486e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
29496e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
29506e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
29511da177e4SLinus Torvalds 	return err;
29521da177e4SLinus Torvalds }
29531da177e4SLinus Torvalds 
29541da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29551da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
29561da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
29571da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
29581da177e4SLinus Torvalds  */
29591da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29601da177e4SLinus Torvalds {
29616687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2962b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
29631da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2964840a3cbeSYuchung Cheng 	u32 max_segs;
296561eb55f4SIlpo Järvinen 	int mib_idx;
29666a438bbeSStephen Hemminger 
296745e77d31SIlpo Järvinen 	if (!tp->packets_out)
296845e77d31SIlpo Järvinen 		return;
296945e77d31SIlpo Järvinen 
297075c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
2971b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
2972ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
297375c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
2974dca0aaf8SEric Dumazet 		__u8 sacked;
297510d3be56SEric Dumazet 		int segs;
29761da177e4SLinus Torvalds 
2977218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2978218af599SEric Dumazet 			break;
2979218af599SEric Dumazet 
29806a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
298151456b29SIan Morris 		if (!hole)
29826a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
29836a438bbeSStephen Hemminger 
298410d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
298510d3be56SEric Dumazet 		if (segs <= 0)
29861da177e4SLinus Torvalds 			return;
2987dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
2988a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
2989a3d2e9f8SEric Dumazet 		 * we need to make sure we do not send too big TSO packets
2990a3d2e9f8SEric Dumazet 		 */
2991a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
29920e1c54c2SIlpo Järvinen 
2993840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
2994006f582cSIlpo Järvinen 			break;
29950e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
299651456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
29970e1c54c2SIlpo Järvinen 				hole = skb;
299861eb55f4SIlpo Järvinen 			continue;
29991da177e4SLinus Torvalds 
30000e1c54c2SIlpo Järvinen 		} else {
30010e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30020e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30030e1c54c2SIlpo Järvinen 			else
30040e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30050e1c54c2SIlpo Järvinen 		}
30060e1c54c2SIlpo Järvinen 
30070e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
300861eb55f4SIlpo Järvinen 			continue;
300940b215e5SPavel Emelyanov 
3010f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3011f9616c35SEric Dumazet 			return;
3012f9616c35SEric Dumazet 
301310d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30141da177e4SLinus Torvalds 			return;
301524ab6becSYuchung Cheng 
3016de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30171da177e4SLinus Torvalds 
3018684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3019a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3020a262f0cdSNandita Dukkipati 
302175c119afSEric Dumazet 		if (skb == rtx_head &&
302257dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
30233f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30243f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
30253f80e08fSEric Dumazet 					     TCP_RTO_MAX,
30263f80e08fSEric Dumazet 					     skb);
30271da177e4SLinus Torvalds 	}
30281da177e4SLinus Torvalds }
30291da177e4SLinus Torvalds 
3030d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3031d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3032845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
3033845704a5SEric Dumazet  * or even be forced to close flow without any FIN.
3034a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3035a6c5ea4cSEric Dumazet  * with edge trigger epoll()
3036d83769a5SEric Dumazet  */
3037a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3038d83769a5SEric Dumazet {
3039e805605cSJohannes Weiner 	int amt;
3040d83769a5SEric Dumazet 
3041d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3042d83769a5SEric Dumazet 		return;
3043d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3044d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3045e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3046e805605cSJohannes Weiner 
3047baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3048baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3049d83769a5SEric Dumazet }
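
/* Worked example, assuming SK_MEM_QUANTUM == PAGE_SIZE == 4096: forcing a
 * FIN skb of truesize 1280 through a socket with sk_forward_alloc == 0
 * charges
 *
 *	amt = sk_mem_pages(1280) = (1280 + 4096 - 1) >> PAGE_SHIFT = 1
 *
 * quantum, i.e. one page is accounted even if memory limits are exceeded.
 */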
3050d83769a5SEric Dumazet 
3051845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3052845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
30531da177e4SLinus Torvalds  */
30541da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30551da177e4SLinus Torvalds {
3056845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30581da177e4SLinus Torvalds 
3059845704a5SEric Dumazet 	/* Optimization: tack the FIN onto the tail skb if we have one in the
3060845704a5SEric Dumazet 	 * write queue and it was not yet sent, or if we are under memory pressure.
3061845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3062845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
30631da177e4SLinus Torvalds 	 */
306475c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
306575c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
306675c119afSEric Dumazet 
306775c119afSEric Dumazet 	if (tskb) {
3068845704a5SEric Dumazet coalesce:
3069845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3070845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
30711da177e4SLinus Torvalds 		tp->write_seq++;
307275c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3073845704a5SEric Dumazet 			/* This means tskb was already sent.
3074845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3075845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3076845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3077845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3078845704a5SEric Dumazet 			 */
3079845704a5SEric Dumazet 			tp->snd_nxt++;
3080845704a5SEric Dumazet 			return;
3081845704a5SEric Dumazet 		}
30821da177e4SLinus Torvalds 	} else {
3083845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3084845704a5SEric Dumazet 		if (unlikely(!skb)) {
3085845704a5SEric Dumazet 			if (tskb)
3086845704a5SEric Dumazet 				goto coalesce;
3087845704a5SEric Dumazet 			return;
30881da177e4SLinus Torvalds 		}
3089e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3090d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3091a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
30921da177e4SLinus Torvalds 		/* FIN eats a sequence byte; write_seq is advanced by tcp_queue_skb(). */
3093e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3094a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
30951da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
30961da177e4SLinus Torvalds 	}
3097845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
30981da177e4SLinus Torvalds }
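
/* Usage sketch (user-space view): tcp_send_fin() runs via tcp_shutdown()
 * and tcp_close() when the application ends its sending direction, e.g.
 * a half-close that lets the FIN be tacked onto the last queued skb:
 *
 *	send(fd, buf, len, 0);
 *	shutdown(fd, SHUT_WR);			// FIN queued here
 *	while (read(fd, buf, sizeof(buf)) > 0)	// drain until peer's FIN
 *		;
 *	close(fd);
 */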
30991da177e4SLinus Torvalds 
31001da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
31011da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
31021da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
310365bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
31041da177e4SLinus Torvalds  */
3105dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
31061da177e4SLinus Torvalds {
31071da177e4SLinus Torvalds 	struct sk_buff *skb;
31081da177e4SLinus Torvalds 
31097cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
31107cc2b043SGao Feng 
31111da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
31121da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
31131da177e4SLinus Torvalds 	if (!skb) {
31144e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
31151da177e4SLinus Torvalds 		return;
31161da177e4SLinus Torvalds 	}
31171da177e4SLinus Torvalds 
31181da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31191da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3120e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3121a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
31229a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
31231da177e4SLinus Torvalds 	/* Send it off. */
3124dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
31254e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3126c24b14c4SSong Liu 
3127c24b14c4SSong Liu 	/* The skb argument of trace_tcp_send_reset() carries the skb that
3128c24b14c4SSong Liu 	 * caused the RST; the skb here is a different one, so pass NULL.
3129c24b14c4SSong Liu 	 */
3130c24b14c4SSong Liu 	trace_tcp_send_reset(sk, NULL);
31311da177e4SLinus Torvalds }
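
/* Usage sketch (user-space view): besides the unread-data case described
 * above, an abortive close also lands here. A minimal sketch, assuming a
 * connected socket fd:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);				// emits RST, not FIN
 */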
31321da177e4SLinus Torvalds 
313367edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
313467edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
31351da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
31361da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
31371da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
31381da177e4SLinus Torvalds  */
31391da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
31401da177e4SLinus Torvalds {
31411da177e4SLinus Torvalds 	struct sk_buff *skb;
31421da177e4SLinus Torvalds 
314375c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
314451456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
314575c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31461da177e4SLinus Torvalds 		return -EFAULT;
31471da177e4SLinus Torvalds 	}
31484de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31491da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3150e2080072SEric Dumazet 			struct sk_buff *nskb;
3151e2080072SEric Dumazet 
3152e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3153e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3154e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
315551456b29SIan Morris 			if (!nskb)
31561da177e4SLinus Torvalds 				return -ENOMEM;
3157e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
315875c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3159f4a775d1SEric Dumazet 			__skb_header_release(nskb);
316075c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
31613ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
31623ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
31631da177e4SLinus Torvalds 			skb = nskb;
31641da177e4SLinus Torvalds 		}
31651da177e4SLinus Torvalds 
31664de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3167735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
31681da177e4SLinus Torvalds 	}
3169dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
31701da177e4SLinus Torvalds }
31711da177e4SLinus Torvalds 
31724aea39c1SEric Dumazet /**
31734aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
31744aea39c1SEric Dumazet  * @sk: listener socket
31754aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
31764aea39c1SEric Dumazet  * @req: request_sock pointer
31774aea39c1SEric Dumazet  *
31784aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
31794aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
31804aea39c1SEric Dumazet  */
31815d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3182e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3183ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3184b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
31851da177e4SLinus Torvalds {
31862e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
31875d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
318880f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
31895d062de7SEric Dumazet 	struct tcp_out_options opts;
31905d062de7SEric Dumazet 	struct sk_buff *skb;
3191bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
31925d062de7SEric Dumazet 	struct tcphdr *th;
3193f5fff5dcSTom Quetchenbach 	int mss;
31941da177e4SLinus Torvalds 
3195ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
31964aea39c1SEric Dumazet 	if (unlikely(!skb)) {
31974aea39c1SEric Dumazet 		dst_release(dst);
31981da177e4SLinus Torvalds 		return NULL;
31994aea39c1SEric Dumazet 	}
32001da177e4SLinus Torvalds 	/* Reserve space for headers. */
32011da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32021da177e4SLinus Torvalds 
3203b3d05147SEric Dumazet 	switch (synack_type) {
3204b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32059e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3206b3d05147SEric Dumazet 		break;
3207b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3208b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3209b3d05147SEric Dumazet 		 * to avoid false sharing.
3210b3d05147SEric Dumazet 		 */
3211b3d05147SEric Dumazet 		break;
3212b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3213ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3214ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3215ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3216ca6fb065SEric Dumazet 		 */
3217ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3218b3d05147SEric Dumazet 		break;
3219ca6fb065SEric Dumazet 	}
32204aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32211da177e4SLinus Torvalds 
32223541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3223f5fff5dcSTom Quetchenbach 
322433ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
32258b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
32268b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3227d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
32288b5f12d0SFlorian Westphal 	else
32298b5f12d0SFlorian Westphal #endif
3230d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = tcp_clock_ns();
323180f03e27SEric Dumazet 
323280f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
323380f03e27SEric Dumazet 	rcu_read_lock();
3234fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
323580f03e27SEric Dumazet #endif
323658d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
323760e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
323860e2a778SUrsula Braun 					     foc) + sizeof(*th);
323933ad798cSAdam Langley 
3240aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3241aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
32421da177e4SLinus Torvalds 
3243ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32441da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
32451da177e4SLinus Torvalds 	th->syn = 1;
32461da177e4SLinus Torvalds 	th->ack = 1;
32476ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3248b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3249634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3250e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
32513b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
32523b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
32538336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
32548336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32551da177e4SLinus Torvalds 
32561da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3257ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
32585d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
32591da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
326090bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3261cfb6eeb4SYOSHIFUJI Hideaki 
3262cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3263cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
326480f03e27SEric Dumazet 	if (md5)
3265bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
326639f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
326780f03e27SEric Dumazet 	rcu_read_unlock();
3268cfb6eeb4SYOSHIFUJI Hideaki #endif
3269cfb6eeb4SYOSHIFUJI Hideaki 
3270b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any), clean our debris */
32712456e855SThomas Gleixner 	skb->tstamp = 0;
32721da177e4SLinus Torvalds 	return skb;
32731da177e4SLinus Torvalds }
32744bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
32751da177e4SLinus Torvalds 
327681164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
327781164413SDaniel Borkmann {
327881164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
327981164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
328081164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
328181164413SDaniel Borkmann 
328281164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
328381164413SDaniel Borkmann 		return;
328481164413SDaniel Borkmann 
328581164413SDaniel Borkmann 	rcu_read_lock();
328681164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
328781164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
328881164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
328981164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
329081164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
329181164413SDaniel Borkmann 	}
329281164413SDaniel Borkmann 	rcu_read_unlock();
329381164413SDaniel Borkmann }
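
/* Usage sketch: RTAX_CC_ALGO is normally populated from user space as a
 * per-route metric, e.g. with iproute2 (assuming the chosen module, here
 * bbr, is available):
 *
 *	ip route replace 203.0.113.0/24 via 192.0.2.1 congctl bbr
 *
 * Sockets routed through that entry then switch to the route's algorithm
 * here instead of keeping the system default.
 */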
329481164413SDaniel Borkmann 
329567edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3296f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
32971da177e4SLinus Torvalds {
3298cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
32991da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33001da177e4SLinus Torvalds 	__u8 rcv_wscale;
330113d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33021da177e4SLinus Torvalds 
33031da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33041da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
33051da177e4SLinus Torvalds 	 */
33065d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33075d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33085d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33091da177e4SLinus Torvalds 
3310cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
331100db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3312cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3313cfb6eeb4SYOSHIFUJI Hideaki #endif
3314cfb6eeb4SYOSHIFUJI Hideaki 
33151da177e4SLinus Torvalds 	/* If the user set TCP_MAXSEG, record it for clamping */
33161da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33171da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33181da177e4SLinus Torvalds 	tp->max_window = 0;
33195d424d5aSJohn Heffner 	tcp_mtup_init(sk);
33201da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
33211da177e4SLinus Torvalds 
332281164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
332381164413SDaniel Borkmann 
33241da177e4SLinus Torvalds 	if (!tp->window_clamp)
33251da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
33263541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3327f5fff5dcSTom Quetchenbach 
33281da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
33291da177e4SLinus Torvalds 
3330e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3331e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3332e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3333e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3334e88c64f0SHagen Paul Pfeifer 
333513d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
333613d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
333713d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
333813d3b1ebSLawrence Brakmo 
3339ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
33401da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33411da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33421da177e4SLinus Torvalds 				  &tp->window_clamp,
33439bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
334431d12926Slaurent chavey 				  &rcv_wscale,
334513d3b1ebSLawrence Brakmo 				  rcv_wnd);
33461da177e4SLinus Torvalds 
33471da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
33481da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
33491da177e4SLinus Torvalds 
33501da177e4SLinus Torvalds 	sk->sk_err = 0;
33511da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
33521da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3353ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
33547f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
33551da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
33561da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
335733f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3358370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3359ee995283SPavel Emelyanov 
3360ee995283SPavel Emelyanov 	if (likely(!tp->repair))
33611da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3362c7781a6eSAndrew Vagin 	else
336370eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3364ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3365ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
33661da177e4SLinus Torvalds 
33678550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3368463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
33691da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
33701da177e4SLinus Torvalds }
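
/* Usage sketch: the SOCK_RCVBUF_LOCK branch above takes effect when the
 * application pins the receive buffer before connecting, which caps the
 * clamp used for initial window selection:
 *
 *	int rcvbuf = 65536;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */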
33711da177e4SLinus Torvalds 
3372783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3373783237e8SYuchung Cheng {
3374783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3375783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3376783237e8SYuchung Cheng 
3377783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3378f4a775d1SEric Dumazet 	__skb_header_release(skb);
3379783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3380783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3381783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3382783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3383783237e8SYuchung Cheng }
3384783237e8SYuchung Cheng 
3385783237e8SYuchung Cheng /* Build and send a SYN with data and a (cached) Fast Open cookie. However,
3386783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3387783237e8SYuchung Cheng  * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
3388783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3389783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3390783237e8SYuchung Cheng  * a regular SYN with a Fast Open cookie request option.
3391783237e8SYuchung Cheng  */
3392783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3393783237e8SYuchung Cheng {
3394783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3395783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3396065263f4SWei Wang 	int space, err = 0;
3397355a901eSEric Dumazet 	struct sk_buff *syn_data;
3398783237e8SYuchung Cheng 
339967da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3400065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3401783237e8SYuchung Cheng 		goto fallback;
3402783237e8SYuchung Cheng 
3403783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3404783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3405783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3406783237e8SYuchung Cheng 	 */
34073541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34083541f9e8SEric Dumazet 
34091b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3410783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3411783237e8SYuchung Cheng 
3412f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3413f5ddcbbbSEric Dumazet 
3414f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3415f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3416f5ddcbbbSEric Dumazet 
3417eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3418355a901eSEric Dumazet 	if (!syn_data)
3419783237e8SYuchung Cheng 		goto fallback;
3420355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3421355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
342207e100f9SEric Dumazet 	if (space) {
342307e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
342457be5bdaSAl Viro 					    &fo->data->msg_iter);
342557be5bdaSAl Viro 		if (unlikely(!copied)) {
3426ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3427355a901eSEric Dumazet 			kfree_skb(syn_data);
3428783237e8SYuchung Cheng 			goto fallback;
3429783237e8SYuchung Cheng 		}
343057be5bdaSAl Viro 		if (copied != space) {
343157be5bdaSAl Viro 			skb_trim(syn_data, copied);
343257be5bdaSAl Viro 			space = copied;
343357be5bdaSAl Viro 		}
343407e100f9SEric Dumazet 	}
3435355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3436355a901eSEric Dumazet 	if (space == fo->size)
3437355a901eSEric Dumazet 		fo->data = NULL;
3438355a901eSEric Dumazet 	fo->copied = space;
3439783237e8SYuchung Cheng 
3440355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34410f87230dSFrancis Yan 	if (syn_data->len)
34420f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3443355a901eSEric Dumazet 
3444355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3445355a901eSEric Dumazet 
3446d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3447355a901eSEric Dumazet 
3448355a901eSEric Dumazet 	/* Now that the full SYN+DATA was cloned and sent (or not),
3449355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3450355a901eSEric Dumazet 	 * that we keep in the write queue in case of a retransmit, as we
3451355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3452431a9124SEric Dumazet 	 */
3453355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3454355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3455355a901eSEric Dumazet 	if (!err) {
345667da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
345775c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3458f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3459783237e8SYuchung Cheng 		goto done;
3460783237e8SYuchung Cheng 	}
3461783237e8SYuchung Cheng 
346275c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
346375c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3464b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3465b5b7db8dSEric Dumazet 
3466783237e8SYuchung Cheng fallback:
3467783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3468783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3469783237e8SYuchung Cheng 		fo->cookie.len = 0;
3470783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3471783237e8SYuchung Cheng 	if (err)
3472783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3473783237e8SYuchung Cheng done:
3474783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3475783237e8SYuchung Cheng 	return err;
3476783237e8SYuchung Cheng }
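
/* Usage sketch (user-space view): this path is reached when the client
 * requests Fast Open, classically via sendto(MSG_FASTOPEN) on a fresh,
 * unconnected socket (client support enabled in net.ipv4.tcp_fastopen):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	sendto(fd, req, req_len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&addr, sizeof(addr));
 *	// first attempt sends a cookie-request SYN; once a cookie is
 *	// cached, later attempts carry data in the SYN itself
 */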
3477783237e8SYuchung Cheng 
347867edfef7SAndi Kleen /* Build a SYN and send it off. */
34791da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
34801da177e4SLinus Torvalds {
34811da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34821da177e4SLinus Torvalds 	struct sk_buff *buff;
3483ee586811SEric Paris 	int err;
34841da177e4SLinus Torvalds 
3485de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
34868ba60924SEric Dumazet 
34878ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
34888ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
34898ba60924SEric Dumazet 
34901da177e4SLinus Torvalds 	tcp_connect_init(sk);
34911da177e4SLinus Torvalds 
34922b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
34932b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
34942b916477SAndrey Vagin 		return 0;
34952b916477SAndrey Vagin 	}
34962b916477SAndrey Vagin 
3497eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3498355a901eSEric Dumazet 	if (unlikely(!buff))
34991da177e4SLinus Torvalds 		return -ENOBUFS;
35001da177e4SLinus Torvalds 
3501a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35029a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35039a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3504783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3505735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
350675c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35071da177e4SLinus Torvalds 
3508783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3509783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3510783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3511ee586811SEric Paris 	if (err == -ECONNREFUSED)
3512ee586811SEric Paris 		return err;
3513bd37a088SWei Yongjun 
3514bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3515bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3516bd37a088SWei Yongjun 	 */
3517bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3518bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3519b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3520b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3521b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3522b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3523b5b7db8dSEric Dumazet 	}
352481cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
35251da177e4SLinus Torvalds 
35261da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer arrives. */
35273f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
35283f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
35291da177e4SLinus Torvalds 	return 0;
35301da177e4SLinus Torvalds }
35314bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
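
/* Usage sketch: the tp->repair early return in tcp_connect() is what lets
 * checkpoint/restore tools (e.g. CRIU) re-create an established socket
 * without emitting a SYN. A sketch, assuming CAP_NET_ADMIN:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// no SYN on the wire; queues/sequence state restored separately
 */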
35321da177e4SLinus Torvalds 
35331da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
35341da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
35351da177e4SLinus Torvalds  * for details.
35361da177e4SLinus Torvalds  */
35371da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
35381da177e4SLinus Torvalds {
3539463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3540463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35411da177e4SLinus Torvalds 	unsigned long timeout;
35421da177e4SLinus Torvalds 
35431da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3544463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
35451da177e4SLinus Torvalds 		int max_ato = HZ / 2;
35461da177e4SLinus Torvalds 
3547056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3548056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
35491da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
35501da177e4SLinus Torvalds 
35511da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
35521da177e4SLinus Torvalds 
35531da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3554463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
35551da177e4SLinus Torvalds 		 * directly.
35561da177e4SLinus Torvalds 		 */
3557740b0f18SEric Dumazet 		if (tp->srtt_us) {
3558740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3559740b0f18SEric Dumazet 					TCP_DELACK_MIN);
35601da177e4SLinus Torvalds 
35611da177e4SLinus Torvalds 			if (rtt < max_ato)
35621da177e4SLinus Torvalds 				max_ato = rtt;
35631da177e4SLinus Torvalds 		}
35641da177e4SLinus Torvalds 
35651da177e4SLinus Torvalds 		ato = min(ato, max_ato);
35661da177e4SLinus Torvalds 	}
35671da177e4SLinus Torvalds 
35681da177e4SLinus Torvalds 	/* Stay within the limit we were given */
35691da177e4SLinus Torvalds 	timeout = jiffies + ato;
35701da177e4SLinus Torvalds 
35711da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3572463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
35731da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
35741da177e4SLinus Torvalds 		 * send ACK now.
35751da177e4SLinus Torvalds 		 */
3576463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3577463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
35781da177e4SLinus Torvalds 			tcp_send_ack(sk);
35791da177e4SLinus Torvalds 			return;
35801da177e4SLinus Torvalds 		}
35811da177e4SLinus Torvalds 
3582463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3583463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
35841da177e4SLinus Torvalds 	}
3585463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3586463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3587463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
35881da177e4SLinus Torvalds }
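
/* Usage sketch: applications that cannot tolerate the ato delay computed
 * above can request immediate ACKs with TCP_QUICKACK; the setting is not
 * permanent, the stack may fall back to delayed-ACK (pingpong) mode later:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */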
35891da177e4SLinus Torvalds 
35901da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
35912987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
35921da177e4SLinus Torvalds {
35931da177e4SLinus Torvalds 	struct sk_buff *buff;
35941da177e4SLinus Torvalds 
3595058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3596058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3597058dc334SIlpo Järvinen 		return;
3598058dc334SIlpo Järvinen 
35991da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36001da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36011da177e4SLinus Torvalds 	 * sock.
36021da177e4SLinus Torvalds 	 */
36037450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36047450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36057450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3606463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3607463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36083f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36093f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36101da177e4SLinus Torvalds 		return;
36111da177e4SLinus Torvalds 	}
36121da177e4SLinus Torvalds 
36131da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36141da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3615a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36161da177e4SLinus Torvalds 
361798781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
361898781965SEric Dumazet 	 * too much.
361998781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
362098781965SEric Dumazet 	 */
362198781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
362298781965SEric Dumazet 
36231da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
36242987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
36251da177e4SLinus Torvalds }
362627cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
36272987babbSYuchung Cheng 
36282987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
36292987babbSYuchung Cheng {
36302987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
36311da177e4SLinus Torvalds }
36321da177e4SLinus Torvalds 
36331da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
36341da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
36351da177e4SLinus Torvalds  *
36361da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
36371da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
36381da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
36391da177e4SLinus Torvalds  *
36401da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
36411da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
36421da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
36431da177e4SLinus Torvalds  */
3644e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
36451da177e4SLinus Torvalds {
36461da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36471da177e4SLinus Torvalds 	struct sk_buff *skb;
36481da177e4SLinus Torvalds 
36491da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
36507450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
36517450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
365251456b29SIan Morris 	if (!skb)
36531da177e4SLinus Torvalds 		return -1;
36541da177e4SLinus Torvalds 
36551da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
36561da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36571da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
36581da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
36591da177e4SLinus Torvalds 	 * send it.
36601da177e4SLinus Torvalds 	 */
3661a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3662e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
36637450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
36641da177e4SLinus Torvalds }
36651da177e4SLinus Torvalds 
3666385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3667ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3668ee995283SPavel Emelyanov {
3669ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3670ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
36719a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3672e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3673ee995283SPavel Emelyanov 	}
3674ee995283SPavel Emelyanov }
3675ee995283SPavel Emelyanov 
367667edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3677e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
36781da177e4SLinus Torvalds {
36791da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36801da177e4SLinus Torvalds 	struct sk_buff *skb;
36811da177e4SLinus Torvalds 
3682058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3683058dc334SIlpo Järvinen 		return -1;
3684058dc334SIlpo Järvinen 
368500db4124SIan Morris 	skb = tcp_send_head(sk);
368600db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
36871da177e4SLinus Torvalds 		int err;
36880c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
368990840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
36901da177e4SLinus Torvalds 
36911da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
36921da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
36931da177e4SLinus Torvalds 
36941da177e4SLinus Torvalds 		/* We are probing the opening of a window
36951da177e4SLinus Torvalds 		 * but the window size is != 0;
36961da177e4SLinus Torvalds 		 * this must have been a result of sender-side SWS avoidance.
36971da177e4SLinus Torvalds 		 */
36981da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
36991da177e4SLinus Torvalds 		    skb->len > mss) {
37001da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37014de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
370275c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
370375c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37041da177e4SLinus Torvalds 				return -1;
37051da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37065bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37071da177e4SLinus Torvalds 
37084de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3709dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
371066f5fe62SIlpo Järvinen 		if (!err)
371166f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37121da177e4SLinus Torvalds 		return err;
37131da177e4SLinus Torvalds 	} else {
371433f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3715e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3716e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37171da177e4SLinus Torvalds 	}
37181da177e4SLinus Torvalds }
37191da177e4SLinus Torvalds 
37201da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
37211da177e4SLinus Torvalds  * a partial packet, else a zero-window probe.
37221da177e4SLinus Torvalds  */
37231da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
37241da177e4SLinus Torvalds {
3725463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
37261da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3727c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3728fcdd1cf4SEric Dumazet 	unsigned long probe_max;
37291da177e4SLinus Torvalds 	int err;
37301da177e4SLinus Torvalds 
3731e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
37321da177e4SLinus Torvalds 
373375c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
37341da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
37356687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3736463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
37371da177e4SLinus Torvalds 		return;
37381da177e4SLinus Torvalds 	}
37391da177e4SLinus Torvalds 
37401da177e4SLinus Torvalds 	if (err <= 0) {
3741c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3742463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
37436687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3744fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
37451da177e4SLinus Torvalds 	} else {
37461da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
37476687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
37481da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
37491da177e4SLinus Torvalds 		 *
37501da177e4SLinus Torvalds 		 * But still use the accumulated backoff.
37511da177e4SLinus Torvalds 		 */
37526687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
37536687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3754fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
37551da177e4SLinus Torvalds 	}
37563f80e08fSEric Dumazet 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
375721c8fe99SEric Dumazet 			     tcp_probe0_when(sk, probe_max),
37583f80e08fSEric Dumazet 			     TCP_RTO_MAX,
37593f80e08fSEric Dumazet 			     NULL);
37601da177e4SLinus Torvalds }
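
/* Worked example: tcp_probe0_when() scales the base timeout exponentially,
 * roughly min(max(icsk_rto, TCP_RTO_MIN) << icsk_backoff, probe_max).
 * With icsk_rto == 200 ms, successive unanswered probes fire after about
 * 200 ms, 400 ms, 800 ms, ... capped at TCP_RTO_MAX, while the
 * local-congestion branch keeps probe_max at TCP_RESOURCE_PROBE_INTERVAL
 * without growing the backoff.
 */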
37615db92c99SOctavian Purdila 
3762ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
37635db92c99SOctavian Purdila {
37645db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
37655db92c99SOctavian Purdila 	struct flowi fl;
37665db92c99SOctavian Purdila 	int res;
37675db92c99SOctavian Purdila 
376858d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3769b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
37705db92c99SOctavian Purdila 	if (!res) {
377190bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
377202a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
37737e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
37747e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3775cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
37765db92c99SOctavian Purdila 	}
37775db92c99SOctavian Purdila 	return res;
37785db92c99SOctavian Purdila }
37795db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3780